From 76194c7ee78fc38f1a3efcaf93528102be4ba1c3 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 29 Oct 2025 09:00:50 +0100
Subject: [PATCH 01/60] Upgrade `rand` to v0.9.2

We bump our rand dependency to the latest stable version.
---
 Cargo.toml                     |  2 +-
 src/event.rs                   |  4 ++--
 src/io/test_utils.rs           |  6 +++---
 src/io/utils.rs                |  6 +++---
 src/io/vss_store.rs            | 10 +++++-----
 src/lib.rs                     |  2 +-
 src/liquidity.rs               |  2 +-
 src/payment/bolt12.rs          |  6 +++---
 tests/common/mod.rs            | 14 +++++++-------
 tests/integration_tests_cln.rs |  6 +++---
 10 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index a70e74dd4..794f7b859 100755
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -90,7 +90,7 @@ bip39 = "2.0.0"
 bip21 = { version = "0.5", features = ["std"], default-features = false }
 base64 = { version = "0.22.1", default-features = false, features = ["std"] }
-rand = "0.8.5"
+rand = { version = "0.9.2", default-features = false, features = ["std", "thread_rng", "os_rng"] }
 chrono = { version = "0.4", default-features = false, features = ["clock"] }
 tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] }
 esplora-client = { version = "0.12", default-features = false, features = ["tokio", "async-https-rustls"] }
diff --git a/src/event.rs b/src/event.rs
index eedfb1c14..13913466c 100644
--- a/src/event.rs
+++ b/src/event.rs
@@ -30,7 +30,7 @@ use lightning::util::persist::KVStoreSync;
 use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use lightning_liquidity::lsps2::utils::compute_opening_fee;
 use lightning_types::payment::{PaymentHash, PaymentPreimage};
-use rand::{thread_rng, Rng};
+use rand::{rng, Rng};
 
 use crate::config::{may_announce_channel, Config};
 use crate::connection::ConnectionManager;
@@ -1137,7 +1137,7 @@ where
 			}
 		}
 
-		let user_channel_id: u128 = thread_rng().gen::<u128>();
+		let user_channel_id: u128 = rng().random::<u128>();
 		let allow_0conf = self.config.trusted_peers_0conf.contains(&counterparty_node_id);
 		let mut channel_override_config = None;
 		if let Some((lsp_node_id, _)) = self
diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs
index 59ad09458..84517a695 100644
--- a/src/io/test_utils.rs
+++ b/src/io/test_utils.rs
@@ -18,8 +18,8 @@ use lightning::util::persist::{
 };
 use lightning::util::test_utils;
 use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event};
-use rand::distributions::Alphanumeric;
-use rand::{thread_rng, Rng};
+use rand::distr::Alphanumeric;
+use rand::{rng, Rng};
 
 type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister<
 	&'a K,
@@ -34,7 +34,7 @@ const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
 
 pub(crate) fn random_storage_path() -> PathBuf {
 	let mut temp_path = std::env::temp_dir();
-	let mut rng = thread_rng();
+	let mut rng = rng();
 	let rand_dir: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect();
 	temp_path.push(rand_dir);
 	temp_path
diff --git a/src/io/utils.rs b/src/io/utils.rs
index 98993ff11..c723ca26b 100644
--- a/src/io/utils.rs
+++ b/src/io/utils.rs
@@ -35,7 +35,7 @@ use lightning::util::persist::{
 };
 use lightning::util::ser::{Readable, ReadableArgs, Writeable};
 use lightning_types::string::PrintableString;
-use rand::{thread_rng, RngCore};
+use rand::{rng, RngCore};
 
 use super::*;
 use crate::chain::ChainSource;
@@ -63,7 +63,7 @@ pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_sc
 pub fn generate_entropy_mnemonic() -> Mnemonic {
 	// bip39::Mnemonic supports 256 bit entropy max
 	let mut entropy = [0; 32];
-	thread_rng().fill_bytes(&mut entropy);
+	rng().fill_bytes(&mut entropy);
 	Mnemonic::from_entropy(&entropy).unwrap()
 }
 
@@ -96,7 +96,7 @@ where
 		Ok(key)
 	} else {
 		let mut key = [0; WALLET_KEYS_SEED_LEN];
-		thread_rng().fill_bytes(&mut key);
+		rng().fill_bytes(&mut key);
 
 		if let Some(parent_dir) = Path::new(&keys_seed_path).parent() {
 			fs::create_dir_all(parent_dir).map_err(|e| {
diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs
index ed8e13890..d97c47f81 100644
--- a/src/io/vss_store.rs
+++ b/src/io/vss_store.rs
@@ -592,7 +592,7 @@ pub(crate) struct RandEntropySource;
 
 impl EntropySource for RandEntropySource {
 	fn fill_bytes(&self, buffer: &mut [u8]) {
-		rand::thread_rng().fill_bytes(buffer);
+		rand::rng().fill_bytes(buffer);
 	}
 }
 
@@ -604,8 +604,8 @@ impl RefUnwindSafe for VssStore {}
 mod tests {
 	use std::collections::HashMap;
 
-	use rand::distributions::Alphanumeric;
-	use rand::{thread_rng, Rng, RngCore};
+	use rand::distr::Alphanumeric;
+	use rand::{rng, Rng, RngCore};
 	use vss_client::headers::FixedHeaders;
 
 	use super::*;
@@ -615,7 +615,7 @@ mod tests {
 	#[test]
 	fn vss_read_write_remove_list_persist() {
 		let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap();
-		let mut rng = thread_rng();
+		let mut rng = rng();
 		let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect();
 		let mut vss_seed = [0u8; 32];
 		rng.fill_bytes(&mut vss_seed);
@@ -631,7 +631,7 @@ mod tests {
 	#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
 	async fn vss_read_write_remove_list_persist_in_runtime_context() {
 		let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap();
-		let mut rng = thread_rng();
+		let mut rng = rng();
 		let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect();
 		let mut vss_seed = [0u8; 32];
 		rng.fill_bytes(&mut vss_seed);
diff --git a/src/lib.rs b/src/lib.rs
index 6a26c6c5b..7d8aff4b3 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1117,7 +1117,7 @@ impl Node {
 		}
 
 		let push_msat = push_to_counterparty_msat.unwrap_or(0);
-		let user_channel_id: u128 = rand::thread_rng().gen::<u128>();
+		let user_channel_id: u128 = rand::rng().random::<u128>();
 
 		match self.channel_manager.create_channel(
 			peer_info.node_id,
diff --git a/src/liquidity.rs b/src/liquidity.rs
index 81d48e530..401222c47 100644
--- a/src/liquidity.rs
+++ b/src/liquidity.rs
@@ -562,7 +562,7 @@ where
 			return;
 		};
 
-		let user_channel_id: u128 = rand::thread_rng().gen::<u128>();
+		let user_channel_id: u128 = rand::rng().random::<u128>();
 		let intercept_scid = self.channel_manager.get_intercept_scid();
 
 		if let Some(payment_size_msat) = payment_size_msat {
diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs
index 337eedf96..2b299739d 100644
--- a/src/payment/bolt12.rs
+++ b/src/payment/bolt12.rs
@@ -84,7 +84,7 @@ impl Bolt12Payment {
 		let offer = maybe_deref(offer);
 
 		let mut random_bytes = [0u8; 32];
-		rand::thread_rng().fill_bytes(&mut random_bytes);
+		rand::rng().fill_bytes(&mut random_bytes);
 		let payment_id = PaymentId(random_bytes);
 		let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT);
 		let route_params_config = RouteParametersConfig::default();
@@ -191,7 +191,7 @@ impl Bolt12Payment {
 		let offer = maybe_deref(offer);
 
 		let mut random_bytes = [0u8; 32];
-		rand::thread_rng().fill_bytes(&mut random_bytes);
+		rand::rng().fill_bytes(&mut random_bytes);
 		let payment_id = PaymentId(random_bytes);
 		let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT);
 		let route_params_config = RouteParametersConfig::default();
@@ -408,7 +408,7 @@ impl Bolt12Payment {
 		payer_note: Option<String>,
 	) -> Result<Refund, Error> {
 		let mut random_bytes = [0u8; 32];
-		rand::thread_rng().fill_bytes(&mut random_bytes);
+		rand::rng().fill_bytes(&mut random_bytes);
 		let payment_id = PaymentId(random_bytes);
 
 		let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64))
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 05326b03d..c96ab8b36 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -43,8 +43,8 @@ use lightning_invoice::{Bolt11InvoiceDescription, Description};
 use lightning_persister::fs_store::FilesystemStore;
 use lightning_types::payment::{PaymentHash, PaymentPreimage};
 use logging::TestLogWriter;
-use rand::distributions::Alphanumeric;
-use rand::{thread_rng, Rng};
+use rand::distr::Alphanumeric;
+use rand::{rng, Rng};
 use serde_json::{json, Value};
 
 macro_rules! expect_event {
@@ -191,15 +191,15 @@ pub(crate) fn setup_bitcoind_and_electrsd() -> (BitcoinD, ElectrsD) {
 
 pub(crate) fn random_storage_path() -> PathBuf {
 	let mut temp_path = std::env::temp_dir();
-	let mut rng = thread_rng();
+	let mut rng = rng();
 	let rand_dir: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect();
 	temp_path.push(rand_dir);
 	temp_path
 }
 
 pub(crate) fn random_port() -> u16 {
-	let mut rng = thread_rng();
-	rng.gen_range(5000..32768)
+	let mut rng = rng();
+	rng.random_range(5000..32768)
 }
 
 pub(crate) fn random_listening_addresses() -> Vec<SocketAddress> {
@@ -216,8 +216,8 @@
 }
 
 pub(crate) fn random_node_alias() -> Option<NodeAlias> {
-	let mut rng = thread_rng();
-	let rand_val = rng.gen_range(0..1000);
+	let mut rng = rng();
+	let rand_val = rng.random_range(0..1000);
 	let alias = format!("ldk-node-{}", rand_val);
 	let mut bytes = [0u8; 32];
 	bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs
index 38e345f15..e8eb72a1d 100644
--- a/tests/integration_tests_cln.rs
+++ b/tests/integration_tests_cln.rs
@@ -22,8 +22,8 @@ use ldk_node::bitcoin::Amount;
 use ldk_node::lightning::ln::msgs::SocketAddress;
 use ldk_node::{Builder, Event};
 use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description};
-use rand::distributions::Alphanumeric;
-use rand::{thread_rng, Rng};
+use rand::distr::Alphanumeric;
+use rand::{rng, Rng};
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
 async fn test_cln() {
@@ -99,7 +99,7 @@ async fn test_cln() {
 	let user_channel_id = common::expect_channel_ready_event!(node, cln_node_id);
 
 	// Send a payment to CLN
-	let mut rng = thread_rng();
+	let mut rng = rng();
 	let rand_label: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect();
 	let cln_invoice =
 		cln_client.invoice(Some(10_000_000), &rand_label, &rand_label, None, None, None).unwrap();

From e1739971794d668531916cdd58e798b7e9d10e67 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 29 Oct 2025 09:02:47 +0100
Subject: [PATCH 02/60] Use `os_rng` for seed/mnemonic generation

The previously-used `thread_rng` should be fine, but `os_rng` is
guaranteed to block until there is sufficient entropy available (e.g.,
after startup), which might slightly improve security here.
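For illustration, the fallible API this moves to looks roughly as follows (a
minimal sketch against rand 0.9; unlike the thread-local `rng()`, whose
`RngCore` methods are infallible, `OsRng` only implements the fallible
`TryRngCore` trait):

    use rand::rngs::OsRng;
    use rand::TryRngCore;

    fn generate_seed() -> [u8; 32] {
        let mut seed = [0u8; 32];
        // `OsRng` draws directly from the operating system's entropy source;
        // the call may block until the source is seeded and returns a
        // `Result`, so the error needs explicit handling.
        OsRng.try_fill_bytes(&mut seed).expect("Failed to generate entropy");
        seed
    }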
---
 src/io/utils.rs | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/io/utils.rs b/src/io/utils.rs
index c723ca26b..d92c9486b 100644
--- a/src/io/utils.rs
+++ b/src/io/utils.rs
@@ -35,7 +35,8 @@ use lightning::util::persist::{
 };
 use lightning::util::ser::{Readable, ReadableArgs, Writeable};
 use lightning_types::string::PrintableString;
-use rand::{rng, RngCore};
+use rand::rngs::OsRng;
+use rand::TryRngCore;
 
 use super::*;
 use crate::chain::ChainSource;
@@ -63,7 +64,7 @@ pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_sc
 pub fn generate_entropy_mnemonic() -> Mnemonic {
 	// bip39::Mnemonic supports 256 bit entropy max
 	let mut entropy = [0; 32];
-	rng().fill_bytes(&mut entropy);
+	OsRng.try_fill_bytes(&mut entropy).expect("Failed to generate entropy");
 	Mnemonic::from_entropy(&entropy).unwrap()
 }
 
@@ -96,7 +97,10 @@ where
 		Ok(key)
 	} else {
 		let mut key = [0; WALLET_KEYS_SEED_LEN];
-		rng().fill_bytes(&mut key);
+		OsRng.try_fill_bytes(&mut key).map_err(|e| {
+			log_error!(logger, "Failed to generate entropy: {}", e);
+			std::io::Error::new(std::io::ErrorKind::Other, "Failed to generate seed bytes")
+		})?;
 
 		if let Some(parent_dir) = Path::new(&keys_seed_path).parent() {
 			fs::create_dir_all(parent_dir).map_err(|e| {

From 7063f2c0b09776c4f89b7c7de06c99e7b98e09fe Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 30 Oct 2025 09:45:32 +0100
Subject: [PATCH 03/60] No turbofishing for `user_channel_id`

---
 src/event.rs     | 2 +-
 src/lib.rs       | 2 +-
 src/liquidity.rs | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/event.rs b/src/event.rs
index 13913466c..1946350a3 100644
--- a/src/event.rs
+++ b/src/event.rs
@@ -1137,7 +1137,7 @@ where
 			}
 		}
 
-		let user_channel_id: u128 = rng().random::<u128>();
+		let user_channel_id: u128 = rng().random();
 		let allow_0conf = self.config.trusted_peers_0conf.contains(&counterparty_node_id);
 		let mut channel_override_config = None;
 		if let Some((lsp_node_id, _)) = self
diff --git a/src/lib.rs b/src/lib.rs
index 7d8aff4b3..482866a14 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1117,7 +1117,7 @@ impl Node {
 		}
 
 		let push_msat = push_to_counterparty_msat.unwrap_or(0);
-		let user_channel_id: u128 = rand::rng().random::<u128>();
+		let user_channel_id: u128 = rand::rng().random();
 
 		match self.channel_manager.create_channel(
 			peer_info.node_id,
diff --git a/src/liquidity.rs b/src/liquidity.rs
index 401222c47..57e2ad488 100644
--- a/src/liquidity.rs
+++ b/src/liquidity.rs
@@ -562,7 +562,7 @@ where
 			return;
 		};
 
-		let user_channel_id: u128 = rand::rng().random::<u128>();
+		let user_channel_id: u128 = rand::rng().random();
 		let intercept_scid = self.channel_manager.get_intercept_scid();
 
 		if let Some(payment_size_msat) = payment_size_msat {

From 2c99aa04e77bcb394ad4a8af20d70825f7aff306 Mon Sep 17 00:00:00 2001
From: Chuks Agbakuru
Date: Fri, 31 Oct 2025 13:27:56 +0100
Subject: [PATCH 04/60] Implement Display for UserChannelId

---
 src/types.rs | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/types.rs b/src/types.rs
index 800d9462d..71512b2cd 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -5,6 +5,7 @@
 // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
 // accordance with one or both of these licenses.
+use std::fmt; use std::sync::{Arc, Mutex}; use bitcoin::secp256k1::PublicKey; @@ -192,6 +193,12 @@ impl Readable for UserChannelId { } } +impl fmt::Display for UserChannelId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "UserChannelId({})", self.0) + } +} + /// Details of a channel as returned by [`Node::list_channels`]. /// /// [`Node::list_channels`]: crate::Node::list_channels From 222321d61242a635b5d77b423b25dd3c6ae9b196 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 15 Oct 2025 09:58:58 +0200 Subject: [PATCH 05/60] Configurable test store To enable more realistic testing with sqlite as a backend. --- tests/common/mod.rs | 45 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 058a8df19..0d6ba30ae 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -29,10 +29,11 @@ use electrsd::corepc_node::{Client as BitcoindClient, Node as BitcoinD}; use electrsd::{corepc_node, ElectrsD}; use electrum_client::ElectrumApi; use ldk_node::config::{AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig}; -use ldk_node::io::sqlite_store::SqliteStore; +use ldk_node::io::sqlite_store::{SqliteStore, KV_TABLE_NAME, SQLITE_DB_FILE_NAME}; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ - Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError, PendingSweepBalance, + Builder, CustomTlvRecord, DynStore, Event, LightningBalance, Node, NodeError, + PendingSweepBalance, }; use lightning::io; use lightning::ln::msgs::SocketAddress; @@ -262,10 +263,23 @@ pub(crate) enum TestChainSource<'a> { BitcoindRestSync(&'a BitcoinD), } +#[derive(Clone, Copy)] +pub(crate) enum TestStoreType { + TestSyncStore, + Sqlite, +} + +impl Default for TestStoreType { + fn default() -> Self { + TestStoreType::TestSyncStore + } +} + #[derive(Clone, Default)] pub(crate) struct TestConfig { pub node_config: Config, pub log_writer: TestLogWriter, + pub store_type: TestStoreType, } macro_rules! 
setup_builder { @@ -282,13 +296,28 @@ pub(crate) use setup_builder; pub(crate) fn setup_two_nodes( chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, anchors_trusted_no_reserve: bool, +) -> (TestNode, TestNode) { + setup_two_nodes_with_store( + chain_source, + allow_0conf, + anchor_channels, + anchors_trusted_no_reserve, + TestStoreType::TestSyncStore, + ) +} + +pub(crate) fn setup_two_nodes_with_store( + chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, + anchors_trusted_no_reserve: bool, store_type: TestStoreType, ) -> (TestNode, TestNode) { println!("== Node A =="); - let config_a = random_config(anchor_channels); + let mut config_a = random_config(anchor_channels); + config_a.store_type = store_type; let node_a = setup_node(chain_source, config_a, None); println!("\n== Node B =="); let mut config_b = random_config(anchor_channels); + config_b.store_type = store_type; if allow_0conf { config_b.node_config.trusted_peers_0conf.push(node_a.node_id()); } @@ -381,8 +410,14 @@ pub(crate) fn setup_node_for_async_payments( builder.set_async_payments_role(async_payments_role).unwrap(); - let test_sync_store = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.into())); - let node = builder.build_with_store(test_sync_store).unwrap(); + let node = match config.store_type { + TestStoreType::TestSyncStore => { + let kv_store = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.into())); + builder.build_with_store(kv_store).unwrap() + }, + TestStoreType::Sqlite => builder.build().unwrap(), + }; + node.start().unwrap(); assert!(node.status().is_running); assert!(node.status().latest_fee_rate_cache_update_timestamp.is_some()); From 14c1cff0ec19ba6871c53cda7990395cdbc5edfc Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Oct 2025 13:57:41 +0200 Subject: [PATCH 06/60] Add async payment throughput benchmark Introduces a criterion-based benchmark that sends 1000 concurrent payments between two LDK nodes to measure total duration. Also adds a CI job to automatically run the benchmark. 
---
 .github/workflows/benchmarks.yml |  46 ++++++++
 Cargo.toml                       |   5 +
 benches/payments.rs              | 195 +++++++++++++++++++++++++++++++
 tests/common/mod.rs              |   5 +-
 4 files changed, 248 insertions(+), 3 deletions(-)
 create mode 100644 .github/workflows/benchmarks.yml
 create mode 100644 benches/payments.rs

diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml
new file mode 100644
index 000000000..ef049ad85
--- /dev/null
+++ b/.github/workflows/benchmarks.yml
@@ -0,0 +1,46 @@
+name: CI Checks - Benchmarks
+
+on: [push, pull_request]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  benchmark:
+    runs-on: ubuntu-latest
+    env:
+      TOOLCHAIN: stable
+    steps:
+      - name: Checkout source code
+        uses: actions/checkout@v3
+      - name: Install Rust toolchain
+        run: |
+          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable
+          rustup override set stable
+      - name: Enable caching for bitcoind
+        id: cache-bitcoind
+        uses: actions/cache@v4
+        with:
+          path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }}
+          key: bitcoind-${{ runner.os }}-${{ runner.arch }}
+      - name: Enable caching for electrs
+        id: cache-electrs
+        uses: actions/cache@v4
+        with:
+          path: bin/electrs-${{ runner.os }}-${{ runner.arch }}
+          key: electrs-${{ runner.os }}-${{ runner.arch }}
+      - name: Download bitcoind/electrs
+        if: "(steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')"
+        run: |
+          source ./scripts/download_bitcoind_electrs.sh
+          mkdir bin
+          mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }}
+          mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }}
+      - name: Set bitcoind/electrs environment variables
+        run: |
+          echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV"
+          echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV"
+      - name: Run benchmarks
+        run: |
+          cargo bench
diff --git a/Cargo.toml b/Cargo.toml
index 51b0329c4..701d9ddb3 100755
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -114,6 +114,7 @@ lightning = { version = "0.2.0-rc1", features = ["std", "_test_utils"] }
 #lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] }
 proptest = "1.0.0"
 regex = "1.5.6"
+criterion = { version = "0.7.0", features = ["async_tokio"] }
 
 [target.'cfg(not(no_download))'.dev-dependencies]
 electrsd = { version = "0.36.1", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] }
@@ -148,3 +149,7 @@ check-cfg = [
 	"cfg(cln_test)",
 	"cfg(lnd_test)",
 ]
+
+[[bench]]
+name = "payments"
+harness = false
diff --git a/benches/payments.rs b/benches/payments.rs
new file mode 100644
index 000000000..75b7f0513
--- /dev/null
+++ b/benches/payments.rs
@@ -0,0 +1,195 @@
+#[path = "../tests/common/mod.rs"]
+mod common;
+
+use std::time::Instant;
+use std::{sync::Arc, time::Duration};
+
+use bitcoin::hex::DisplayHex;
+use bitcoin::Amount;
+use common::{
+	expect_channel_ready_event, generate_blocks_and_wait, premine_and_distribute_funds,
+	setup_bitcoind_and_electrsd, setup_two_nodes_with_store, TestChainSource,
+};
+use criterion::{criterion_group, criterion_main, Criterion};
+use ldk_node::{Event, Node};
+use lightning_types::payment::{PaymentHash, PaymentPreimage};
+use rand::RngCore;
+use tokio::task::{self};
+
+use crate::common::open_channel_push_amt;
+
+fn spawn_payment(node_a: Arc<Node>, node_b: Arc<Node>, amount_msat: u64) {
+	let mut preimage_bytes = [0u8; 32];
+	rand::thread_rng().fill_bytes(&mut preimage_bytes);
+	let preimage = PaymentPreimage(preimage_bytes);
+	let payment_hash: PaymentHash = preimage.into();
+
+	// Spawn each payment as a separate async task
+	task::spawn(async move {
+		println!("{}: Starting payment", payment_hash.0.as_hex());
+
+		loop {
+			// Pre-check the HTLC slots to try to avoid the performance impact of a failed payment.
+			while node_a.list_channels()[0].next_outbound_htlc_limit_msat == 0 {
+				println!("{}: Waiting for HTLC slots to free up", payment_hash.0.as_hex());
+				tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+			}
+
+			let payment_id = node_a.spontaneous_payment().send_with_preimage(
+				amount_msat,
+				node_b.node_id(),
+				preimage,
+				None,
+			);
+
+			match payment_id {
+				Ok(payment_id) => {
+					println!(
+						"{}: Awaiting payment with id {}",
+						payment_hash.0.as_hex(),
+						payment_id
+					);
+					break;
+				},
+				Err(e) => {
+					println!("{}: Payment attempt failed: {:?}", payment_hash.0.as_hex(), e);
+
+					tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+				},
+			}
+		}
+	});
+}
+
+async fn send_payments(node_a: Arc<Node>, node_b: Arc<Node>) -> std::time::Duration {
+	let start = Instant::now();
+
+	let total_payments = 1000;
+	let amount_msat = 10_000_000;
+
+	let mut success_count = 0;
+	for _ in 0..total_payments {
+		spawn_payment(node_a.clone(), node_b.clone(), amount_msat);
+	}
+
+	while success_count < total_payments {
+		match node_a.next_event_async().await {
+			Event::PaymentSuccessful { payment_id, payment_hash, .. } => {
+				if let Some(id) = payment_id {
+					success_count += 1;
+					println!("{}: Payment with id {:?} completed", payment_hash.0.as_hex(), id);
+				} else {
+					println!("Payment completed (no payment_id)");
+				}
+			},
+			Event::PaymentFailed { payment_id, payment_hash, .. } => {
+				println!("{}: Payment {:?} failed", payment_hash.unwrap().0.as_hex(), payment_id);
+
+				// The payment failed, so we need to respawn it.
+				spawn_payment(node_a.clone(), node_b.clone(), amount_msat);
+			},
+			ref e => {
+				println!("Received non-payment event: {:?}", e);
+			},
+		}
+
+		node_a.event_handled().unwrap();
+	}
+
+	let duration = start.elapsed();
+	println!("Time elapsed: {:?}", duration);
+
+	// Send back the money for the next iteration.
+	let mut preimage_bytes = [0u8; 32];
+	rand::thread_rng().fill_bytes(&mut preimage_bytes);
+	node_b
+		.spontaneous_payment()
+		.send_with_preimage(
+			amount_msat * total_payments,
+			node_a.node_id(),
+			PaymentPreimage(preimage_bytes),
+			None,
+		)
+		.ok()
+		.unwrap();
+
+	duration
+}
+
+fn payment_benchmark(c: &mut Criterion) {
+	// Set up two nodes. Because this is slow, we reuse the same nodes for each sample.
+	let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+	let chain_source = TestChainSource::Esplora(&electrsd);
+
+	let (node_a, node_b) = setup_two_nodes_with_store(
+		&chain_source,
+		false,
+		true,
+		false,
+		common::TestStoreType::Sqlite,
+	);
+
+	let runtime =
+		tokio::runtime::Builder::new_multi_thread().worker_threads(4).enable_all().build().unwrap();
+
+	let node_a = Arc::new(node_a);
+	let node_b = Arc::new(node_b);
+
+	// Fund the nodes and setup a channel between them. The criterion function cannot be async, so we need to execute
+	// the setup using a runtime.
+	let node_a_cloned = Arc::clone(&node_a);
+	let node_b_cloned = Arc::clone(&node_b);
+	runtime.block_on(async move {
+		let address_a = node_a_cloned.onchain_payment().new_address().unwrap();
+		let premine_sat = 25_000_000;
+		premine_and_distribute_funds(
+			&bitcoind.client,
+			&electrsd.client,
+			vec![address_a],
+			Amount::from_sat(premine_sat),
+		)
+		.await;
+		node_a_cloned.sync_wallets().unwrap();
+		node_b_cloned.sync_wallets().unwrap();
+		open_channel_push_amt(
+			&node_a_cloned,
+			&node_b_cloned,
+			16_000_000,
+			Some(1_000_000_000),
+			false,
+			&electrsd,
+		)
+		.await;
+		generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await;
+		node_a_cloned.sync_wallets().unwrap();
+		node_b_cloned.sync_wallets().unwrap();
+		expect_channel_ready_event!(node_a_cloned, node_b_cloned.node_id());
+		expect_channel_ready_event!(node_b_cloned, node_a_cloned.node_id());
+	});
+
+	let mut group = c.benchmark_group("payments");
+	group.sample_size(10);
+
+	group.bench_function("payments", |b| {
+		// Use custom timing so that sending back the money at the end of each iteration isn't included in the
+		// measurement.
+		b.to_async(&runtime).iter_custom(|iter| {
+			let node_a = Arc::clone(&node_a);
+			let node_b = Arc::clone(&node_b);
+
+			async move {
+				let mut total = Duration::ZERO;
+				for _i in 0..iter {
+					let node_a = Arc::clone(&node_a);
+					let node_b = Arc::clone(&node_b);
+
+					total += send_payments(node_a, node_b).await;
+				}
+				total
+			}
+		});
+	});
+}
+
+criterion_group!(benches, payment_benchmark);
+criterion_main!(benches);
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 0d6ba30ae..dd680488c 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -29,11 +29,10 @@ use electrsd::corepc_node::{Client as BitcoindClient, Node as BitcoinD};
 use electrsd::{corepc_node, ElectrsD};
 use electrum_client::ElectrumApi;
 use ldk_node::config::{AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig};
-use ldk_node::io::sqlite_store::{SqliteStore, KV_TABLE_NAME, SQLITE_DB_FILE_NAME};
+use ldk_node::io::sqlite_store::SqliteStore;
 use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus};
 use ldk_node::{
-	Builder, CustomTlvRecord, DynStore, Event, LightningBalance, Node, NodeError,
-	PendingSweepBalance,
+	Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError, PendingSweepBalance,
 };
 use lightning::io;
 use lightning::ln::msgs::SocketAddress;

From 2c72e671312edb61fa5a80612207238bafe932e6 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 5 Nov 2025 11:22:00 +0100
Subject: [PATCH 07/60] Use `[patch]` instead of switching all the dependencies

We previously added a bunch of commented-out `rust-lightning`
dependencies in our `Cargo.toml` to be able to easily switch between
`rust-lightning` locations.

However, this is exactly what the `[patch]` section is for, which in
particular also allows patching a dependency for the whole tree, not
only this one project.

Therefore, we move the examples to a commented-out `patch` section.
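For example, activating a single override in the new section is enough to
redirect the crate for every consumer in the dependency tree (a sketch;
`[patch]` entries only name the replacement source, while features remain
governed by the regular dependency declarations):

    [patch.crates-io]
    lightning = { path = "../rust-lightning/lightning" }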
--- Cargo.toml | 76 ++++++++++++++++++++++++++---------------------------- 1 file changed, 37 insertions(+), 39 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 701d9ddb3..d91db014a 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,42 +41,6 @@ lightning-transaction-sync = { version = "0.2.0-rc1", features = ["esplora-async lightning-liquidity = { version = "0.2.0-rc1", features = ["std"] } lightning-macros = { version = "0.2.0-rc1" } -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } -#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } -#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["tokio"] } -#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } - -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std"] } -#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std"] } -#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["tokio"] } -#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } - -#lightning = { path = "../rust-lightning/lightning", features = ["std"] } -#lightning-types = { 
path = "../rust-lightning/lightning-types" } -#lightning-invoice = { path = "../rust-lightning/lightning-invoice", features = ["std"] } -#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } -#lightning-persister = { path = "../rust-lightning/lightning-persister", features = ["tokio"] } -#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } -#lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } -#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -#lightning-liquidity = { path = "../rust-lightning/lightning-liquidity", features = ["std"] } -#lightning-macros = { path = "../rust-lightning/lightning-macros" } - bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} bdk_electrum = { version = "0.23.0", default-features = false, features = ["use-rustls-ring"]} @@ -109,9 +73,6 @@ winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] lightning = { version = "0.2.0-rc1", features = ["std", "_test_utils"] } -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std", "_test_utils"] } -#lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } @@ -153,3 +114,40 @@ check-cfg = [ [[bench]] name = "payments" harness = false + +#[patch.crates-io] +#lightning = { path = "../rust-lightning/lightning" } +#lightning-types = { path = "../rust-lightning/lightning-types" } +#lightning-invoice = { path = "../rust-lightning/lightning-invoice" } +#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } +#lightning-persister = { path = "../rust-lightning/lightning-persister" } +#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } +#lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } +#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync" } +#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } +#lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } +#lightning-macros = { path = "../rust-lightning/lightning-macros" } + +#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } 
+#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" }
+#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" }
+#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" }
+#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" }
+
+#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }
+#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }
+#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }
+#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }
+#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }
+#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }
+#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }
+#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }
+#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }
+#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }
+#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" }

From fb41112266fe0de6860020849d9cd118d5e74e07 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 5 Nov 2025 12:16:09 +0100
Subject: [PATCH 08/60] Elevate permissions of weekly `rustfmt` workflow

.. which might be necessary for it to be able to run successfully.
--- .github/workflows/cron-weekly-rustfmt.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/cron-weekly-rustfmt.yml b/.github/workflows/cron-weekly-rustfmt.yml index 626953c8e..d6326f03b 100644 --- a/.github/workflows/cron-weekly-rustfmt.yml +++ b/.github/workflows/cron-weekly-rustfmt.yml @@ -1,4 +1,9 @@ name: Nightly rustfmt + +permissions: + contents: write + pull-requests: write + on: schedule: - cron: "0 0 * * 0" # runs weekly on Sunday at 00:00 From dbfbf83e327cde37b246fd73e06b1af32a7a3175 Mon Sep 17 00:00:00 2001 From: Fmt Bot Date: Wed, 5 Nov 2025 14:27:24 +0000 Subject: [PATCH 09/60] 2025-11-05 automated rustfmt nightly --- benches/payments.rs | 4 ++-- src/lib.rs | 4 ++-- src/scoring.rs | 30 +++++++++++++----------------- src/types.rs | 3 +-- 4 files changed, 18 insertions(+), 23 deletions(-) diff --git a/benches/payments.rs b/benches/payments.rs index 75b7f0513..86dee39d8 100644 --- a/benches/payments.rs +++ b/benches/payments.rs @@ -1,8 +1,8 @@ #[path = "../tests/common/mod.rs"] mod common; -use std::time::Instant; -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; +use std::time::{Duration, Instant}; use bitcoin::hex::DisplayHex; use bitcoin::Amount; diff --git a/src/lib.rs b/src/lib.rs index 6a26c6c5b..fb0fb9f66 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -107,7 +107,6 @@ use std::net::ToSocketAddrs; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -use crate::scoring::setup_background_pathfinding_scores_sync; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; use bitcoin::secp256k1::PublicKey; #[cfg(feature = "uniffi")] @@ -158,12 +157,13 @@ use types::{ pub use types::{ ChannelDetails, CustomTlvRecord, DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId, }; - pub use { bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, vss_client, }; +use crate::scoring::setup_background_pathfinding_scores_sync; + #[cfg(feature = "uniffi")] uniffi::include_scaffolding!("ldk_node"); diff --git a/src/scoring.rs b/src/scoring.rs index 107f63f65..e85abade3 100644 --- a/src/scoring.rs +++ b/src/scoring.rs @@ -1,22 +1,18 @@ -use std::{ - io::Cursor, - sync::{Arc, Mutex, RwLock}, - time::{Duration, SystemTime}, -}; +use std::io::Cursor; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, SystemTime}; -use crate::{ - config::{ - EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS, - }, - io::utils::write_external_pathfinding_scores_to_cache, - logger::LdkLogger, - runtime::Runtime, - NodeMetrics, Scorer, -}; -use crate::{write_node_metrics, DynStore, Logger}; -use lightning::{ - log_error, log_info, log_trace, routing::scoring::ChannelLiquidities, util::ser::Readable, +use lightning::routing::scoring::ChannelLiquidities; +use lightning::util::ser::Readable; +use lightning::{log_error, log_info, log_trace}; + +use crate::config::{ + EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS, }; +use crate::io::utils::write_external_pathfinding_scores_to_cache; +use crate::logger::LdkLogger; +use crate::runtime::Runtime; +use crate::{write_node_metrics, DynStore, Logger, NodeMetrics, Scorer}; /// Start a background task that periodically downloads scores via an external url and merges them into the local /// pathfinding scores. 
diff --git a/src/types.rs b/src/types.rs
index 71512b2cd..b8dc10b18 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -18,8 +18,7 @@ use lightning::ln::peer_handler::IgnoringMessageHandler;
 use lightning::ln::types::ChannelId;
 use lightning::routing::gossip;
 use lightning::routing::router::DefaultRouter;
-use lightning::routing::scoring::CombinedScorer;
-use lightning::routing::scoring::ProbabilisticScoringFeeParameters;
+use lightning::routing::scoring::{CombinedScorer, ProbabilisticScoringFeeParameters};
 use lightning::sign::InMemorySigner;
 use lightning::util::persist::{KVStore, KVStoreSync, MonitorUpdatingPersister};
 use lightning::util::ser::{Readable, Writeable, Writer};

From f5822a02329eacdc4fa756f422c274dda25cc127 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 13 Nov 2025 13:45:46 +0100
Subject: [PATCH 10/60] Introduce `InMemoryStore` for testing

Recently, `rust-lightning` broke the (async) API of the `TestStore`,
making it ~impossible to use in regular tests. Here, we un-DRY our
`TestStore` implementation and simply copy over the previous
`TestStore` version, now named `InMemoryStore` to distinguish the two.

We also switch all feasible instances over to use `InMemoryStore`
rather than LDK's `test_utils::TestStore`.
---
 src/data_store.rs                        |   5 +-
 src/event.rs                             |   7 +-
 src/io/test_utils.rs                     | 128 +++++++++++++++++-
 .../asynchronous/static_invoice_store.rs |   4 +-
 src/peer_store.rs                        |   5 +-
 5 files changed, 138 insertions(+), 11 deletions(-)

diff --git a/src/data_store.rs b/src/data_store.rs
index 83cbf4476..87bd831c9 100644
--- a/src/data_store.rs
+++ b/src/data_store.rs
@@ -172,10 +172,11 @@ where
 #[cfg(test)]
 mod tests {
 	use lightning::impl_writeable_tlv_based;
-	use lightning::util::test_utils::{TestLogger, TestStore};
+	use lightning::util::test_utils::TestLogger;
 
 	use super::*;
 	use crate::hex_utils;
+	use crate::io::test_utils::InMemoryStore;
 
 	#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
 	struct TestObjectId {
@@ -234,7 +235,7 @@ mod tests {
 
 	#[test]
 	fn data_is_persisted() {
-		let store: Arc<DynStore> = Arc::new(TestStore::new(false));
+		let store: Arc<DynStore> = Arc::new(InMemoryStore::new());
 		let logger = Arc::new(TestLogger::new());
 		let primary_namespace = "datastore_test_primary".to_string();
 		let secondary_namespace = "datastore_test_secondary".to_string();
diff --git a/src/event.rs b/src/event.rs
index 1946350a3..42b60e213 100644
--- a/src/event.rs
+++ b/src/event.rs
@@ -1605,13 +1605,14 @@ mod tests {
 	use std::sync::atomic::{AtomicU16, Ordering};
 	use std::time::Duration;
 
-	use lightning::util::test_utils::{TestLogger, TestStore};
+	use lightning::util::test_utils::TestLogger;
 
 	use super::*;
+	use crate::io::test_utils::InMemoryStore;
 
 	#[tokio::test]
 	async fn event_queue_persistence() {
-		let store: Arc<DynStore> = Arc::new(TestStore::new(false));
+		let store: Arc<DynStore> = Arc::new(InMemoryStore::new());
 		let logger = Arc::new(TestLogger::new());
 		let event_queue = Arc::new(EventQueue::new(Arc::clone(&store), Arc::clone(&logger)));
 		assert_eq!(event_queue.next_event(), None);
@@ -1647,7 +1648,7 @@ mod tests {
 
 	#[tokio::test]
 	async fn event_queue_concurrency() {
-		let store: Arc<DynStore> = Arc::new(TestStore::new(false));
+		let store: Arc<DynStore> = Arc::new(InMemoryStore::new());
 		let logger = Arc::new(TestLogger::new());
 		let event_queue = Arc::new(EventQueue::new(Arc::clone(&store), Arc::clone(&logger)));
 		assert_eq!(event_queue.next_event(), None);
diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs
index fd4de1c9f..310638dd8 100644
--- a/src/io/test_utils.rs
+++ b/src/io/test_utils.rs
@@ -5,8 +5,13 @@
 // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
 // accordance with one or both of these licenses.
 
+use std::boxed::Box;
+use std::collections::{hash_map, HashMap};
+use std::future::Future;
 use std::panic::RefUnwindSafe;
 use std::path::PathBuf;
+use std::pin::Pin;
+use std::sync::Mutex;
 
 use lightning::events::ClosureReason;
 use lightning::ln::functional_test_utils::{
 	create_network, create_node_cfgs, create_node_chanmgrs, send_payment, TestChanMonCfg,
 };
 use lightning::util::persist::{
-	KVStoreSync, MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN,
+	KVStore, KVStoreSync, MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN,
 };
 use lightning::util::test_utils;
-use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event};
+use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event, io};
 use rand::distr::Alphanumeric;
 use rand::{rng, Rng};
 
@@ -32,6 +37,125 @@ type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister<
 
 const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
 
+pub struct InMemoryStore {
+	persisted_bytes: Mutex<HashMap<String, HashMap<String, Vec<u8>>>>,
+}
+
+impl InMemoryStore {
+	pub fn new() -> Self {
+		let persisted_bytes = Mutex::new(HashMap::new());
+		Self { persisted_bytes }
+	}
+
+	fn read_internal(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> io::Result<Vec<u8>> {
+		let persisted_lock = self.persisted_bytes.lock().unwrap();
+		let prefixed = format!("{primary_namespace}/{secondary_namespace}");
+
+		if let Some(outer_ref) = persisted_lock.get(&prefixed) {
+			if let Some(inner_ref) = outer_ref.get(key) {
+				let bytes = inner_ref.clone();
+				Ok(bytes)
+			} else {
+				Err(io::Error::new(io::ErrorKind::NotFound, "Key not found"))
+			}
+		} else {
+			Err(io::Error::new(io::ErrorKind::NotFound, "Namespace not found"))
+		}
+	}
+
+	fn write_internal(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> io::Result<()> {
+		let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+		let prefixed = format!("{primary_namespace}/{secondary_namespace}");
+		let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new());
+		outer_e.insert(key.to_string(), buf);
+		Ok(())
+	}
+
+	fn remove_internal(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool,
+	) -> io::Result<()> {
+		let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+		let prefixed = format!("{primary_namespace}/{secondary_namespace}");
+		if let Some(outer_ref) = persisted_lock.get_mut(&prefixed) {
+			outer_ref.remove(&key.to_string());
+		}
+
+		Ok(())
+	}
+
+	fn list_internal(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> io::Result<Vec<String>> {
+		let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+		let prefixed = format!("{primary_namespace}/{secondary_namespace}");
+		match persisted_lock.entry(prefixed) {
+			hash_map::Entry::Occupied(e) => Ok(e.get().keys().cloned().collect()),
+			hash_map::Entry::Vacant(_) => Ok(Vec::new()),
+		}
+	}
+}
+
+impl KVStore for InMemoryStore {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, io::Error>> + 'static + Send>> {
+		let res = self.read_internal(&primary_namespace, &secondary_namespace, &key);
+		Box::pin(async move { res })
+	}
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + 'static + Send>> {
+		let res = self.write_internal(&primary_namespace, &secondary_namespace, &key, buf);
+		Box::pin(async move { res })
+	}
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+	) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + 'static + Send>> {
+		let res = self.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy);
+		Box::pin(async move { res })
+	}
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> Pin<Box<dyn Future<Output = Result<Vec<String>, io::Error>> + 'static + Send>> {
+		let res = self.list_internal(primary_namespace, secondary_namespace);
+		Box::pin(async move { res })
+	}
+}
+
+impl KVStoreSync for InMemoryStore {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> io::Result<Vec<u8>> {
+		self.read_internal(primary_namespace, secondary_namespace, key)
+	}
+
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> io::Result<()> {
+		self.write_internal(primary_namespace, secondary_namespace, key, buf)
+	}
+
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+	) -> io::Result<()> {
+		self.remove_internal(primary_namespace, secondary_namespace, key, lazy)
+	}
+
+	fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> {
+		self.list_internal(primary_namespace, secondary_namespace)
+	}
+}
+
+unsafe impl Sync for InMemoryStore {}
+unsafe impl Send for InMemoryStore {}
+
 pub(crate) fn random_storage_path() -> PathBuf {
 	let mut temp_path = std::env::temp_dir();
 	let mut rng = rng();
diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs
index a7e2d2f9e..45125cfee 100644
--- a/src/payment/asynchronous/static_invoice_store.rs
+++ b/src/payment/asynchronous/static_invoice_store.rs
@@ -157,15 +157,15 @@ mod tests {
 	use lightning::offers::offer::OfferBuilder;
 	use lightning::offers::static_invoice::{StaticInvoice, StaticInvoiceBuilder};
 	use lightning::sign::EntropySource;
-	use lightning::util::test_utils::TestStore;
 	use lightning_types::features::BlindedHopFeatures;
 
+	use crate::io::test_utils::InMemoryStore;
 	use crate::payment::asynchronous::static_invoice_store::StaticInvoiceStore;
 	use crate::types::DynStore;
 
 	#[tokio::test]
 	async fn static_invoice_store_test() {
-		let store: Arc<DynStore> = Arc::new(TestStore::new(false));
+		let store: Arc<DynStore> = Arc::new(InMemoryStore::new());
 		let static_invoice_store = StaticInvoiceStore::new(Arc::clone(&store));
 
 		let static_invoice = invoice();
diff --git a/src/peer_store.rs b/src/peer_store.rs
index 82c80c396..59cd3d94f 100644
--- a/src/peer_store.rs
+++ b/src/peer_store.rs
@@ -152,13 +152,14 @@ mod tests {
 	use std::str::FromStr;
 	use std::sync::Arc;
 
-	use lightning::util::test_utils::{TestLogger, TestStore};
+	use lightning::util::test_utils::TestLogger;
 
 	use super::*;
+	use crate::io::test_utils::InMemoryStore;
 
 	#[test]
 	fn peer_info_persistence() {
-		let store: Arc<DynStore> = Arc::new(TestStore::new(false));
+		let store: Arc<DynStore> = Arc::new(InMemoryStore::new());
 		let logger = Arc::new(TestLogger::new());
 		let peer_store = PeerStore::new(Arc::clone(&store), Arc::clone(&logger));
 

From 4c7254139dc2eb09424cefda7c8c357b71d69c14 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Mon, 10 Nov 2025 13:48:51 +0100
Subject: [PATCH 11/60] Make `EventQueue` persistence `async`

Previously, we'd still use `KVStoreSync` for persistence of our event
queue, which also meant calling the sync persistence through our
otherwise-async background processor/event handling flow.
Here we switch our `EventQueue` persistence to be async, which gets us
one step further towards async-everything.
---
 src/event.rs | 63 ++++++++++++++++++++++++++++------------------------
 src/lib.rs   |  5 ++++-
 2 files changed, 38 insertions(+), 30 deletions(-)

diff --git a/src/event.rs b/src/event.rs
index 42b60e213..3de2c3261 100644
--- a/src/event.rs
+++ b/src/event.rs
@@ -26,7 +26,7 @@ use lightning::util::config::{
 	ChannelConfigOverrides, ChannelConfigUpdate, ChannelHandshakeConfigUpdate,
 };
 use lightning::util::errors::APIError;
-use lightning::util::persist::KVStoreSync;
+use lightning::util::persist::KVStore;
 use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use lightning_liquidity::lsps2::utils::compute_opening_fee;
 use lightning_types::payment::{PaymentHash, PaymentPreimage};
@@ -301,12 +301,14 @@ where
 		Self { queue, waker, kv_store, logger }
 	}
 
-	pub(crate) fn add_event(&self, event: Event) -> Result<(), Error> {
-		{
+	pub(crate) async fn add_event(&self, event: Event) -> Result<(), Error> {
+		let data = {
 			let mut locked_queue = self.queue.lock().unwrap();
 			locked_queue.push_back(event);
-			self.persist_queue(&locked_queue)?;
-		}
+			EventQueueSerWrapper(&locked_queue).encode()
+		};
+
+		self.persist_queue(data).await?;
 
 		if let Some(waker) = self.waker.lock().unwrap().take() {
 			waker.wake();
@@ -323,12 +325,14 @@ where
 		EventFuture { event_queue: Arc::clone(&self.queue), waker: Arc::clone(&self.waker) }.await
 	}
 
-	pub(crate) fn event_handled(&self) -> Result<(), Error> {
-		{
+	pub(crate) async fn event_handled(&self) -> Result<(), Error> {
+		let data = {
 			let mut locked_queue = self.queue.lock().unwrap();
 			locked_queue.pop_front();
-			self.persist_queue(&locked_queue)?;
-		}
+			EventQueueSerWrapper(&locked_queue).encode()
+		};
+
+		self.persist_queue(data).await?;
 
 		if let Some(waker) = self.waker.lock().unwrap().take() {
 			waker.wake();
@@ -336,15 +340,15 @@ where
 		Ok(())
 	}
 
-	fn persist_queue(&self, locked_queue: &VecDeque<Event>) -> Result<(), Error> {
-		let data = EventQueueSerWrapper(locked_queue).encode();
-		KVStoreSync::write(
+	async fn persist_queue(&self, encoded_queue: Vec<u8>) -> Result<(), Error> {
+		KVStore::write(
 			&*self.kv_store,
 			EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE,
 			EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE,
 			EVENT_QUEUE_PERSISTENCE_KEY,
-			data,
+			encoded_queue,
 		)
+		.await
 		.map_err(|e| {
 			log_error!(
 				self.logger,
@@ -694,7 +698,7 @@ where
 				claim_deadline,
 				custom_records,
 			};
-			match self.event_queue.add_event(event) {
+			match self.event_queue.add_event(event).await {
 				Ok(_) => return Ok(()),
 				Err(e) => {
 					log_error!(
 						self.logger,
@@ -928,7 +932,7 @@ where
 				.map(|cf| cf.custom_tlvs().into_iter().map(|tlv| tlv.into()).collect())
 				.unwrap_or_default(),
 		};
-		match self.event_queue.add_event(event) {
+		match self.event_queue.add_event(event).await {
 			Ok(_) => return Ok(()),
 			Err(e) => {
 				log_error!(self.logger, "Failed to push to event queue: {}", e);
@@ -988,7 +992,7 @@ where
 			fee_paid_msat,
 		};
 
-		match self.event_queue.add_event(event) {
+		match self.event_queue.add_event(event).await {
 			Ok(_) => return Ok(()),
 			Err(e) => {
 				log_error!(self.logger, "Failed to push to event queue: {}", e);
@@ -1019,7 +1023,7 @@ where
 
 		let event = Event::PaymentFailed { payment_id: Some(payment_id), payment_hash, reason };
 
-		match self.event_queue.add_event(event) {
+		match self.event_queue.add_event(event).await {
 			Ok(_) => return Ok(()),
 			Err(e) => {
 				log_error!(self.logger, "Failed to push to event queue: {}", e);
@@ -1295,7 +1299,7 @@ where
 			claim_from_onchain_tx,
 			outbound_amount_forwarded_msat,
 		};
-		self.event_queue.add_event(event).map_err(|e| {
+		self.event_queue.add_event(event).await.map_err(|e| {
 			log_error!(self.logger, "Failed to push to event queue: {}", e);
 			ReplayEvent()
 		})?;
@@ -1322,7 +1326,7 @@ where
 			counterparty_node_id,
 			funding_txo,
 		};
-		match self.event_queue.add_event(event) {
+		match self.event_queue.add_event(event).await {
 			Ok(_) => {},
 			Err(e) => {
 				log_error!(self.logger, "Failed to push to event queue: {}", e);
@@ -1383,7 +1387,7 @@ where
 			user_channel_id: UserChannelId(user_channel_id),
 			counterparty_node_id: Some(counterparty_node_id),
 		};
-		match self.event_queue.add_event(event) {
+		match self.event_queue.add_event(event).await {
 			Ok(_) => {},
 			Err(e) => {
 				log_error!(self.logger, "Failed to push to event queue: {}", e);
@@ -1407,7 +1411,7 @@ where
 			reason: Some(reason),
 		};
 
-		match self.event_queue.add_event(event) {
+		match self.event_queue.add_event(event).await {
 			Ok(_) => {},
 			Err(e) => {
 				log_error!(self.logger, "Failed to push to event queue: {}", e);
@@ -1622,7 +1626,7 @@ mod tests {
 			user_channel_id: UserChannelId(2323),
 			counterparty_node_id: None,
 		};
-		event_queue.add_event(expected_event.clone()).unwrap();
+		event_queue.add_event(expected_event.clone()).await.unwrap();
 
 		// Check we get the expected event and that it is returned until we mark it handled.
 		for _ in 0..5 {
@@ -1631,18 +1635,19 @@ mod tests {
 		}
 
 		// Check we can read back what we persisted.
-		let persisted_bytes = KVStoreSync::read(
+		let persisted_bytes = KVStore::read(
 			&*store,
 			EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE,
 			EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE,
 			EVENT_QUEUE_PERSISTENCE_KEY,
 		)
+		.await
 		.unwrap();
 		let deser_event_queue =
 			EventQueue::read(&mut &persisted_bytes[..], (Arc::clone(&store), logger)).unwrap();
 		assert_eq!(deser_event_queue.next_event_async().await, expected_event);
 
-		event_queue.event_handled().unwrap();
+		event_queue.event_handled().await.unwrap();
 		assert_eq!(event_queue.next_event(), None);
 	}
 
@@ -1676,28 +1681,28 @@ mod tests {
 		let mut delayed_enqueue = false;
 
 		for _ in 0..25 {
-			event_queue.add_event(expected_event.clone()).unwrap();
+			event_queue.add_event(expected_event.clone()).await.unwrap();
 			enqueued_events.fetch_add(1, Ordering::SeqCst);
 		}
 
 		loop {
 			tokio::select! {
				_ = tokio::time::sleep(Duration::from_millis(10)), if !delayed_enqueue => {
-					event_queue.add_event(expected_event.clone()).unwrap();
+					event_queue.add_event(expected_event.clone()).await.unwrap();
 					enqueued_events.fetch_add(1, Ordering::SeqCst);
 					delayed_enqueue = true;
 				}
 				e = event_queue.next_event_async() => {
 					assert_eq!(e, expected_event);
-					event_queue.event_handled().unwrap();
+					event_queue.event_handled().await.unwrap();
 					received_events.fetch_add(1, Ordering::SeqCst);
 
-					event_queue.add_event(expected_event.clone()).unwrap();
+					event_queue.add_event(expected_event.clone()).await.unwrap();
 					enqueued_events.fetch_add(1, Ordering::SeqCst);
 				}
 				e = event_queue.next_event_async() => {
 					assert_eq!(e, expected_event);
-					event_queue.event_handled().unwrap();
+					event_queue.event_handled().await.unwrap();
 					received_events.fetch_add(1, Ordering::SeqCst);
 				}
 			}
diff --git a/src/lib.rs b/src/lib.rs
index 701a14dde..982673f4a 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -777,7 +777,10 @@ impl Node {
 	///
 	/// **Note:** This **MUST** be called after each event has been handled.
 	pub fn event_handled(&self) -> Result<(), Error> {
-		self.event_queue.event_handled().map_err(|e| {
+		// We use our runtime for the sync variant to ensure `tokio::task::block_in_place` is
+		// always called if we'd ever hit this in an outer runtime context.
+ let fut = self.event_queue.event_handled(); + self.runtime.block_on(fut).map_err(|e| { log_error!( self.logger, "Couldn't mark event handled due to persistence failure: {}", From 7c352341f75ce032e90e5b92f20d5cc46fbfc208 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 13 Nov 2025 14:14:01 +0100 Subject: [PATCH 12/60] Allow to set optional `RouteParametersConfig` in BOLT12 API Previously, LDK only allowed setting this for BOLT11 payments. Since we now can, we allow specifying the `RouteParametersConfig` in the BOLT12 and `UnifiedQrPayment` APIs. --- bindings/ldk_node.udl | 8 ++++---- src/lib.rs | 2 ++ src/payment/bolt12.rs | 35 +++++++++++++++++++++++---------- src/payment/unified_qr.rs | 12 ++++++++--- tests/integration_tests_rust.rs | 21 +++++++++++++------- 5 files changed, 54 insertions(+), 24 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index ab2f483a1..86727231d 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -201,9 +201,9 @@ interface Bolt11Payment { interface Bolt12Payment { [Throws=NodeError] - PaymentId send([ByRef]Offer offer, u64? quantity, string? payer_note); + PaymentId send([ByRef]Offer offer, u64? quantity, string? payer_note, RouteParametersConfig? route_parameters); [Throws=NodeError] - PaymentId send_using_amount([ByRef]Offer offer, u64 amount_msat, u64? quantity, string? payer_note); + PaymentId send_using_amount([ByRef]Offer offer, u64 amount_msat, u64? quantity, string? payer_note, RouteParametersConfig? route_parameters); [Throws=NodeError] Offer receive(u64 amount_msat, [ByRef]string description, u32? expiry_secs, u64? quantity); [Throws=NodeError] @@ -211,7 +211,7 @@ interface Bolt12Payment { [Throws=NodeError] Bolt12Invoice request_refund_payment([ByRef]Refund refund); [Throws=NodeError] - Refund initiate_refund(u64 amount_msat, u32 expiry_secs, u64? quantity, string? payer_note); + Refund initiate_refund(u64 amount_msat, u32 expiry_secs, u64? quantity, string? payer_note, RouteParametersConfig? route_parameters); [Throws=NodeError] Offer receive_async(); [Throws=NodeError] @@ -256,7 +256,7 @@ interface UnifiedQrPayment { [Throws=NodeError] string receive(u64 amount_sats, [ByRef]string message, u32 expiry_sec); [Throws=NodeError] - QrPaymentResult send([ByRef]string uri_str); + QrPaymentResult send([ByRef]string uri_str, RouteParametersConfig?
route_parameters); }; interface LSPS1Liquidity { diff --git a/src/lib.rs b/src/lib.rs index 701a14dde..ff4f6ad55 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -854,6 +854,7 @@ impl Node { Bolt12Payment::new( Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), + Arc::clone(&self.config), Arc::clone(&self.is_running), Arc::clone(&self.logger), self.async_payments_role, @@ -868,6 +869,7 @@ impl Node { Arc::new(Bolt12Payment::new( Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), + Arc::clone(&self.config), Arc::clone(&self.is_running), Arc::clone(&self.logger), self.async_payments_role, diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 2b299739d..0dd38edca 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -23,7 +23,7 @@ use lightning::util::ser::{Readable, Writeable}; use lightning_types::string::UntrustedString; use rand::RngCore; -use crate::config::{AsyncPaymentsRole, LDK_PAYMENT_RETRY_TIMEOUT}; +use crate::config::{AsyncPaymentsRole, Config, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::error::Error; use crate::ffi::{maybe_deref, maybe_wrap}; use crate::logger::{log_error, log_info, LdkLogger, Logger}; @@ -54,6 +54,7 @@ type Refund = Arc; pub struct Bolt12Payment { channel_manager: Arc, payment_store: Arc, + config: Arc, is_running: Arc>, logger: Arc, async_payments_role: Option, @@ -62,10 +63,10 @@ pub struct Bolt12Payment { impl Bolt12Payment { pub(crate) fn new( channel_manager: Arc, payment_store: Arc, - is_running: Arc>, logger: Arc, + config: Arc, is_running: Arc>, logger: Arc, async_payments_role: Option, ) -> Self { - Self { channel_manager, payment_store, is_running, logger, async_payments_role } + Self { channel_manager, payment_store, config, is_running, logger, async_payments_role } } /// Send a payment given an offer. @@ -74,8 +75,12 @@ impl Bolt12Payment { /// response. /// /// If `quantity` is `Some` it represents the number of items requested. + /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. pub fn send( &self, offer: &Offer, quantity: Option, payer_note: Option, + route_parameters: Option, ) -> Result { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); @@ -87,7 +92,8 @@ impl Bolt12Payment { rand::rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let route_params_config = RouteParametersConfig::default(); + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let offer_amount_msat = match offer.amount() { Some(Amount::Bitcoin { amount_msats }) => amount_msats, @@ -104,7 +110,7 @@ impl Bolt12Payment { let params = OptionalOfferPaymentParams { payer_note: payer_note.clone(), retry_strategy, - route_params_config, + route_params_config: route_parameters, }; let res = if let Some(quantity) = quantity { self.channel_manager @@ -181,8 +187,12 @@ impl Bolt12Payment { /// /// If `payer_note` is `Some` it will be seen by the recipient and reflected back in the invoice /// response. + /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
pub fn send_using_amount( &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, + route_parameters: Option, ) -> Result { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); @@ -194,7 +204,8 @@ impl Bolt12Payment { rand::rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let route_params_config = RouteParametersConfig::default(); + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let offer_amount_msat = match offer.amount() { Some(Amount::Bitcoin { amount_msats }) => amount_msats, @@ -215,7 +226,7 @@ impl Bolt12Payment { let params = OptionalOfferPaymentParams { payer_note: payer_note.clone(), retry_strategy, - route_params_config, + route_params_config: route_parameters, }; let res = if let Some(quantity) = quantity { self.channel_manager.pay_for_offer_with_quantity( @@ -402,10 +413,13 @@ impl Bolt12Payment { /// Returns a [`Refund`] object that can be used to offer a refund payment of the amount given. /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. + /// /// [`Refund`]: lightning::offers::refund::Refund pub fn initiate_refund( &self, amount_msat: u64, expiry_secs: u32, quantity: Option, - payer_note: Option, + payer_note: Option, route_parameters: Option, ) -> Result { let mut random_bytes = [0u8; 32]; rand::rng().fill_bytes(&mut random_bytes); @@ -415,7 +429,8 @@ impl Bolt12Payment { .duration_since(UNIX_EPOCH) .unwrap(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let route_params_config = RouteParametersConfig::default(); + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let mut refund_builder = self .channel_manager @@ -424,7 +439,7 @@ impl Bolt12Payment { absolute_expiry, payment_id, retry_strategy, - route_params_config, + route_parameters, ) .map_err(|e| { log_error!(self.logger, "Failed to create refund builder: {:?}", e); diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index fc2eca150..6ebf25563 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -20,6 +20,7 @@ use bitcoin::address::{NetworkChecked, NetworkUnchecked}; use bitcoin::{Amount, Txid}; use lightning::ln::channelmanager::PaymentId; use lightning::offers::offer::Offer; +use lightning::routing::router::RouteParametersConfig; use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; use crate::error::Error; @@ -137,8 +138,13 @@ impl UnifiedQrPayment { /// Returns a `QrPaymentResult` indicating the outcome of the payment. If an error /// occurs, an `Error` is returned detailing the issue encountered. /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
+ /// /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki - pub fn send(&self, uri_str: &str) -> Result { + pub fn send( + &self, uri_str: &str, route_parameters: Option, + ) -> Result { let uri: bip21::Uri = uri_str.parse().map_err(|_| Error::InvalidUri)?; @@ -147,7 +153,7 @@ impl UnifiedQrPayment { if let Some(offer) = uri_network_checked.extras.bolt12_offer { let offer = maybe_wrap(offer); - match self.bolt12_payment.send(&offer, None, None) { + match self.bolt12_payment.send(&offer, None, None, route_parameters) { Ok(payment_id) => return Ok(QrPaymentResult::Bolt12 { payment_id }), Err(e) => log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified QR code payment. Falling back to the BOLT11 invoice.", e), } @@ -155,7 +161,7 @@ impl UnifiedQrPayment { if let Some(invoice) = uri_network_checked.extras.bolt11_invoice { let invoice = maybe_wrap(invoice); - match self.bolt11_invoice.send(&invoice, None) { + match self.bolt11_invoice.send(&invoice, route_parameters) { Ok(payment_id) => return Ok(QrPaymentResult::Bolt11 { payment_id }), Err(e) => log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. This is part of a unified QR code payment. Falling back to the on-chain transaction.", e), } diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index e2d4207cd..399fe0f58 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -967,7 +967,7 @@ async fn simple_bolt12_send_receive() { let expected_payer_note = Some("Test".to_string()); let payment_id = node_a .bolt12_payment() - .send(&offer, expected_quantity, expected_payer_note.clone()) + .send(&offer, expected_quantity, expected_payer_note.clone(), None) .unwrap(); expect_payment_successful_event!(node_a, Some(payment_id), None); @@ -1023,7 +1023,7 @@ async fn simple_bolt12_send_receive() { let expected_payer_note = Some("Test".to_string()); assert!(node_a .bolt12_payment() - .send_using_amount(&offer, less_than_offer_amount, None, None) + .send_using_amount(&offer, less_than_offer_amount, None, None, None) .is_err()); let payment_id = node_a .bolt12_payment() @@ -1032,6 +1032,7 @@ async fn simple_bolt12_send_receive() { expected_amount_msat, expected_quantity, expected_payer_note.clone(), + None, ) .unwrap(); @@ -1089,7 +1090,13 @@ async fn simple_bolt12_send_receive() { let expected_payer_note = Some("Test".to_string()); let refund = node_b .bolt12_payment() - .initiate_refund(overpaid_amount, 3600, expected_quantity, expected_payer_note.clone()) + .initiate_refund( + overpaid_amount, + 3600, + expected_quantity, + expected_payer_note.clone(), + None, + ) .unwrap(); let invoice = node_a.bolt12_payment().request_refund_payment(&refund).unwrap(); expect_payment_received_event!(node_a, overpaid_amount); @@ -1275,7 +1282,7 @@ async fn async_payment() { node_receiver.stop().unwrap(); let payment_id = - node_sender.bolt12_payment().send_using_amount(&offer, 5_000, None, None).unwrap(); + node_sender.bolt12_payment().send_using_amount(&offer, 5_000, None, None, None).unwrap(); // Sleep to allow the payment reach a state where the htlc is held and waiting for the receiver to come online. 
tokio::time::sleep(std::time::Duration::from_millis(3000)).await; @@ -1473,7 +1480,7 @@ async fn unified_qr_send_receive() { let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); let uri_str = uqr_payment.clone().unwrap(); - let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str) { + let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str, None) { Ok(QrPaymentResult::Bolt12 { payment_id }) => { println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); payment_id @@ -1494,7 +1501,7 @@ async fn unified_qr_send_receive() { // Cut off the BOLT12 part to fallback to BOLT11. let uri_str_without_offer = uri_str.split("&lno=").next().unwrap(); let invoice_payment_id: PaymentId = - match node_a.unified_qr_payment().send(uri_str_without_offer) { + match node_a.unified_qr_payment().send(uri_str_without_offer, None) { Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { panic!("Expected Bolt11 payment but got Bolt12"); }, @@ -1517,7 +1524,7 @@ async fn unified_qr_send_receive() { // Cut off any lightning part to fallback to on-chain only. let uri_str_without_lightning = onchain_uqr_payment.split("&lightning=").next().unwrap(); - let txid = match node_a.unified_qr_payment().send(&uri_str_without_lightning) { + let txid = match node_a.unified_qr_payment().send(&uri_str_without_lightning, None) { Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { panic!("Expected on-chain payment but got Bolt12") }, From 996b58e62b7ca618649ea09f6a6b637e3c583008 Mon Sep 17 00:00:00 2001 From: Chuks Agbakuru Date: Thu, 13 Nov 2025 17:29:39 +0100 Subject: [PATCH 13/60] Add explicit type definition for ChannelDetails This change uses an alias (LdkChannelDetails) and an explicit Vec type annotation for 'open_channels' in close_channel_internal and update_channel_config. This resolves type ambiguity caused by a name collision with the local ChannelDetails struct, which prevents rust-analyzer from correctly inferring the type as Vec, leading to an incorrect 'len() is private' error. 
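To illustrate the ambiguity this patch resolves: the crate defines its own `ChannelDetails` alongside LDK's, so an unannotated binding can be resolved to the wrong type by tooling. A minimal sketch of the fix, with the local struct reduced to a single illustrative field:

```rust
use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails;

// Crate-local type that shadows the upstream name.
pub struct ChannelDetails {
	pub user_channel_id: u128,
}

fn count_open(channels: Vec<LdkChannelDetails>) -> usize {
	// The alias plus the explicit `Vec<LdkChannelDetails>` annotation leaves no
	// doubt about which `ChannelDetails` the elements are, so `len()` resolves
	// on `Vec` rather than on a mis-inferred type.
	let open_channels: Vec<LdkChannelDetails> = channels;
	open_channels.len()
}
```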
--- src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 701a14dde..d1090bd89 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -133,7 +133,7 @@ use io::utils::write_node_metrics; use lightning::chain::BestBlock; use lightning::events::bump_transaction::Wallet as LdkWallet; use lightning::impl_writeable_tlv_based; -use lightning::ln::channel_state::ChannelShutdownState; +use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; @@ -1289,7 +1289,7 @@ impl Node { force_close_reason.is_none() || force, "Reason can only be set for force closures" ); - let open_channels = + let open_channels: Vec = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); if let Some(channel_details) = open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) @@ -1328,7 +1328,7 @@ impl Node { &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, channel_config: ChannelConfig, ) -> Result<(), Error> { - let open_channels = + let open_channels: Vec = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); if let Some(channel_details) = open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) From ac948f6dbe019a2565572025c668ce0c1f4ce846 Mon Sep 17 00:00:00 2001 From: Chuks Agbakuru Date: Thu, 13 Nov 2025 17:52:52 +0100 Subject: [PATCH 14/60] Replace deprecated thread_rng with rng --- benches/payments.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benches/payments.rs b/benches/payments.rs index 86dee39d8..ba69e046d 100644 --- a/benches/payments.rs +++ b/benches/payments.rs @@ -20,7 +20,7 @@ use crate::common::open_channel_push_amt; fn spawn_payment(node_a: Arc, node_b: Arc, amount_msat: u64) { let mut preimage_bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut preimage_bytes); + rand::rng().fill_bytes(&mut preimage_bytes); let preimage = PaymentPreimage(preimage_bytes); let payment_hash: PaymentHash = preimage.into(); @@ -101,7 +101,7 @@ async fn send_payments(node_a: Arc, node_b: Arc) -> std::time::Durat // Send back the money for the next iteration. let mut preimage_bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut preimage_bytes); + rand::rng().fill_bytes(&mut preimage_bytes); node_b .spontaneous_payment() .send_with_preimage( From 20ffc9cdc1306d5c34c5602e5a2954ed38b520c4 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 31 Oct 2025 11:49:42 +0100 Subject: [PATCH 15/60] Implement `lazy` deletes for `VssStore` We implement `lazy` deletion for `VssStore` by tracking pending lazy deletes and supplying them as `delete_items` on the next `put` operation. 
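In sketch form, the mechanism is a queue of delete tombstones that the next write drains into its request; `KeyValue` below is a stand-in for the VSS protobuf type, not the real `vss-client` plumbing:

```rust
use std::sync::Mutex;

// Stand-in for the VSS `KeyValue`; `version: -1` marks a delete tombstone.
struct KeyValue {
	key: String,
	version: i64,
	value: Vec<u8>,
}

struct Store {
	pending_lazy_deletes: Mutex<Vec<KeyValue>>,
}

impl Store {
	fn remove(&self, key: String, lazy: bool) {
		let tombstone = KeyValue { key, version: -1, value: vec![] };
		if lazy {
			// Defer the delete; it rides along with the next `put`.
			self.pending_lazy_deletes.lock().unwrap().push(tombstone);
		} else {
			// Non-lazy path: issue the delete request immediately (omitted).
		}
	}

	fn put(&self, item: KeyValue) {
		// Drain the deferred deletes and attach them to this write so both
		// are applied in a single transaction on the backend.
		let delete_items: Vec<KeyValue> =
			std::mem::take(&mut *self.pending_lazy_deletes.lock().unwrap());
		let _ = (item, delete_items); // ...build and send the put request here...
	}
}
```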
--- src/io/vss_store.rs | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 0e7d0872a..cea1e5864 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -15,6 +15,7 @@ use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; +use bdk_chain::Merge; use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; use lightning::io::{self, Error, ErrorKind}; use lightning::util::persist::{KVStore, KVStoreSync}; @@ -181,7 +182,7 @@ impl KVStoreSync for VssStore { } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> io::Result<()> { let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { debug_assert!(false, "Failed to access internal runtime"); @@ -203,6 +204,7 @@ impl KVStoreSync for VssStore { primary_namespace, secondary_namespace, key, + lazy, ) .await }; @@ -275,7 +277,7 @@ impl KVStore for VssStore { }) } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> Pin> + Send>> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); @@ -292,6 +294,7 @@ impl KVStore for VssStore { primary_namespace, secondary_namespace, key, + lazy, ) .await }) @@ -321,6 +324,7 @@ struct VssStoreInner { // Per-key locks that ensures that we don't have concurrent writes to the same namespace/key. // The lock also encapsulates the latest written version per key. 
locks: Mutex>>>, + pending_lazy_deletes: Mutex>, } impl VssStoreInner { @@ -347,7 +351,8 @@ impl VssStoreInner { let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); let locks = Mutex::new(HashMap::new()); - Self { client, store_id, storable_builder, key_obfuscator, locks } + let pending_lazy_deletes = Mutex::new(Vec::new()); + Self { client, store_id, storable_builder, key_obfuscator, locks, pending_lazy_deletes } } fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { @@ -451,6 +456,12 @@ impl VssStoreInner { "write", )?; + let delete_items = self + .pending_lazy_deletes + .try_lock() + .ok() + .and_then(|mut guard| guard.take()) + .unwrap_or_default(); self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { let obfuscated_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); @@ -464,7 +475,7 @@ impl VssStoreInner { version: vss_version, value: storable.encode_to_vec(), }], - delete_items: vec![], + delete_items, }; self.client.put_object(&request).await.map_err(|e| { @@ -482,7 +493,7 @@ impl VssStoreInner { async fn remove_internal( &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: String, secondary_namespace: String, key: String, + primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, ) -> io::Result<()> { check_namespace_key_validity( &primary_namespace, @@ -491,13 +502,19 @@ impl VssStoreInner { "remove", )?; + let obfuscated_key = + self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); + + let key_value = KeyValue { key: obfuscated_key, version: -1, value: vec![] }; + if lazy { + let mut pending_lazy_deletes = self.pending_lazy_deletes.lock().unwrap(); + pending_lazy_deletes.push(key_value); + return Ok(()); + } + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { - let obfuscated_key = - self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); - let request = DeleteObjectRequest { - store_id: self.store_id.clone(), - key_value: Some(KeyValue { key: obfuscated_key, version: -1, value: vec![] }), - }; + let request = + DeleteObjectRequest { store_id: self.store_id.clone(), key_value: Some(key_value) }; self.client.delete_object(&request).await.map_err(|e| { let msg = format!( From db246fc035276ba5d5810e28568114ce9a927efb Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 17 Nov 2025 10:11:26 +0100 Subject: [PATCH 16/60] f Restore delete_items on failed write --- src/io/vss_store.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index cea1e5864..f05e16669 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -475,10 +475,15 @@ impl VssStoreInner { version: vss_version, value: storable.encode_to_vec(), }], - delete_items, + delete_items: delete_items.clone(), }; self.client.put_object(&request).await.map_err(|e| { + // Restore delete items so they'll be retried on next write. + if !delete_items.is_empty() { + self.pending_lazy_deletes.lock().unwrap().extend(delete_items); + } + let msg = format!( "Failed to write to key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e From c2b6b18d2978de35062fb1b521e60226242f1a44 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 31 Oct 2025 12:03:22 +0100 Subject: [PATCH 17/60] Add test for `lazy` deletion behavior We add a testcase that ensures we only delete a lazily-deleted key after the next write operation succeeds. 
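The fixup above guards one subtle spot: once drained, the tombstones would be lost if the carrying write failed. Condensed, with stand-in types, the error path looks like:

```rust
use std::sync::Mutex;

struct KeyValue; // stand-in for the VSS tombstone type

fn put_with_deletes(
	pending: &Mutex<Vec<KeyValue>>, send: impl Fn(&[KeyValue]) -> Result<(), ()>,
) -> Result<(), ()> {
	let delete_items = std::mem::take(&mut *pending.lock().unwrap());
	send(&delete_items).map_err(|e| {
		// Restore the tombstones so a later write retries the deletion.
		if !delete_items.is_empty() {
			pending.lock().unwrap().extend(delete_items);
		}
		e
	})
}
```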
Co-authored by Claude AI --- src/io/vss_store.rs | 83 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index f05e16669..49f038997 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -666,4 +666,87 @@ mod tests { do_read_write_remove_list_persist(&vss_store); drop(vss_store) } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn vss_lazy_delete() { + let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); + let mut rng = rng(); + let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); + let mut vss_seed = [0u8; 32]; + rng.fill_bytes(&mut vss_seed); + let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); + let logger = Arc::new(Logger::new_log_facade()); + let runtime = Arc::new(Runtime::new(logger).unwrap()); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime); + + let primary_namespace = "test_namespace"; + let secondary_namespace = ""; + let key_to_delete = "key_to_delete"; + let key_for_trigger = "key_for_trigger"; + let data_to_delete = b"data_to_delete".to_vec(); + let trigger_data = b"trigger_data".to_vec(); + + // Write the key that we'll later lazily delete + KVStore::write( + &vss_store, + primary_namespace, + secondary_namespace, + key_to_delete, + data_to_delete.clone(), + ) + .await + .unwrap(); + + // Verify the key exists + let read_data = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete) + .await + .unwrap(); + assert_eq!(read_data, data_to_delete); + + // Perform a lazy delete + KVStore::remove(&vss_store, primary_namespace, secondary_namespace, key_to_delete, true) + .await + .unwrap(); + + // Verify the key still exists (lazy delete doesn't immediately remove it) + let read_data = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete) + .await + .unwrap(); + assert_eq!(read_data, data_to_delete); + + // Verify the key is still in the list + let keys = KVStore::list(&vss_store, primary_namespace, secondary_namespace).await.unwrap(); + assert!(keys.contains(&key_to_delete.to_string())); + + // Trigger the actual deletion by performing a write operation + KVStore::write( + &vss_store, + primary_namespace, + secondary_namespace, + key_for_trigger, + trigger_data.clone(), + ) + .await + .unwrap(); + + // Now verify the key is actually deleted + let read_result = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete).await; + assert!(read_result.is_err()); + assert_eq!(read_result.unwrap_err().kind(), ErrorKind::NotFound); + + // Verify the key is no longer in the list + let keys = KVStore::list(&vss_store, primary_namespace, secondary_namespace).await.unwrap(); + assert!(!keys.contains(&key_to_delete.to_string())); + + // Verify the trigger key still exists + let read_data = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_for_trigger) + .await + .unwrap(); + assert_eq!(read_data, trigger_data); + } } From 3d5013b5f5ba4d68ef3b8e1fd8c6f2da82984dbc Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 11 Nov 2025 12:54:04 +0100 Subject: [PATCH 18/60] Introduce schema versioning We introduce an `enum VssSchemaVersion` that will allow us to discern different behaviors based on the schema version based on a backwards compatible manner. 
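Later patches in this series add a `V1` layout; gating on the enum keeps both key schemas readable side by side. Schematically (the obfuscation helper is a stand-in for the real `KeyObfuscator`, and the V0 special case for empty namespaces is elided):

```rust
#[derive(Debug, PartialEq)]
enum VssSchemaVersion {
	V0,
	V1,
}

// Stand-in for `KeyObfuscator::obfuscate`.
fn obfuscate(s: &str) -> String {
	format!("obf({})", s)
}

fn build_store_key(
	version: &VssSchemaVersion, primary: &str, secondary: &str, key: &str,
) -> String {
	match version {
		// V0: plaintext namespace prefix, only the key itself is obfuscated.
		VssSchemaVersion::V0 => format!("{}#{}#{}", primary, secondary, obfuscate(key)),
		// V1: the whole namespace prefix is obfuscated as well.
		VssSchemaVersion::V1 => {
			let prefix = obfuscate(&format!("{}#{}", primary, secondary));
			format!("{}#{}", prefix, obfuscate(key))
		},
	}
}
```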
--- src/io/vss_store.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 49f038997..d6f157ae4 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -45,6 +45,13 @@ type CustomRetryPolicy = FilteredRetryPolicy< Box bool + 'static + Send + Sync>, >; +enum VssSchemaVersion { + // The initial schema version. + // This used an empty `aad` and unobfuscated `primary_namespace`/`secondary_namespace`s in the + // stored key. + V0, +} + // We set this to a small number of threads that would still allow to make some progress if one // would hit a blocking case const INTERNAL_RUNTIME_WORKERS: usize = 2; From aa25497fb7df4338df56f7783ae2a2964c7ff807 Mon Sep 17 00:00:00 2001 From: Martin Saposnic Date: Wed, 10 Sep 2025 14:46:04 -0300 Subject: [PATCH 19/60] Support client_trusts_lsp=true on ldk-node Implement changes introduced on https://github.com/lightningdevkit/rust-lightning/pull/3838 as discussed, client_trusts_lsp is a flag set at startup. --- bindings/ldk_node.udl | 1 + src/event.rs | 44 ++++- src/liquidity.rs | 73 +++++++- tests/integration_tests_rust.rs | 307 +++++++++++++++++++++++++++++++- 4 files changed, 413 insertions(+), 12 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 86727231d..077a20433 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -44,6 +44,7 @@ dictionary LSPS2ServiceConfig { u32 max_client_to_self_delay; u64 min_payment_size_msat; u64 max_payment_size_msat; + boolean client_trusts_lsp; }; enum LogLevel { diff --git a/src/event.rs b/src/event.rs index 3de2c3261..46488549c 100644 --- a/src/event.rs +++ b/src/event.rs @@ -491,7 +491,7 @@ where counterparty_node_id, channel_value_satoshis, output_script, - .. + user_channel_id, } => { // Construct the raw transaction with the output that is paid the amount of the // channel. @@ -510,12 +510,36 @@ where locktime, ) { Ok(final_tx) => { - // Give the funding transaction back to LDK for opening the channel. - match self.channel_manager.funding_transaction_generated( - temporary_channel_id, - counterparty_node_id, - final_tx, - ) { + let needs_manual_broadcast = + self.liquidity_source.as_ref().map_or(false, |ls| { + ls.as_ref().lsps2_channel_needs_manual_broadcast( + counterparty_node_id, + user_channel_id, + ) + }); + + let result = if needs_manual_broadcast { + self.liquidity_source.as_ref().map(|ls| { + ls.lsps2_store_funding_transaction( + user_channel_id, + counterparty_node_id, + final_tx.clone(), + ); + }); + self.channel_manager.funding_transaction_generated_manual_broadcast( + temporary_channel_id, + counterparty_node_id, + final_tx, + ) + } else { + self.channel_manager.funding_transaction_generated( + temporary_channel_id, + counterparty_node_id, + final_tx, + ) + }; + + match result { Ok(()) => {}, Err(APIError::APIMisuseError { err }) => { log_error!(self.logger, "Panicking due to APIMisuseError: {}", err); @@ -554,8 +578,10 @@ where }, } }, - LdkEvent::FundingTxBroadcastSafe { .. } => { - debug_assert!(false, "We currently only support safe funding, so this event should never be emitted."); + LdkEvent::FundingTxBroadcastSafe { user_channel_id, counterparty_node_id, .. 
} => { + self.liquidity_source.as_ref().map(|ls| { + ls.lsps2_funding_tx_broadcast_safe(user_channel_id, counterparty_node_id); + }); }, LdkEvent::PaymentClaimable { payment_hash, diff --git a/src/liquidity.rs b/src/liquidity.rs index 57e2ad488..ee520e14d 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -14,6 +14,7 @@ use std::time::Duration; use bitcoin::hashes::{sha256, Hash}; use bitcoin::secp256k1::{PublicKey, Secp256k1}; +use bitcoin::Transaction; use chrono::Utc; use lightning::events::HTLCHandlingFailureType; use lightning::ln::channelmanager::{InterceptId, MIN_FINAL_CLTV_EXPIRY_DELTA}; @@ -51,7 +52,6 @@ use crate::{total_anchor_channels_reserve_sats, Config, Error}; const LIQUIDITY_REQUEST_TIMEOUT_SECS: u64 = 5; const LSPS2_GETINFO_REQUEST_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24); -const LSPS2_CLIENT_TRUSTS_LSP_MODE: bool = true; const LSPS2_CHANNEL_CLTV_EXPIRY_DELTA: u32 = 72; struct LSPS1Client { @@ -130,6 +130,8 @@ pub struct LSPS2ServiceConfig { pub min_payment_size_msat: u64, /// The maximum payment size that we will accept when opening a channel. pub max_payment_size_msat: u64, + /// Use the client trusts lsp model + pub client_trusts_lsp: bool, } pub(crate) struct LiquiditySourceBuilder @@ -305,6 +307,73 @@ where self.lsps2_client.as_ref().map(|s| (s.lsp_node_id, s.lsp_address.clone())) } + pub(crate) fn lsps2_channel_needs_manual_broadcast( + &self, counterparty_node_id: PublicKey, user_channel_id: u128, + ) -> bool { + self.lsps2_service.as_ref().map_or(false, |lsps2_service| { + lsps2_service.service_config.client_trusts_lsp + && self + .liquidity_manager() + .lsps2_service_handler() + .and_then(|handler| { + handler + .channel_needs_manual_broadcast(user_channel_id, &counterparty_node_id) + .ok() + }) + .unwrap_or(false) + }) + } + + pub(crate) fn lsps2_store_funding_transaction( + &self, user_channel_id: u128, counterparty_node_id: PublicKey, funding_tx: Transaction, + ) { + if self.lsps2_service.as_ref().map_or(false, |svc| !svc.service_config.client_trusts_lsp) { + // Only necessary for client-trusts-LSP flow + return; + } + + let lsps2_service_handler = self.liquidity_manager.lsps2_service_handler(); + if let Some(handler) = lsps2_service_handler { + handler + .store_funding_transaction(user_channel_id, &counterparty_node_id, funding_tx) + .unwrap_or_else(|e| { + debug_assert!(false, "Failed to store funding transaction: {:?}", e); + log_error!(self.logger, "Failed to store funding transaction: {:?}", e); + }); + } else { + log_error!(self.logger, "LSPS2 service handler is not available."); + } + } + + pub(crate) fn lsps2_funding_tx_broadcast_safe( + &self, user_channel_id: u128, counterparty_node_id: PublicKey, + ) { + if self.lsps2_service.as_ref().map_or(false, |svc| !svc.service_config.client_trusts_lsp) { + // Only necessary for client-trusts-LSP flow + return; + } + + let lsps2_service_handler = self.liquidity_manager.lsps2_service_handler(); + if let Some(handler) = lsps2_service_handler { + handler + .set_funding_tx_broadcast_safe(user_channel_id, &counterparty_node_id) + .unwrap_or_else(|e| { + debug_assert!( + false, + "Failed to mark funding transaction safe to broadcast: {:?}", + e + ); + log_error!( + self.logger, + "Failed to mark funding transaction safe to broadcast: {:?}", + e + ); + }); + } else { + log_error!(self.logger, "LSPS2 service handler is not available."); + } + } + pub(crate) async fn handle_next_event(&self) { match self.liquidity_manager.next_event_async().await { 
LiquidityEvent::LSPS1Client(LSPS1ClientEvent::SupportedOptionsReady { @@ -594,7 +663,7 @@ where request_id, intercept_scid, LSPS2_CHANNEL_CLTV_EXPIRY_DELTA, - LSPS2_CLIENT_TRUSTS_LSP_MODE, + service_config.client_trusts_lsp, user_channel_id, ) .await diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 399fe0f58..69df12710 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -1552,8 +1552,12 @@ async fn unified_qr_send_receive() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn lsps2_client_service_integration() { - let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + do_lsps2_client_service_integration(true).await; + do_lsps2_client_service_integration(false).await; +} +async fn do_lsps2_client_service_integration(client_trusts_lsp: bool) { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); let sync_config = EsploraSyncConfig { background_sync_config: None }; @@ -1571,6 +1575,7 @@ async fn lsps2_client_service_integration() { min_channel_lifetime: 100, min_channel_opening_fee_msat: 0, max_client_to_self_delay: 1024, + client_trusts_lsp, }; let service_config = random_config(true); @@ -1867,3 +1872,303 @@ async fn drop_in_async_context() { let node = setup_node(&chain_source, config, Some(seed_bytes)); node.stop().unwrap(); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn lsps2_client_trusts_lsp() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + + let sync_config = EsploraSyncConfig { background_sync_config: None }; + + // Setup three nodes: service, client, and payer + let channel_opening_fee_ppm = 10_000; + let channel_over_provisioning_ppm = 100_000; + let lsps2_service_config = LSPS2ServiceConfig { + require_token: None, + advertise_service: false, + channel_opening_fee_ppm, + channel_over_provisioning_ppm, + max_payment_size_msat: 1_000_000_000, + min_payment_size_msat: 0, + min_channel_lifetime: 100, + min_channel_opening_fee_msat: 0, + max_client_to_self_delay: 1024, + client_trusts_lsp: true, + }; + + let service_config = random_config(true); + setup_builder!(service_builder, service_config.node_config); + service_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + service_builder.set_liquidity_provider_lsps2(lsps2_service_config); + let service_node = service_builder.build().unwrap(); + service_node.start().unwrap(); + let service_node_id = service_node.node_id(); + let service_addr = service_node.listening_addresses().unwrap().first().unwrap().clone(); + + let client_config = random_config(true); + setup_builder!(client_builder, client_config.node_config); + client_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + client_builder.set_liquidity_source_lsps2(service_node_id, service_addr.clone(), None); + let client_node = client_builder.build().unwrap(); + client_node.start().unwrap(); + let client_node_id = client_node.node_id(); + + let payer_config = random_config(true); + setup_builder!(payer_builder, payer_config.node_config); + payer_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + let payer_node = payer_builder.build().unwrap(); + payer_node.start().unwrap(); + + let service_addr_onchain = service_node.onchain_payment().new_address().unwrap(); + let client_addr_onchain = 
client_node.onchain_payment().new_address().unwrap(); + let payer_addr_onchain = payer_node.onchain_payment().new_address().unwrap(); + + let premine_amount_sat = 10_000_000; + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![service_addr_onchain, client_addr_onchain, payer_addr_onchain], + Amount::from_sat(premine_amount_sat), + ) + .await; + service_node.sync_wallets().unwrap(); + client_node.sync_wallets().unwrap(); + payer_node.sync_wallets().unwrap(); + println!("Premine complete!"); + // Open a channel payer -> service that will allow paying the JIT invoice + open_channel(&payer_node, &service_node, 5_000_000, false, &electrsd).await; + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + service_node.sync_wallets().unwrap(); + payer_node.sync_wallets().unwrap(); + expect_channel_ready_event!(payer_node, service_node.node_id()); + expect_channel_ready_event!(service_node, payer_node.node_id()); + + let invoice_description = + Bolt11InvoiceDescription::Direct(Description::new(String::from("asdf")).unwrap()); + let jit_amount_msat = 100_000_000; + + println!("Generating JIT invoice!"); + let manual_preimage = PaymentPreimage([42u8; 32]); + let manual_payment_hash: PaymentHash = manual_preimage.into(); + let res = client_node + .bolt11_payment() + .receive_via_jit_channel_for_hash( + jit_amount_msat, + &invoice_description.into(), + 1024, + None, + manual_payment_hash, + ) + .unwrap(); + + // Have the payer_node pay the invoice, therby triggering channel open service_node -> client_node. + println!("Paying JIT invoice!"); + let payment_id = payer_node.bolt11_payment().send(&res, None).unwrap(); + println!("Payment ID: {:?}", payment_id); + let funding_txo = expect_channel_pending_event!(service_node, client_node.node_id()); + expect_channel_ready_event!(service_node, client_node.node_id()); + expect_channel_pending_event!(client_node, service_node.node_id()); + expect_channel_ready_event!(client_node, service_node.node_id()); + + // Check the funding transaction hasn't been broadcasted yet and nodes aren't seeing it. + println!("Try to find funding tx... 
It won't be found yet, as the client has not claimed it."); + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + let mempool = bitcoind.client.get_raw_mempool().unwrap().into_model().unwrap(); + let funding_tx_found = mempool.0.iter().any(|txid| *txid == funding_txo.txid); + assert!(!funding_tx_found, "Funding transaction should NOT be broadcast yet"); + + service_node.sync_wallets().unwrap(); + client_node.sync_wallets().unwrap(); + assert_eq!( + client_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == service_node_id) + .unwrap() + .confirmations, + Some(0) + ); + assert_eq!( + service_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == client_node_id) + .unwrap() + .confirmations, + Some(0) + ); + + // Now claim the JIT payment, which should release the funding transaction + let service_fee_msat = (jit_amount_msat * channel_opening_fee_ppm as u64) / 1_000_000; + let expected_received_amount_msat = jit_amount_msat - service_fee_msat; + + let _ = expect_payment_claimable_event!( + client_node, + payment_id, + manual_payment_hash, + expected_received_amount_msat + ); + + client_node + .bolt11_payment() + .claim_for_hash(manual_payment_hash, jit_amount_msat, manual_preimage) + .unwrap(); + + expect_payment_successful_event!(payer_node, Some(payment_id), None); + + let _ = expect_payment_received_event!(client_node, expected_received_amount_msat).unwrap(); + + // Check the nodes pick up on the confirmed funding tx now. + wait_for_tx(&electrsd.client, funding_txo.txid).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + service_node.sync_wallets().unwrap(); + client_node.sync_wallets().unwrap(); + assert_eq!( + client_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == service_node_id) + .unwrap() + .confirmations, + Some(6) + ); + assert_eq!( + service_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == client_node_id) + .unwrap() + .confirmations, + Some(6) + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + + let sync_config = EsploraSyncConfig { background_sync_config: None }; + + // Setup three nodes: service, client, and payer + let channel_opening_fee_ppm = 10_000; + let channel_over_provisioning_ppm = 100_000; + let lsps2_service_config = LSPS2ServiceConfig { + require_token: None, + advertise_service: false, + channel_opening_fee_ppm, + channel_over_provisioning_ppm, + max_payment_size_msat: 1_000_000_000, + min_payment_size_msat: 0, + min_channel_lifetime: 100, + min_channel_opening_fee_msat: 0, + max_client_to_self_delay: 1024, + client_trusts_lsp: false, + }; + + let service_config = random_config(true); + setup_builder!(service_builder, service_config.node_config); + service_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + service_builder.set_liquidity_provider_lsps2(lsps2_service_config); + let service_node = service_builder.build().unwrap(); + service_node.start().unwrap(); + + let service_node_id = service_node.node_id(); + let service_addr = service_node.listening_addresses().unwrap().first().unwrap().clone(); + + let client_config = random_config(true); + setup_builder!(client_builder, client_config.node_config); + client_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + 
client_builder.set_liquidity_source_lsps2(service_node_id, service_addr.clone(), None); + let client_node = client_builder.build().unwrap(); + client_node.start().unwrap(); + + let client_node_id = client_node.node_id(); + + let payer_config = random_config(true); + setup_builder!(payer_builder, payer_config.node_config); + payer_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + let payer_node = payer_builder.build().unwrap(); + payer_node.start().unwrap(); + + let service_addr_onchain = service_node.onchain_payment().new_address().unwrap(); + let client_addr_onchain = client_node.onchain_payment().new_address().unwrap(); + let payer_addr_onchain = payer_node.onchain_payment().new_address().unwrap(); + + let premine_amount_sat = 10_000_000; + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![service_addr_onchain, client_addr_onchain, payer_addr_onchain], + Amount::from_sat(premine_amount_sat), + ) + .await; + service_node.sync_wallets().unwrap(); + client_node.sync_wallets().unwrap(); + payer_node.sync_wallets().unwrap(); + println!("Premine complete!"); + // Open a channel payer -> service that will allow paying the JIT invoice + open_channel(&payer_node, &service_node, 5_000_000, false, &electrsd).await; + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + service_node.sync_wallets().unwrap(); + payer_node.sync_wallets().unwrap(); + expect_channel_ready_event!(payer_node, service_node.node_id()); + expect_channel_ready_event!(service_node, payer_node.node_id()); + + let invoice_description = + Bolt11InvoiceDescription::Direct(Description::new(String::from("asdf")).unwrap()); + let jit_amount_msat = 100_000_000; + + println!("Generating JIT invoice!"); + let manual_preimage = PaymentPreimage([42u8; 32]); + let manual_payment_hash: PaymentHash = manual_preimage.into(); + let res = client_node + .bolt11_payment() + .receive_via_jit_channel_for_hash( + jit_amount_msat, + &invoice_description.into(), + 1024, + None, + manual_payment_hash, + ) + .unwrap(); + + // Have the payer_node pay the invoice, therby triggering channel open service_node -> client_node. + println!("Paying JIT invoice!"); + let _payment_id = payer_node.bolt11_payment().send(&res, None).unwrap(); + let funding_txo = expect_channel_pending_event!(service_node, client_node.node_id()); + expect_channel_ready_event!(service_node, client_node.node_id()); + expect_channel_pending_event!(client_node, service_node.node_id()); + expect_channel_ready_event!(client_node, service_node.node_id()); + println!("Waiting for funding transaction to be broadcast..."); + + // Check the nodes pick up on the confirmed funding tx now. + wait_for_tx(&electrsd.client, funding_txo.txid).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + service_node.sync_wallets().unwrap(); + client_node.sync_wallets().unwrap(); + assert_eq!( + client_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == service_node_id) + .unwrap() + .confirmations, + Some(6) + ); + assert_eq!( + service_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == client_node_id) + .unwrap() + .confirmations, + Some(6) + ); +} From 039aad4b0644d35b388b88c0641eb0f72a526d16 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 17 Nov 2025 15:48:45 +0100 Subject: [PATCH 20/60] Avoid explicit `panic`s in `handle_event` Previously, we'd explicitly `panic` on an APIMisuseError. 
While this error type should still never happen, we avoid explicit panics in favor of `debug_assert`s here. --- src/event.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/event.rs b/src/event.rs index 46488549c..a55ddb7fd 100644 --- a/src/event.rs +++ b/src/event.rs @@ -542,8 +542,12 @@ where match result { Ok(()) => {}, Err(APIError::APIMisuseError { err }) => { - log_error!(self.logger, "Panicking due to APIMisuseError: {}", err); - panic!("APIMisuseError: {}", err); + log_error!( + self.logger, + "Encountered APIMisuseError, this should never happen: {}", + err + ); + debug_assert!(false, "APIMisuseError: {}", err); }, Err(APIError::ChannelUnavailable { err }) => { log_error!( @@ -571,7 +575,7 @@ where ) .unwrap_or_else(|e| { log_error!(self.logger, "Failed to force close channel after funding generation failed: {:?}", e); - panic!( + debug_assert!(false, "Failed to force close channel after funding generation failed" ); }); From 65f6f7b8e0617a0ff686cc3bfa2225a2e9d09d93 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 22 Aug 2025 11:06:53 +0200 Subject: [PATCH 21/60] Use obf. key as `aad` for `StorableBuilder` and obfuscate namespaces We bump our `vss-client` dependency to include the changes to the `StorableBuilder` interface. Previously, the `vss-client` didn't allow setting `ChaCha20Poly1305RFC`'s `aad` field, which meant the `tag` did not commit to any particular key. This would allow a malicious VSS provider to substitute blobs stored under a different key without the client noticing. Here, we now set the `aad` field to the key under which the `Storable` will be stored, ensuring that the retrieved data was originally stored under the key we expected, if `VssSchemaVersion::V1` is set. We also now obfuscate primary and secondary namespaces in the persisted keys, if `VssSchemaVersion::V1` is set. We also account for `StorableBuilder` now taking `data_decryption_key` by reference on `build`/`deconstruct`.
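The guarantee this buys can be seen with any AEAD implementation: the authentication tag only verifies if the associated data supplied at decryption matches what was supplied at encryption. A sketch using the RustCrypto `chacha20poly1305` crate (not the cipher `vss-client` wires up internally, and with a fixed nonce purely for brevity):

```rust
use chacha20poly1305::aead::{Aead, KeyInit, Payload};
use chacha20poly1305::{ChaCha20Poly1305, Key, Nonce};

fn main() {
	let cipher = ChaCha20Poly1305::new(Key::from_slice(&[7u8; 32]));
	let nonce = Nonce::from_slice(&[0u8; 12]);

	// Encrypt a blob with its (obfuscated) store key as associated data.
	let ciphertext =
		cipher.encrypt(nonce, Payload { msg: b"blob", aad: b"ns#key_a" }).unwrap();

	// Reading it back under the expected key succeeds...
	assert!(cipher
		.decrypt(nonce, Payload { msg: &ciphertext[..], aad: b"ns#key_a" })
		.is_ok());
	// ...but a blob the server substituted from another key fails the tag check.
	assert!(cipher
		.decrypt(nonce, Payload { msg: &ciphertext[..], aad: b"ns#key_b" })
		.is_err());
}
```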
--- Cargo.toml | 5 ++- src/io/vss_store.rs | 105 ++++++++++++++++++++++++++++++++------------ 2 files changed, 80 insertions(+), 30 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 544dfca08..8e2f04447 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -65,7 +65,7 @@ serde = { version = "1.0.210", default-features = false, features = ["std", "der serde_json = { version = "1.0.128", default-features = false, features = ["std"] } log = { version = "0.4.22", default-features = false, features = ["std"]} -vss-client = "0.3" +vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} [target.'cfg(windows)'.dependencies] @@ -151,3 +151,6 @@ harness = false #lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } + +#vss-client-ng = { path = "../vss-client" } +#vss-client-ng = { git = "https://github.com/lightningdevkit/vss-client", branch = "main" } diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index d6f157ae4..6da3a53db 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -45,11 +45,15 @@ type CustomRetryPolicy = FilteredRetryPolicy< Box bool + 'static + Send + Sync>, >; +#[derive(Debug, PartialEq)] enum VssSchemaVersion { // The initial schema version. // This used an empty `aad` and unobfuscated `primary_namespace`/`secondary_namespace`s in the // stored key. V0, + // The second deployed schema version. + // Here we started to obfuscate the primary and secondary namespaces and the obfuscated `store_key` (`obfuscate(primary_namespace#secondary_namespace)#obfuscate(key)`) is now used as `aad` for encryption, ensuring that the encrypted blobs commit to the key they're stored under. + V1, } // We set this to a small number of threads that would still allow to make some progress if one @@ -324,9 +328,10 @@ impl Drop for VssStore { } struct VssStoreInner { + schema_version: VssSchemaVersion, client: VssClient, store_id: String, - storable_builder: StorableBuilder, + data_encryption_key: [u8; 32], key_obfuscator: KeyObfuscator, // Per-key locks that ensures that we don't have concurrent writes to the same namespace/key. // The lock also encapsulates the latest written version per key. 
@@ -339,10 +344,10 @@ impl VssStoreInner { base_url: String, store_id: String, vss_seed: [u8; 32], header_provider: Arc, ) -> Self { + let schema_version = VssSchemaVersion::V0; let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); - let storable_builder = StorableBuilder::new(data_encryption_key, RandEntropySource); let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) .with_max_attempts(10) .with_max_total_delay(Duration::from_secs(15)) @@ -359,7 +364,15 @@ impl VssStoreInner { let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); let locks = Mutex::new(HashMap::new()); let pending_lazy_deletes = Mutex::new(Vec::new()); - Self { client, store_id, storable_builder, key_obfuscator, locks, pending_lazy_deletes } + Self { + schema_version, + client, + store_id, + data_encryption_key, + key_obfuscator, + locks, + pending_lazy_deletes, + } } fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { @@ -370,17 +383,45 @@ impl VssStoreInner { fn build_obfuscated_key( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> String { - let obfuscated_key = self.key_obfuscator.obfuscate(key); - if primary_namespace.is_empty() { - obfuscated_key + if self.schema_version == VssSchemaVersion::V1 { + let obfuscated_prefix = + self.build_obfuscated_prefix(primary_namespace, secondary_namespace); + let obfuscated_key = self.key_obfuscator.obfuscate(key); + format!("{}#{}", obfuscated_prefix, obfuscated_key) + } else { + // Default to V0 schema + let obfuscated_key = self.key_obfuscator.obfuscate(key); + if primary_namespace.is_empty() { + obfuscated_key + } else { + format!("{}#{}#{}", primary_namespace, secondary_namespace, obfuscated_key) + } + } + } + + fn build_obfuscated_prefix( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> String { + if self.schema_version == VssSchemaVersion::V1 { + let prefix = format!("{}#{}", primary_namespace, secondary_namespace); + self.key_obfuscator.obfuscate(&prefix) } else { - format!("{}#{}#{}", primary_namespace, secondary_namespace, obfuscated_key) + // Default to V0 schema + format!("{}#{}", primary_namespace, secondary_namespace) } } fn extract_key(&self, unified_key: &str) -> io::Result { - let mut parts = unified_key.splitn(3, '#'); - let (_primary_namespace, _secondary_namespace) = (parts.next(), parts.next()); + let mut parts = if self.schema_version == VssSchemaVersion::V1 { + let mut parts = unified_key.splitn(2, '#'); + let _obfuscated_namespace = parts.next(); + parts + } else { + // Default to V0 schema + let mut parts = unified_key.splitn(3, '#'); + let (_primary_namespace, _secondary_namespace) = (parts.next(), parts.next()); + parts + }; match parts.next() { Some(obfuscated_key) => { let actual_key = self.key_obfuscator.deobfuscate(obfuscated_key)?; @@ -395,7 +436,7 @@ impl VssStoreInner { ) -> io::Result> { let mut page_token = None; let mut keys = vec![]; - let key_prefix = format!("{}#{}", primary_namespace, secondary_namespace); + let key_prefix = self.build_obfuscated_prefix(primary_namespace, secondary_namespace); while page_token != Some("".to_string()) { let request = ListKeyVersionsRequest { store_id: self.store_id.clone(), @@ -425,9 +466,8 @@ impl VssStoreInner { ) -> io::Result> { check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; - let obfuscated_key = - 
self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); - let request = GetObjectRequest { store_id: self.store_id.clone(), key: obfuscated_key }; + let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); + let request = GetObjectRequest { store_id: self.store_id.clone(), key: store_key.clone() }; let resp = self.client.get_object(&request).await.map_err(|e| { let msg = format!( "Failed to read from key {}/{}/{}: {}", @@ -449,7 +489,11 @@ impl VssStoreInner { Error::new(ErrorKind::Other, msg) })?; - Ok(self.storable_builder.deconstruct(storable)?.0) + let storable_builder = StorableBuilder::new(RandEntropySource); + let aad = + if self.schema_version == VssSchemaVersion::V1 { store_key.as_bytes() } else { &[] }; + let decrypted = storable_builder.deconstruct(storable, &self.data_encryption_key, aad)?.0; + Ok(decrypted) } async fn write_internal( @@ -469,22 +513,25 @@ impl VssStoreInner { .ok() .and_then(|mut guard| guard.take()) .unwrap_or_default(); - self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { - let obfuscated_key = - self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); - let vss_version = -1; - let storable = self.storable_builder.build(buf, vss_version); - let request = PutObjectRequest { - store_id: self.store_id.clone(), - global_version: None, - transaction_items: vec![KeyValue { - key: obfuscated_key, - version: vss_version, - value: storable.encode_to_vec(), - }], - delete_items: delete_items.clone(), - }; + let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); + let vss_version = -1; + let storable_builder = StorableBuilder::new(RandEntropySource); + let aad = + if self.schema_version == VssSchemaVersion::V1 { store_key.as_bytes() } else { &[] }; + let storable = + storable_builder.build(buf.to_vec(), vss_version, &self.data_encryption_key, aad); + let request = PutObjectRequest { + store_id: self.store_id.clone(), + global_version: None, + transaction_items: vec![KeyValue { + key: store_key, + version: vss_version, + value: storable.encode_to_vec(), + }], + delete_items: delete_items.clone(), + }; + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { self.client.put_object(&request).await.map_err(|e| { // Restore delete items so they'll be retried on next write. if !delete_items.is_empty() { From 86239cf9873ce2ad43b023b688218c9e7a87356e Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 11 Nov 2025 13:35:34 +0100 Subject: [PATCH 22/60] Prefactor: move client construction out to `VssStore` While having it in `VssStoreInner` makes more sense, we now opt to construt the client (soon, clients) in `VssStore` and then hand it down to `VssStoreInner`. That will allow us to use the client once for checking the schema version before actually instantiating `VssStoreInner`. 
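The shape this prefactor enables might look roughly as follows; every name here is a hypothetical stand-in, and the actual version probing only lands in a later commit:

```rust
struct Client; // stand-in for the vss-client type

struct VssStoreInner {
	schema_version: u32,
	client: Client,
}

// Use the already-constructed client once, before the inner store exists.
async fn detect_schema_version(_client: &Client, _store_id: &str) -> u32 {
	// E.g., read a well-known marker key and treat "not found" as version 0.
	0
}

async fn build_store(client: Client, store_id: String) -> VssStoreInner {
	let schema_version = detect_schema_version(&client, &store_id).await;
	VssStoreInner { schema_version, client }
}
```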
--- src/io/vss_store.rs | 50 ++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 6da3a53db..6ccf77080 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -82,7 +82,6 @@ impl VssStore { base_url: String, store_id: String, vss_seed: [u8; 32], header_provider: Arc, runtime: Arc, ) -> Self { - let inner = Arc::new(VssStoreInner::new(base_url, store_id, vss_seed, header_provider)); let next_version = AtomicU64::new(1); let internal_runtime = Some( tokio::runtime::Builder::new_multi_thread() @@ -98,6 +97,33 @@ impl VssStore { .unwrap(), ); + let schema_version = VssSchemaVersion::V0; + let (data_encryption_key, obfuscation_master_key) = + derive_data_encryption_and_obfuscation_keys(&vss_seed); + let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); + let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) + .with_max_attempts(10) + .with_max_total_delay(Duration::from_secs(15)) + .with_max_jitter(Duration::from_millis(10)) + .skip_retry_on_error(Box::new(|e: &VssError| { + matches!( + e, + VssError::NoSuchKeyError(..) + | VssError::InvalidRequestError(..) + | VssError::ConflictError(..) + ) + }) as _); + + let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); + + let inner = Arc::new(VssStoreInner::new( + schema_version, + client, + store_id, + data_encryption_key, + key_obfuscator, + )); + Self { inner, next_version, runtime, internal_runtime } } @@ -341,27 +367,9 @@ struct VssStoreInner { impl VssStoreInner { pub(crate) fn new( - base_url: String, store_id: String, vss_seed: [u8; 32], - header_provider: Arc, + schema_version: VssSchemaVersion, client: VssClient, store_id: String, + data_encryption_key: [u8; 32], key_obfuscator: KeyObfuscator, ) -> Self { - let schema_version = VssSchemaVersion::V0; - let (data_encryption_key, obfuscation_master_key) = - derive_data_encryption_and_obfuscation_keys(&vss_seed); - let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); - let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) - .with_max_attempts(10) - .with_max_total_delay(Duration::from_secs(15)) - .with_max_jitter(Duration::from_millis(10)) - .skip_retry_on_error(Box::new(|e: &VssError| { - matches!( - e, - VssError::NoSuchKeyError(..) - | VssError::InvalidRequestError(..) - | VssError::ConflictError(..) - ) - }) as _); - - let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); let locks = Mutex::new(HashMap::new()); let pending_lazy_deletes = Mutex::new(Vec::new()); Self { From a37517905a923f9b3ed443cb8142ef8a08c6f599 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 3 Nov 2025 13:15:36 +0100 Subject: [PATCH 23/60] Only use internal runtime in `VssStore` We previously attempted to drop the internal runtime from `VssStore`, resulting into blocking behavior. While we recently made changes that improved our situation (having VSS CI pass again pretty reliably), we just ran into yet another case where the VSS CI hung (cf. https://github.com/lightningdevkit/vss-server/actions/runs/19023212819/job/54322173817?pr=59). Here we attempt to restore even more of the original pre- ab3d78d1ecd05a755c836915284e5ca60c65692a / #623 behavior to get rid of the reappearing blocking behavior, i.e., only use the internal runtime in `VssStore`. 
--- src/builder.rs | 3 +- src/io/vss_store.rs | 83 ++++++++++++++++++++------------------------- 2 files changed, 38 insertions(+), 48 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index c0e39af7a..59f5b9b46 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -731,8 +731,7 @@ impl NodeBuilder { let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes(); - let vss_store = - VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider, Arc::clone(&runtime)); + let vss_store = VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider); build_with_store_internal( config, self.chain_data_source_config.as_ref(), diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 6ccf77080..31b7d71cb 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -36,7 +36,6 @@ use vss_client::util::retry::{ use vss_client::util::storable_builder::{EntropySource, StorableBuilder}; use crate::io::utils::check_namespace_key_validity; -use crate::runtime::Runtime; type CustomRetryPolicy = FilteredRetryPolicy< JitteredRetryPolicy< @@ -67,7 +66,6 @@ pub struct VssStore { // Version counter to ensure that writes are applied in the correct order. It is assumed that read and list // operations aren't sensitive to the order of execution. next_version: AtomicU64, - runtime: Arc, // A VSS-internal runtime we use to avoid any deadlocks we could hit when waiting on a spawned // blocking task to finish while the blocked thread had acquired the reactor. In particular, // this works around a previously-hit case where a concurrent call to @@ -80,7 +78,7 @@ pub struct VssStore { impl VssStore { pub(crate) fn new( base_url: String, store_id: String, vss_seed: [u8; 32], - header_provider: Arc, runtime: Arc, + header_provider: Arc, ) -> Self { let next_version = AtomicU64::new(1); let internal_runtime = Some( @@ -124,7 +122,7 @@ impl VssStore { key_obfuscator, )); - Self { inner, next_version, runtime, internal_runtime } + Self { inner, next_version, internal_runtime } } // Same logic as for the obfuscated keys below, but just for locking, using the plaintext keys @@ -171,13 +169,14 @@ impl KVStoreSync for VssStore { async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }; // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always // times out. - let spawned_fut = internal_runtime.spawn(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::read timed out"; - Error::new(ErrorKind::Other, msg) - }) - }); - self.runtime.block_on(spawned_fut).expect("We should always finish")? + tokio::task::block_in_place(move || { + internal_runtime.block_on(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::read timed out"; + Error::new(ErrorKind::Other, msg) + }) + })? + }) } fn write( @@ -209,13 +208,14 @@ impl KVStoreSync for VssStore { }; // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always // times out. - let spawned_fut = internal_runtime.spawn(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::write timed out"; - Error::new(ErrorKind::Other, msg) - }) - }); - self.runtime.block_on(spawned_fut).expect("We should always finish")? + tokio::task::block_in_place(move || { + internal_runtime.block_on(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::write timed out"; + Error::new(ErrorKind::Other, msg) + }) + })? 
+ }) } fn remove( @@ -247,13 +247,14 @@ impl KVStoreSync for VssStore { }; // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always // times out. - let spawned_fut = internal_runtime.spawn(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::remove timed out"; - Error::new(ErrorKind::Other, msg) - }) - }); - self.runtime.block_on(spawned_fut).expect("We should always finish")? + tokio::task::block_in_place(move || { + internal_runtime.block_on(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::remove timed out"; + Error::new(ErrorKind::Other, msg) + }) + })? + }) } fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { @@ -268,13 +269,14 @@ impl KVStoreSync for VssStore { let fut = async move { inner.list_internal(primary_namespace, secondary_namespace).await }; // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always // times out. - let spawned_fut = internal_runtime.spawn(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::list timed out"; - Error::new(ErrorKind::Other, msg) - }) - }); - self.runtime.block_on(spawned_fut).expect("We should always finish")? + tokio::task::block_in_place(move || { + internal_runtime.block_on(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::list timed out"; + Error::new(ErrorKind::Other, msg) + }) + })? + }) } } @@ -694,7 +696,6 @@ mod tests { use super::*; use crate::io::test_utils::do_read_write_remove_list_persist; - use crate::logger::Logger; #[test] fn vss_read_write_remove_list_persist() { @@ -704,11 +705,7 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let logger = Arc::new(Logger::new_log_facade()); - let runtime = Arc::new(Runtime::new(logger).unwrap()); - let vss_store = - VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime); - + let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); do_read_write_remove_list_persist(&vss_store); } @@ -720,10 +717,7 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let logger = Arc::new(Logger::new_log_facade()); - let runtime = Arc::new(Runtime::new(logger).unwrap()); - let vss_store = - VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime); + let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); do_read_write_remove_list_persist(&vss_store); drop(vss_store) @@ -737,10 +731,7 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let logger = Arc::new(Logger::new_log_facade()); - let runtime = Arc::new(Runtime::new(logger).unwrap()); - let vss_store = - VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime); + let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); let primary_namespace = "test_namespace"; let secondary_namespace = ""; From 2af810335c2cbdd60cf6680b81f3bd432f747cbb Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 6 Nov 2025 12:39:51 +0100 Subject: [PATCH 24/60] Drop redundant `tokio::timeout`s for VSS IO Now that we rely on `reqwest` v0.12.* retry logic as well as 
client-side timeouts, we can address the remaining TODOs here and simply drop the redundant `tokio::timeout`s we previously added as a safeguard to blocking tasks (even though in the worst cases we saw they never actually fired). --- src/io/vss_store.rs | 45 ++++----------------------------------------- 1 file changed, 4 insertions(+), 41 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 31b7d71cb..0416b0463 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -58,7 +58,6 @@ enum VssSchemaVersion { // We set this to a small number of threads that would still allow to make some progress if one // would hit a blocking case const INTERNAL_RUNTIME_WORKERS: usize = 2; -const VSS_IO_TIMEOUT: Duration = Duration::from_secs(5); /// A [`KVStoreSync`] implementation that writes to and reads from a [VSS](https://github.com/lightningdevkit/vss-server/blob/main/README.md) backend. pub struct VssStore { @@ -167,16 +166,7 @@ impl KVStoreSync for VssStore { let inner = Arc::clone(&self.inner); let fut = async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }; - // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always - // times out. - tokio::task::block_in_place(move || { - internal_runtime.block_on(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::read timed out"; - Error::new(ErrorKind::Other, msg) - }) - })? - }) + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } fn write( @@ -206,16 +196,7 @@ impl KVStoreSync for VssStore { ) .await }; - // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always - // times out. - tokio::task::block_in_place(move || { - internal_runtime.block_on(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::write timed out"; - Error::new(ErrorKind::Other, msg) - }) - })? - }) + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } fn remove( @@ -245,16 +226,7 @@ impl KVStoreSync for VssStore { ) .await }; - // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always - // times out. - tokio::task::block_in_place(move || { - internal_runtime.block_on(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::remove timed out"; - Error::new(ErrorKind::Other, msg) - }) - })? - }) + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { @@ -267,16 +239,7 @@ impl KVStoreSync for VssStore { let secondary_namespace = secondary_namespace.to_string(); let inner = Arc::clone(&self.inner); let fut = async move { inner.list_internal(primary_namespace, secondary_namespace).await }; - // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always - // times out. - tokio::task::block_in_place(move || { - internal_runtime.block_on(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::list timed out"; - Error::new(ErrorKind::Other, msg) - }) - })? 
- }) + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } } From 37ee256162dbd236e31e460a546c6a294b8f15bf Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 10 Nov 2025 16:31:05 +0100 Subject: [PATCH 25/60] Bump retries and timeouts considerably --- src/io/vss_store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 0416b0463..ffff2b833 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -99,9 +99,9 @@ impl VssStore { derive_data_encryption_and_obfuscation_keys(&vss_seed); let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) - .with_max_attempts(10) - .with_max_total_delay(Duration::from_secs(15)) - .with_max_jitter(Duration::from_millis(10)) + .with_max_attempts(100) + .with_max_total_delay(Duration::from_secs(180)) + .with_max_jitter(Duration::from_millis(100)) .skip_retry_on_error(Box::new(|e: &VssError| { matches!( e, From 37ed5c4a7b921d5b4238481847b54e49f1d21946 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 10 Nov 2025 16:44:26 +0100 Subject: [PATCH 26/60] Introduce two separate `VssClient`s for async/blocking contexts To avoid any blocking cross-runtime behavior that could arise from reusing a single client's TCP connections in different runtime contexts, we here split out the `VssStore` behavior to use one dedicated `VssClient` per context. I.e., we're now using two connections/connection pools and make sure only the `blocking_client` is used in `KVStoreSync` contexts, and `async_client` in `KVStore` contexts. --- src/io/vss_store.rs | 114 ++++++++++++++++++++++++++++++-------------- 1 file changed, 77 insertions(+), 37 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index ffff2b833..f9ce602f0 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -98,24 +98,22 @@ impl VssStore { let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); - let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) - .with_max_attempts(100) - .with_max_total_delay(Duration::from_secs(180)) - .with_max_jitter(Duration::from_millis(100)) - .skip_retry_on_error(Box::new(|e: &VssError| { - matches!( - e, - VssError::NoSuchKeyError(..) - | VssError::InvalidRequestError(..) - | VssError::ConflictError(..) 
- ) - }) as _); - let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); + let sync_retry_policy = retry_policy(); + let blocking_client = VssClient::new_with_headers( + base_url.clone(), + sync_retry_policy, + header_provider.clone(), + ); + + let async_retry_policy = retry_policy(); + let async_client = + VssClient::new_with_headers(base_url, async_retry_policy, header_provider); let inner = Arc::new(VssStoreInner::new( schema_version, - client, + blocking_client, + async_client, store_id, data_encryption_key, key_obfuscator, @@ -164,8 +162,11 @@ impl KVStoreSync for VssStore { let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); let inner = Arc::clone(&self.inner); - let fut = - async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }; + let fut = async move { + inner + .read_internal(&inner.blocking_client, primary_namespace, secondary_namespace, key) + .await + }; tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } @@ -186,6 +187,7 @@ impl KVStoreSync for VssStore { let fut = async move { inner .write_internal( + &inner.blocking_client, inner_lock_ref, locking_key, version, @@ -216,6 +218,7 @@ impl KVStoreSync for VssStore { let fut = async move { inner .remove_internal( + &inner.blocking_client, inner_lock_ref, locking_key, version, @@ -238,7 +241,11 @@ impl KVStoreSync for VssStore { let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let inner = Arc::clone(&self.inner); - let fut = async move { inner.list_internal(primary_namespace, secondary_namespace).await }; + let fut = async move { + inner + .list_internal(&inner.blocking_client, primary_namespace, secondary_namespace) + .await + }; tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } } @@ -251,9 +258,11 @@ impl KVStore for VssStore { let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); let inner = Arc::clone(&self.inner); - Box::pin( - async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }, - ) + Box::pin(async move { + inner + .read_internal(&inner.async_client, primary_namespace, secondary_namespace, key) + .await + }) } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, @@ -267,6 +276,7 @@ impl KVStore for VssStore { Box::pin(async move { inner .write_internal( + &inner.async_client, inner_lock_ref, locking_key, version, @@ -290,6 +300,7 @@ impl KVStore for VssStore { Box::pin(async move { inner .remove_internal( + &inner.async_client, inner_lock_ref, locking_key, version, @@ -307,7 +318,9 @@ impl KVStore for VssStore { let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let inner = Arc::clone(&self.inner); - Box::pin(async move { inner.list_internal(primary_namespace, secondary_namespace).await }) + Box::pin(async move { + inner.list_internal(&inner.async_client, primary_namespace, secondary_namespace).await + }) } } @@ -320,7 +333,10 @@ impl Drop for VssStore { struct VssStoreInner { schema_version: VssSchemaVersion, - client: VssClient, + blocking_client: VssClient, + // A secondary client that will only be used for async persistence via `KVStore`, to ensure TCP + // connections aren't shared between our outer and the internal runtime. 
+ async_client: VssClient, store_id: String, data_encryption_key: [u8; 32], key_obfuscator: KeyObfuscator, @@ -332,14 +348,16 @@ struct VssStoreInner { impl VssStoreInner { pub(crate) fn new( - schema_version: VssSchemaVersion, client: VssClient, store_id: String, + schema_version: VssSchemaVersion, blocking_client: VssClient, + async_client: VssClient, store_id: String, data_encryption_key: [u8; 32], key_obfuscator: KeyObfuscator, ) -> Self { let locks = Mutex::new(HashMap::new()); let pending_lazy_deletes = Mutex::new(Vec::new()); Self { schema_version, - client, + blocking_client, + async_client, store_id, data_encryption_key, key_obfuscator, @@ -405,7 +423,8 @@ impl VssStoreInner { } async fn list_all_keys( - &self, primary_namespace: &str, secondary_namespace: &str, + &self, client: &VssClient, primary_namespace: &str, + secondary_namespace: &str, ) -> io::Result> { let mut page_token = None; let mut keys = vec![]; @@ -418,7 +437,7 @@ impl VssStoreInner { page_size: None, }; - let response = self.client.list_key_versions(&request).await.map_err(|e| { + let response = client.list_key_versions(&request).await.map_err(|e| { let msg = format!( "Failed to list keys in {}/{}: {}", primary_namespace, secondary_namespace, e @@ -435,13 +454,14 @@ impl VssStoreInner { } async fn read_internal( - &self, primary_namespace: String, secondary_namespace: String, key: String, + &self, client: &VssClient, primary_namespace: String, + secondary_namespace: String, key: String, ) -> io::Result> { check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); let request = GetObjectRequest { store_id: self.store_id.clone(), key: store_key.clone() }; - let resp = self.client.get_object(&request).await.map_err(|e| { + let resp = client.get_object(&request).await.map_err(|e| { let msg = format!( "Failed to read from key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e @@ -470,8 +490,9 @@ impl VssStoreInner { } async fn write_internal( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + &self, client: &VssClient, inner_lock_ref: Arc>, + locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String, + key: String, buf: Vec, ) -> io::Result<()> { check_namespace_key_validity( &primary_namespace, @@ -505,7 +526,7 @@ impl VssStoreInner { }; self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { - self.client.put_object(&request).await.map_err(|e| { + client.put_object(&request).await.map_err(|e| { // Restore delete items so they'll be retried on next write. 
if !delete_items.is_empty() {
 					self.pending_lazy_deletes.lock().unwrap().extend(delete_items);
@@ -524,8 +545,9 @@ impl VssStoreInner {
 	}

 	async fn remove_internal(
-		&self, inner_lock_ref: Arc>, locking_key: String, version: u64,
-		primary_namespace: String, secondary_namespace: String, key: String, lazy: bool,
+		&self, client: &VssClient, inner_lock_ref: Arc>,
+		locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String,
+		key: String, lazy: bool,
 	) -> io::Result<()> {
 		check_namespace_key_validity(
 			&primary_namespace,
@@ -548,7 +570,7 @@
 		let request =
 			DeleteObjectRequest { store_id: self.store_id.clone(), key_value: Some(key_value) };

-		self.client.delete_object(&request).await.map_err(|e| {
+		client.delete_object(&request).await.map_err(|e| {
 			let msg = format!(
 				"Failed to delete key {}/{}/{}: {}",
 				primary_namespace, secondary_namespace, key, e
@@ -562,12 +584,15 @@
 	}

 	async fn list_internal(
-		&self, primary_namespace: String, secondary_namespace: String,
+		&self, client: &VssClient, primary_namespace: String,
+		secondary_namespace: String,
 	) -> io::Result> {
 		check_namespace_key_validity(&primary_namespace, &secondary_namespace, None, "list")?;

-		let keys =
-			self.list_all_keys(&primary_namespace, &secondary_namespace).await.map_err(|e| {
+		let keys = self
+			.list_all_keys(client, &primary_namespace, &secondary_namespace)
+			.await
+			.map_err(|e| {
 				let msg = format!(
 					"Failed to retrieve keys in namespace: {}/{} : {}",
 					primary_namespace, secondary_namespace, e
@@ -636,6 +661,21 @@ fn derive_data_encryption_and_obfuscation_keys(vss_seed: &[u8; 32]) -> ([u8; 32]
 	(k1, k2)
 }

+fn retry_policy() -> CustomRetryPolicy {
+	ExponentialBackoffRetryPolicy::new(Duration::from_millis(10))
+		.with_max_attempts(100)
+		.with_max_total_delay(Duration::from_secs(180))
+		.with_max_jitter(Duration::from_millis(100))
+		.skip_retry_on_error(Box::new(|e: &VssError| {
+			matches!(
+				e,
+				VssError::NoSuchKeyError(..)
+					| VssError::InvalidRequestError(..)
+					| VssError::ConflictError(..)
+			)
+		}) as _)
+}
+
 /// A source for generating entropy/randomness using [`rand`].
 pub(crate) struct RandEntropySource;

From 20a93c5758862307e8a2d7d1feb45238fe6db076 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 12 Nov 2025 11:31:12 +0100
Subject: [PATCH 27/60] Determine VSS schema version at startup

Since we just made some breaking changes to how exactly we persist data
via VSS (now using an `aad` that commits to the key and also obfuscating
namespaces), we have to detect which schema version we're on to ensure
backwards compatibility.

To this end, we here start reading a persisted `vss_schema_version` key
in `VssStore::new`. If it is present, we just return the encoded value
(right now that can only be V1). If it is not present, it can either
mean we're running for the first time *or* we're on V0, which we
determine by checking whether anything related to the `bdk_wallet`
descriptors is present in the store. If we're running for the first
time, we also persist the schema version to spare us these rather
inefficient steps on subsequent startups.
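The resulting decision tree boils down to the following (a compilable
sketch over hypothetical inputs; the real implementation below
additionally decrypts the stored value and persists V1 on a first run):

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    enum SchemaVersion {
        V0,
        V1,
    }

    // `stored_version` is the (optional) result of reading the
    // `vss_schema_version` key; `bdk_wallet_keys_present` is the result of
    // listing keys under the unobfuscated `bdk_wallet#` prefix.
    fn detect_schema_version(
        stored_version: Option<SchemaVersion>, bdk_wallet_keys_present: bool,
    ) -> SchemaVersion {
        match stored_version {
            // An explicit version was persisted (currently this can only be V1).
            Some(version) => version,
            // No version key, but wallet data exists: a pre-migration V0 store.
            None if bdk_wallet_keys_present => SchemaVersion::V0,
            // Neither is present: we're running for the first time, start on V1.
            None => SchemaVersion::V1,
        }
    }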
--- src/builder.rs | 7 +- src/io/vss_store.rs | 165 +++++++++++++++++++++++++++++++++++++++----- 2 files changed, 152 insertions(+), 20 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 59f5b9b46..b45f03f6d 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -731,7 +731,12 @@ impl NodeBuilder { let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes(); - let vss_store = VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider); + let vss_store = + VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider).map_err(|e| { + log_error!(logger, "Failed to setup VSS store: {}", e); + BuildError::KVStoreSetupFailed + })?; + build_with_store_internal( config, self.chain_data_source_config.as_ref(), diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index f9ce602f0..2906b89ca 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -17,8 +17,10 @@ use std::time::Duration; use bdk_chain::Merge; use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; +use lightning::impl_writeable_tlv_based_enum; use lightning::io::{self, Error, ErrorKind}; use lightning::util::persist::{KVStore, KVStoreSync}; +use lightning::util::ser::{Readable, Writeable}; use prost::Message; use rand::RngCore; use vss_client::client::VssClient; @@ -55,6 +57,13 @@ enum VssSchemaVersion { V1, } +impl_writeable_tlv_based_enum!(VssSchemaVersion, + (0, V0) => {}, + (1, V1) => {}, +); + +const VSS_SCHEMA_VERSION_KEY: &str = "vss_schema_version"; + // We set this to a small number of threads that would still allow to make some progress if one // would hit a blocking case const INTERNAL_RUNTIME_WORKERS: usize = 2; @@ -78,23 +87,20 @@ impl VssStore { pub(crate) fn new( base_url: String, store_id: String, vss_seed: [u8; 32], header_provider: Arc, - ) -> Self { + ) -> io::Result { let next_version = AtomicU64::new(1); - let internal_runtime = Some( - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .thread_name_fn(|| { - static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0); - let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst); - format!("ldk-node-vss-runtime-{}", id) - }) - .worker_threads(INTERNAL_RUNTIME_WORKERS) - .max_blocking_threads(INTERNAL_RUNTIME_WORKERS) - .build() - .unwrap(), - ); + let internal_runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name_fn(|| { + static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0); + let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst); + format!("ldk-node-vss-runtime-{}", id) + }) + .worker_threads(INTERNAL_RUNTIME_WORKERS) + .max_blocking_threads(INTERNAL_RUNTIME_WORKERS) + .build() + .unwrap(); - let schema_version = VssSchemaVersion::V0; let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); @@ -106,6 +112,19 @@ impl VssStore { header_provider.clone(), ); + let runtime_handle = internal_runtime.handle(); + let schema_version = tokio::task::block_in_place(|| { + runtime_handle.block_on(async { + determine_and_write_schema_version( + &blocking_client, + &store_id, + data_encryption_key, + &key_obfuscator, + ) + .await + }) + })?; + let async_retry_policy = retry_policy(); let async_client = VssClient::new_with_headers(base_url, async_retry_policy, header_provider); @@ -119,7 +138,7 @@ impl VssStore { key_obfuscator, )); - Self { inner, next_version, internal_runtime } + Ok(Self { inner, next_version, internal_runtime: Some(internal_runtime) }) } // Same logic as for the obfuscated keys 
below, but just for locking, using the plaintext keys @@ -676,6 +695,111 @@ fn retry_policy() -> CustomRetryPolicy { }) as _) } +async fn determine_and_write_schema_version( + client: &VssClient, store_id: &String, data_encryption_key: [u8; 32], + key_obfuscator: &KeyObfuscator, +) -> io::Result { + // Build the obfuscated `vss_schema_version` key. + let obfuscated_prefix = key_obfuscator.obfuscate(&format! {"{}#{}", "", ""}); + let obfuscated_key = key_obfuscator.obfuscate(VSS_SCHEMA_VERSION_KEY); + let store_key = format!("{}#{}", obfuscated_prefix, obfuscated_key); + + // Try to read the stored schema version. + let request = GetObjectRequest { store_id: store_id.clone(), key: store_key.clone() }; + let resp = match client.get_object(&request).await { + Ok(resp) => Some(resp), + Err(VssError::NoSuchKeyError(..)) => { + // The value is not set. + None + }, + Err(e) => { + let msg = format!("Failed to read schema version: {}", e); + return Err(Error::new(ErrorKind::Other, msg)); + }, + }; + + if let Some(resp) = resp { + // The schema version was present, so just decrypt the stored data. + + // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise + // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] + let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + + let storable_builder = StorableBuilder::new(RandEntropySource); + // Schema version was added starting with V1, so if set at all, we use the key as `aad` + let aad = store_key.as_bytes(); + let decrypted = storable_builder + .deconstruct(storable, &data_encryption_key, aad) + .map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })? + .0; + + let schema_version: VssSchemaVersion = Readable::read(&mut io::Cursor::new(decrypted)) + .map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + Ok(schema_version) + } else { + // The schema version wasn't present, this either means we're running for the first time *or* it's V0 pre-migration (predating writing of the schema version). + + // Check if any `bdk_wallet` data was written by listing keys under the respective + // (unobfuscated) prefix. + const V0_BDK_WALLET_PREFIX: &str = "bdk_wallet#"; + let request = ListKeyVersionsRequest { + store_id: store_id.clone(), + key_prefix: Some(V0_BDK_WALLET_PREFIX.to_string()), + page_token: None, + page_size: None, + }; + + let response = client.list_key_versions(&request).await.map_err(|e| { + let msg = format!("Failed to determine schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + + let wallet_data_present = !response.key_versions.is_empty(); + if wallet_data_present { + // If the wallet data is present, it means we're not running for the first time. + Ok(VssSchemaVersion::V0) + } else { + // We're running for the first time, write the schema version to save unnecessary IOps + // on future startup. 
+ let schema_version = VssSchemaVersion::V1; + let encoded_version = schema_version.encode(); + + let storable_builder = StorableBuilder::new(RandEntropySource); + let vss_version = -1; + let aad = store_key.as_bytes(); + let storable = + storable_builder.build(encoded_version, vss_version, &data_encryption_key, aad); + + let request = PutObjectRequest { + store_id: store_id.clone(), + global_version: None, + transaction_items: vec![KeyValue { + key: store_key, + version: vss_version, + value: storable.encode_to_vec(), + }], + delete_items: vec![], + }; + + client.put_object(&request).await.map_err(|e| { + let msg = format!("Failed to write schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + + Ok(schema_version) + } + } +} + /// A source for generating entropy/randomness using [`rand`]. pub(crate) struct RandEntropySource; @@ -708,7 +832,8 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); do_read_write_remove_list_persist(&vss_store); } @@ -720,7 +845,8 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); do_read_write_remove_list_persist(&vss_store); drop(vss_store) @@ -734,7 +860,8 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); let primary_namespace = "test_namespace"; let secondary_namespace = ""; From 8e5503c89707d0c48ef4d5b820b5bcaedfb8b4a9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 13 Nov 2025 11:07:44 +0100 Subject: [PATCH 28/60] Add test ensuring backwards compatibility with VSS schema `v0` We add a test case that ensures that a node started and persisted on LDK Node v0.6.2 can still be successfully started with the new schema changes. 
Co-authored by Claude AI --- Cargo.toml | 3 ++ tests/integration_tests_vss.rs | 75 ++++++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 8e2f04447..34b16994f 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,6 +91,9 @@ clightningrpc = { version = "0.3.0-beta.8", default-features = false } lnd_grpc_rust = { version = "2.10.0", default-features = false } tokio = { version = "1.37", features = ["fs"] } +[target.'cfg(vss_test)'.dev-dependencies] +ldk-node-062 = { package = "ldk-node", version = "=0.6.2" } + [build-dependencies] uniffi = { version = "0.28.3", features = ["build"], optional = true } diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 93f167dae..03b3c8c06 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -12,6 +12,7 @@ mod common; use std::collections::HashMap; use ldk_node::Builder; +use rand::{rng, Rng}; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle_with_vss_store() { @@ -55,3 +56,77 @@ async fn channel_full_cycle_with_vss_store() { ) .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn vss_v0_schema_backwards_compatibility() { + let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); + + let rand_suffix: String = + (0..7).map(|_| rng().sample(rand::distr::Alphanumeric) as char).collect(); + let store_id = format!("v0_compat_test_{}", rand_suffix); + let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); + let seed_bytes = [42u8; 64]; + + // Setup a v0.6.2 `Node` persisted with the v0 scheme. + let (old_balance, old_node_id) = { + let mut builder_old = ldk_node_062::Builder::new(); + builder_old.set_network(bitcoin::Network::Regtest); + builder_old.set_storage_dir_path(storage_path.clone()); + builder_old.set_entropy_seed_bytes(seed_bytes); + builder_old.set_chain_source_esplora(esplora_url.clone(), None); + let node_old = builder_old + .build_with_vss_store_and_fixed_headers( + vss_base_url.clone(), + store_id.clone(), + HashMap::new(), + ) + .unwrap(); + + node_old.start().unwrap(); + let addr_old = node_old.onchain_payment().new_address().unwrap(); + common::premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_old], + bitcoin::Amount::from_sat(100_000), + ) + .await; + node_old.sync_wallets().unwrap(); + + let balance = node_old.list_balances().spendable_onchain_balance_sats; + assert!(balance > 0); + let node_id = node_old.node_id(); + + // Workaround necessary as v0.6.2's VSS runtime wasn't dropsafe in a tokio context. + tokio::task::block_in_place(move || { + node_old.stop().unwrap(); + drop(node_old); + }); + + (balance, node_id) + }; + + // Now ensure we can still reinit from the same backend. 
+ let mut builder_new = Builder::new(); + builder_new.set_network(bitcoin::Network::Regtest); + builder_new.set_storage_dir_path(storage_path); + builder_new.set_entropy_seed_bytes(seed_bytes); + builder_new.set_chain_source_esplora(esplora_url, None); + + let node_new = builder_new + .build_with_vss_store_and_fixed_headers(vss_base_url, store_id, HashMap::new()) + .unwrap(); + + node_new.start().unwrap(); + node_new.sync_wallets().unwrap(); + + let new_balance = node_new.list_balances().spendable_onchain_balance_sats; + let new_node_id = node_new.node_id(); + + assert_eq!(old_node_id, new_node_id); + assert_eq!(old_balance, new_balance); + + node_new.stop().unwrap(); +} From d2153f2c50b3294978df4af87c461c4b6f92cf42 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 13 Nov 2025 11:50:30 +0100 Subject: [PATCH 29/60] Add simple test ensuring we can restart from a VSS backend This is close to the backwards compatibility test we just added for v0, now just making sure we can actually read the data we persisted with our current (V1+) code. Co-authored by Claude AI --- tests/integration_tests_vss.rs | 66 ++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 03b3c8c06..3b384ec45 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -130,3 +130,69 @@ async fn vss_v0_schema_backwards_compatibility() { node_new.stop().unwrap(); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn vss_node_restart() { + let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); + + let rand_suffix: String = + (0..7).map(|_| rng().sample(rand::distr::Alphanumeric) as char).collect(); + let store_id = format!("restart_test_{}", rand_suffix); + let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); + let seed_bytes = [42u8; 64]; + + // Setup initial node and fund it. + let (expected_balance_sats, expected_node_id) = { + let mut builder = Builder::new(); + builder.set_network(bitcoin::Network::Regtest); + builder.set_storage_dir_path(storage_path.clone()); + builder.set_entropy_seed_bytes(seed_bytes); + builder.set_chain_source_esplora(esplora_url.clone(), None); + let node = builder + .build_with_vss_store_and_fixed_headers( + vss_base_url.clone(), + store_id.clone(), + HashMap::new(), + ) + .unwrap(); + + node.start().unwrap(); + let addr = node.onchain_payment().new_address().unwrap(); + common::premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr], + bitcoin::Amount::from_sat(100_000), + ) + .await; + node.sync_wallets().unwrap(); + + let balance = node.list_balances().spendable_onchain_balance_sats; + assert!(balance > 0); + let node_id = node.node_id(); + + node.stop().unwrap(); + (balance, node_id) + }; + + // Verify node can be restarted from VSS backend. 
+	let mut builder = Builder::new();
+	builder.set_network(bitcoin::Network::Regtest);
+	builder.set_storage_dir_path(storage_path);
+	builder.set_entropy_seed_bytes(seed_bytes);
+	builder.set_chain_source_esplora(esplora_url, None);
+
+	let node = builder
+		.build_with_vss_store_and_fixed_headers(vss_base_url, store_id, HashMap::new())
+		.unwrap();
+
+	node.start().unwrap();
+	node.sync_wallets().unwrap();
+
+	assert_eq!(expected_node_id, node.node_id());
+	assert_eq!(expected_balance_sats, node.list_balances().spendable_onchain_balance_sats);
+
+	node.stop().unwrap();
+}

From 5f1a872efa8b831a3b35bfdd2b6bd749fc7113a7 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Tue, 18 Nov 2025 11:13:19 +0100
Subject: [PATCH 30/60] Prefactor: Move `ChainSource` creation before `Wallet`
 creation

In the following commits we will use the chain source to poll a best
tip before initializing the listener objects. As a prefactor, we here
move the creation of our onchain wallet to after the creation of the
chain source, which in turn means we'll need to use the same pattern as
for the other listeners, i.e., not giving the wallet reference to
`ChainSource` on creation but rather handing it in when it's being used
at runtime.
---
 src/builder.rs        | 120 ++++++++++++++++++++----------------------
 src/chain/bitcoind.rs |  54 +++++++++----------
 src/chain/electrum.rs |  20 +++----
 src/chain/esplora.rs  |  21 ++++----
 src/chain/mod.rs      |  57 +++++++++++---------
 src/lib.rs            |  19 +++++--
 6 files changed, 150 insertions(+), 141 deletions(-)

diff --git a/src/builder.rs b/src/builder.rs
index b45f03f6d..98650aa1a 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -1178,54 +1178,6 @@ fn build_with_store_internal(
 		}
 	},
 };
Expected {} but got {}", - expected, - loaded - ); - BuildError::NetworkMismatch - }, - _ => { - log_error!(logger, "Failed to set up wallet: {}", e); - BuildError::WalletSetupFailed - }, - })?; - let bdk_wallet = match wallet_opt { - Some(wallet) => wallet, - None => BdkWallet::create(descriptor, change_descriptor) - .network(config.network) - .create_wallet(&mut wallet_persister) - .map_err(|e| { - log_error!(logger, "Failed to set up wallet: {}", e); - BuildError::WalletSetupFailed - })?, - }; - let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger))); let fee_estimator = Arc::new(OnchainFeeEstimator::new()); @@ -1243,16 +1195,6 @@ fn build_with_store_internal( }, }; - let wallet = Arc::new(Wallet::new( - bdk_wallet, - wallet_persister, - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&payment_store), - Arc::clone(&config), - Arc::clone(&logger), - )); - let chain_source = match chain_data_source_config { Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }) => { let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); @@ -1260,7 +1202,6 @@ fn build_with_store_internal( server_url.clone(), headers.clone(), sync_config, - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), @@ -1274,7 +1215,6 @@ fn build_with_store_internal( Arc::new(ChainSource::new_electrum( server_url.clone(), sync_config, - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), @@ -1295,7 +1235,6 @@ fn build_with_store_internal( *rpc_port, rpc_user.clone(), rpc_password.clone(), - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), @@ -1309,7 +1248,6 @@ fn build_with_store_internal( *rpc_port, rpc_user.clone(), rpc_password.clone(), - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), @@ -1327,7 +1265,6 @@ fn build_with_store_internal( server_url.clone(), HashMap::new(), sync_config, - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), @@ -1338,6 +1275,63 @@ fn build_with_store_internal( }, }; + // Initialize the on-chain wallet and chain access + let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { + log_error!(logger, "Failed to derive master secret: {}", e); + BuildError::InvalidSeedBytes + })?; + + let descriptor = Bip84(xprv, KeychainKind::External); + let change_descriptor = Bip84(xprv, KeychainKind::Internal); + let mut wallet_persister = + KVStoreWalletPersister::new(Arc::clone(&kv_store), Arc::clone(&logger)); + let wallet_opt = BdkWallet::load() + .descriptor(KeychainKind::External, Some(descriptor.clone())) + .descriptor(KeychainKind::Internal, Some(change_descriptor.clone())) + .extract_keys() + .check_network(config.network) + .load_wallet(&mut wallet_persister) + .map_err(|e| match e { + bdk_wallet::LoadWithPersistError::InvalidChangeSet( + bdk_wallet::LoadError::Mismatch(bdk_wallet::LoadMismatch::Network { + loaded, + expected, + }), + ) => { + log_error!( + logger, + "Failed to setup wallet: Networks do not match. 
Expected {} but got {}", + expected, + loaded + ); + BuildError::NetworkMismatch + }, + _ => { + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed + }, + })?; + let bdk_wallet = match wallet_opt { + Some(wallet) => wallet, + None => BdkWallet::create(descriptor, change_descriptor) + .network(config.network) + .create_wallet(&mut wallet_persister) + .map_err(|e| { + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed + })?, + }; + + let wallet = Arc::new(Wallet::new( + bdk_wallet, + wallet_persister, + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&payment_store), + Arc::clone(&config), + Arc::clone(&logger), + )); + // Initialize the KeysManager let cur_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).map_err(|e| { log_error!(logger, "Failed to get current time: {}", e); diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 4b7cd588f..4d7a4a0fe 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -47,7 +47,6 @@ pub(super) struct BitcoindChainSource { api_client: Arc, header_cache: tokio::sync::Mutex, latest_chain_tip: RwLock>, - onchain_wallet: Arc, wallet_polling_status: Mutex, fee_estimator: Arc, kv_store: Arc, @@ -59,9 +58,8 @@ pub(super) struct BitcoindChainSource { impl BitcoindChainSource { pub(crate) fn new_rpc( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - kv_store: Arc, config: Arc, logger: Arc, - node_metrics: Arc>, + fee_estimator: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, ) -> Self { let api_client = Arc::new(BitcoindClient::new_rpc( rpc_host.clone(), @@ -77,7 +75,6 @@ impl BitcoindChainSource { api_client, header_cache, latest_chain_tip, - onchain_wallet, wallet_polling_status, fee_estimator, kv_store, @@ -89,9 +86,9 @@ impl BitcoindChainSource { pub(crate) fn new_rest( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - kv_store: Arc, config: Arc, rest_client_config: BitcoindRestClientConfig, - logger: Arc, node_metrics: Arc>, + fee_estimator: Arc, kv_store: Arc, config: Arc, + rest_client_config: BitcoindRestClientConfig, logger: Arc, + node_metrics: Arc>, ) -> Self { let api_client = Arc::new(BitcoindClient::new_rest( rest_client_config.rest_host, @@ -111,7 +108,6 @@ impl BitcoindChainSource { header_cache, latest_chain_tip, wallet_polling_status, - onchain_wallet, fee_estimator, kv_store, config, @@ -126,8 +122,8 @@ impl BitcoindChainSource { pub(super) async fn continuously_sync_wallets( &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, - channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, + onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, ) { // First register for the wallet polling status to make sure `Node::sync_wallets` calls // wait on the result before proceeding. 
@@ -155,14 +151,10 @@ impl BitcoindChainSource { let channel_manager_best_block_hash = channel_manager.current_best_block().block_hash; let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; - let onchain_wallet_best_block_hash = - self.onchain_wallet.current_best_block().block_hash; + let onchain_wallet_best_block_hash = onchain_wallet.current_best_block().block_hash; let mut chain_listeners = vec![ - ( - onchain_wallet_best_block_hash, - &*self.onchain_wallet as &(dyn Listen + Send + Sync), - ), + (onchain_wallet_best_block_hash, &*onchain_wallet as &(dyn Listen + Send + Sync)), (channel_manager_best_block_hash, &*channel_manager as &(dyn Listen + Send + Sync)), (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)), ]; @@ -307,6 +299,7 @@ impl BitcoindChainSource { return; } _ = self.poll_and_update_listeners( + Arc::clone(&onchain_wallet), Arc::clone(&channel_manager), Arc::clone(&chain_monitor), Arc::clone(&output_sweeper) @@ -337,8 +330,8 @@ impl BitcoindChainSource { } pub(super) async fn poll_and_update_listeners( - &self, channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, + &self, onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { let receiver_res = { let mut status_lock = self.wallet_polling_status.lock().unwrap(); @@ -355,7 +348,12 @@ impl BitcoindChainSource { } let res = self - .poll_and_update_listeners_inner(channel_manager, chain_monitor, output_sweeper) + .poll_and_update_listeners_inner( + onchain_wallet, + channel_manager, + chain_monitor, + output_sweeper, + ) .await; self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); @@ -364,8 +362,8 @@ impl BitcoindChainSource { } async fn poll_and_update_listeners_inner( - &self, channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, + &self, onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { let latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); let chain_tip = if let Some(tip) = latest_chain_tip_opt { @@ -386,7 +384,7 @@ impl BitcoindChainSource { let mut locked_header_cache = self.header_cache.lock().await; let chain_poller = ChainPoller::new(Arc::clone(&self.api_client), self.config.network); let chain_listener = ChainListener { - onchain_wallet: Arc::clone(&self.onchain_wallet), + onchain_wallet: Arc::clone(&onchain_wallet), channel_manager: Arc::clone(&channel_manager), chain_monitor: Arc::clone(&chain_monitor), output_sweeper, @@ -422,7 +420,7 @@ impl BitcoindChainSource { let cur_height = channel_manager.current_best_block().height; let now = SystemTime::now(); - let bdk_unconfirmed_txids = self.onchain_wallet.get_unconfirmed_txids(); + let bdk_unconfirmed_txids = onchain_wallet.get_unconfirmed_txids(); match self .api_client .get_updated_mempool_transactions(cur_height, bdk_unconfirmed_txids) @@ -436,11 +434,11 @@ impl BitcoindChainSource { evicted_txids.len(), now.elapsed().unwrap().as_millis() ); - self.onchain_wallet - .apply_mempool_txs(unconfirmed_txs, evicted_txids) - .unwrap_or_else(|e| { + onchain_wallet.apply_mempool_txs(unconfirmed_txs, evicted_txids).unwrap_or_else( + |e| { log_error!(self.logger, "Failed to apply mempool transactions: {:?}", e); - }); + }, + ); }, Err(e) => { log_error!(self.logger, "Failed to poll for mempool transactions: {:?}", e); diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index dbd0d9f7f..9e05dfaee 100644 --- a/src/chain/electrum.rs +++ 
b/src/chain/electrum.rs @@ -47,7 +47,6 @@ pub(super) struct ElectrumChainSource { server_url: String, pub(super) sync_config: ElectrumSyncConfig, electrum_runtime_status: RwLock, - onchain_wallet: Arc, onchain_wallet_sync_status: Mutex, lightning_wallet_sync_status: Mutex, fee_estimator: Arc, @@ -59,7 +58,7 @@ pub(super) struct ElectrumChainSource { impl ElectrumChainSource { pub(super) fn new( - server_url: String, sync_config: ElectrumSyncConfig, onchain_wallet: Arc, + server_url: String, sync_config: ElectrumSyncConfig, fee_estimator: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, ) -> Self { @@ -70,7 +69,6 @@ impl ElectrumChainSource { server_url, sync_config, electrum_runtime_status, - onchain_wallet, onchain_wallet_sync_status, lightning_wallet_sync_status, fee_estimator, @@ -94,7 +92,9 @@ impl ElectrumChainSource { self.electrum_runtime_status.write().unwrap().stop(); } - pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + pub(crate) async fn sync_onchain_wallet( + &self, onchain_wallet: Arc, + ) -> Result<(), Error> { let receiver_res = { let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); status_lock.register_or_subscribe_pending_sync() @@ -108,14 +108,14 @@ impl ElectrumChainSource { })?; } - let res = self.sync_onchain_wallet_inner().await; + let res = self.sync_onchain_wallet_inner(onchain_wallet).await; self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); res } - async fn sync_onchain_wallet_inner(&self) -> Result<(), Error> { + async fn sync_onchain_wallet_inner(&self, onchain_wallet: Arc) -> Result<(), Error> { let electrum_client: Arc = if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { Arc::clone(client) @@ -133,7 +133,7 @@ impl ElectrumChainSource { let apply_wallet_update = |update_res: Result, now: Instant| match update_res { - Ok(update) => match self.onchain_wallet.apply_update(update) { + Ok(update) => match onchain_wallet.apply_update(update) { Ok(()) => { log_info!( self.logger, @@ -160,10 +160,10 @@ impl ElectrumChainSource { Err(e) => Err(e), }; - let cached_txs = self.onchain_wallet.get_cached_txs(); + let cached_txs = onchain_wallet.get_cached_txs(); let res = if incremental_sync { - let incremental_sync_request = self.onchain_wallet.get_incremental_sync_request(); + let incremental_sync_request = onchain_wallet.get_incremental_sync_request(); let incremental_sync_fut = electrum_client .get_incremental_sync_wallet_update(incremental_sync_request, cached_txs); @@ -171,7 +171,7 @@ impl ElectrumChainSource { let update_res = incremental_sync_fut.await.map(|u| u.into()); apply_wallet_update(update_res, now) } else { - let full_scan_request = self.onchain_wallet.get_full_scan_request(); + let full_scan_request = onchain_wallet.get_full_scan_request(); let full_scan_fut = electrum_client.get_full_scan_wallet_update(full_scan_request, cached_txs); let now = Instant::now(); diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index be6f2fb86..f6f313955 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -34,7 +34,6 @@ use crate::{Error, NodeMetrics}; pub(super) struct EsploraChainSource { pub(super) sync_config: EsploraSyncConfig, esplora_client: EsploraAsyncClient, - onchain_wallet: Arc, onchain_wallet_sync_status: Mutex, tx_sync: Arc>>, lightning_wallet_sync_status: Mutex, @@ -48,9 +47,8 @@ pub(super) struct EsploraChainSource { impl EsploraChainSource { pub(crate) fn new( server_url: String, headers: HashMap, sync_config: 
EsploraSyncConfig, - onchain_wallet: Arc, fee_estimator: Arc, - kv_store: Arc, config: Arc, logger: Arc, - node_metrics: Arc>, + fee_estimator: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, ) -> Self { let mut client_builder = esplora_client::Builder::new(&server_url); client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); @@ -68,7 +66,6 @@ impl EsploraChainSource { Self { sync_config, esplora_client, - onchain_wallet, onchain_wallet_sync_status, tx_sync, lightning_wallet_sync_status, @@ -80,7 +77,9 @@ impl EsploraChainSource { } } - pub(super) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + pub(super) async fn sync_onchain_wallet( + &self, onchain_wallet: Arc, + ) -> Result<(), Error> { let receiver_res = { let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); status_lock.register_or_subscribe_pending_sync() @@ -94,14 +93,14 @@ impl EsploraChainSource { })?; } - let res = self.sync_onchain_wallet_inner().await; + let res = self.sync_onchain_wallet_inner(onchain_wallet).await; self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); res } - async fn sync_onchain_wallet_inner(&self) -> Result<(), Error> { + async fn sync_onchain_wallet_inner(&self, onchain_wallet: Arc) -> Result<(), Error> { // If this is our first sync, do a full scan with the configured gap limit. // Otherwise just do an incremental sync. let incremental_sync = @@ -112,7 +111,7 @@ impl EsploraChainSource { let now = Instant::now(); match $sync_future.await { Ok(res) => match res { - Ok(update) => match self.onchain_wallet.apply_update(update) { + Ok(update) => match onchain_wallet.apply_update(update) { Ok(()) => { log_info!( self.logger, @@ -182,14 +181,14 @@ impl EsploraChainSource { } if incremental_sync { - let sync_request = self.onchain_wallet.get_incremental_sync_request(); + let sync_request = onchain_wallet.get_incremental_sync_request(); let wallet_sync_timeout_fut = tokio::time::timeout( Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), self.esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), ); get_and_apply_wallet_update!(wallet_sync_timeout_fut) } else { - let full_scan_request = self.onchain_wallet.get_full_scan_request(); + let full_scan_request = onchain_wallet.get_full_scan_request(); let wallet_sync_timeout_fut = tokio::time::timeout( Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), self.esplora_client.full_scan( diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 309d60eab..9c7ddd817 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -99,15 +99,14 @@ enum ChainSourceKind { impl ChainSource { pub(crate) fn new_esplora( server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, - onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - logger: Arc, node_metrics: Arc>, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, ) -> Self { let esplora_chain_source = EsploraChainSource::new( server_url, headers, sync_config, - onchain_wallet, fee_estimator, kv_store, config, @@ -119,7 +118,7 @@ impl ChainSource { } pub(crate) fn new_electrum( - server_url: String, sync_config: ElectrumSyncConfig, onchain_wallet: Arc, + server_url: String, sync_config: ElectrumSyncConfig, fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, @@ -127,7 +126,6 @@ impl ChainSource { let electrum_chain_source = ElectrumChainSource::new( server_url, sync_config, - 
onchain_wallet, fee_estimator, kv_store, config, @@ -140,16 +138,15 @@ impl ChainSource { pub(crate) fn new_bitcoind_rpc( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - logger: Arc, node_metrics: Arc>, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, ) -> Self { let bitcoind_chain_source = BitcoindChainSource::new_rpc( rpc_host, rpc_port, rpc_user, rpc_password, - onchain_wallet, fee_estimator, kv_store, config, @@ -162,17 +159,15 @@ impl ChainSource { pub(crate) fn new_bitcoind_rest( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - rest_client_config: BitcoindRestClientConfig, logger: Arc, - node_metrics: Arc>, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, rest_client_config: BitcoindRestClientConfig, + logger: Arc, node_metrics: Arc>, ) -> Self { let bitcoind_chain_source = BitcoindChainSource::new_rest( rpc_host, rpc_port, rpc_user, rpc_password, - onchain_wallet, fee_estimator, kv_store, config, @@ -223,7 +218,7 @@ impl ChainSource { } pub(crate) async fn continuously_sync_wallets( - &self, stop_sync_receiver: tokio::sync::watch::Receiver<()>, + &self, stop_sync_receiver: tokio::sync::watch::Receiver<()>, onchain_wallet: Arc, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) { @@ -234,6 +229,7 @@ impl ChainSource { { self.start_tx_based_sync_loop( stop_sync_receiver, + onchain_wallet, channel_manager, chain_monitor, output_sweeper, @@ -256,6 +252,7 @@ impl ChainSource { { self.start_tx_based_sync_loop( stop_sync_receiver, + onchain_wallet, channel_manager, chain_monitor, output_sweeper, @@ -276,6 +273,7 @@ impl ChainSource { bitcoind_chain_source .continuously_sync_wallets( stop_sync_receiver, + onchain_wallet, channel_manager, chain_monitor, output_sweeper, @@ -287,9 +285,9 @@ impl ChainSource { async fn start_tx_based_sync_loop( &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, - channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, background_sync_config: &BackgroundSyncConfig, - logger: Arc, + onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, + background_sync_config: &BackgroundSyncConfig, logger: Arc, ) { // Setup syncing intervals let onchain_wallet_sync_interval_secs = background_sync_config @@ -328,7 +326,7 @@ impl ChainSource { return; } _ = onchain_wallet_sync_interval.tick() => { - let _ = self.sync_onchain_wallet().await; + let _ = self.sync_onchain_wallet(Arc::clone(&onchain_wallet)).await; } _ = fee_rate_update_interval.tick() => { let _ = self.update_fee_rate_estimates().await; @@ -346,13 +344,15 @@ impl ChainSource { // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) 
-	pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> {
+	pub(crate) async fn sync_onchain_wallet(
+		&self, onchain_wallet: Arc<Wallet>,
+	) -> Result<(), Error> {
 		match &self.kind {
 			ChainSourceKind::Esplora(esplora_chain_source) => {
-				esplora_chain_source.sync_onchain_wallet().await
+				esplora_chain_source.sync_onchain_wallet(onchain_wallet).await
 			},
 			ChainSourceKind::Electrum(electrum_chain_source) => {
-				electrum_chain_source.sync_onchain_wallet().await
+				electrum_chain_source.sync_onchain_wallet(onchain_wallet).await
 			},
 			ChainSourceKind::Bitcoind { .. } => {
 				// In BitcoindRpc mode we sync lightning and onchain wallet in one go via
@@ -388,8 +388,8 @@
 	}

 	pub(crate) async fn poll_and_update_listeners(
-		&self, channel_manager: Arc<ChannelManager>, chain_monitor: Arc<ChainMonitor>,
-		output_sweeper: Arc<Sweeper>,
+		&self, onchain_wallet: Arc<Wallet>, channel_manager: Arc<ChannelManager>,
+		chain_monitor: Arc<ChainMonitor>, output_sweeper: Arc<Sweeper>,
 	) -> Result<(), Error> {
 		match &self.kind {
 			ChainSourceKind::Esplora { .. } => {
@@ -404,7 +404,12 @@
 			},
 			ChainSourceKind::Bitcoind(bitcoind_chain_source) => {
 				bitcoind_chain_source
-					.poll_and_update_listeners(channel_manager, chain_monitor, output_sweeper)
+					.poll_and_update_listeners(
+						onchain_wallet,
+						channel_manager,
+						chain_monitor,
+						output_sweeper,
+					)
 					.await
 			},
 		}
diff --git a/src/lib.rs b/src/lib.rs
index 9c2a733b0..4d84c3c99 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -238,12 +238,19 @@ impl Node {
 		// Spawn background task continuously syncing onchain, lightning, and fee rate cache.
 		let stop_sync_receiver = self.stop_sender.subscribe();
 		let chain_source = Arc::clone(&self.chain_source);
+		let sync_wallet = Arc::clone(&self.wallet);
 		let sync_cman = Arc::clone(&self.channel_manager);
 		let sync_cmon = Arc::clone(&self.chain_monitor);
 		let sync_sweeper = Arc::clone(&self.output_sweeper);
 		self.runtime.spawn_background_task(async move {
 			chain_source
-				.continuously_sync_wallets(stop_sync_receiver, sync_cman, sync_cmon, sync_sweeper)
+				.continuously_sync_wallets(
+					stop_sync_receiver,
+					sync_wallet,
+					sync_cman,
+					sync_cmon,
+					sync_sweeper,
+				)
 				.await;
 		});

@@ -1235,6 +1242,7 @@ impl Node {
 		}

 		let chain_source = Arc::clone(&self.chain_source);
+		let sync_wallet = Arc::clone(&self.wallet);
 		let sync_cman = Arc::clone(&self.channel_manager);
 		let sync_cmon = Arc::clone(&self.chain_monitor);
 		let sync_sweeper = Arc::clone(&self.output_sweeper);
@@ -1244,11 +1252,16 @@
 			chain_source
 				.sync_lightning_wallet(sync_cman, sync_cmon, Arc::clone(&sync_sweeper))
 				.await?;
-			chain_source.sync_onchain_wallet().await?;
+			chain_source.sync_onchain_wallet(sync_wallet).await?;
 		} else {
 			chain_source.update_fee_rate_estimates().await?;
 			chain_source
-				.poll_and_update_listeners(sync_cman, sync_cmon, Arc::clone(&sync_sweeper))
+				.poll_and_update_listeners(
+					sync_wallet,
+					sync_cman,
+					sync_cmon,
+					Arc::clone(&sync_sweeper),
+				)
 				.await?;
 		}
 		let _ = sync_sweeper.regenerate_and_broadcast_spend_if_necessary().await;

From c0880d9d974f5cd6740151401bd6bfa4193172dd Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Tue, 18 Nov 2025 09:58:15 +0100
Subject: [PATCH 31/60] Try to poll chain tip on initialization

Previously, we couldn't poll the chain tip in `Builder::build` as we wouldn't
have a runtime available. Since we now do, we can at least attempt to poll for
the chain tip before initializing objects, so that fresh nodes don't need to
re-validate everything from genesis.
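As a rough sketch of the resulting startup behavior (illustrative only, not
part of the diff; `poll_best_block` is the helper introduced below, and the
genesis fallback mirrors the existing initialization path):

    // Resolve the initial best block at build time, assuming a Tokio runtime
    // is available. On any polling error or timeout we fall back to the
    // network's genesis block, i.e., the previous behavior.
    async fn initial_best_block(
        chain_source: &BitcoindChainSource, network: bitcoin::Network,
    ) -> lightning::chain::BestBlock {
        chain_source
            .poll_best_block()
            .await
            .unwrap_or_else(|_| lightning::chain::BestBlock::from_network(network))
    }

If polling fails, nothing is lost: the node simply starts syncing from genesis
as before.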
--- src/builder.rs | 112 +++++++++++++++++++++++++----------------- src/chain/bitcoind.rs | 46 +++++++++++------ src/chain/mod.rs | 24 ++++----- 3 files changed, 111 insertions(+), 71 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 98650aa1a..183c7513b 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1195,10 +1195,10 @@ fn build_with_store_internal( }, }; - let chain_source = match chain_data_source_config { + let (chain_source, chain_tip_opt) = match chain_data_source_config { Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }) => { let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); - Arc::new(ChainSource::new_esplora( + ChainSource::new_esplora( server_url.clone(), headers.clone(), sync_config, @@ -1208,11 +1208,11 @@ fn build_with_store_internal( Arc::clone(&config), Arc::clone(&logger), Arc::clone(&node_metrics), - )) + ) }, Some(ChainDataSourceConfig::Electrum { server_url, sync_config }) => { let sync_config = sync_config.unwrap_or(ElectrumSyncConfig::default()); - Arc::new(ChainSource::new_electrum( + ChainSource::new_electrum( server_url.clone(), sync_config, Arc::clone(&fee_estimator), @@ -1221,7 +1221,7 @@ fn build_with_store_internal( Arc::clone(&config), Arc::clone(&logger), Arc::clone(&node_metrics), - )) + ) }, Some(ChainDataSourceConfig::Bitcoind { rpc_host, @@ -1230,38 +1230,44 @@ fn build_with_store_internal( rpc_password, rest_client_config, }) => match rest_client_config { - Some(rest_client_config) => Arc::new(ChainSource::new_bitcoind_rest( - rpc_host.clone(), - *rpc_port, - rpc_user.clone(), - rpc_password.clone(), - Arc::clone(&fee_estimator), - Arc::clone(&tx_broadcaster), - Arc::clone(&kv_store), - Arc::clone(&config), - rest_client_config.clone(), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )), - None => Arc::new(ChainSource::new_bitcoind_rpc( - rpc_host.clone(), - *rpc_port, - rpc_user.clone(), - rpc_password.clone(), - Arc::clone(&fee_estimator), - Arc::clone(&tx_broadcaster), - Arc::clone(&kv_store), - Arc::clone(&config), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )), + Some(rest_client_config) => runtime.block_on(async { + ChainSource::new_bitcoind_rest( + rpc_host.clone(), + *rpc_port, + rpc_user.clone(), + rpc_password.clone(), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), + Arc::clone(&config), + rest_client_config.clone(), + Arc::clone(&logger), + Arc::clone(&node_metrics), + ) + .await + }), + None => runtime.block_on(async { + ChainSource::new_bitcoind_rpc( + rpc_host.clone(), + *rpc_port, + rpc_user.clone(), + rpc_password.clone(), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), + Arc::clone(&config), + Arc::clone(&logger), + Arc::clone(&node_metrics), + ) + .await + }), }, None => { // Default to Esplora client. 
let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); let sync_config = EsploraSyncConfig::default(); - Arc::new(ChainSource::new_esplora( + ChainSource::new_esplora( server_url.clone(), HashMap::new(), sync_config, @@ -1271,9 +1277,10 @@ fn build_with_store_internal( Arc::clone(&config), Arc::clone(&logger), Arc::clone(&node_metrics), - )) + ) }, }; + let chain_source = Arc::new(chain_source); // Initialize the on-chain wallet and chain access let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { @@ -1313,13 +1320,31 @@ fn build_with_store_internal( })?; let bdk_wallet = match wallet_opt { Some(wallet) => wallet, - None => BdkWallet::create(descriptor, change_descriptor) - .network(config.network) - .create_wallet(&mut wallet_persister) - .map_err(|e| { - log_error!(logger, "Failed to set up wallet: {}", e); - BuildError::WalletSetupFailed - })?, + None => { + let mut wallet = BdkWallet::create(descriptor, change_descriptor) + .network(config.network) + .create_wallet(&mut wallet_persister) + .map_err(|e| { + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed + })?; + + if let Some(best_block) = chain_tip_opt { + // Insert the first checkpoint if we have it, to avoid resyncing from genesis. + // TODO: Use a proper wallet birthday once BDK supports it. + let mut latest_checkpoint = wallet.latest_checkpoint(); + let block_id = + bdk_chain::BlockId { height: best_block.height, hash: best_block.block_hash }; + latest_checkpoint = latest_checkpoint.insert(block_id); + let update = + bdk_wallet::Update { chain: Some(latest_checkpoint), ..Default::default() }; + wallet.apply_update(update).map_err(|e| { + log_error!(logger, "Failed to apply checkpoint during wallet setup: {}", e); + BuildError::WalletSetupFailed + })?; + } + wallet + }, }; let wallet = Arc::new(Wallet::new( @@ -1499,13 +1524,10 @@ fn build_with_store_internal( channel_manager } else { // We're starting a fresh node. 
- let genesis_block_hash = - bitcoin::blockdata::constants::genesis_block(config.network).block_hash(); + let best_block = + chain_tip_opt.unwrap_or_else(|| BestBlock::from_network(config.network)); - let chain_params = ChainParameters { - network: config.network.into(), - best_block: BestBlock::new(genesis_block_hash, 0), - }; + let chain_params = ChainParameters { network: config.network.into(), best_block }; channelmanager::ChannelManager::new( Arc::clone(&fee_estimator), Arc::clone(&chain_monitor), diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 4d7a4a0fe..b3d7880d6 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -14,7 +14,7 @@ use base64::prelude::BASE64_STANDARD; use base64::Engine; use bitcoin::{BlockHash, FeeRate, Network, Transaction, Txid}; use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; -use lightning::chain::Listen; +use lightning::chain::{BestBlock, Listen}; use lightning::util::ser::Writeable; use lightning_block_sync::gossip::UtxoSource; use lightning_block_sync::http::{HttpEndpoint, JsonResponse}; @@ -42,6 +42,7 @@ use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; +const CHAIN_POLLING_TIMEOUT_SECS: u64 = 10; pub(super) struct BitcoindChainSource { api_client: Arc, @@ -329,6 +330,33 @@ impl BitcoindChainSource { } } + pub(super) async fn poll_best_block(&self) -> Result { + self.poll_chain_tip().await.map(|tip| tip.to_best_block()) + } + + async fn poll_chain_tip(&self) -> Result { + let validate_res = tokio::time::timeout( + Duration::from_secs(CHAIN_POLLING_TIMEOUT_SECS), + validate_best_block_header(self.api_client.as_ref()), + ) + .await + .map_err(|e| { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + Error::TxSyncTimeout + })?; + + match validate_res { + Ok(tip) => { + *self.latest_chain_tip.write().unwrap() = Some(tip); + Ok(tip) + }, + Err(e) => { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + return Err(Error::TxSyncFailed); + }, + } + } + pub(super) async fn poll_and_update_listeners( &self, onchain_wallet: Arc, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, @@ -366,20 +394,8 @@ impl BitcoindChainSource { chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { let latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); - let chain_tip = if let Some(tip) = latest_chain_tip_opt { - tip - } else { - match validate_best_block_header(self.api_client.as_ref()).await { - Ok(tip) => { - *self.latest_chain_tip.write().unwrap() = Some(tip); - tip - }, - Err(e) => { - log_error!(self.logger, "Failed to poll for chain data: {:?}", e); - return Err(Error::TxSyncFailed); - }, - } - }; + let chain_tip = + if let Some(tip) = latest_chain_tip_opt { tip } else { self.poll_chain_tip().await? 
}; let mut locked_header_cache = self.header_cache.lock().await; let chain_poller = ChainPoller::new(Arc::clone(&self.api_client), self.config.network); diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 9c7ddd817..2cd98e20d 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -14,7 +14,7 @@ use std::sync::{Arc, RwLock}; use std::time::Duration; use bitcoin::{Script, Txid}; -use lightning::chain::Filter; +use lightning::chain::{BestBlock, Filter}; use lightning_block_sync::gossip::UtxoSource; use crate::chain::bitcoind::BitcoindChainSource; @@ -102,7 +102,7 @@ impl ChainSource { fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> Self { + ) -> (Self, Option) { let esplora_chain_source = EsploraChainSource::new( server_url, headers, @@ -114,7 +114,7 @@ impl ChainSource { node_metrics, ); let kind = ChainSourceKind::Esplora(esplora_chain_source); - Self { kind, tx_broadcaster, logger } + (Self { kind, tx_broadcaster, logger }, None) } pub(crate) fn new_electrum( @@ -122,7 +122,7 @@ impl ChainSource { fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> Self { + ) -> (Self, Option) { let electrum_chain_source = ElectrumChainSource::new( server_url, sync_config, @@ -133,15 +133,15 @@ impl ChainSource { node_metrics, ); let kind = ChainSourceKind::Electrum(electrum_chain_source); - Self { kind, tx_broadcaster, logger } + (Self { kind, tx_broadcaster, logger }, None) } - pub(crate) fn new_bitcoind_rpc( + pub(crate) async fn new_bitcoind_rpc( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> Self { + ) -> (Self, Option) { let bitcoind_chain_source = BitcoindChainSource::new_rpc( rpc_host, rpc_port, @@ -153,16 +153,17 @@ impl ChainSource { Arc::clone(&logger), node_metrics, ); + let best_block = bitcoind_chain_source.poll_best_block().await.ok(); let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source); - Self { kind, tx_broadcaster, logger } + (Self { kind, tx_broadcaster, logger }, best_block) } - pub(crate) fn new_bitcoind_rest( + pub(crate) async fn new_bitcoind_rest( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, rest_client_config: BitcoindRestClientConfig, logger: Arc, node_metrics: Arc>, - ) -> Self { + ) -> (Self, Option) { let bitcoind_chain_source = BitcoindChainSource::new_rest( rpc_host, rpc_port, @@ -175,8 +176,9 @@ impl ChainSource { Arc::clone(&logger), node_metrics, ); + let best_block = bitcoind_chain_source.poll_best_block().await.ok(); let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source); - Self { kind, tx_broadcaster, logger } + (Self { kind, tx_broadcaster, logger }, best_block) } pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { From 0a860d46da4ba2d597cbbe139f25e08931fa12da Mon Sep 17 00:00:00 2001 From: coreyphillips Date: Mon, 10 Nov 2025 13:16:20 -0500 Subject: [PATCH 32/60] feat: add configurable BIP39 mnemonic word counts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Support generating BIP39 mnemonics with configurable word counts (12, 15, 18, 21, 24). Defaults to 24 words (256-bit entropy) for backward compatibility. 
- Add WordCount enum (12–24 variants) - Update generate_entropy_mnemonic to accept optional word_count - Remove need for entropy_bytes in generate_entropy_mnemonic by passing WordCount enum directly to generate() instead - Add rand feature to bip39 dependency - Extend tests for all word count options and defaults - Expose enum and updated function in UDL bindings --- Cargo.toml | 2 +- bindings/ldk_node.udl | 10 +++++++++- src/io/utils.rs | 44 ++++++++++++++++++++++++++++++++++--------- src/lib.rs | 1 + src/types.rs | 28 +++++++++++++++++++++++++++ 5 files changed, 74 insertions(+), 11 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 544dfca08..2aa147a77 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,7 +50,7 @@ reqwest = { version = "0.12", default-features = false, features = ["json", "rus rustls = { version = "0.23", default-features = false } rusqlite = { version = "0.31.0", features = ["bundled"] } bitcoin = "0.32.7" -bip39 = "2.0.0" +bip39 = { version = "2.0.0", features = ["rand"] } bip21 = { version = "0.5", features = ["std"], default-features = false } base64 = { version = "0.22.1", default-features = false, features = ["std"] } diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index ab2f483a1..009126feb 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -1,5 +1,5 @@ namespace ldk_node { - Mnemonic generate_entropy_mnemonic(); + Mnemonic generate_entropy_mnemonic(WordCount? word_count); Config default_config(); }; @@ -46,6 +46,14 @@ dictionary LSPS2ServiceConfig { u64 max_payment_size_msat; }; +enum WordCount { + "Words12", + "Words15", + "Words18", + "Words21", + "Words24", +}; + enum LogLevel { "Gossip", "Trace", diff --git a/src/io/utils.rs b/src/io/utils.rs index d92c9486b..1b4b02a82 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -47,13 +47,15 @@ use crate::io::{ }; use crate::logger::{log_error, LdkLogger, Logger}; use crate::peer_store::PeerStore; -use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; +use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper, WordCount}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_scores_cache"; -/// Generates a random [BIP 39] mnemonic. +/// Generates a random [BIP 39] mnemonic with the specified word count. +/// +/// If no word count is specified, defaults to 24 words (256-bit entropy). /// /// The result may be used to initialize the [`Node`] entropy, i.e., can be given to /// [`Builder::set_entropy_bip39_mnemonic`]. 
@@ -61,11 +63,9 @@ pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_sc /// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki /// [`Node`]: crate::Node /// [`Builder::set_entropy_bip39_mnemonic`]: crate::Builder::set_entropy_bip39_mnemonic -pub fn generate_entropy_mnemonic() -> Mnemonic { - // bip39::Mnemonic supports 256 bit entropy max - let mut entropy = [0; 32]; - OsRng.try_fill_bytes(&mut entropy).expect("Failed to generate entropy"); - Mnemonic::from_entropy(&entropy).unwrap() +pub fn generate_entropy_mnemonic(word_count: Option) -> Mnemonic { + let word_count = word_count.unwrap_or(WordCount::Words24).word_count(); + Mnemonic::generate(word_count).expect("Failed to generate mnemonic") } pub(crate) fn read_or_generate_seed_file( @@ -627,9 +627,35 @@ mod tests { #[test] fn mnemonic_to_entropy_to_mnemonic() { - let mnemonic = generate_entropy_mnemonic(); - + // Test default (24 words) + let mnemonic = generate_entropy_mnemonic(None); let entropy = mnemonic.to_entropy(); assert_eq!(mnemonic, Mnemonic::from_entropy(&entropy).unwrap()); + assert_eq!(mnemonic.word_count(), 24); + + // Test with different word counts + let word_counts = [ + WordCount::Words12, + WordCount::Words15, + WordCount::Words18, + WordCount::Words21, + WordCount::Words24, + ]; + + for word_count in word_counts { + let mnemonic = generate_entropy_mnemonic(Some(word_count)); + let entropy = mnemonic.to_entropy(); + assert_eq!(mnemonic, Mnemonic::from_entropy(&entropy).unwrap()); + + // Verify expected word count + let expected_words = match word_count { + WordCount::Words12 => 12, + WordCount::Words15 => 15, + WordCount::Words18 => 18, + WordCount::Words21 => 21, + WordCount::Words24 => 24, + }; + assert_eq!(mnemonic.word_count(), expected_words); + } } } diff --git a/src/lib.rs b/src/lib.rs index 701a14dde..21fc93fe8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -156,6 +156,7 @@ use types::{ }; pub use types::{ ChannelDetails, CustomTlvRecord, DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId, + WordCount, }; pub use { bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, diff --git a/src/types.rs b/src/types.rs index b8dc10b18..6d6bdcd20 100644 --- a/src/types.rs +++ b/src/types.rs @@ -36,6 +36,34 @@ use crate::logger::Logger; use crate::message_handler::NodeCustomMessageHandler; use crate::payment::PaymentDetails; +/// Supported BIP39 mnemonic word counts for entropy generation. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum WordCount { + /// 12-word mnemonic (128-bit entropy) + Words12, + /// 15-word mnemonic (160-bit entropy) + Words15, + /// 18-word mnemonic (192-bit entropy) + Words18, + /// 21-word mnemonic (224-bit entropy) + Words21, + /// 24-word mnemonic (256-bit entropy) + Words24, +} + +impl WordCount { + /// Returns the word count as a usize value. + pub fn word_count(&self) -> usize { + match self { + WordCount::Words12 => 12, + WordCount::Words15 => 15, + WordCount::Words18 => 18, + WordCount::Words21 => 21, + WordCount::Words24 => 24, + } + } +} + /// A supertrait that requires that a type implements both [`KVStore`] and [`KVStoreSync`] at the /// same time. 
pub trait SyncAndAsyncKVStore: KVStore + KVStoreSync {} From ba335eb0867d4f693aa128208abb5ca76a38063a Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 22 Sep 2025 09:18:58 -0500 Subject: [PATCH 33/60] Add funding_txo to ChannelReady event When a channel is spliced, the existing funding transaction's output is spent and a new funding transaction output is formed. Once the splice is considered locked by both parties, LDK will emit a ChannelReady event which will include the new funding_txo. Additionally, the initial ChannelReady event now includes the original funding_txo. Include this data in LDK Node's ChannelReady event. --- bindings/ldk_node.udl | 2 +- src/event.rs | 44 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index d29f04d02..6c0603af6 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -393,7 +393,7 @@ interface Event { PaymentForwarded(ChannelId prev_channel_id, ChannelId next_channel_id, UserChannelId? prev_user_channel_id, UserChannelId? next_user_channel_id, PublicKey? prev_node_id, PublicKey? next_node_id, u64? total_fee_earned_msat, u64? skimmed_fee_msat, boolean claim_from_onchain_tx, u64? outbound_amount_forwarded_msat); ChannelPending(ChannelId channel_id, UserChannelId user_channel_id, ChannelId former_temporary_channel_id, PublicKey counterparty_node_id, OutPoint funding_txo); - ChannelReady(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id); + ChannelReady(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id, OutPoint? funding_txo); ChannelClosed(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id, ClosureReason? reason); }; diff --git a/src/event.rs b/src/event.rs index a55ddb7fd..375dc97ee 100644 --- a/src/event.rs +++ b/src/event.rs @@ -199,6 +199,10 @@ pub enum Event { funding_txo: OutPoint, }, /// A channel is ready to be used. + /// + /// This event is emitted when: + /// - A new channel has been established and is ready for use + /// - An existing channel has been spliced and is ready with the new funding output ChannelReady { /// The `channel_id` of the channel. channel_id: ChannelId, @@ -208,6 +212,14 @@ pub enum Event { /// /// This will be `None` for events serialized by LDK Node v0.1.0 and prior. counterparty_node_id: Option, + /// The outpoint of the channel's funding transaction. + /// + /// This represents the channel's current funding output, which may change when the + /// channel is spliced. For spliced channels, this will contain the new funding output + /// from the confirmed splice transaction. + /// + /// This will be `None` for events serialized by LDK Node v0.6.0 and prior. + funding_txo: Option, }, /// A channel has been closed. ChannelClosed { @@ -246,6 +258,7 @@ impl_writeable_tlv_based_enum!(Event, (0, channel_id, required), (1, counterparty_node_id, option), (2, user_channel_id, required), + (3, funding_txo, option), }, (4, ChannelPending) => { (0, channel_id, required), @@ -1397,14 +1410,28 @@ where } }, LdkEvent::ChannelReady { - channel_id, user_channel_id, counterparty_node_id, .. + channel_id, + user_channel_id, + counterparty_node_id, + funding_txo, + .. 
			} => {
-				log_info!(
-					self.logger,
-					"Channel {} with counterparty {} ready to be used.",
-					channel_id,
-					counterparty_node_id,
-				);
+				if let Some(funding_txo) = funding_txo {
+					log_info!(
+						self.logger,
+						"Channel {} with counterparty {} ready to be used with funding_txo {}",
+						channel_id,
+						counterparty_node_id,
+						funding_txo,
+					);
+				} else {
+					log_info!(
+						self.logger,
+						"Channel {} with counterparty {} ready to be used",
+						channel_id,
+						counterparty_node_id,
+					);
+				}

 				if let Some(liquidity_source) = self.liquidity_source.as_ref() {
 					liquidity_source
@@ -1416,6 +1443,7 @@ where
 					channel_id,
 					user_channel_id: UserChannelId(user_channel_id),
 					counterparty_node_id: Some(counterparty_node_id),
+					funding_txo,
 				};
 				match self.event_queue.add_event(event).await {
 					Ok(_) => {},
@@ -1655,6 +1683,7 @@ mod tests {
 			channel_id: ChannelId([23u8; 32]),
 			user_channel_id: UserChannelId(2323),
 			counterparty_node_id: None,
+			funding_txo: None,
 		};
 		event_queue.add_event(expected_event.clone()).await.unwrap();
@@ -1692,6 +1721,7 @@ mod tests {
 			channel_id: ChannelId([23u8; 32]),
 			user_channel_id: UserChannelId(2323),
 			counterparty_node_id: None,
+			funding_txo: None,
 		};

 		// Check `next_event_async` won't return if the queue is empty and always rather timeout.

From c585275b4f59082ac9cf4fa7f370d9a4fbf1198c Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Tue, 21 Oct 2025 21:35:17 -0500
Subject: [PATCH 34/60] Add SplicePending and SpliceFailed events

LDK introduced similar events with splicing. SplicePending is largely
informational like ChannelPending. SpliceFailed indicates the used UTXOs can
be reclaimed. This requires UTXO locking, which is not yet implemented.
---
 bindings/ldk_node.udl |   2 +
 src/event.rs          | 111 ++++++++++++++++++++++++++++++++++++++----
 2 files changed, 103 insertions(+), 10 deletions(-)

diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl
index 6c0603af6..ae2fa7555 100644
--- a/bindings/ldk_node.udl
+++ b/bindings/ldk_node.udl
@@ -395,6 +395,8 @@ interface Event {
 	ChannelPending(ChannelId channel_id, UserChannelId user_channel_id, ChannelId former_temporary_channel_id, PublicKey counterparty_node_id, OutPoint funding_txo);
 	ChannelReady(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id, OutPoint? funding_txo);
 	ChannelClosed(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id, ClosureReason? reason);
+	SplicePending(ChannelId channel_id, UserChannelId user_channel_id, PublicKey counterparty_node_id, OutPoint new_funding_txo);
+	SpliceFailed(ChannelId channel_id, UserChannelId user_channel_id, PublicKey counterparty_node_id, OutPoint? abandoned_funding_txo);
 };

 enum PaymentFailureReason {
diff --git a/src/event.rs b/src/event.rs
index 375dc97ee..566265d84 100644
--- a/src/event.rs
+++ b/src/event.rs
@@ -234,6 +234,28 @@ pub enum Event {
 		/// This will be `None` for events serialized by LDK Node v0.2.1 and prior.
 		reason: Option<ClosureReason>,
 	},
+	/// A channel splice is pending confirmation on-chain.
+	SplicePending {
+		/// The `channel_id` of the channel.
+		channel_id: ChannelId,
+		/// The `user_channel_id` of the channel.
+		user_channel_id: UserChannelId,
+		/// The `node_id` of the channel counterparty.
+		counterparty_node_id: PublicKey,
+		/// The outpoint of the channel's splice funding transaction.
+		new_funding_txo: OutPoint,
+	},
+	/// A channel splice has failed.
+	SpliceFailed {
+		/// The `channel_id` of the channel.
+		channel_id: ChannelId,
+		/// The `user_channel_id` of the channel.
+ user_channel_id: UserChannelId, + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, + /// The outpoint of the channel's splice funding transaction, if one was created. + abandoned_funding_txo: Option, + }, } impl_writeable_tlv_based_enum!(Event, @@ -291,7 +313,19 @@ impl_writeable_tlv_based_enum!(Event, (10, skimmed_fee_msat, option), (12, claim_from_onchain_tx, required), (14, outbound_amount_forwarded_msat, option), - } + }, + (8, SplicePending) => { + (1, channel_id, required), + (3, counterparty_node_id, required), + (5, user_channel_id, required), + (7, new_funding_txo, required), + }, + (9, SpliceFailed) => { + (1, channel_id, required), + (3, counterparty_node_id, required), + (5, user_channel_id, required), + (7, abandoned_funding_txo, option), + }, ); pub struct EventQueue @@ -1645,17 +1679,74 @@ where LdkEvent::FundingTransactionReadyForSigning { .. } => { debug_assert!(false, "We currently don't support interactive-tx, so this event should never be emitted."); }, - LdkEvent::SplicePending { .. } => { - debug_assert!( - false, - "We currently don't support splicing, so this event should never be emitted." + LdkEvent::SplicePending { + channel_id, + user_channel_id, + counterparty_node_id, + new_funding_txo, + .. + } => { + log_info!( + self.logger, + "Channel {} with counterparty {} pending splice with funding_txo {}", + channel_id, + counterparty_node_id, + new_funding_txo, ); + + let event = Event::SplicePending { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + counterparty_node_id, + new_funding_txo, + }; + + match self.event_queue.add_event(event).await { + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to push to event queue: {}", e); + return Err(ReplayEvent()); + }, + }; }, - LdkEvent::SpliceFailed { .. } => { - debug_assert!( - false, - "We currently don't support splicing, so this event should never be emitted." - ); + LdkEvent::SpliceFailed { + channel_id, + user_channel_id, + counterparty_node_id, + abandoned_funding_txo, + .. + } => { + if let Some(funding_txo) = abandoned_funding_txo { + log_info!( + self.logger, + "Channel {} with counterparty {} failed splice with funding_txo {}", + channel_id, + counterparty_node_id, + funding_txo, + ); + } else { + log_info!( + self.logger, + "Channel {} with counterparty {} failed splice", + channel_id, + counterparty_node_id, + ); + } + + let event = Event::SpliceFailed { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + counterparty_node_id, + abandoned_funding_txo, + }; + + match self.event_queue.add_event(event).await { + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to push to event queue: {}", e); + return Err(ReplayEvent()); + }, + }; }, } Ok(()) From 16d43cda23a805dda535d31261154460fea9402b Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 23 Oct 2025 12:45:46 -0500 Subject: [PATCH 35/60] Handle LdkEvent::FundingTransactionReadyForSigning When the interactive-tx construction protocol completes in LDK during splicing (and in the future dual-funding), LDK Node must provide signatures for any non-shared inputs belonging to its on-chain wallet. This commit implements this when handling the corresponding FundingTransactionReadyForSigning event. 
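Condensed, the signing routine added to src/wallet/mod.rs below amounts to the
following (a sketch with logging elided; `wallet` stands for the locked BDK
wallet, and `get_utxo`, `get_psbt_input`, and `sign` are the BDK calls used in
the diff):

    // Build a PSBT from the unsigned funding tx, attach a witness_utxo for
    // every input our wallet owns, and sign without finalizing. Shared and
    // counterparty-owned inputs are left untouched, so `finalized` is
    // expected to be false.
    let mut psbt = Psbt::from_unsigned_tx(unsigned_transaction).map_err(|_| ())?;
    for (i, txin) in psbt.unsigned_tx.input.iter().enumerate() {
        if let Some(utxo) = wallet.get_utxo(txin.previous_output) {
            psbt.inputs[i] = wallet.get_psbt_input(utxo, None, true).map_err(|_| ())?;
        }
    }
    let mut sign_options = SignOptions::default();
    sign_options.trust_witness_utxo = true; // inputs carry only a witness_utxo
    let finalized = wallet.sign(&mut psbt, sign_options).map_err(|_| ())?;
    debug_assert!(!finalized);

The resulting partially-signed transaction is then handed back to LDK via
`ChannelManager::funding_transaction_signed`.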
--- src/event.rs | 31 +++++++++++++++++++++++++++++-- src/wallet/mod.rs | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/src/event.rs b/src/event.rs index 566265d84..8a1499823 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1676,8 +1676,35 @@ where } } }, - LdkEvent::FundingTransactionReadyForSigning { .. } => { - debug_assert!(false, "We currently don't support interactive-tx, so this event should never be emitted."); + // TODO(splicing): Revisit error handling once splicing API is settled in LDK 0.3 + LdkEvent::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } => match self.wallet.sign_owned_inputs(unsigned_transaction) { + Ok(partially_signed_tx) => { + match self.channel_manager.funding_transaction_signed( + &channel_id, + &counterparty_node_id, + partially_signed_tx, + ) { + Ok(()) => { + log_info!( + self.logger, + "Signed funding transaction for channel {} with counterparty {}", + channel_id, + counterparty_node_id + ); + }, + Err(e) => { + // TODO(splicing): Abort splice once supported in LDK 0.3 + debug_assert!(false, "Failed signing funding transaction: {:?}", e); + log_error!(self.logger, "Failed signing funding transaction: {:?}", e); + }, + } + }, + Err(()) => log_error!(self.logger, "Failed signing funding transaction"), }, LdkEvent::SplicePending { channel_id, diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 0f3797431..db2d1cf9d 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -664,6 +664,43 @@ impl Wallet { Ok(address_info.address.script_pubkey()) } + #[allow(deprecated)] + pub(crate) fn sign_owned_inputs(&self, unsigned_tx: Transaction) -> Result { + let locked_wallet = self.inner.lock().unwrap(); + + let mut psbt = Psbt::from_unsigned_tx(unsigned_tx).map_err(|e| { + log_error!(self.logger, "Failed to construct PSBT: {}", e); + })?; + for (i, txin) in psbt.unsigned_tx.input.iter().enumerate() { + if let Some(utxo) = locked_wallet.get_utxo(txin.previous_output) { + debug_assert!(!utxo.is_spent); + psbt.inputs[i] = locked_wallet.get_psbt_input(utxo, None, true).map_err(|e| { + log_error!(self.logger, "Failed to construct PSBT input: {}", e); + })?; + } + } + + let mut sign_options = SignOptions::default(); + sign_options.trust_witness_utxo = true; + + match locked_wallet.sign(&mut psbt, sign_options) { + Ok(finalized) => debug_assert!(!finalized), + Err(e) => { + log_error!(self.logger, "Failed to sign owned inputs: {}", e); + return Err(()); + }, + } + + match psbt.extract_tx() { + Ok(tx) => Ok(tx), + Err(bitcoin::psbt::ExtractTxError::MissingInputValue { tx }) => Ok(tx), + Err(e) => { + log_error!(self.logger, "Failed to extract transaction: {}", e); + Err(()) + }, + } + } + #[allow(deprecated)] fn sign_psbt_inner(&self, mut psbt: Psbt) -> Result { let locked_wallet = self.inner.lock().unwrap(); From 14ae7531c1fc3dce3cc5a2b66ae5d9b4b46230d8 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 22 Sep 2025 10:40:22 -0500 Subject: [PATCH 36/60] Refactor funds checking logic into reusable method Extract the funds availability checking logic from open_channel_inner into a separate method so that it can be reused for channel splicing. 
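The extracted check reduces to the following arithmetic (a simplified sketch
of the method added below; `anchor_channels_config` and the peer's
`requires_anchors_zero_fee_htlc_tx` feature check are as in the diff):

    // The spendable on-chain balance (net of reserves already committed to
    // existing anchor channels) must cover the channel amount plus, for
    // anchor channels with untrusted peers, one additional per-channel
    // reserve.
    let required_funds_sats = amount_sats
        + anchor_channels_config.map_or(0, |cfg| {
            if peer_requires_anchors && !cfg.trusted_peers_no_reserve.contains(&peer_node_id) {
                cfg.per_channel_reserve_sats
            } else {
                0
            }
        });
    if spendable_amount_sats < required_funds_sats {
        return Err(Error::InsufficientFunds);
    }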
--- src/lib.rs | 85 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 47 insertions(+), 38 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index cb13d5d9d..b7bc5cb40 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1072,50 +1072,14 @@ impl Node { let con_addr = peer_info.address.clone(); let con_cm = Arc::clone(&self.connection_manager); - let cur_anchor_reserve_sats = - total_anchor_channels_reserve_sats(&self.channel_manager, &self.config); - let spendable_amount_sats = - self.wallet.get_spendable_amount_sats(cur_anchor_reserve_sats).unwrap_or(0); - - // Fail early if we have less than the channel value available. - if spendable_amount_sats < channel_amount_sats { - log_error!(self.logger, - "Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats", - spendable_amount_sats, channel_amount_sats - ); - return Err(Error::InsufficientFunds); - } - // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. self.runtime.block_on(async move { con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; - // Fail if we have less than the channel value + anchor reserve available (if applicable). - let init_features = self - .peer_manager - .peer_by_node_id(&node_id) - .ok_or(Error::ConnectionFailed)? - .init_features; - let required_funds_sats = channel_amount_sats - + self.config.anchor_channels_config.as_ref().map_or(0, |c| { - if init_features.requires_anchors_zero_fee_htlc_tx() - && !c.trusted_peers_no_reserve.contains(&node_id) - { - c.per_channel_reserve_sats - } else { - 0 - } - }); - - if spendable_amount_sats < required_funds_sats { - log_error!(self.logger, - "Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats", - spendable_amount_sats, required_funds_sats - ); - return Err(Error::InsufficientFunds); - } + // Check funds availability after connection (includes anchor reserve calculation) + self.check_sufficient_funds_for_channel(channel_amount_sats, &node_id)?; let mut user_config = default_user_config(&self.config); user_config.channel_handshake_config.announce_for_forwarding = announce_for_forwarding; @@ -1156,6 +1120,51 @@ impl Node { } } + fn check_sufficient_funds_for_channel( + &self, amount_sats: u64, peer_node_id: &PublicKey, + ) -> Result<(), Error> { + let cur_anchor_reserve_sats = + total_anchor_channels_reserve_sats(&self.channel_manager, &self.config); + let spendable_amount_sats = + self.wallet.get_spendable_amount_sats(cur_anchor_reserve_sats).unwrap_or(0); + + // Fail early if we have less than the channel value available. + if spendable_amount_sats < amount_sats { + log_error!(self.logger, + "Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats", + spendable_amount_sats, amount_sats + ); + return Err(Error::InsufficientFunds); + } + + // Fail if we have less than the channel value + anchor reserve available (if applicable). + let init_features = self + .peer_manager + .peer_by_node_id(peer_node_id) + .ok_or(Error::ConnectionFailed)? + .init_features; + let required_funds_sats = amount_sats + + self.config.anchor_channels_config.as_ref().map_or(0, |c| { + if init_features.requires_anchors_zero_fee_htlc_tx() + && !c.trusted_peers_no_reserve.contains(peer_node_id) + { + c.per_channel_reserve_sats + } else { + 0 + } + }); + + if spendable_amount_sats < required_funds_sats { + log_error!(self.logger, + "Unable to create channel due to insufficient funds. 
+				Available: {}sats, Required: {}sats",
+				spendable_amount_sats, required_funds_sats
+			);
+			return Err(Error::InsufficientFunds);
+		}
+
+		Ok(())
+	}
+
 	/// Connect to a node and open a new unannounced channel.
 	///
 	/// To open an announced channel, see [`Node::open_announced_channel`].

From 1a576d07238e7d21b05a6cfa46bd223a1b53489d Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Mon, 22 Sep 2025 12:26:18 -0500
Subject: [PATCH 37/60] Add Node::splice_in method

Instead of closing and re-opening a channel when outbound liquidity is
exhausted, splicing allows adding more funds (splice-in) while keeping the
channel operational. This commit implements splice-in using funds from the
BDK on-chain wallet.
---
 bindings/ldk_node.udl |   3 ++
 src/builder.rs        |   1 +
 src/error.rs          |   3 ++
 src/event.rs          |  12 +++++
 src/lib.rs            | 121 +++++++++++++++++++++++++++++++++++++++++-
 src/wallet/mod.rs     |  74 ++++++++++++++++++++++++--
 6 files changed, 209 insertions(+), 5 deletions(-)

diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl
index ae2fa7555..97c808481 100644
--- a/bindings/ldk_node.udl
+++ b/bindings/ldk_node.udl
@@ -150,6 +150,8 @@ interface Node {
 	[Throws=NodeError]
 	UserChannelId open_announced_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? channel_config);
 	[Throws=NodeError]
+	void splice_in([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, u64 splice_amount_sats);
+	[Throws=NodeError]
 	void close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id);
 	[Throws=NodeError]
 	void force_close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, string? reason);
@@ -290,6 +292,7 @@ enum NodeError {
 	"ProbeSendingFailed",
 	"ChannelCreationFailed",
 	"ChannelClosingFailed",
+	"ChannelSplicingFailed",
 	"ChannelConfigUpdateFailed",
 	"PersistenceFailed",
 	"FeerateEstimationUpdateFailed",
diff --git a/src/builder.rs b/src/builder.rs
index 183c7513b..63e84db37 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -1795,6 +1795,7 @@ fn build_with_store_internal(
 		wallet,
 		chain_source,
 		tx_broadcaster,
+		fee_estimator,
 		event_queue,
 		channel_manager,
 		chain_monitor,
diff --git a/src/error.rs b/src/error.rs
index 7e9dbac20..20b1cceab 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -43,6 +43,8 @@ pub enum Error {
 	ChannelCreationFailed,
 	/// A channel could not be closed.
 	ChannelClosingFailed,
+	/// A channel could not be spliced.
+	ChannelSplicingFailed,
 	/// A channel configuration could not be updated.
 	ChannelConfigUpdateFailed,
 	/// Persistence failed.
@@ -145,6 +147,7 @@ impl fmt::Display for Error {
 			Self::ProbeSendingFailed => write!(f, "Failed to send the given payment probe."),
 			Self::ChannelCreationFailed => write!(f, "Failed to create channel."),
 			Self::ChannelClosingFailed => write!(f, "Failed to close channel."),
+			Self::ChannelSplicingFailed => write!(f, "Failed to splice channel."),
 			Self::ChannelConfigUpdateFailed => write!(f, "Failed to update channel config."),
 			Self::PersistenceFailed => write!(f, "Failed to persist data."),
 			Self::FeerateEstimationUpdateFailed => {
diff --git a/src/event.rs b/src/event.rs
index 8a1499823..41f76f216 100644
--- a/src/event.rs
+++ b/src/event.rs
@@ -1741,6 +1741,7 @@ where
 			user_channel_id,
 			counterparty_node_id,
 			abandoned_funding_txo,
+			contributed_outputs,
 			..
} => { if let Some(funding_txo) = abandoned_funding_txo { @@ -1760,6 +1761,17 @@ where ); } + let tx = bitcoin::Transaction { + version: bitcoin::transaction::Version::TWO, + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![], + output: contributed_outputs, + }; + if let Err(e) = self.wallet.cancel_tx(&tx) { + log_error!(self.logger, "Failed reclaiming unused addresses: {}", e); + return Err(ReplayEvent()); + } + let event = Event::SpliceFailed { channel_id, user_channel_id: UserChannelId(user_channel_id), diff --git a/src/lib.rs b/src/lib.rs index b7bc5cb40..56f2c0da0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -109,6 +109,7 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; use bitcoin::secp256k1::PublicKey; +use bitcoin::Amount; #[cfg(feature = "uniffi")] pub use builder::ArcedNodeBuilder as Builder; pub use builder::BuildError; @@ -124,6 +125,7 @@ pub use error::Error as NodeError; use error::Error; pub use event::Event; use event::{EventHandler, EventQueue}; +use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; #[cfg(feature = "uniffi")] use ffi::*; use gossip::GossipSource; @@ -131,10 +133,12 @@ use graph::NetworkGraph; pub use io::utils::generate_entropy_mnemonic; use io::utils::write_node_metrics; use lightning::chain::BestBlock; -use lightning::events::bump_transaction::Wallet as LdkWallet; +use lightning::events::bump_transaction::{Input, Wallet as LdkWallet}; use lightning::impl_writeable_tlv_based; +use lightning::ln::chan_utils::{make_funding_redeemscript, FUNDING_TRANSACTION_WITNESS_WEIGHT}; use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; use lightning::ln::channelmanager::PaymentId; +use lightning::ln::funding::SpliceContribution; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; use lightning::util::persist::KVStoreSync; @@ -179,6 +183,7 @@ pub struct Node { wallet: Arc, chain_source: Arc, tx_broadcaster: Arc, + fee_estimator: Arc, event_queue: Arc>>, channel_manager: Arc, chain_monitor: Arc, @@ -1236,6 +1241,120 @@ impl Node { ) } + /// Add funds from the on-chain wallet into an existing channel. + /// + /// This provides for increasing a channel's outbound liquidity without re-balancing or closing + /// it. Once negotiation with the counterparty is complete, the channel remains operational + /// while waiting for a new funding transaction to confirm. + /// + /// # Experimental API + /// + /// This API is experimental. Currently, a splice-in will be marked as an outbound payment, but + /// this classification may change in the future. + pub fn splice_in( + &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, + splice_amount_sats: u64, + ) -> Result<(), Error> { + let open_channels = + self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); + if let Some(channel_details) = + open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) + { + self.check_sufficient_funds_for_channel(splice_amount_sats, &counterparty_node_id)?; + + const EMPTY_SCRIPT_SIG_WEIGHT: u64 = + 1 /* empty script_sig */ * bitcoin::constants::WITNESS_SCALE_FACTOR as u64; + + // Used for creating a redeem script for the previous funding txo and the new funding + // txo. Only needed when selecting which UTXOs to include in the funding tx that would + // be sufficient to pay for fees. Hence, the value does not matter. 
+ let dummy_pubkey = PublicKey::from_slice(&[2; 33]).unwrap(); + + let funding_txo = channel_details.funding_txo.ok_or_else(|| { + log_error!(self.logger, "Failed to splice channel: channel not yet ready",); + Error::ChannelSplicingFailed + })?; + + let shared_input = Input { + outpoint: funding_txo.into_bitcoin_outpoint(), + previous_utxo: bitcoin::TxOut { + value: Amount::from_sat(channel_details.channel_value_satoshis), + script_pubkey: make_funding_redeemscript(&dummy_pubkey, &dummy_pubkey) + .to_p2wsh(), + }, + satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + FUNDING_TRANSACTION_WITNESS_WEIGHT, + }; + + let shared_output = bitcoin::TxOut { + value: shared_input.previous_utxo.value + Amount::from_sat(splice_amount_sats), + script_pubkey: make_funding_redeemscript(&dummy_pubkey, &dummy_pubkey).to_p2wsh(), + }; + + let fee_rate = self.fee_estimator.estimate_fee_rate(ConfirmationTarget::ChannelFunding); + + let inputs = self + .wallet + .select_confirmed_utxos(vec![shared_input], &[shared_output], fee_rate) + .map_err(|()| { + log_error!( + self.logger, + "Failed to splice channel: insufficient confirmed UTXOs", + ); + Error::ChannelSplicingFailed + })?; + + let change_address = self.wallet.get_new_internal_address()?; + + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(splice_amount_sats), + inputs, + change_script: Some(change_address.script_pubkey()), + }; + + let funding_feerate_per_kw: u32 = match fee_rate.to_sat_per_kwu().try_into() { + Ok(fee_rate) => fee_rate, + Err(_) => { + debug_assert!(false); + fee_estimator::get_fallback_rate_for_target(ConfirmationTarget::ChannelFunding) + }, + }; + + self.channel_manager + .splice_channel( + &channel_details.channel_id, + &counterparty_node_id, + contribution, + funding_feerate_per_kw, + None, + ) + .map_err(|e| { + log_error!(self.logger, "Failed to splice channel: {:?}", e); + let tx = bitcoin::Transaction { + version: bitcoin::transaction::Version::TWO, + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![], + output: vec![bitcoin::TxOut { + value: Amount::ZERO, + script_pubkey: change_address.script_pubkey(), + }], + }; + match self.wallet.cancel_tx(&tx) { + Ok(()) => Error::ChannelSplicingFailed, + Err(e) => e, + } + }) + } else { + log_error!( + self.logger, + "Channel not found for user_channel_id {} and counterparty {}", + user_channel_id, + counterparty_node_id + ); + + Err(Error::ChannelSplicingFailed) + } + } + /// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate /// cache. /// diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index db2d1cf9d..e26f87e90 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -6,11 +6,13 @@ // accordance with one or both of these licenses. 
 use std::future::Future;
+use std::ops::Deref;
 use std::pin::Pin;
 use std::str::FromStr;
 use std::sync::{Arc, Mutex};

 use bdk_chain::spk_client::{FullScanRequest, SyncRequest};
+use bdk_wallet::descriptor::ExtendedDescriptor;
 #[allow(deprecated)]
 use bdk_wallet::SignOptions;
 use bdk_wallet::{Balance, KeychainKind, PersistedWallet, Update};
@@ -19,19 +21,20 @@
 use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
 use bitcoin::blockdata::locktime::absolute::LockTime;
 use bitcoin::hashes::Hash;
 use bitcoin::key::XOnlyPublicKey;
-use bitcoin::psbt::Psbt;
+use bitcoin::psbt::{self, Psbt};
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
 use bitcoin::secp256k1::{All, PublicKey, Scalar, Secp256k1, SecretKey};
 use bitcoin::{
-	Address, Amount, FeeRate, Network, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash,
+	Address, Amount, FeeRate, Network, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight,
 	WitnessProgram, WitnessVersion,
 };
 use lightning::chain::chaininterface::BroadcasterInterface;
 use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
 use lightning::chain::{BestBlock, Listen};
-use lightning::events::bump_transaction::{Utxo, WalletSource};
+use lightning::events::bump_transaction::{Input, Utxo, WalletSource};
 use lightning::ln::channelmanager::PaymentId;
+use lightning::ln::funding::FundingTxInput;
 use lightning::ln::inbound_payment::ExpandedKey;
 use lightning::ln::msgs::UnsignedGossipMessage;
 use lightning::ln::script::ShutdownScript;
@@ -285,7 +288,7 @@ impl Wallet {
 		Ok(address_info.address)
 	}

-	fn get_new_internal_address(&self) -> Result<Address, Error> {
+	pub(crate) fn get_new_internal_address(&self) -> Result<Address, Error> {
 		let mut locked_wallet = self.inner.lock().unwrap();
 		let mut locked_persister = self.persister.lock().unwrap();

@@ -297,6 +300,19 @@ impl Wallet {
 		Ok(address_info.address)
 	}

+	pub(crate) fn cancel_tx(&self, tx: &Transaction) -> Result<(), Error> {
+		let mut locked_wallet = self.inner.lock().unwrap();
+		let mut locked_persister = self.persister.lock().unwrap();
+
+		locked_wallet.cancel_tx(tx);
+		locked_wallet.persist(&mut locked_persister).map_err(|e| {
+			log_error!(self.logger, "Failed to persist wallet: {}", e);
+			Error::PersistenceFailed
+		})?;
+
+		Ok(())
+	}
+
 	pub(crate) fn get_balances(
 		&self, total_anchor_channels_reserve_sats: u64,
 	) -> Result<(u64, u64), Error> {
@@ -559,6 +575,56 @@ impl Wallet {
 		Ok(txid)
 	}

+	pub(crate) fn select_confirmed_utxos(
+		&self, must_spend: Vec<Input>, must_pay_to: &[TxOut], fee_rate: FeeRate,
+	) -> Result<Vec<FundingTxInput>, ()> {
+		let mut locked_wallet = self.inner.lock().unwrap();
+		debug_assert!(matches!(
+			locked_wallet.public_descriptor(KeychainKind::External),
+			ExtendedDescriptor::Wpkh(_)
+		));
+		debug_assert!(matches!(
+			locked_wallet.public_descriptor(KeychainKind::Internal),
+			ExtendedDescriptor::Wpkh(_)
+		));
+
+		let mut tx_builder = locked_wallet.build_tx();
+		tx_builder.only_witness_utxo();
+
+		for input in &must_spend {
+			let psbt_input = psbt::Input {
+				witness_utxo: Some(input.previous_utxo.clone()),
+				..Default::default()
+			};
+			let weight = Weight::from_wu(input.satisfaction_weight);
+			tx_builder.add_foreign_utxo(input.outpoint, psbt_input, weight).map_err(|_| ())?;
+		}
+
+		for output in must_pay_to {
+			tx_builder.add_recipient(output.script_pubkey.clone(), output.value);
+		}
+
+		tx_builder.fee_rate(fee_rate);
+		tx_builder.exclude_unconfirmed();
+
+		tx_builder
+			.finish()
+			.map_err(|e| {
+				log_error!(self.logger, "Failed to select confirmed UTXOs: {}", e);
+			})?
+			.unsigned_tx
+			.input
+			.iter()
+			.filter(|txin| must_spend.iter().all(|input| input.outpoint != txin.previous_output))
+			.filter_map(|txin| {
+				locked_wallet
+					.tx_details(txin.previous_output.txid)
+					.map(|tx_details| tx_details.tx.deref().clone())
+					.map(|prevtx| FundingTxInput::new_p2wpkh(prevtx, txin.previous_output.vout))
+			})
+			.collect::<Result<Vec<FundingTxInput>, ()>>()
+	}
+
 	fn list_confirmed_utxos_inner(&self) -> Result<Vec<Utxo>, ()> {
 		let locked_wallet = self.inner.lock().unwrap();
 		let mut utxos = Vec::new();

From 04d1c3952355c2ed997bdd491d91f6bc9cd0b5a6 Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Thu, 23 Oct 2025 16:04:46 -0500
Subject: [PATCH 38/60] Add Node::splice_out method

Instead of closing and re-opening a channel when on-chain funds are needed,
splicing allows removing funds (splice-out) while keeping the channel
operational. This commit implements splice-out, sending funds to a
user-provided on-chain address.
---
 bindings/ldk_node.udl |  2 ++
 src/lib.rs            | 68 ++++++++++++++++++++++++++++++++++++++++++-
 src/wallet/mod.rs     | 10 +++----
 3 files changed, 73 insertions(+), 7 deletions(-)

diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl
index 97c808481..ff2469c7e 100644
--- a/bindings/ldk_node.udl
+++ b/bindings/ldk_node.udl
@@ -152,6 +152,8 @@ interface Node {
 	[Throws=NodeError]
 	void splice_in([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, u64 splice_amount_sats);
 	[Throws=NodeError]
+	void splice_out([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, [ByRef]Address address, u64 splice_amount_sats);
+	[Throws=NodeError]
 	void close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id);
 	[Throws=NodeError]
 	void force_close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, string? reason);
diff --git a/src/lib.rs b/src/lib.rs
index 56f2c0da0..8ac6780ed 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -109,7 +109,7 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};

 pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance};
 use bitcoin::secp256k1::PublicKey;
-use bitcoin::Amount;
+use bitcoin::{Address, Amount};
 #[cfg(feature = "uniffi")]
 pub use builder::ArcedNodeBuilder as Builder;
 pub use builder::BuildError;
@@ -1355,6 +1355,72 @@ impl Node {
 	}

+	/// Remove funds from an existing channel, sending them to an on-chain address.
+	///
+	/// This provides for decreasing a channel's outbound liquidity without re-balancing or closing
+	/// it. Once negotiation with the counterparty is complete, the channel remains operational
+	/// while waiting for a new funding transaction to confirm.
+	///
+	/// # Experimental API
+	///
+	/// This API is experimental. Currently, a splice-out will be marked as an inbound payment if
+	/// paid to an address associated with the on-chain wallet, but this classification may change
+	/// in the future.
+ pub fn splice_out( + &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, address: &Address, + splice_amount_sats: u64, + ) -> Result<(), Error> { + let open_channels = + self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); + if let Some(channel_details) = + open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) + { + if splice_amount_sats > channel_details.outbound_capacity_msat { + return Err(Error::ChannelSplicingFailed); + } + + self.wallet.parse_and_validate_address(address)?; + + let contribution = SpliceContribution::SpliceOut { + outputs: vec![bitcoin::TxOut { + value: Amount::from_sat(splice_amount_sats), + script_pubkey: address.script_pubkey(), + }], + }; + + let fee_rate = self.fee_estimator.estimate_fee_rate(ConfirmationTarget::ChannelFunding); + let funding_feerate_per_kw: u32 = match fee_rate.to_sat_per_kwu().try_into() { + Ok(fee_rate) => fee_rate, + Err(_) => { + debug_assert!(false, "FeeRate should always fit within u32"); + log_error!(self.logger, "FeeRate should always fit within u32"); + fee_estimator::get_fallback_rate_for_target(ConfirmationTarget::ChannelFunding) + }, + }; + + self.channel_manager + .splice_channel( + &channel_details.channel_id, + &counterparty_node_id, + contribution, + funding_feerate_per_kw, + None, + ) + .map_err(|e| { + log_error!(self.logger, "Failed to splice channel: {:?}", e); + Error::ChannelSplicingFailed + }) + } else { + log_error!( + self.logger, + "Channel not found for user_channel_id {} and counterparty {}", + user_channel_id, + counterparty_node_id + ); + Err(Error::ChannelSplicingFailed) + } + } + /// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate /// cache. /// diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index e26f87e90..2f8daa500 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -26,7 +26,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; use bitcoin::secp256k1::{All, PublicKey, Scalar, Secp256k1, SecretKey}; use bitcoin::{ - Address, Amount, FeeRate, Network, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight, + Address, Amount, FeeRate, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight, WitnessProgram, WitnessVersion, }; use lightning::chain::chaininterface::BroadcasterInterface; @@ -348,12 +348,10 @@ impl Wallet { self.get_balances(total_anchor_channels_reserve_sats).map(|(_, s)| s) } - fn parse_and_validate_address( - &self, network: Network, address: &Address, - ) -> Result { + pub(crate) fn parse_and_validate_address(&self, address: &Address) -> Result { Address::::from_str(address.to_string().as_str()) .map_err(|_| Error::InvalidAddress)? - .require_network(network) + .require_network(self.config.network) .map_err(|_| Error::InvalidAddress) } @@ -362,7 +360,7 @@ impl Wallet { &self, address: &bitcoin::Address, send_amount: OnchainSendAmount, fee_rate: Option, ) -> Result { - self.parse_and_validate_address(self.config.network, &address)?; + self.parse_and_validate_address(&address)?; // Use the set fee_rate or default to fee estimation. let confirmation_target = ConfirmationTarget::OnchainPayment; From a5c9cbf63dcb02e51b1efdc0ae53e0b0c777a5d9 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 29 Oct 2025 12:42:46 -0500 Subject: [PATCH 39/60] Accept inbound splice attempts Since LDK Node does not support downgrades, there's no need to have a Config parameter for accepting inbound splices. Instead, enable it by default. 
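The flag in question lives on LDK's `UserConfig`; after this change a node
built with LDK Node's defaults behaves as in this sketch (`default_user_config`
is the internal helper touched below):

    // Inbound splice_init requests are no longer rejected by default.
    let user_config = default_user_config(&config);
    assert!(!user_config.reject_inbound_splices);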
--- src/config.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/config.rs b/src/config.rs index ce361c45a..510bcc875 100644 --- a/src/config.rs +++ b/src/config.rs @@ -325,6 +325,7 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { user_config.manually_accept_inbound_channels = true; user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = config.anchor_channels_config.is_some(); + user_config.reject_inbound_splices = false; if may_announce_channel(config).is_err() { user_config.accept_forwards_to_priv_channels = false; From d9ebd344ea075d3427813ddc732b0beb9c051c27 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 23 Oct 2025 13:03:36 -0500 Subject: [PATCH 40/60] Add an integration test for splicing --- tests/integration_tests_rust.rs | 145 ++++++++++++++++++++++++++++++++ 1 file changed, 145 insertions(+) diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 69df12710..2d487da06 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -925,6 +925,151 @@ async fn concurrent_connections_succeed() { } } +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn splice_channel() { + macro_rules! expect_splice_pending_event { + ($node: expr, $counterparty_node_id: expr) => {{ + match $node.next_event_async().await { + ref e @ Event::SplicePending { new_funding_txo, counterparty_node_id, .. } => { + println!("{} got event {:?}", $node.node_id(), e); + assert_eq!(counterparty_node_id, $counterparty_node_id); + $node.event_handled().unwrap(); + new_funding_txo + }, + ref e => { + panic!("{} got unexpected event!: {:?}", std::stringify!($node), e); + }, + } + }}; + } + + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + + let address_a = node_a.onchain_payment().new_address().unwrap(); + let address_b = node_b.onchain_payment().new_address().unwrap(); + let premine_amount_sat = 5_000_000; + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a, address_b], + Amount::from_sat(premine_amount_sat), + ) + .await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + assert_eq!(node_a.list_balances().total_onchain_balance_sats, premine_amount_sat); + assert_eq!(node_b.list_balances().total_onchain_balance_sats, premine_amount_sat); + + open_channel(&node_a, &node_b, 4_000_000, false, &electrsd).await; + + // Open a channel with Node A contributing the funding + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + let user_channel_id_a = expect_channel_ready_event!(node_a, node_b.node_id()); + let user_channel_id_b = expect_channel_ready_event!(node_b, node_a.node_id()); + + let opening_transaction_fee_sat = 156; + let closing_transaction_fee_sat = 614; + let anchor_output_sat = 330; + + assert_eq!( + node_a.list_balances().total_onchain_balance_sats, + premine_amount_sat - 4_000_000 - opening_transaction_fee_sat + ); + assert_eq!( + node_a.list_balances().total_lightning_balance_sats, + 4_000_000 - closing_transaction_fee_sat - anchor_output_sat + ); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 0); + + // Test that splicing and payments fail when there are insufficient funds + let address = node_b.onchain_payment().new_address().unwrap(); + let amount_msat = 400_000_000; + + 
assert_eq!( + node_b.splice_in(&user_channel_id_b, node_b.node_id(), 5_000_000), + Err(NodeError::ChannelSplicingFailed), + ); + assert_eq!( + node_b.splice_out(&user_channel_id_b, node_b.node_id(), &address, amount_msat / 1000), + Err(NodeError::ChannelSplicingFailed), + ); + assert_eq!( + node_b.spontaneous_payment().send(amount_msat, node_a.node_id(), None), + Err(NodeError::PaymentSendingFailed) + ); + + // Splice-in funds for Node B so that it has outbound liquidity to make a payment + node_b.splice_in(&user_channel_id_b, node_a.node_id(), 4_000_000).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + let splice_in_fee_sat = 252; + + assert_eq!( + node_b.list_balances().total_onchain_balance_sats, + premine_amount_sat - 4_000_000 - splice_in_fee_sat + ); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 4_000_000); + + let payment_id = + node_b.spontaneous_payment().send(amount_msat, node_a.node_id(), None).unwrap(); + + expect_payment_successful_event!(node_b, Some(payment_id), None); + expect_payment_received_event!(node_a, amount_msat); + + // Mine a block to give time for the HTLC to resolve + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1).await; + + assert_eq!( + node_a.list_balances().total_lightning_balance_sats, + 4_000_000 - closing_transaction_fee_sat - anchor_output_sat + amount_msat / 1000 + ); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 4_000_000 - amount_msat / 1000); + + // Splice-out funds for Node A from the payment sent by Node B + let address = node_a.onchain_payment().new_address().unwrap(); + node_a.splice_out(&user_channel_id_a, node_b.node_id(), &address, amount_msat / 1000).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + let splice_out_fee_sat = 183; + + assert_eq!( + node_a.list_balances().total_onchain_balance_sats, + premine_amount_sat - 4_000_000 - opening_transaction_fee_sat + amount_msat / 1000 + ); + assert_eq!( + node_a.list_balances().total_lightning_balance_sats, + 4_000_000 - closing_transaction_fee_sat - anchor_output_sat - splice_out_fee_sat + ); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn simple_bolt12_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); From 204e04d80fccd3d7d6f6a0df33850128f4342d83 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 29 Oct 2025 17:18:33 -0500 Subject: [PATCH 41/60] Test splicing in do_channel_full_cycle --- tests/common/mod.rs | 78 ++++++++++++++++++++++++++++++--- tests/integration_tests_rust.rs | 24 ++-------- 2 files changed, 76 insertions(+), 26 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index f023da680..699f8f1d0 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -99,6 +99,24 @@ macro_rules! expect_channel_ready_event { pub(crate) use expect_channel_ready_event; +macro_rules! 
expect_splice_pending_event { + ($node: expr, $counterparty_node_id: expr) => {{ + match $node.next_event_async().await { + ref e @ Event::SplicePending { new_funding_txo, counterparty_node_id, .. } => { + println!("{} got event {:?}", $node.node_id(), e); + assert_eq!(counterparty_node_id, $counterparty_node_id); + $node.event_handled().unwrap(); + new_funding_txo + }, + ref e => { + panic!("{} got unexpected event!: {:?}", std::stringify!($node), e); + }, + } + }}; +} + +pub(crate) use expect_splice_pending_event; + macro_rules! expect_payment_received_event { ($node:expr, $amount_msat:expr) => {{ match $node.next_event_async().await { @@ -795,8 +813,8 @@ pub(crate) async fn do_channel_full_cycle( node_b_anchor_reserve_sat ); - let user_channel_id = expect_channel_ready_event!(node_a, node_b.node_id()); - expect_channel_ready_event!(node_b, node_a.node_id()); + let user_channel_id_a = expect_channel_ready_event!(node_a, node_b.node_id()); + let user_channel_id_b = expect_channel_ready_event!(node_b, node_a.node_id()); println!("\nB receive"); let invoice_amount_1_msat = 2500_000; @@ -1085,12 +1103,60 @@ pub(crate) async fn do_channel_full_cycle( 1 ); + // Mine a block to give time for the HTLC to resolve + generate_blocks_and_wait(&bitcoind, electrsd, 1).await; + + println!("\nB splices out to pay A"); + let addr_a = node_a.onchain_payment().new_address().unwrap(); + let splice_out_sat = funding_amount_sat / 2; + node_b.splice_out(&user_channel_id_b, node_a.node_id(), &addr_a, splice_out_sat).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind, electrsd, 6).await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + assert_eq!( + node_a + .list_payments_with_filter(|p| p.direction == PaymentDirection::Inbound + && matches!(p.kind, PaymentKind::Onchain { .. })) + .len(), + 2 + ); + + println!("\nA splices in the splice-out payment from B"); + let splice_in_sat = splice_out_sat; + node_a.splice_in(&user_channel_id_a, node_b.node_id(), splice_in_sat).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind, electrsd, 6).await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + assert_eq!( + node_a + .list_payments_with_filter(|p| p.direction == PaymentDirection::Outbound + && matches!(p.kind, PaymentKind::Onchain { .. 
}))
+			.len(),
+		2
+	);
+
 	println!("\nB close_channel (force: {})", force_close);
 	if force_close {
 		tokio::time::sleep(Duration::from_secs(1)).await;
-		node_a.force_close_channel(&user_channel_id, node_b.node_id(), None).unwrap();
+		node_a.force_close_channel(&user_channel_id_a, node_b.node_id(), None).unwrap();
 	} else {
-		node_a.close_channel(&user_channel_id, node_b.node_id()).unwrap();
+		node_a.close_channel(&user_channel_id_a, node_b.node_id()).unwrap();
 	}
 
 	expect_event!(node_a, ChannelClosed);
@@ -1189,7 +1255,7 @@ pub(crate) async fn do_channel_full_cycle(
 		+ invoice_amount_3_msat
 		+ determined_amount_msat
 		+ keysend_amount_msat)
-		/ 1000;
+		/ 1000 - splice_out_sat;
 	let node_a_upper_bound_sat =
 		(premine_amount_sat - funding_amount_sat) + (funding_amount_sat - sum_of_all_payments_sat);
 	let node_a_lower_bound_sat = node_a_upper_bound_sat - onchain_fee_buffer_sat;
@@ -1210,7 +1276,7 @@ pub(crate) async fn do_channel_full_cycle(
 			.list_payments_with_filter(|p| p.direction == PaymentDirection::Inbound
 				&& matches!(p.kind, PaymentKind::Onchain { .. }))
 			.len(),
-		2
+		3
 	);
 	assert_eq!(
 		node_b
diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs
index 2d487da06..d6c7c9447 100644
--- a/tests/integration_tests_rust.rs
+++ b/tests/integration_tests_rust.rs
@@ -20,10 +20,10 @@ use common::{
 	bump_fee_and_broadcast, distribute_funds_unconfirmed, do_channel_full_cycle,
 	expect_channel_pending_event, expect_channel_ready_event, expect_event,
 	expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event,
-	generate_blocks_and_wait, open_channel, open_channel_push_amt, premine_and_distribute_funds,
-	premine_blocks, prepare_rbf, random_config, random_listening_addresses,
-	setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_node_for_async_payments,
-	setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore,
+	expect_splice_pending_event, generate_blocks_and_wait, open_channel, open_channel_push_amt,
+	premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config,
+	random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node,
+	setup_node_for_async_payments, setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore,
 };
 use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig};
 use ldk_node::liquidity::LSPS2ServiceConfig;
@@ -927,22 +927,6 @@ async fn concurrent_connections_succeed() {
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
 async fn splice_channel() {
-	macro_rules! expect_splice_pending_event {
-		($node: expr, $counterparty_node_id: expr) => {{
-			match $node.next_event_async().await {
-				ref e @ Event::SplicePending { new_funding_txo, counterparty_node_id, .. } => {
-					println!("{} got event {:?}", $node.node_id(), e);
-					assert_eq!(counterparty_node_id, $counterparty_node_id);
-					$node.event_handled().unwrap();
-					new_funding_txo
-				},
-				ref e => {
-					panic!("{} got unexpected event!: {:?}", std::stringify!($node), e);
-				},
-			}
-		}};
-	}
-
 	let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
 	let chain_source = TestChainSource::Esplora(&electrsd);
 	let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false);

From 998c69e822483e129e20d0e6aac20220a41d9997 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 19 Nov 2025 09:18:55 +0100
Subject: [PATCH 42/60] Expand docs on `LSPS2ServiceConfig::client_trusts_lsp` field

Previously, the docs were a bit sparse. Now that we actually implement
the client-trusts-LSP flow, we should expand a bit on what the bool
actually does.
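As a hedged illustration of the field being documented (not part of the
patch): the sketch below flips `client_trusts_lsp` on an otherwise fully
initialized `LSPS2ServiceConfig`; the payment-size bounds are arbitrary
example values.

use ldk_node::liquidity::LSPS2ServiceConfig;

// Opt the bLIP-52/LSPS2 service into the 'client-trusts-LSP' model: the JIT
// channel's funding transaction is only broadcast once the client has claimed
// sufficient HTLC parts to pay for the channel open.
fn enable_client_trusts_lsp(mut service_config: LSPS2ServiceConfig) -> LSPS2ServiceConfig {
	service_config.client_trusts_lsp = true;
	// Bounds on the payment sizes accepted when opening a JIT channel
	// (example values).
	service_config.min_payment_size_msat = 10_000;
	service_config.max_payment_size_msat = 25_000_000_000;
	service_config
}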
---
 src/liquidity.rs | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/src/liquidity.rs b/src/liquidity.rs
index ee520e14d..74e6098dd 100644
--- a/src/liquidity.rs
+++ b/src/liquidity.rs
@@ -130,7 +130,18 @@ pub struct LSPS2ServiceConfig {
 	pub min_payment_size_msat: u64,
 	/// The maximum payment size that we will accept when opening a channel.
 	pub max_payment_size_msat: u64,
-	/// Use the client trusts lsp model
+	/// Use the 'client-trusts-LSP' trust model.
+	///
+	/// When set, the service will delay *broadcasting* the JIT channel's funding transaction until
+	/// the client has claimed sufficient HTLC parts to pay for the channel open.
+	///
+	/// Note that this renders the flow incompatible with clients utilizing the 'LSP-trusts-client'
+	/// trust model, i.e., clients that in turn delay *claiming* any HTLCs until they see the
+	/// funding transaction in the mempool.
+	///
+	/// Please refer to [`bLIP-52`] for more information.
+	///
+	/// [`bLIP-52`]: https://github.com/lightning/blips/blob/master/blip-0052.md#trust-models
 	pub client_trusts_lsp: bool,
 }

From fe14855a07c11b4794643d84cf82d29939399652 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 19 Nov 2025 11:16:55 +0100
Subject: [PATCH 43/60] Add draft changelog for LDK Node v0.7.0

---
 CHANGELOG.md | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 05813b621..d03401d85 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,56 @@
+# 0.7.0 - TODO
+This seventh minor release introduces numerous new features, bug fixes, and API improvements. In particular, it adds support for channel splicing and Async Payments, as well as for sourcing chain data from a Bitcoin Core REST backend.
+
+## Feature and API updates
+- Experimental support for channel splicing has been added. (#677)
+  - **Note**: Splicing-related transactions might currently still get misclassified in the payment store.
+- Support for serving and paying static invoices for Async Payments has been added. (#621, #632)
+- Sourcing chain data via Bitcoin Core's REST interface is now supported. (#526)
+- A new `Builder::set_chain_source_esplora_with_headers` method has been added
+  that allows specifying headers to be sent to the Esplora backend. (#596)
+- The ability to import and merge pathfinding scores has been added. (#449)
+- Passing a custom preimage when sending spontaneous payments is now supported. (#549)
+- When running in the context of a `tokio` runtime, we now attempt to reuse the
+  outer runtime context for our main runtime. (#543)
+- Specifying a `RouteParametersConfig` when paying BOLT12 offers or sending refunds is now supported. (#702)
+- Liquidity service data is now persisted across restarts. (#650)
+- The bLIP-52/LSPS2 service now supports the 'client-trusts-LSP' model. (#687)
+- The manual-claiming flow is now also supported for JIT invoices. (#608)
+- Any key-value stores provided to `Builder::build_with_store` are now
+  required to implement LDK's `KVStore` as well as `KVStoreSync` interfaces.
+  (#633)
+- The `generate_entropy_mnemonic` method now supports specifying a word count. (#699)
+
+## Bug Fixes and Improvements
+- Robustness of the shutdown procedure has been improved, minimizing the risk of blocking during `Node::stop`. (#592, #612, #619, #622)
+- The VSS storage backend now supports 'lazy' deletes, allowing it to avoid unnecessary remote calls for certain operations.
(#689)
+- The encryption and obfuscation scheme used when storing data against a VSS backend has been improved. (#627)
+- Transient errors during `bitcoind` RPC chain synchronization are now retried with an exponential back-off. (#588)
+- Transactions evicted from the mempool are now correctly handled when syncing via `bitcoind` RPC/REST. (#605)
+- When sourcing chain data from a Bitcoin Core backend, we now poll for the
+  current tip in `Builder::build`, avoiding re-validating the chain from
+  genesis on first startup. (#706)
+- A bug that could result in the node hanging on shutdown when sourcing chain data from a Bitcoin Core backend has been fixed. (#682)
+- Unnecessary fee estimation calls to Bitcoin Core RPC are now avoided. (#631)
+- The node now persists differential updates instead of re-persisting full channel monitors, reducing IO load. (#661)
+- The previously rather restrictive `MaximumFeeEstimate` was relaxed. (#629)
+- The node now listens on all provided listening addresses. (#644)
+
+## Compatibility Notes
+- The minimum supported Rust version (MSRV) has been bumped to `rustc` v1.85. (#606)
+- The LDK dependency has been bumped to v0.2.
+- The BDK dependency has been bumped to v2.2. (#656)
+- The VSS client dependency has been updated to utilize the new `vss-client-ng` crate v0.4. (#627)
+- The `rust-bitcoin` dependency has been bumped to v0.32.7. (#656)
+- The `uniffi` dependency has been bumped to v0.28.3. (#591)
+- The `electrum-client` dependency has been bumped to v0.24.0. (#602)
+- For Kotlin/Android builds, we now require 16kb page sizes, ensuring Play Store compatibility. (#625)
+
+In total, this release features TODO files changed, TODO insertions, TODO
+deletions in TODO commits from TODO authors in alphabetical order:
+
+- TODO TODO
+
 # 0.6.2 - Aug. 14, 2025
 This patch release fixes a panic that could have been hit when syncing to a
 TLS-enabled Electrum server, as well as some minor issues when shutting down

From 31c589a85545dc8b5ffb0e64d1d9d408bcb5db82 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 19 Nov 2025 21:55:14 +0100
Subject: [PATCH 44/60] Replace docs.rs build `doc_auto_cfg` feature with `doc_cfg`

Unfortunately, `doc_auto_cfg` was removed, breaking doc builds for
v0.7.0-rc.0. Here we replace it with the `doc_cfg` attribute.

---
 src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib.rs b/src/lib.rs
index 8ac6780ed..c0b02ae2f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -75,7 +75,7 @@
 #![deny(rustdoc::private_intra_doc_links)]
 #![allow(bare_trait_objects)]
 #![allow(ellipsis_inclusive_range_patterns)]
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 
 mod balance;
 mod builder;

From c3215d5a0eb81835e1056f0c9355c87c4f60804b Mon Sep 17 00:00:00 2001
From: Fmt Bot
Date: Sun, 23 Nov 2025 01:39:24 +0000
Subject: [PATCH 45/60] 2025-11-23 automated rustfmt nightly

---
 tests/common/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 699f8f1d0..b70d2d675 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -100,7 +100,7 @@ macro_rules! expect_channel_ready_event {
 pub(crate) use expect_channel_ready_event;
 
 macro_rules! expect_splice_pending_event {
-	($node: expr, $counterparty_node_id: expr) => {{
+	($node:expr, $counterparty_node_id:expr) => {{
 		match $node.next_event_async().await {
 			ref e @ Event::SplicePending { new_funding_txo, counterparty_node_id, ..
} => { println!("{} got event {:?}", $node.node_id(), e); From 10a8ce4b37d9b86019a36a70172043b3c5aa9eb9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 18 Nov 2025 14:39:25 +0100 Subject: [PATCH 46/60] Move entropy-related types to new `entropy.rs` module As we're about to expose more entropy-related things, we here introduce a new module and start moving related types there. --- src/entropy.rs | 92 ++++++++++++++++++++++++++++++++++++++++++++++++ src/ffi/types.rs | 1 + src/io/utils.rs | 57 +----------------------------- src/lib.rs | 3 +- src/types.rs | 28 --------------- 5 files changed, 95 insertions(+), 86 deletions(-) create mode 100644 src/entropy.rs diff --git a/src/entropy.rs b/src/entropy.rs new file mode 100644 index 000000000..6c75d6da4 --- /dev/null +++ b/src/entropy.rs @@ -0,0 +1,92 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +//! Contains utilities for configuring and generating entropy. + +use bip39::Mnemonic; + +/// Generates a random [BIP 39] mnemonic with the specified word count. +/// +/// If no word count is specified, defaults to 24 words (256-bit entropy). +/// +/// The result may be used to initialize the [`Node`] entropy, i.e., can be given to +/// [`Builder::set_entropy_bip39_mnemonic`]. +/// +/// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki +/// [`Node`]: crate::Node +/// [`Builder::set_entropy_bip39_mnemonic`]: crate::Builder::set_entropy_bip39_mnemonic +pub fn generate_entropy_mnemonic(word_count: Option) -> Mnemonic { + let word_count = word_count.unwrap_or(WordCount::Words24).word_count(); + Mnemonic::generate(word_count).expect("Failed to generate mnemonic") +} + +/// Supported BIP39 mnemonic word counts for entropy generation. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum WordCount { + /// 12-word mnemonic (128-bit entropy) + Words12, + /// 15-word mnemonic (160-bit entropy) + Words15, + /// 18-word mnemonic (192-bit entropy) + Words18, + /// 21-word mnemonic (224-bit entropy) + Words21, + /// 24-word mnemonic (256-bit entropy) + Words24, +} + +impl WordCount { + /// Returns the word count as a usize value. 
+ pub fn word_count(&self) -> usize { + match self { + WordCount::Words12 => 12, + WordCount::Words15 => 15, + WordCount::Words18 => 18, + WordCount::Words21 => 21, + WordCount::Words24 => 24, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn mnemonic_to_entropy_to_mnemonic() { + // Test default (24 words) + let mnemonic = generate_entropy_mnemonic(None); + let entropy = mnemonic.to_entropy(); + assert_eq!(mnemonic, Mnemonic::from_entropy(&entropy).unwrap()); + assert_eq!(mnemonic.word_count(), 24); + + // Test with different word counts + let word_counts = [ + WordCount::Words12, + WordCount::Words15, + WordCount::Words18, + WordCount::Words21, + WordCount::Words24, + ]; + + for word_count in word_counts { + let mnemonic = generate_entropy_mnemonic(Some(word_count)); + let entropy = mnemonic.to_entropy(); + assert_eq!(mnemonic, Mnemonic::from_entropy(&entropy).unwrap()); + + // Verify expected word count + let expected_words = match word_count { + WordCount::Words12 => 12, + WordCount::Words15 => 15, + WordCount::Words18 => 18, + WordCount::Words21 => 21, + WordCount::Words24 => 24, + }; + assert_eq!(mnemonic.word_count(), expected_words); + } + } +} diff --git a/src/ffi/types.rs b/src/ffi/types.rs index 3c88a665f..80be1fe79 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -47,6 +47,7 @@ pub use crate::config::{ default_config, AnchorChannelsConfig, BackgroundSyncConfig, ElectrumSyncConfig, EsploraSyncConfig, MaxDustHTLCExposure, }; +pub use crate::entropy::{generate_entropy_mnemonic, WordCount}; use crate::error::Error; pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; pub use crate::liquidity::{LSPS1OrderStatus, LSPS2ServiceConfig}; diff --git a/src/io/utils.rs b/src/io/utils.rs index 1b4b02a82..389767397 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -17,7 +17,6 @@ use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; use bdk_chain::ConfirmationBlockTime; use bdk_wallet::ChangeSet as BdkWalletChangeSet; -use bip39::Mnemonic; use bitcoin::Network; use lightning::io::Cursor; use lightning::ln::msgs::DecodeError; @@ -47,27 +46,12 @@ use crate::io::{ }; use crate::logger::{log_error, LdkLogger, Logger}; use crate::peer_store::PeerStore; -use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper, WordCount}; +use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_scores_cache"; -/// Generates a random [BIP 39] mnemonic with the specified word count. -/// -/// If no word count is specified, defaults to 24 words (256-bit entropy). -/// -/// The result may be used to initialize the [`Node`] entropy, i.e., can be given to -/// [`Builder::set_entropy_bip39_mnemonic`]. 
-/// -/// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki -/// [`Node`]: crate::Node -/// [`Builder::set_entropy_bip39_mnemonic`]: crate::Builder::set_entropy_bip39_mnemonic -pub fn generate_entropy_mnemonic(word_count: Option) -> Mnemonic { - let word_count = word_count.unwrap_or(WordCount::Words24).word_count(); - Mnemonic::generate(word_count).expect("Failed to generate mnemonic") -} - pub(crate) fn read_or_generate_seed_file( keys_seed_path: &str, logger: L, ) -> std::io::Result<[u8; WALLET_KEYS_SEED_LEN]> @@ -620,42 +604,3 @@ pub(crate) fn read_bdk_wallet_change_set( .map(|indexer| change_set.indexer = indexer); Ok(Some(change_set)) } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn mnemonic_to_entropy_to_mnemonic() { - // Test default (24 words) - let mnemonic = generate_entropy_mnemonic(None); - let entropy = mnemonic.to_entropy(); - assert_eq!(mnemonic, Mnemonic::from_entropy(&entropy).unwrap()); - assert_eq!(mnemonic.word_count(), 24); - - // Test with different word counts - let word_counts = [ - WordCount::Words12, - WordCount::Words15, - WordCount::Words18, - WordCount::Words21, - WordCount::Words24, - ]; - - for word_count in word_counts { - let mnemonic = generate_entropy_mnemonic(Some(word_count)); - let entropy = mnemonic.to_entropy(); - assert_eq!(mnemonic, Mnemonic::from_entropy(&entropy).unwrap()); - - // Verify expected word count - let expected_words = match word_count { - WordCount::Words12 => 12, - WordCount::Words15 => 15, - WordCount::Words18 => 18, - WordCount::Words21 => 21, - WordCount::Words24 => 24, - }; - assert_eq!(mnemonic.word_count(), expected_words); - } - } -} diff --git a/src/lib.rs b/src/lib.rs index c0b02ae2f..ccda53af9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -83,6 +83,7 @@ mod chain; pub mod config; mod connection; mod data_store; +pub mod entropy; mod error; mod event; mod fee_estimator; @@ -130,7 +131,6 @@ use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; use ffi::*; use gossip::GossipSource; use graph::NetworkGraph; -pub use io::utils::generate_entropy_mnemonic; use io::utils::write_node_metrics; use lightning::chain::BestBlock; use lightning::events::bump_transaction::{Input, Wallet as LdkWallet}; @@ -160,7 +160,6 @@ use types::{ }; pub use types::{ ChannelDetails, CustomTlvRecord, DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId, - WordCount, }; pub use { bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, diff --git a/src/types.rs b/src/types.rs index 6d6bdcd20..b8dc10b18 100644 --- a/src/types.rs +++ b/src/types.rs @@ -36,34 +36,6 @@ use crate::logger::Logger; use crate::message_handler::NodeCustomMessageHandler; use crate::payment::PaymentDetails; -/// Supported BIP39 mnemonic word counts for entropy generation. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum WordCount { - /// 12-word mnemonic (128-bit entropy) - Words12, - /// 15-word mnemonic (160-bit entropy) - Words15, - /// 18-word mnemonic (192-bit entropy) - Words18, - /// 21-word mnemonic (224-bit entropy) - Words21, - /// 24-word mnemonic (256-bit entropy) - Words24, -} - -impl WordCount { - /// Returns the word count as a usize value. - pub fn word_count(&self) -> usize { - match self { - WordCount::Words12 => 12, - WordCount::Words15 => 15, - WordCount::Words18 => 18, - WordCount::Words21 => 21, - WordCount::Words24 => 24, - } - } -} - /// A supertrait that requires that a type implements both [`KVStore`] and [`KVStoreSync`] at the /// same time. 
pub trait SyncAndAsyncKVStore: KVStore + KVStoreSync {} From 90400975d003a8eae0ee5e848d7982de654557ca Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 18 Nov 2025 15:42:16 +0100 Subject: [PATCH 47/60] Use `build_with_store` in `build_with_vss_store` Now that we don't use the `Runtime` in `VssStore` anymore, we can in fact revert to reuse the public interface. --- src/builder.rs | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 63e84db37..5ab2c8b9b 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -705,15 +705,6 @@ impl NodeBuilder { ) -> Result { let logger = setup_logger(&self.log_writer_config, &self.config)?; - let runtime = if let Some(handle) = self.runtime_handle.as_ref() { - Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger))) - } else { - Arc::new(Runtime::new(Arc::clone(&logger)).map_err(|e| { - log_error!(logger, "Failed to setup tokio runtime: {}", e); - BuildError::RuntimeSetupFailed - })?) - }; - let seed_bytes = seed_bytes_from_config( &self.config, self.entropy_source_config.as_ref(), @@ -737,18 +728,7 @@ impl NodeBuilder { BuildError::KVStoreSetupFailed })?; - build_with_store_internal( - config, - self.chain_data_source_config.as_ref(), - self.gossip_source_config.as_ref(), - self.liquidity_source_config.as_ref(), - self.pathfinding_scores_sync_config.as_ref(), - self.async_payments_role, - seed_bytes, - runtime, - logger, - Arc::new(vss_store), - ) + self.build_with_store(Arc::new(vss_store)) } /// Builds a [`Node`] instance according to the options previously configured. From c06b118255fc14678f046358497484a3e569a9b0 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 18 Nov 2025 16:41:10 +0100 Subject: [PATCH 48/60] Introduce new mandatory `NodeEntropy` object Previously, the `Builder` allowed setting different entropy sources via its `set_entropy...` methods, defaulting to sourcing from an auto-generated seed file in the storage path. While this allowed for really easy setup, it spared the user to actually think about where to store their node secret. Here, we therefore introduce a mandatory `NodeEntropy` object that, as before, allows the user to source entropy from BIP39 Mnemonic, seed bytes, or a seed file. However, it doesn't implement any default and hence intentionally requires manually setup by the user. Moreover, this API refactor also allows to reuse the same object outside of the `Node`'s `Builder` in a future commit. --- README.md | 16 +- .../lightningdevkit/ldknode/AndroidLibTest.kt | 9 +- .../lightningdevkit/ldknode/LibraryTest.kt | 9 +- bindings/ldk_node.udl | 30 ++- bindings/python/src/ldk_node/test_ldk_node.py | 4 +- src/builder.rs | 207 ++++++------------ src/entropy.rs | 105 ++++++++- src/ffi/types.rs | 2 +- src/io/utils.rs | 47 +--- src/lib.rs | 17 +- tests/common/mod.rs | 45 ++-- tests/integration_tests_cln.rs | 2 +- tests/integration_tests_lnd.rs | 2 +- tests/integration_tests_rust.rs | 55 +++-- tests/integration_tests_vss.rs | 23 +- tests/reorg_test.rs | 2 +- 16 files changed, 306 insertions(+), 269 deletions(-) diff --git a/README.md b/README.md index d11c5fc8e..4e60d3602 100644 --- a/README.md +++ b/README.md @@ -14,20 +14,26 @@ LDK Node is a self-custodial Lightning node in library form. Its central goal is The primary abstraction of the library is the [`Node`][api_docs_node], which can be retrieved by setting up and configuring a [`Builder`][api_docs_builder] to your liking and calling one of the `build` methods. 
`Node` can then be controlled via commands such as `start`, `stop`, `open_channel`, `send`, etc. ```rust -use ldk_node::Builder; -use ldk_node::lightning_invoice::Bolt11Invoice; -use ldk_node::lightning::ln::msgs::SocketAddress; use ldk_node::bitcoin::secp256k1::PublicKey; use ldk_node::bitcoin::Network; +use ldk_node::entropy::{generate_entropy_mnemonic, NodeEntropy}; +use ldk_node::lightning::ln::msgs::SocketAddress; +use ldk_node::lightning_invoice::Bolt11Invoice; +use ldk_node::Builder; use std::str::FromStr; fn main() { let mut builder = Builder::new(); builder.set_network(Network::Testnet); builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None); - builder.set_gossip_source_rgs("https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string()); + builder.set_gossip_source_rgs( + "https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string(), + ); + - let node = builder.build().unwrap(); + let mnemonic = generate_entropy_mnemonic(None); + let node_entropy = NodeEntropy::from_bip39_mnemonic(mnemonic, None); + let node = builder.build(node_entropy).unwrap(); node.start().unwrap(); diff --git a/bindings/kotlin/ldk-node-android/lib/src/androidTest/kotlin/org/lightningdevkit/ldknode/AndroidLibTest.kt b/bindings/kotlin/ldk-node-android/lib/src/androidTest/kotlin/org/lightningdevkit/ldknode/AndroidLibTest.kt index fb29d3219..dd550f71a 100644 --- a/bindings/kotlin/ldk-node-android/lib/src/androidTest/kotlin/org/lightningdevkit/ldknode/AndroidLibTest.kt +++ b/bindings/kotlin/ldk-node-android/lib/src/androidTest/kotlin/org/lightningdevkit/ldknode/AndroidLibTest.kt @@ -34,8 +34,13 @@ class AndroidLibTest { val builder1 = Builder.fromConfig(config1) val builder2 = Builder.fromConfig(config2) - val node1 = builder1.build() - val node2 = builder2.build() + val mnemonic1 = generateEntropyMnemonic(null) + val nodeEntropy1 = NodeEntropy.fromBip39Mnemonic(mnemonic1, null) + val node1 = builder1.build(nodeEntropy1) + + val mnemonic2 = generateEntropyMnemonic(null) + val nodeEntropy2 = NodeEntropy.fromBip39Mnemonic(mnemonic2, null) + val node2 = builder2.build(nodeEntropy2) node1.start() node2.start() diff --git a/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt b/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt index c8c43c49c..006878a4c 100644 --- a/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt +++ b/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt @@ -193,8 +193,13 @@ class LibraryTest { builder2.setChainSourceEsplora(esploraEndpoint, null) builder2.setCustomLogger(logWriter2) - val node1 = builder1.build() - val node2 = builder2.build() + val mnemonic1 = generateEntropyMnemonic(null) + val nodeEntropy1 = NodeEntropy.fromBip39Mnemonic(mnemonic1, null) + val node1 = builder1.build(nodeEntropy1) + + val mnemonic2 = generateEntropyMnemonic(null) + val nodeEntropy2 = NodeEntropy.fromBip39Mnemonic(mnemonic2, null) + val node2 = builder2.build(nodeEntropy2) node1.start() node2.start() diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index ff2469c7e..c4ebf56a6 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -47,6 +47,20 @@ dictionary LSPS2ServiceConfig { boolean client_trusts_lsp; }; +interface NodeEntropy { + [Name=from_bip39_mnemonic] + constructor(Mnemonic mnemonic, string? 
passphrase); + [Throws=EntropyError, Name=from_seed_bytes] + constructor(bytes seed_bytes); + [Throws=EntropyError, Name=from_seed_path] + constructor(string seed_path); +}; + +enum EntropyError { + "InvalidSeedBytes", + "InvalidSeedFile", +}; + enum WordCount { "Words12", "Words15", @@ -80,10 +94,6 @@ interface Builder { constructor(); [Name=from_config] constructor(Config config); - void set_entropy_seed_path(string seed_path); - [Throws=BuildError] - void set_entropy_seed_bytes(sequence seed_bytes); - void set_entropy_bip39_mnemonic(Mnemonic mnemonic, string? passphrase); void set_chain_source_esplora(string server_url, EsploraSyncConfig? config); void set_chain_source_electrum(string server_url, ElectrumSyncConfig? config); void set_chain_source_bitcoind_rpc(string rpc_host, u16 rpc_port, string rpc_user, string rpc_password); @@ -107,15 +117,15 @@ interface Builder { [Throws=BuildError] void set_async_payments_role(AsyncPaymentsRole? role); [Throws=BuildError] - Node build(); + Node build(NodeEntropy node_entropy); [Throws=BuildError] - Node build_with_fs_store(); + Node build_with_fs_store(NodeEntropy node_entropy); [Throws=BuildError] - Node build_with_vss_store(string vss_url, string store_id, string lnurl_auth_server_url, record fixed_headers); + Node build_with_vss_store(NodeEntropy node_entropy, string vss_url, string store_id, string lnurl_auth_server_url, record fixed_headers); [Throws=BuildError] - Node build_with_vss_store_and_fixed_headers(string vss_url, string store_id, record fixed_headers); + Node build_with_vss_store_and_fixed_headers(NodeEntropy node_entropy, string vss_url, string store_id, record fixed_headers); [Throws=BuildError] - Node build_with_vss_store_and_header_provider(string vss_url, string store_id, VssHeaderProvider header_provider); + Node build_with_vss_store_and_header_provider(NodeEntropy node_entropy, string vss_url, string store_id, VssHeaderProvider header_provider); }; interface Node { @@ -357,8 +367,6 @@ dictionary BestBlock { [Error] enum BuildError { - "InvalidSeedBytes", - "InvalidSeedFile", "InvalidSystemTime", "InvalidChannelMonitor", "InvalidListeningAddresses", diff --git a/bindings/python/src/ldk_node/test_ldk_node.py b/bindings/python/src/ldk_node/test_ldk_node.py index f71e89df8..0b73e6a47 100644 --- a/bindings/python/src/ldk_node/test_ldk_node.py +++ b/bindings/python/src/ldk_node/test_ldk_node.py @@ -97,13 +97,15 @@ def send_to_address(address, amount_sats): def setup_node(tmp_dir, esplora_endpoint, listening_addresses): + mnemonic = generate_entropy_mnemonic(None) + node_entropy = NodeEntropy.from_bip39_mnemonic(mnemonic, None) config = default_config() builder = Builder.from_config(config) builder.set_storage_dir_path(tmp_dir) builder.set_chain_source_esplora(esplora_endpoint, None) builder.set_network(DEFAULT_TEST_NETWORK) builder.set_listening_addresses(listening_addresses) - return builder.build() + return builder.build(node_entropy) def get_esplora_endpoint(): if os.environ.get('ESPLORA_ENDPOINT'): diff --git a/src/builder.rs b/src/builder.rs index 5ab2c8b9b..13a7567b7 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -15,7 +15,6 @@ use std::{fmt, fs}; use bdk_wallet::template::Bip84; use bdk_wallet::{KeychainKind, Wallet as BdkWallet}; -use bip39::Mnemonic; use bitcoin::bip32::{ChildNumber, Xpriv}; use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; @@ -45,9 +44,10 @@ use crate::chain::ChainSource; use crate::config::{ default_user_config, may_announce_channel, AnnounceError, AsyncPaymentsRole, 
BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, - DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN, + DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, }; use crate::connection::ConnectionManager; +use crate::entropy::NodeEntropy; use crate::event::EventQueue; use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; @@ -101,13 +101,6 @@ enum ChainDataSourceConfig { }, } -#[derive(Debug, Clone)] -enum EntropySourceConfig { - SeedFile(String), - SeedBytes([u8; WALLET_KEYS_SEED_LEN]), - Bip39Mnemonic { mnemonic: Mnemonic, passphrase: Option }, -} - #[derive(Debug, Clone)] enum GossipSourceConfig { P2PNetwork, @@ -157,10 +150,6 @@ impl std::fmt::Debug for LogWriterConfig { /// [`Node`]: crate::Node #[derive(Debug, Clone, PartialEq)] pub enum BuildError { - /// The given seed bytes are invalid, e.g., have invalid length. - InvalidSeedBytes, - /// The given seed file is invalid, e.g., has invalid length, or could not be read. - InvalidSeedFile, /// The current system time is invalid, clocks might have gone backwards. InvalidSystemTime, /// The a read channel monitor is invalid. @@ -200,8 +189,6 @@ pub enum BuildError { impl fmt::Display for BuildError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - Self::InvalidSeedBytes => write!(f, "Given seed bytes are invalid."), - Self::InvalidSeedFile => write!(f, "Given seed file is invalid or could not be read."), Self::InvalidSystemTime => { write!(f, "System time is invalid. Clocks might have gone back in time.") }, @@ -245,7 +232,6 @@ impl std::error::Error for BuildError {} #[derive(Debug)] pub struct NodeBuilder { config: Config, - entropy_source_config: Option, chain_data_source_config: Option, gossip_source_config: Option, liquidity_source_config: Option, @@ -264,7 +250,6 @@ impl NodeBuilder { /// Creates a new builder instance from an [`Config`]. pub fn from_config(config: Config) -> Self { - let entropy_source_config = None; let chain_data_source_config = None; let gossip_source_config = None; let liquidity_source_config = None; @@ -273,7 +258,6 @@ impl NodeBuilder { let pathfinding_scores_sync_config = None; Self { config, - entropy_source_config, chain_data_source_config, gossip_source_config, liquidity_source_config, @@ -294,33 +278,6 @@ impl NodeBuilder { self } - /// Configures the [`Node`] instance to source its wallet entropy from a seed file on disk. - /// - /// If the given file does not exist a new random seed file will be generated and - /// stored at the given location. - pub fn set_entropy_seed_path(&mut self, seed_path: String) -> &mut Self { - self.entropy_source_config = Some(EntropySourceConfig::SeedFile(seed_path)); - self - } - - /// Configures the [`Node`] instance to source its wallet entropy from the given - /// [`WALLET_KEYS_SEED_LEN`] seed bytes. - pub fn set_entropy_seed_bytes(&mut self, seed_bytes: [u8; WALLET_KEYS_SEED_LEN]) -> &mut Self { - self.entropy_source_config = Some(EntropySourceConfig::SeedBytes(seed_bytes)); - self - } - - /// Configures the [`Node`] instance to source its wallet entropy from a [BIP 39] mnemonic. 
- /// - /// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki - pub fn set_entropy_bip39_mnemonic( - &mut self, mnemonic: Mnemonic, passphrase: Option, - ) -> &mut Self { - self.entropy_source_config = - Some(EntropySourceConfig::Bip39Mnemonic { mnemonic, passphrase }); - self - } - /// Configures the [`Node`] instance to source its chain data from the given Esplora server. /// /// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more @@ -584,7 +541,7 @@ impl NodeBuilder { /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. - pub fn build(&self) -> Result { + pub fn build(&self, node_entropy: NodeEntropy) -> Result { let storage_dir_path = self.config.storage_dir_path.clone(); fs::create_dir_all(storage_dir_path.clone()) .map_err(|_| BuildError::StoragePathAccessFailed)?; @@ -596,19 +553,19 @@ impl NodeBuilder { ) .map_err(|_| BuildError::KVStoreSetupFailed)?, ); - self.build_with_store(kv_store) + self.build_with_store(node_entropy, kv_store) } /// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options /// previously configured. - pub fn build_with_fs_store(&self) -> Result { + pub fn build_with_fs_store(&self, node_entropy: NodeEntropy) -> Result { let mut storage_dir_path: PathBuf = self.config.storage_dir_path.clone().into(); storage_dir_path.push("fs_store"); fs::create_dir_all(storage_dir_path.clone()) .map_err(|_| BuildError::StoragePathAccessFailed)?; let kv_store = Arc::new(FilesystemStore::new(storage_dir_path)); - self.build_with_store(kv_store) + self.build_with_store(node_entropy, kv_store) } /// Builds a [`Node`] instance with a [VSS] backend and according to the options @@ -629,19 +586,14 @@ impl NodeBuilder { /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md /// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md pub fn build_with_vss_store( - &self, vss_url: String, store_id: String, lnurl_auth_server_url: String, - fixed_headers: HashMap, + &self, node_entropy: NodeEntropy, vss_url: String, store_id: String, + lnurl_auth_server_url: String, fixed_headers: HashMap, ) -> Result { use bitcoin::key::Secp256k1; let logger = setup_logger(&self.log_writer_config, &self.config)?; - let seed_bytes = seed_bytes_from_config( - &self.config, - self.entropy_source_config.as_ref(), - Arc::clone(&logger), - )?; - + let seed_bytes = node_entropy.to_seed_bytes(); let config = Arc::new(self.config.clone()); let vss_xprv = @@ -666,7 +618,12 @@ impl NodeBuilder { let header_provider = Arc::new(lnurl_auth_jwt_provider); - self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider) + self.build_with_vss_store_and_header_provider( + node_entropy, + vss_url, + store_id, + header_provider, + ) } /// Builds a [`Node`] instance with a [VSS] backend and according to the options @@ -682,11 +639,17 @@ impl NodeBuilder { /// /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md pub fn build_with_vss_store_and_fixed_headers( - &self, vss_url: String, store_id: String, fixed_headers: HashMap, + &self, node_entropy: NodeEntropy, vss_url: String, store_id: String, + fixed_headers: HashMap, ) -> Result { let header_provider = Arc::new(FixedHeaders::new(fixed_headers)); - self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider) + self.build_with_vss_store_and_header_provider( + node_entropy, + vss_url, + store_id, + header_provider, + ) } /// 
Builds a [`Node`] instance with a [VSS] backend and according to the options @@ -701,16 +664,12 @@ impl NodeBuilder { /// /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md pub fn build_with_vss_store_and_header_provider( - &self, vss_url: String, store_id: String, header_provider: Arc, + &self, node_entropy: NodeEntropy, vss_url: String, store_id: String, + header_provider: Arc, ) -> Result { let logger = setup_logger(&self.log_writer_config, &self.config)?; - let seed_bytes = seed_bytes_from_config( - &self.config, - self.entropy_source_config.as_ref(), - Arc::clone(&logger), - )?; - + let seed_bytes = node_entropy.to_seed_bytes(); let config = Arc::new(self.config.clone()); let vss_xprv = derive_xprv( @@ -728,11 +687,13 @@ impl NodeBuilder { BuildError::KVStoreSetupFailed })?; - self.build_with_store(Arc::new(vss_store)) + self.build_with_store(node_entropy, Arc::new(vss_store)) } /// Builds a [`Node`] instance according to the options previously configured. - pub fn build_with_store(&self, kv_store: Arc) -> Result { + pub fn build_with_store( + &self, node_entropy: NodeEntropy, kv_store: Arc, + ) -> Result { let logger = setup_logger(&self.log_writer_config, &self.config)?; let runtime = if let Some(handle) = self.runtime_handle.as_ref() { @@ -744,11 +705,7 @@ impl NodeBuilder { })?) }; - let seed_bytes = seed_bytes_from_config( - &self.config, - self.entropy_source_config.as_ref(), - Arc::clone(&logger), - )?; + let seed_bytes = node_entropy.to_seed_bytes(); let config = Arc::new(self.config.clone()); build_with_store_internal( @@ -793,37 +750,6 @@ impl ArcedNodeBuilder { Self { inner } } - /// Configures the [`Node`] instance to source its wallet entropy from a seed file on disk. - /// - /// If the given file does not exist a new random seed file will be generated and - /// stored at the given location. - pub fn set_entropy_seed_path(&self, seed_path: String) { - self.inner.write().unwrap().set_entropy_seed_path(seed_path); - } - - /// Configures the [`Node`] instance to source its wallet entropy from the given - /// [`WALLET_KEYS_SEED_LEN`] seed bytes. - /// - /// **Note:** Will return an error if the length of the given `seed_bytes` differs from - /// [`WALLET_KEYS_SEED_LEN`]. - pub fn set_entropy_seed_bytes(&self, seed_bytes: Vec) -> Result<(), BuildError> { - if seed_bytes.len() != WALLET_KEYS_SEED_LEN { - return Err(BuildError::InvalidSeedBytes); - } - let mut bytes = [0u8; WALLET_KEYS_SEED_LEN]; - bytes.copy_from_slice(&seed_bytes); - - self.inner.write().unwrap().set_entropy_seed_bytes(bytes); - Ok(()) - } - - /// Configures the [`Node`] instance to source its wallet entropy from a [BIP 39] mnemonic. - /// - /// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki - pub fn set_entropy_bip39_mnemonic(&self, mnemonic: Mnemonic, passphrase: Option) { - self.inner.write().unwrap().set_entropy_bip39_mnemonic(mnemonic, passphrase); - } - /// Configures the [`Node`] instance to source its chain data from the given Esplora server. /// /// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more @@ -1031,14 +957,16 @@ impl ArcedNodeBuilder { /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. 
- pub fn build(&self) -> Result, BuildError> { - self.inner.read().unwrap().build().map(Arc::new) + pub fn build(&self, node_entropy: Arc) -> Result, BuildError> { + self.inner.read().unwrap().build(*node_entropy).map(Arc::new) } /// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options /// previously configured. - pub fn build_with_fs_store(&self) -> Result, BuildError> { - self.inner.read().unwrap().build_with_fs_store().map(Arc::new) + pub fn build_with_fs_store( + &self, node_entropy: Arc, + ) -> Result, BuildError> { + self.inner.read().unwrap().build_with_fs_store(*node_entropy).map(Arc::new) } /// Builds a [`Node`] instance with a [VSS] backend and according to the options @@ -1059,13 +987,19 @@ impl ArcedNodeBuilder { /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md /// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md pub fn build_with_vss_store( - &self, vss_url: String, store_id: String, lnurl_auth_server_url: String, - fixed_headers: HashMap, + &self, node_entropy: Arc, vss_url: String, store_id: String, + lnurl_auth_server_url: String, fixed_headers: HashMap, ) -> Result, BuildError> { self.inner .read() .unwrap() - .build_with_vss_store(vss_url, store_id, lnurl_auth_server_url, fixed_headers) + .build_with_vss_store( + *node_entropy, + vss_url, + store_id, + lnurl_auth_server_url, + fixed_headers, + ) .map(Arc::new) } @@ -1082,12 +1016,13 @@ impl ArcedNodeBuilder { /// /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md pub fn build_with_vss_store_and_fixed_headers( - &self, vss_url: String, store_id: String, fixed_headers: HashMap, + &self, node_entropy: Arc, vss_url: String, store_id: String, + fixed_headers: HashMap, ) -> Result, BuildError> { self.inner .read() .unwrap() - .build_with_vss_store_and_fixed_headers(vss_url, store_id, fixed_headers) + .build_with_vss_store_and_fixed_headers(*node_entropy, vss_url, store_id, fixed_headers) .map(Arc::new) } @@ -1103,18 +1038,26 @@ impl ArcedNodeBuilder { /// /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md pub fn build_with_vss_store_and_header_provider( - &self, vss_url: String, store_id: String, header_provider: Arc, + &self, node_entropy: Arc, vss_url: String, store_id: String, + header_provider: Arc, ) -> Result, BuildError> { self.inner .read() .unwrap() - .build_with_vss_store_and_header_provider(vss_url, store_id, header_provider) + .build_with_vss_store_and_header_provider( + *node_entropy, + vss_url, + store_id, + header_provider, + ) .map(Arc::new) } /// Builds a [`Node`] instance according to the options previously configured. 
- pub fn build_with_store(&self, kv_store: Arc) -> Result, BuildError> { - self.inner.read().unwrap().build_with_store(kv_store).map(Arc::new) + pub fn build_with_store( + &self, node_entropy: Arc, kv_store: Arc, + ) -> Result, BuildError> { + self.inner.read().unwrap().build_with_store(*node_entropy, kv_store).map(Arc::new) } } @@ -1265,7 +1208,7 @@ fn build_with_store_internal( // Initialize the on-chain wallet and chain access let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { log_error!(logger, "Failed to derive master secret: {}", e); - BuildError::InvalidSeedBytes + BuildError::WalletSetupFailed })?; let descriptor = Bip84(xprv, KeychainKind::External); @@ -1851,28 +1794,6 @@ fn setup_logger( Ok(Arc::new(logger)) } -fn seed_bytes_from_config( - config: &Config, entropy_source_config: Option<&EntropySourceConfig>, logger: Arc, -) -> Result<[u8; 64], BuildError> { - match entropy_source_config { - Some(EntropySourceConfig::SeedBytes(bytes)) => Ok(bytes.clone()), - Some(EntropySourceConfig::SeedFile(seed_path)) => { - Ok(io::utils::read_or_generate_seed_file(seed_path, Arc::clone(&logger)) - .map_err(|_| BuildError::InvalidSeedFile)?) - }, - Some(EntropySourceConfig::Bip39Mnemonic { mnemonic, passphrase }) => match passphrase { - Some(passphrase) => Ok(mnemonic.to_seed(passphrase)), - None => Ok(mnemonic.to_seed("")), - }, - None => { - // Default to read or generate from the default location generate a seed file. - let seed_path = format!("{}/keys_seed", config.storage_dir_path); - Ok(io::utils::read_or_generate_seed_file(&seed_path, Arc::clone(&logger)) - .map_err(|_| BuildError::InvalidSeedFile)?) - }, - } -} - fn derive_xprv( config: Arc, seed_bytes: &[u8; 64], hardened_child_index: u32, logger: Arc, ) -> Result { @@ -1880,13 +1801,13 @@ fn derive_xprv( let xprv = Xpriv::new_master(config.network, seed_bytes).map_err(|e| { log_error!(logger, "Failed to derive master secret: {}", e); - BuildError::InvalidSeedBytes + BuildError::WalletSetupFailed })?; xprv.derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { index: hardened_child_index }]) .map_err(|e| { log_error!(logger, "Failed to derive hardened child secret: {}", e); - BuildError::InvalidSeedBytes + BuildError::WalletSetupFailed }) } diff --git a/src/entropy.rs b/src/entropy.rs index 6c75d6da4..8bd338622 100644 --- a/src/entropy.rs +++ b/src/entropy.rs @@ -7,18 +7,117 @@ //! Contains utilities for configuring and generating entropy. +use std::fmt; + use bip39::Mnemonic; +use crate::config::WALLET_KEYS_SEED_LEN; +use crate::io; + +/// An error that could arise during [`NodeEntropy`] construction. +#[derive(Debug, Clone, PartialEq)] +pub enum EntropyError { + /// The given seed bytes are invalid, e.g., have invalid length. + InvalidSeedBytes, + /// The given seed file is invalid, e.g., has invalid length, or could not be read. + InvalidSeedFile, +} + +impl fmt::Display for EntropyError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Self::InvalidSeedBytes => write!(f, "Given seed bytes are invalid."), + Self::InvalidSeedFile => write!(f, "Given seed file is invalid or could not be read."), + } + } +} + +impl std::error::Error for EntropyError {} + +/// The node entropy, i.e., the main secret from which all other secrets of the [`Node`] are +/// derived. 
+/// +/// [`Node`]: crate::Node +#[derive(Copy, Clone)] +pub struct NodeEntropy([u8; WALLET_KEYS_SEED_LEN]); + +impl NodeEntropy { + /// Configures the [`Node`] instance to source its wallet entropy from a [BIP 39] mnemonic. + /// + /// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki + /// [`Node`]: crate::Node + pub fn from_bip39_mnemonic(mnemonic: Mnemonic, passphrase: Option) -> Self { + match passphrase { + Some(passphrase) => Self(mnemonic.to_seed(passphrase)), + None => Self(mnemonic.to_seed("")), + } + } + + /// Configures the [`Node`] instance to source its wallet entropy from the given + /// [`WALLET_KEYS_SEED_LEN`] seed bytes. + /// + /// [`Node`]: crate::Node + #[cfg(not(feature = "uniffi"))] + pub fn from_seed_bytes(seed_bytes: [u8; WALLET_KEYS_SEED_LEN]) -> Self { + Self(seed_bytes) + } + + /// Configures the [`Node`] instance to source its wallet entropy from the given + /// [`WALLET_KEYS_SEED_LEN`] seed bytes. + /// + /// Will return an error if the length of the given `Vec` is not exactly + /// [`WALLET_KEYS_SEED_LEN`]. + /// + /// [`Node`]: crate::Node + #[cfg(feature = "uniffi")] + pub fn from_seed_bytes(seed_bytes: Vec) -> Result { + if seed_bytes.len() != WALLET_KEYS_SEED_LEN { + return Err(EntropyError::InvalidSeedBytes); + } + let mut seed_bytes_inner = [0u8; WALLET_KEYS_SEED_LEN]; + seed_bytes_inner.copy_from_slice(&seed_bytes); + Ok(Self(seed_bytes_inner)) + } + + /// Configures the [`Node`] instance to source its wallet entropy from a seed file on disk. + /// + /// If the given file does not exist a new random seed file will be generated and + /// stored at the given location. + /// + /// [`Node`]: crate::Node + pub fn from_seed_path(seed_path: String) -> Result { + Ok(Self( + io::utils::read_or_generate_seed_file(&seed_path) + .map_err(|_| EntropyError::InvalidSeedFile)?, + )) + } + + pub(crate) fn to_seed_bytes(&self) -> [u8; WALLET_KEYS_SEED_LEN] { + self.0 + } +} + +impl fmt::Display for NodeEntropy { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "NODE ENTROPY") + } +} + +impl fmt::Debug for NodeEntropy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "NODE ENTROPY") + } +} + /// Generates a random [BIP 39] mnemonic with the specified word count. /// /// If no word count is specified, defaults to 24 words (256-bit entropy). /// -/// The result may be used to initialize the [`Node`] entropy, i.e., can be given to -/// [`Builder::set_entropy_bip39_mnemonic`]. +/// The result may be used to initialize the [`NodeEntropy`], i.e., can be given to +/// [`NodeEntropy::from_bip39_mnemonic`]. 
/// /// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki /// [`Node`]: crate::Node -/// [`Builder::set_entropy_bip39_mnemonic`]: crate::Builder::set_entropy_bip39_mnemonic pub fn generate_entropy_mnemonic(word_count: Option) -> Mnemonic { let word_count = word_count.unwrap_or(WordCount::Words24).word_count(); Mnemonic::generate(word_count).expect("Failed to generate mnemonic") diff --git a/src/ffi/types.rs b/src/ffi/types.rs index 80be1fe79..c69987c96 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -47,7 +47,7 @@ pub use crate::config::{ default_config, AnchorChannelsConfig, BackgroundSyncConfig, ElectrumSyncConfig, EsploraSyncConfig, MaxDustHTLCExposure, }; -pub use crate::entropy::{generate_entropy_mnemonic, WordCount}; +pub use crate::entropy::{generate_entropy_mnemonic, EntropyError, NodeEntropy, WordCount}; use crate::error::Error; pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; pub use crate::liquidity::{LSPS1OrderStatus, LSPS2ServiceConfig}; diff --git a/src/io/utils.rs b/src/io/utils.rs index 389767397..4acc7dd41 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -52,24 +52,13 @@ use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_scores_cache"; -pub(crate) fn read_or_generate_seed_file( - keys_seed_path: &str, logger: L, -) -> std::io::Result<[u8; WALLET_KEYS_SEED_LEN]> -where - L::Target: LdkLogger, -{ +pub(crate) fn read_or_generate_seed_file( + keys_seed_path: &str, +) -> std::io::Result<[u8; WALLET_KEYS_SEED_LEN]> { if Path::new(&keys_seed_path).exists() { - let seed = fs::read(keys_seed_path).map_err(|e| { - log_error!(logger, "Failed to read keys seed file: {}", keys_seed_path); - e - })?; + let seed = fs::read(keys_seed_path)?; if seed.len() != WALLET_KEYS_SEED_LEN { - log_error!( - logger, - "Failed to read keys seed file due to invalid length: {}", - keys_seed_path - ); return Err(std::io::Error::new( std::io::ErrorKind::InvalidData, "Failed to read keys seed file due to invalid length", @@ -81,37 +70,19 @@ where Ok(key) } else { let mut key = [0; WALLET_KEYS_SEED_LEN]; - OsRng.try_fill_bytes(&mut key).map_err(|e| { - log_error!(logger, "Failed to generate entropy: {}", e); + OsRng.try_fill_bytes(&mut key).map_err(|_| { std::io::Error::new(std::io::ErrorKind::Other, "Failed to generate seed bytes") })?; if let Some(parent_dir) = Path::new(&keys_seed_path).parent() { - fs::create_dir_all(parent_dir).map_err(|e| { - log_error!( - logger, - "Failed to create parent directory for key seed file: {}.", - keys_seed_path - ); - e - })?; + fs::create_dir_all(parent_dir)?; } - let mut f = fs::File::create(keys_seed_path).map_err(|e| { - log_error!(logger, "Failed to create keys seed file: {}", keys_seed_path); - e - })?; - - f.write_all(&key).map_err(|e| { - log_error!(logger, "Failed to write node keys seed to disk: {}", keys_seed_path); - e - })?; + let mut f = fs::File::create(keys_seed_path)?; - f.sync_all().map_err(|e| { - log_error!(logger, "Failed to sync node keys seed to disk: {}", keys_seed_path); - e - })?; + f.write_all(&key)?; + f.sync_all()?; Ok(key) } } diff --git a/src/lib.rs b/src/lib.rs index ccda53af9..bbae8ac72 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -29,6 +29,7 @@ //! //! use ldk_node::bitcoin::secp256k1::PublicKey; //! use ldk_node::bitcoin::Network; +//! use ldk_node::entropy::{generate_entropy_mnemonic, NodeEntropy}; //! use ldk_node::lightning::ln::msgs::SocketAddress; //! 
use ldk_node::lightning_invoice::Bolt11Invoice; //! use ldk_node::Builder; @@ -41,7 +42,9 @@ //! "https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string(), //! ); //! -//! let node = builder.build().unwrap(); +//! let mnemonic = generate_entropy_mnemonic(None); +//! let node_entropy = NodeEntropy::from_bip39_mnemonic(mnemonic, None); +//! let node = builder.build(node_entropy).unwrap(); //! //! node.start().unwrap(); //! @@ -1625,11 +1628,19 @@ impl Node { /// # use ldk_node::config::Config; /// # use ldk_node::payment::PaymentDirection; /// # use ldk_node::bitcoin::Network; + /// # use ldk_node::entropy::{generate_entropy_mnemonic, NodeEntropy}; + /// # use rand::distr::Alphanumeric; + /// # use rand::{rng, Rng}; /// # let mut config = Config::default(); /// # config.network = Network::Regtest; - /// # config.storage_dir_path = "/tmp/ldk_node_test/".to_string(); + /// # let mut temp_path = std::env::temp_dir(); + /// # let rand_dir: String = (0..7).map(|_| rng().sample(Alphanumeric) as char).collect(); + /// # temp_path.push(rand_dir); + /// # config.storage_dir_path = temp_path.display().to_string(); /// # let builder = Builder::from_config(config); - /// # let node = builder.build().unwrap(); + /// # let mnemonic = generate_entropy_mnemonic(None); + /// # let node_entropy = NodeEntropy::from_bip39_mnemonic(mnemonic, None); + /// # let node = builder.build(node_entropy.into()).unwrap(); /// node.list_payments_with_filter(|p| p.direction == PaymentDirection::Outbound); /// ``` pub fn list_payments_with_filter bool>( diff --git a/tests/common/mod.rs b/tests/common/mod.rs index b70d2d675..38ecb1fd3 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -29,6 +29,7 @@ use electrsd::corepc_node::{Client as BitcoindClient, Node as BitcoinD}; use electrsd::{corepc_node, ElectrsD}; use electrum_client::ElectrumApi; use ldk_node::config::{AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig}; +use ldk_node::entropy::{generate_entropy_mnemonic, NodeEntropy}; use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ @@ -292,11 +293,24 @@ impl Default for TestStoreType { } } -#[derive(Clone, Default)] +#[derive(Clone)] pub(crate) struct TestConfig { pub node_config: Config, pub log_writer: TestLogWriter, pub store_type: TestStoreType, + pub node_entropy: NodeEntropy, +} + +impl Default for TestConfig { + fn default() -> Self { + let node_config = Default::default(); + let log_writer = Default::default(); + let store_type = Default::default(); + + let mnemonic = generate_entropy_mnemonic(None); + let node_entropy = NodeEntropy::from_bip39_mnemonic(mnemonic, None); + TestConfig { node_config, log_writer, store_type, node_entropy } + } } macro_rules! 
setup_builder { @@ -330,7 +344,7 @@ pub(crate) fn setup_two_nodes_with_store( println!("== Node A =="); let mut config_a = random_config(anchor_channels); config_a.store_type = store_type; - let node_a = setup_node(chain_source, config_a, None); + let node_a = setup_node(chain_source, config_a); println!("\n== Node B =="); let mut config_b = random_config(anchor_channels); @@ -347,18 +361,16 @@ pub(crate) fn setup_two_nodes_with_store( .trusted_peers_no_reserve .push(node_a.node_id()); } - let node_b = setup_node(chain_source, config_b, None); + let node_b = setup_node(chain_source, config_b); (node_a, node_b) } -pub(crate) fn setup_node( - chain_source: &TestChainSource, config: TestConfig, seed_bytes: Option>, -) -> TestNode { - setup_node_for_async_payments(chain_source, config, seed_bytes, None) +pub(crate) fn setup_node(chain_source: &TestChainSource, config: TestConfig) -> TestNode { + setup_node_for_async_payments(chain_source, config, None) } pub(crate) fn setup_node_for_async_payments( - chain_source: &TestChainSource, config: TestConfig, seed_bytes: Option>, + chain_source: &TestChainSource, config: TestConfig, async_payments_role: Option, ) -> TestNode { setup_builder!(builder, config.node_config); @@ -412,27 +424,14 @@ pub(crate) fn setup_node_for_async_payments( }, } - if let Some(seed) = seed_bytes { - #[cfg(feature = "uniffi")] - { - builder.set_entropy_seed_bytes(seed).unwrap(); - } - #[cfg(not(feature = "uniffi"))] - { - let mut bytes = [0u8; 64]; - bytes.copy_from_slice(&seed); - builder.set_entropy_seed_bytes(bytes); - } - } - builder.set_async_payments_role(async_payments_role).unwrap(); let node = match config.store_type { TestStoreType::TestSyncStore => { let kv_store = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.into())); - builder.build_with_store(kv_store).unwrap() + builder.build_with_store(config.node_entropy.into(), kv_store).unwrap() }, - TestStoreType::Sqlite => builder.build().unwrap(), + TestStoreType::Sqlite => builder.build(config.node_entropy.into()).unwrap(), }; node.start().unwrap(); diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index e8eb72a1d..0245f1fdf 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -43,7 +43,7 @@ async fn test_cln() { let mut builder = Builder::from_config(config.node_config); builder.set_chain_source_esplora("http://127.0.0.1:3002".to_string(), None); - let node = builder.build().unwrap(); + let node = builder.build(config.node_entropy).unwrap(); node.start().unwrap(); // Premine some funds and distribute diff --git a/tests/integration_tests_lnd.rs b/tests/integration_tests_lnd.rs index 311a11c3c..8f1d4c868 100755 --- a/tests/integration_tests_lnd.rs +++ b/tests/integration_tests_lnd.rs @@ -41,7 +41,7 @@ async fn test_lnd() { let mut builder = Builder::from_config(config.node_config); builder.set_chain_source_esplora("http://127.0.0.1:3002".to_string(), None); - let node = builder.build().unwrap(); + let node = builder.build(config.node_entropy).unwrap(); node.start().unwrap(); // Premine some funds and distribute diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index d6c7c9447..7c1ed8344 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -159,7 +159,7 @@ async fn multi_hop_sending() { let sync_config = EsploraSyncConfig { background_sync_config: None }; setup_builder!(builder, config.node_config); builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); - let node 
= builder.build().unwrap(); + let node = builder.build(config.node_entropy.into()).unwrap(); node.start().unwrap(); nodes.push(node); } @@ -259,7 +259,8 @@ async fn start_stop_reinit() { setup_builder!(builder, config.node_config); builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); - let node = builder.build_with_store(Arc::clone(&test_sync_store)).unwrap(); + let node = + builder.build_with_store(config.node_entropy.into(), Arc::clone(&test_sync_store)).unwrap(); node.start().unwrap(); let expected_node_id = node.node_id(); @@ -297,7 +298,8 @@ async fn start_stop_reinit() { setup_builder!(builder, config.node_config); builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); - let reinitialized_node = builder.build_with_store(Arc::clone(&test_sync_store)).unwrap(); + let reinitialized_node = + builder.build_with_store(config.node_entropy.into(), Arc::clone(&test_sync_store)).unwrap(); reinitialized_node.start().unwrap(); assert_eq!(reinitialized_node.node_id(), expected_node_id); @@ -606,10 +608,9 @@ async fn onchain_wallet_recovery() { let chain_source = TestChainSource::Esplora(&electrsd); - let seed_bytes = vec![42u8; 64]; - let original_config = random_config(true); - let original_node = setup_node(&chain_source, original_config, Some(seed_bytes.clone())); + let original_node_entropy = original_config.node_entropy; + let original_node = setup_node(&chain_source, original_config); let premine_amount_sat = 100_000; @@ -648,8 +649,9 @@ async fn onchain_wallet_recovery() { drop(original_node); // Now we start from scratch, only the seed remains the same. - let recovered_config = random_config(true); - let recovered_node = setup_node(&chain_source, recovered_config, Some(seed_bytes)); + let mut recovered_config = random_config(true); + recovered_config.node_entropy = original_node_entropy; + let recovered_node = setup_node(&chain_source, recovered_config); recovered_node.sync_wallets().unwrap(); assert_eq!( @@ -703,7 +705,7 @@ async fn run_rbf_test(is_insert_block: bool) { macro_rules! 
config_node { ($chain_source:expr, $anchor_channels:expr) => {{ let config_a = random_config($anchor_channels); - let node = setup_node(&$chain_source, config_a, None); + let node = setup_node(&$chain_source, config_a); node }}; } @@ -822,7 +824,7 @@ async fn sign_verify_msg() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let config = random_config(true); let chain_source = TestChainSource::Esplora(&electrsd); - let node = setup_node(&chain_source, config, None); + let node = setup_node(&chain_source, config); // Tests arbitrary message signing and later verification let msg = "OK computer".as_bytes(); @@ -1296,7 +1298,6 @@ async fn async_payment() { let node_sender = setup_node_for_async_payments( &chain_source, config_sender, - None, Some(AsyncPaymentsRole::Client), ); @@ -1306,7 +1307,6 @@ async fn async_payment() { let node_sender_lsp = setup_node_for_async_payments( &chain_source, config_sender_lsp, - None, Some(AsyncPaymentsRole::Server), ); @@ -1317,7 +1317,6 @@ async fn async_payment() { let node_receiver_lsp = setup_node_for_async_payments( &chain_source, config_receiver_lsp, - None, Some(AsyncPaymentsRole::Server), ); @@ -1326,7 +1325,7 @@ async fn async_payment() { config_receiver.node_config.node_alias = None; config_receiver.log_writer = TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("receiver ".to_string()))); - let node_receiver = setup_node(&chain_source, config_receiver, None); + let node_receiver = setup_node(&chain_source, config_receiver); let address_sender = node_sender.onchain_payment().new_address().unwrap(); let address_sender_lsp = node_sender_lsp.onchain_payment().new_address().unwrap(); @@ -1450,8 +1449,8 @@ async fn test_node_announcement_propagation() { config_b.node_config.listening_addresses = Some(node_b_listening_addresses.clone()); config_b.node_config.announcement_addresses = None; - let node_a = setup_node(&chain_source, config_a, None); - let node_b = setup_node(&chain_source, config_b, None); + let node_a = setup_node(&chain_source, config_a); + let node_b = setup_node(&chain_source, config_b); let address_a = node_a.onchain_payment().new_address().unwrap(); let premine_amount_sat = 5_000_000; @@ -1711,7 +1710,7 @@ async fn do_lsps2_client_service_integration(client_trusts_lsp: bool) { setup_builder!(service_builder, service_config.node_config); service_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); service_builder.set_liquidity_provider_lsps2(lsps2_service_config); - let service_node = service_builder.build().unwrap(); + let service_node = service_builder.build(service_config.node_entropy.into()).unwrap(); service_node.start().unwrap(); let service_node_id = service_node.node_id(); @@ -1721,13 +1720,13 @@ async fn do_lsps2_client_service_integration(client_trusts_lsp: bool) { setup_builder!(client_builder, client_config.node_config); client_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); client_builder.set_liquidity_source_lsps2(service_node_id, service_addr, None); - let client_node = client_builder.build().unwrap(); + let client_node = client_builder.build(client_config.node_entropy.into()).unwrap(); client_node.start().unwrap(); let payer_config = random_config(true); setup_builder!(payer_builder, payer_config.node_config); payer_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); - let payer_node = payer_builder.build().unwrap(); + let payer_node = payer_builder.build(payer_config.node_entropy.into()).unwrap(); payer_node.start().unwrap(); let 
service_addr = service_node.onchain_payment().new_address().unwrap(); @@ -1916,7 +1915,7 @@ async fn facade_logging() { config.log_writer = TestLogWriter::LogFacade; println!("== Facade logging starts =="); - let _node = setup_node(&chain_source, config, None); + let _node = setup_node(&chain_source, config); assert!(!logger.retrieve_logs().is_empty()); for (_, entry) in logger.retrieve_logs().iter().enumerate() { @@ -1995,10 +1994,8 @@ async fn spontaneous_send_with_custom_preimage() { async fn drop_in_async_context() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); - let seed_bytes = vec![42u8; 64]; - let config = random_config(true); - let node = setup_node(&chain_source, config, Some(seed_bytes)); + let node = setup_node(&chain_source, config); node.stop().unwrap(); } @@ -2030,7 +2027,7 @@ async fn lsps2_client_trusts_lsp() { setup_builder!(service_builder, service_config.node_config); service_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); service_builder.set_liquidity_provider_lsps2(lsps2_service_config); - let service_node = service_builder.build().unwrap(); + let service_node = service_builder.build(service_config.node_entropy.into()).unwrap(); service_node.start().unwrap(); let service_node_id = service_node.node_id(); let service_addr = service_node.listening_addresses().unwrap().first().unwrap().clone(); @@ -2039,14 +2036,14 @@ async fn lsps2_client_trusts_lsp() { setup_builder!(client_builder, client_config.node_config); client_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); client_builder.set_liquidity_source_lsps2(service_node_id, service_addr.clone(), None); - let client_node = client_builder.build().unwrap(); + let client_node = client_builder.build(client_config.node_entropy.into()).unwrap(); client_node.start().unwrap(); let client_node_id = client_node.node_id(); let payer_config = random_config(true); setup_builder!(payer_builder, payer_config.node_config); payer_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); - let payer_node = payer_builder.build().unwrap(); + let payer_node = payer_builder.build(payer_config.node_entropy.into()).unwrap(); payer_node.start().unwrap(); let service_addr_onchain = service_node.onchain_payment().new_address().unwrap(); @@ -2203,7 +2200,7 @@ async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { setup_builder!(service_builder, service_config.node_config); service_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); service_builder.set_liquidity_provider_lsps2(lsps2_service_config); - let service_node = service_builder.build().unwrap(); + let service_node = service_builder.build(service_config.node_entropy.into()).unwrap(); service_node.start().unwrap(); let service_node_id = service_node.node_id(); @@ -2213,7 +2210,7 @@ async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { setup_builder!(client_builder, client_config.node_config); client_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); client_builder.set_liquidity_source_lsps2(service_node_id, service_addr.clone(), None); - let client_node = client_builder.build().unwrap(); + let client_node = client_builder.build(client_config.node_entropy.into()).unwrap(); client_node.start().unwrap(); let client_node_id = client_node.node_id(); @@ -2221,7 +2218,7 @@ async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { let payer_config = random_config(true); setup_builder!(payer_builder, 
payer_config.node_config); payer_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); - let payer_node = payer_builder.build().unwrap(); + let payer_node = payer_builder.build(payer_config.node_entropy.into()).unwrap(); payer_node.start().unwrap(); let service_addr_onchain = service_node.onchain_payment().new_address().unwrap(); diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 3b384ec45..54912b358 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -11,6 +11,7 @@ mod common; use std::collections::HashMap; +use ldk_node::entropy::NodeEntropy; use ldk_node::Builder; use rand::{rng, Rng}; @@ -25,6 +26,7 @@ async fn channel_full_cycle_with_vss_store() { let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); let node_a = builder_a .build_with_vss_store_and_fixed_headers( + config_a.node_entropy, vss_base_url.clone(), "node_1_store".to_string(), HashMap::new(), @@ -38,6 +40,7 @@ async fn channel_full_cycle_with_vss_store() { builder_b.set_chain_source_esplora(esplora_url.clone(), None); let node_b = builder_b .build_with_vss_store_and_fixed_headers( + config_b.node_entropy, vss_base_url, "node_2_store".to_string(), HashMap::new(), @@ -68,6 +71,7 @@ async fn vss_v0_schema_backwards_compatibility() { let store_id = format!("v0_compat_test_{}", rand_suffix); let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); let seed_bytes = [42u8; 64]; + let node_entropy = NodeEntropy::from_seed_bytes(seed_bytes); // Setup a v0.6.2 `Node` persisted with the v0 scheme. let (old_balance, old_node_id) = { @@ -112,11 +116,15 @@ async fn vss_v0_schema_backwards_compatibility() { let mut builder_new = Builder::new(); builder_new.set_network(bitcoin::Network::Regtest); builder_new.set_storage_dir_path(storage_path); - builder_new.set_entropy_seed_bytes(seed_bytes); builder_new.set_chain_source_esplora(esplora_url, None); let node_new = builder_new - .build_with_vss_store_and_fixed_headers(vss_base_url, store_id, HashMap::new()) + .build_with_vss_store_and_fixed_headers( + node_entropy, + vss_base_url, + store_id, + HashMap::new(), + ) .unwrap(); node_new.start().unwrap(); @@ -142,16 +150,17 @@ async fn vss_node_restart() { let store_id = format!("restart_test_{}", rand_suffix); let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); let seed_bytes = [42u8; 64]; + let node_entropy = NodeEntropy::from_seed_bytes(seed_bytes); // Setup initial node and fund it. 
let (expected_balance_sats, expected_node_id) = {
 let mut builder = Builder::new();
 builder.set_network(bitcoin::Network::Regtest);
 builder.set_storage_dir_path(storage_path.clone());
- builder.set_entropy_seed_bytes(seed_bytes);
 builder.set_chain_source_esplora(esplora_url.clone(), None);
 let node = builder
 .build_with_vss_store_and_fixed_headers(
+ node_entropy,
 vss_base_url.clone(),
 store_id.clone(),
 HashMap::new(),
@@ -181,11 +190,15 @@
 let mut builder = Builder::new();
 builder.set_network(bitcoin::Network::Regtest);
 builder.set_storage_dir_path(storage_path);
- builder.set_entropy_seed_bytes(seed_bytes);
 builder.set_chain_source_esplora(esplora_url, None);
 let node = builder
- .build_with_vss_store_and_fixed_headers(vss_base_url, store_id, HashMap::new())
+ .build_with_vss_store_and_fixed_headers(
+ node_entropy,
+ vss_base_url,
+ store_id,
+ HashMap::new(),
+ )
 .unwrap();

 node.start().unwrap();
diff --git a/tests/reorg_test.rs b/tests/reorg_test.rs
index 491a37fd4..89660a407 100644
--- a/tests/reorg_test.rs
+++ b/tests/reorg_test.rs
@@ -31,7 +31,7 @@ proptest! {
 macro_rules! config_node {
 ($chain_source: expr, $anchor_channels: expr) => {{
 let config_a = random_config($anchor_channels);
- let node = setup_node(&$chain_source, config_a, None);
+ let node = setup_node(&$chain_source, config_a);
 node
 }};
 }

From 8d312eb521c3ca121a02942505f597da2f4a0761 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Mon, 24 Nov 2025 10:14:26 +0100
Subject: [PATCH 49/60] Add a `docs.rs` CI job checking documentation builds

We previously ran into a quiet error that led to `docs.rs` not
rendering our docs properly, which unfortunately didn't surface until
after we pushed out a release (thankfully only an RC in this case).

Here, we add a CI job that tests our docs build with exactly the
settings `docs.rs` uses.

Additionally, the change also has the benefit that we now only build
docs once rather than for every combination in our workflow matrix,
which was a bit overkill.
---
 .github/workflows/rust.yml | 16 +++++++++++-----
 Cargo.toml | 2 --
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 87249bd72..661703ded 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -72,11 +72,6 @@ jobs:
 - name: Build with UniFFI support on Rust ${{ matrix.toolchain }}
 if: matrix.build-uniffi
 run: cargo build --features uniffi --verbose --color always
- - name: Build documentation on Rust ${{ matrix.toolchain }}
- if: "matrix.platform != 'windows-latest' || matrix.toolchain != '1.85.0'"
- run: |
- cargo doc --release --verbose --color always
- cargo doc --document-private-items --verbose --color always
 - name: Check release build on Rust ${{ matrix.toolchain }}
 run: cargo check --release --verbose --color always
 - name: Check release build with UniFFI support on Rust ${{ matrix.toolchain }}
@@ -90,3 +85,14 @@
 if: "matrix.platform != 'windows-latest' && matrix.build-uniffi"
 run: |
 RUSTFLAGS="--cfg no_download" cargo test --features uniffi
+
+ doc:
+ name: Documentation
+ runs-on: ubuntu-latest
+ env:
+ RUSTDOCFLAGS: -Dwarnings
+ steps:
+ - uses: actions/checkout@v6
+ - uses: dtolnay/rust-toolchain@nightly
+ - uses: dtolnay/install@cargo-docs-rs
+ - run: cargo docs-rs
diff --git a/Cargo.toml b/Cargo.toml
index 5df9b3309..608ae439d 100755
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,8 +12,6 @@ readme = "README.md"
 keywords = ["bitcoin", "lightning", "ldk", "bdk"]
 categories = ["cryptography::cryptocurrencies"]

-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
 [lib]
 crate-type = ["lib", "staticlib", "cdylib"]
 name = "ldk_node"

From c0b4933a46c0b87c9e6276fb050037f57f35da29 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Tue, 25 Nov 2025 13:21:45 +0100
Subject: [PATCH 50/60] Avoid listening port collision by letting OS assign ports

Previously, our tests could potentially run into listening port
collisions resulting in `InvalidSocketAddress` errors. These errors
could surface if we rolled port numbers that either collided with other
concurrent tests *or* with other unrelated services running on
localhost.

Here, we simply let the OS assign us a free port number when setting up
the testing nodes, which avoids such collisions altogether (modulo the
potential TOCTOU race here, which we ignore for now).
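For illustration, the following is a minimal standalone sketch (not part of
the patch itself) of the bind-to-port-0 approach the diff below adopts; it
relies only on `std`:

use std::net::TcpListener;

fn main() -> std::io::Result<()> {
    // Binding to port 0 asks the OS to pick a currently free ephemeral port.
    let listener = TcpListener::bind("127.0.0.1:0")?;

    // The port the OS actually assigned can be read back from the listener.
    let addr = listener.local_addr()?;
    println!("OS-assigned listening address: {}", addr);

    // Note: once `listener` is dropped, nothing stops another process from
    // grabbing the same port again; that is the TOCTOU race mentioned above.
    Ok(())
}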
---
 tests/common/mod.rs | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index b70d2d675..0cdd5e125 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -215,22 +215,17 @@ pub(crate) fn random_storage_path() -> PathBuf {
 temp_path
 }

-pub(crate) fn random_port() -> u16 {
- let mut rng = rng();
- rng.random_range(5000..32768)
-}
-
 pub(crate) fn random_listening_addresses() -> Vec<SocketAddress> {
 let num_addresses = 2;
- let mut listening_addresses = Vec::with_capacity(num_addresses);
+ let mut listening_addresses = HashSet::new();

- for _ in 0..num_addresses {
- let rand_port = random_port();
- let address: SocketAddress = format!("127.0.0.1:{}", rand_port).parse().unwrap();
- listening_addresses.push(address);
+ while listening_addresses.len() < num_addresses {
+ let socket = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let address: SocketAddress = socket.local_addr().unwrap().into();
+ listening_addresses.insert(address);
 }

- listening_addresses
+ listening_addresses.into_iter().collect()
 }

 pub(crate) fn random_node_alias() -> Option<NodeAlias> {

From 996868413522647a9120881eef71fa82df4b3f43 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Tue, 25 Nov 2025 15:47:24 +0100
Subject: [PATCH 51/60] Account for `check_closed_event` being moved on LDK `main`

In rust-lightning#4220 the `check_closed_event` macro was replaced with
a method and is now also only re-exported via `functional_test_utils`.
---
 Cargo.toml | 36 ++++++++++++++++++++++++------------
 src/io/test_utils.rs | 17 +++++++++--------
 2 files changed, 33 insertions(+), 20 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 5df9b3309..ba7b0f303 100755
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -29,17 +29,29 @@ panic = 'abort' # Abort on panic
 default = []

 [dependencies]
-lightning = { version = "0.2.0-rc1", features = ["std"] }
-lightning-types = { version = "0.3.0-rc1" }
-lightning-invoice = { version = "0.34.0-rc1", features = ["std"] }
-lightning-net-tokio = { version = "0.2.0-rc1" }
-lightning-persister = { version = "0.2.0-rc1", features = ["tokio"] }
-lightning-background-processor = { version = "0.2.0-rc1" }
-lightning-rapid-gossip-sync = { version = "0.2.0-rc1" }
-lightning-block-sync = { version = "0.2.0-rc1", features = ["rest-client", "rpc-client", "tokio"] }
-lightning-transaction-sync = { version = "0.2.0-rc1", features = ["esplora-async-https", "time", "electrum-rustls-ring"] }
-lightning-liquidity = { version = "0.2.0-rc1", features = ["std"] }
-lightning-macros = { version = "0.2.0-rc1" }
+#lightning = { version = "0.2.0-rc1", features = ["std"] }
+#lightning-types = { version = "0.3.0-rc1" }
+#lightning-invoice = { version = "0.34.0-rc1", features = ["std"] }
+#lightning-net-tokio = { version = "0.2.0-rc1" }
+#lightning-persister = { version = "0.2.0-rc1", features = ["tokio"] }
+#lightning-background-processor = { version = "0.2.0-rc1" }
+#lightning-rapid-gossip-sync = { version = "0.2.0-rc1" }
+#lightning-block-sync = { version = "0.2.0-rc1", features = ["rest-client", "rpc-client", "tokio"] }
+#lightning-transaction-sync = { version = "0.2.0-rc1", features = ["esplora-async-https", "time", "electrum-rustls-ring"] }
+#lightning-liquidity = { version = "0.2.0-rc1", features = ["std"] }
+#lightning-macros = { version = "0.2.0-rc1" }
+
+lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370", features = ["std"] }
+lightning-types = { git =
"https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -72,7 +84,7 @@ prost = { version = "0.11.6", default-features = false} winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { version = "0.2.0-rc1", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 310638dd8..a360b443b 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -15,14 +15,15 @@ use std::sync::Mutex; use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::{ - connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block, - create_network, create_node_cfgs, create_node_chanmgrs, send_payment, TestChanMonCfg, + check_closed_event, connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, + create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, + TestChanMonCfg, }; use lightning::util::persist::{ KVStore, KVStoreSync, MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; use lightning::util::test_utils; -use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event, io}; +use lightning::{check_added_monitors, check_closed_broadcast, io}; use rand::distr::Alphanumeric; use rand::{rng, Rng}; @@ -326,12 +327,12 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { message.clone(), ) .unwrap(); - check_closed_event!( - nodes[0], + check_closed_event( + &nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, - [nodes[1].node.get_our_node_id()], - 100000 + &[nodes[1].node.get_our_node_id()], + 
100000, ); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); @@ -345,7 +346,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::CommitmentTxConfirmed; let node_id_0 = nodes[0].node.get_our_node_id(); - check_closed_event!(nodes[1], 1, reason, false, [node_id_0], 100000); + check_closed_event(&nodes[1], 1, reason, &[node_id_0], 100000); check_added_monitors!(nodes[1], 1); // Make sure everything is persisted as expected after close. From 2fb1df513b06249dc52799ab568295f68cd3b837 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 26 Nov 2025 08:38:09 +0100 Subject: [PATCH 52/60] Add test asserting generated seed bytes can be read back We add a simple test calling `read_or_generate_seed_file` twice, asserting it returns the same value in both cases. --- src/io/utils.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/io/utils.rs b/src/io/utils.rs index 4acc7dd41..928d4031b 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -575,3 +575,18 @@ pub(crate) fn read_bdk_wallet_change_set( .map(|indexer| change_set.indexer = indexer); Ok(Some(change_set)) } + +#[cfg(test)] +mod tests { + use super::read_or_generate_seed_file; + use super::test_utils::random_storage_path; + + #[test] + fn generated_seed_is_readable() { + let mut rand_path = random_storage_path(); + rand_path.push("test_keys_seed"); + let expected_seed_bytes = read_or_generate_seed_file(&rand_path.to_str().unwrap()).unwrap(); + let read_seed_bytes = read_or_generate_seed_file(&rand_path.to_str().unwrap()).unwrap(); + assert_eq!(expected_seed_bytes, read_seed_bytes); + } +} From d231642efc3b7235b29f2562811c49fa582aa015 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 1 Dec 2025 10:23:12 +0100 Subject: [PATCH 53/60] Update `ChannelDetails` docs for splicing This simply adopts the changes of rust-lightning#4250. --- src/types.rs | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/src/types.rs b/src/types.rs index 6d6bdcd20..f6b8a0008 100644 --- a/src/types.rs +++ b/src/types.rs @@ -228,12 +228,15 @@ impl fmt::Display for UserChannelId { /// Details of a channel as returned by [`Node::list_channels`]. /// +/// When a channel is spliced, most fields continue to refer to the original pre-splice channel +/// state until the splice transaction reaches sufficient confirmations to be locked (and we +/// exchange `splice_locked` messages with our peer). See individual fields for details. +/// /// [`Node::list_channels`]: crate::Node::list_channels #[derive(Debug, Clone)] pub struct ChannelDetails { - /// The channel ID (prior to funding transaction generation, this is a random 32-byte - /// identifier, afterwards this is the transaction ID of the funding transaction XOR the - /// funding transaction output). + /// The channel's ID (prior to initial channel setup this is a random 32 bytes, thereafter it + /// is derived from channel funding or key material). /// /// Note that this means this value is *not* persistent - it can change once during the /// lifetime of the channel. @@ -242,6 +245,10 @@ pub struct ChannelDetails { pub counterparty_node_id: PublicKey, /// The channel's funding transaction output, if we've negotiated the funding transaction with /// our counterparty already. 
+ ///
+ /// When a channel is spliced, this continues to refer to the original pre-splice channel
+ /// state until the splice transaction reaches sufficient confirmations to be locked (and we
+ /// exchange `splice_locked` messages with our peer).
 pub funding_txo: Option<OutPoint>,
 /// The position of the funding transaction in the chain. None if the funding transaction has
 /// not yet been confirmed and the channel fully opened.
@@ -252,6 +259,10 @@
 /// For channels with [`confirmations_required`] set to `Some(0)`, [`outbound_scid_alias`] may
 /// be used in place of this in outbound routes.
 ///
+ /// When a channel is spliced, this continues to refer to the original pre-splice channel state
+ /// until the splice transaction reaches sufficient confirmations to be locked (and we exchange
+ /// `splice_locked` messages with our peer).
+ ///
 /// [`inbound_scid_alias`]: Self::inbound_scid_alias
 /// [`outbound_scid_alias`]: Self::outbound_scid_alias
 /// [`confirmations_required`]: Self::confirmations_required
@@ -263,6 +274,10 @@
 ///
 /// This will be `None` as long as the channel is not available for routing outbound payments.
 ///
+ /// When a channel is spliced, this continues to refer to the original pre-splice channel
+ /// state until the splice transaction reaches sufficient confirmations to be locked (and we
+ /// exchange `splice_locked` messages with our peer).
+ ///
 /// [`short_channel_id`]: Self::short_channel_id
 /// [`confirmations_required`]: Self::confirmations_required
 pub outbound_scid_alias: Option<u64>,
@@ -277,6 +292,10 @@
 /// [`short_channel_id`]: Self::short_channel_id
 pub inbound_scid_alias: Option<u64>,
 /// The value, in satoshis, of this channel as it appears in the funding output.
+ ///
+ /// When a channel is spliced, this continues to refer to the original pre-splice channel
+ /// state until the splice transaction reaches sufficient confirmations to be locked (and we
+ /// exchange `splice_locked` messages with our peer).
 pub channel_value_sats: u64,
 /// The value, in satoshis, that must always be held as a reserve in the channel for us. This
 /// value ensures that if we broadcast a revoked state, our counterparty can punish us by

From 89aff7e93c3c2db51532ad1d31b9bd5ed53736e5 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Tue, 2 Dec 2025 08:34:12 +0100
Subject: [PATCH 54/60] Revert batched VSS `lazy` deletes, rather `spawn` them into the background

Previously, we implemented `lazy` deletes in `VssStore` by batching
them with the next write call as part of the next `PutObjectRequest`
sent. However, we unfortunately overlooked that in this instance any
non-existent `delete_items` would yield a `ConflictError`.

Rather than batching `VssStore` lazy deletes, we therefore opt to
simply spawn them into the background and ignore any errors.
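As a rough sketch (not part of the patch) of the fire-and-forget pattern the
diff below adopts: the `delete_from_backend` future here is a hypothetical
stand-in for the store's actual remove future, and the real code spawns onto
the store's internal runtime rather than the ambient one.

use std::io;

// Hypothetical stand-in for the async remove future built by the store.
async fn delete_from_backend(key: String) -> io::Result<()> {
    // ... this would issue the actual backend delete request ...
    let _ = key;
    Ok(())
}

#[tokio::main]
async fn main() {
    let lazy = true;
    let fut = delete_from_backend("some_key".to_string());
    if lazy {
        // Lazy delete: spawn into the background, return immediately, and
        // ignore whatever result the delete eventually produces.
        tokio::spawn(async move {
            let _ = fut.await;
        });
    } else {
        // Non-lazy delete: wait for the result and surface any error.
        fut.await.expect("delete failed");
    }
}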
--- src/io/vss_store.rs | 125 ++++++-------------------------------------- 1 file changed, 16 insertions(+), 109 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 2906b89ca..2fd1ab2ca 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -15,7 +15,6 @@ use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; -use bdk_chain::Merge; use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; use lightning::impl_writeable_tlv_based_enum; use lightning::io::{self, Error, ErrorKind}; @@ -244,11 +243,15 @@ impl KVStoreSync for VssStore { primary_namespace, secondary_namespace, key, - lazy, ) .await }; - tokio::task::block_in_place(move || internal_runtime.block_on(fut)) + if lazy { + internal_runtime.spawn(async { fut.await }); + Ok(()) + } else { + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) + } } fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { @@ -316,7 +319,7 @@ impl KVStore for VssStore { let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); let inner = Arc::clone(&self.inner); - Box::pin(async move { + let fut = async move { inner .remove_internal( &inner.async_client, @@ -326,10 +329,15 @@ impl KVStore for VssStore { primary_namespace, secondary_namespace, key, - lazy, ) .await - }) + }; + if lazy { + tokio::task::spawn(async { fut.await }); + Box::pin(async { Ok(()) }) + } else { + Box::pin(async { fut.await }) + } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, @@ -362,7 +370,6 @@ struct VssStoreInner { // Per-key locks that ensures that we don't have concurrent writes to the same namespace/key. // The lock also encapsulates the latest written version per key. locks: Mutex>>>, - pending_lazy_deletes: Mutex>, } impl VssStoreInner { @@ -372,7 +379,6 @@ impl VssStoreInner { data_encryption_key: [u8; 32], key_obfuscator: KeyObfuscator, ) -> Self { let locks = Mutex::new(HashMap::new()); - let pending_lazy_deletes = Mutex::new(Vec::new()); Self { schema_version, blocking_client, @@ -381,7 +387,6 @@ impl VssStoreInner { data_encryption_key, key_obfuscator, locks, - pending_lazy_deletes, } } @@ -520,12 +525,6 @@ impl VssStoreInner { "write", )?; - let delete_items = self - .pending_lazy_deletes - .try_lock() - .ok() - .and_then(|mut guard| guard.take()) - .unwrap_or_default(); let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); let vss_version = -1; let storable_builder = StorableBuilder::new(RandEntropySource); @@ -541,16 +540,11 @@ impl VssStoreInner { version: vss_version, value: storable.encode_to_vec(), }], - delete_items: delete_items.clone(), + delete_items: vec![], }; self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { client.put_object(&request).await.map_err(|e| { - // Restore delete items so they'll be retried on next write. 
- if !delete_items.is_empty() { - self.pending_lazy_deletes.lock().unwrap().extend(delete_items); - } - let msg = format!( "Failed to write to key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e @@ -566,7 +560,7 @@ impl VssStoreInner { async fn remove_internal( &self, client: &VssClient, inner_lock_ref: Arc>, locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String, - key: String, lazy: bool, + key: String, ) -> io::Result<()> { check_namespace_key_validity( &primary_namespace, @@ -579,12 +573,6 @@ impl VssStoreInner { self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); let key_value = KeyValue { key: obfuscated_key, version: -1, value: vec![] }; - if lazy { - let mut pending_lazy_deletes = self.pending_lazy_deletes.lock().unwrap(); - pending_lazy_deletes.push(key_value); - return Ok(()); - } - self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { let request = DeleteObjectRequest { store_id: self.store_id.clone(), key_value: Some(key_value) }; @@ -851,85 +839,4 @@ mod tests { do_read_write_remove_list_persist(&vss_store); drop(vss_store) } - - #[tokio::test(flavor = "multi_thread", worker_threads = 1)] - async fn vss_lazy_delete() { - let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); - let mut rng = rng(); - let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); - let mut vss_seed = [0u8; 32]; - rng.fill_bytes(&mut vss_seed); - let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let vss_store = - VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); - - let primary_namespace = "test_namespace"; - let secondary_namespace = ""; - let key_to_delete = "key_to_delete"; - let key_for_trigger = "key_for_trigger"; - let data_to_delete = b"data_to_delete".to_vec(); - let trigger_data = b"trigger_data".to_vec(); - - // Write the key that we'll later lazily delete - KVStore::write( - &vss_store, - primary_namespace, - secondary_namespace, - key_to_delete, - data_to_delete.clone(), - ) - .await - .unwrap(); - - // Verify the key exists - let read_data = - KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete) - .await - .unwrap(); - assert_eq!(read_data, data_to_delete); - - // Perform a lazy delete - KVStore::remove(&vss_store, primary_namespace, secondary_namespace, key_to_delete, true) - .await - .unwrap(); - - // Verify the key still exists (lazy delete doesn't immediately remove it) - let read_data = - KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete) - .await - .unwrap(); - assert_eq!(read_data, data_to_delete); - - // Verify the key is still in the list - let keys = KVStore::list(&vss_store, primary_namespace, secondary_namespace).await.unwrap(); - assert!(keys.contains(&key_to_delete.to_string())); - - // Trigger the actual deletion by performing a write operation - KVStore::write( - &vss_store, - primary_namespace, - secondary_namespace, - key_for_trigger, - trigger_data.clone(), - ) - .await - .unwrap(); - - // Now verify the key is actually deleted - let read_result = - KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete).await; - assert!(read_result.is_err()); - assert_eq!(read_result.unwrap_err().kind(), ErrorKind::NotFound); - - // Verify the key is no longer in the list - let keys = KVStore::list(&vss_store, primary_namespace, secondary_namespace).await.unwrap(); - assert!(!keys.contains(&key_to_delete.to_string())); - 
- // Verify the trigger key still exists
- let read_data =
- KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_for_trigger)
- .await
- .unwrap();
- assert_eq!(read_data, trigger_data);
- }
 }

From 6baba6a9c950864c48498998d719bac7d00e7344 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 3 Dec 2025 08:48:43 +0100
Subject: [PATCH 55/60] Update CHANGELOG for final v0.7.0 release

---
 CHANGELOG.md | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d03401d85..38b7d6de5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,4 @@
-# 0.7.0 - TODO
+# 0.7.0 - Dec. 3, 2025

 This seventh minor release introduces numerous new features, bug fixes, and API improvements. In particular, it adds support for channel Splicing, Async Payments, as well as sourcing chain data from a Bitcoin Core REST backend.

 ## Feature and API updates
@@ -23,7 +23,8 @@

 ## Bug Fixes and Improvements
 - Robustness of the shutdown procedure has been improved, minimizing risk of blocking during `Node::stop`. (#592, #612, #619, #622)
-- The VSS storage backend now supports 'lazy' deletes, allowing it to avoid unnecessary remote calls for certain operations. (#689)
+- The VSS storage backend now supports 'lazy' deletes, allowing it to avoid
+  unnecessarily waiting on remote calls for certain operations. (#689, #722)
 - The encryption and obfuscation scheme used when storing data against a VSS backend has been improved. (#627)
 - Transient errors during `bitcoind` RPC chain synchronization are now retried with an exponential back-off. (#588)
 - Transactions evicted from the mempool are now correctly handled when syncing via `bitcoind` RPC/REST. (#605)
@@ -46,10 +47,23 @@
 - The `electrum-client` dependency has been bumped to v0.24.0. (#602)
 - For Kotlin/Android builds we now require 16kb page sizes, ensuring Play Store compatibility. (#625)

-In total, this release features TODO files changed, TODO insertions, TODO
-deletions in TODO commits from TODO authors in alphabetical order:
+In total, this release features 77 files changed, 12350 insertions, 5708
+deletions in 264 commits from 14 authors in alphabetical order:

-- TODO TODO
+- aagbotemi
+- alexanderwiederin
+- Andrei
+- Artur Gontijo
+- benthecarman
+- Chuks Agbakuru
+- coreyphillips
+- Elias Rohrer
+- Enigbe
+- Jeffrey Czyz
+- Joost Jager
+- Martin Saposnic
+- moisesPomilio
+- tosynthegeek

 # 0.6.2 - Aug.
14, 2025 This patch release fixes a panic that could have been hit when syncing to a From ecb9538f8606245c9237b0155a47b1ec5bdf7947 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 3 Dec 2025 09:23:13 +0100 Subject: [PATCH 56/60] Update Swift files for v0.7.0 build --- Package.swift | 4 +- bindings/swift/Sources/LDKNode/LDKNode.swift | 4969 +++++++++++++----- 2 files changed, 3713 insertions(+), 1260 deletions(-) diff --git a/Package.swift b/Package.swift index 00f3eeb84..ed3a97ea0 100644 --- a/Package.swift +++ b/Package.swift @@ -3,8 +3,8 @@ import PackageDescription -let tag = "v0.6.2" -let checksum = "dee28eb2bc019eeb61cc28ca5c19fdada465a6eb2b5169d2dbaa369f0c63ba03" +let tag = "v0.7.0" +let checksum = "37e909987c285ddaaabf7caede58f0695491398acd4561987914996f4623a3c3" let url = "https://github.com/lightningdevkit/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" let package = Package( diff --git a/bindings/swift/Sources/LDKNode/LDKNode.swift b/bindings/swift/Sources/LDKNode/LDKNode.swift index 20ad658d7..ded6d7d96 100644 --- a/bindings/swift/Sources/LDKNode/LDKNode.swift +++ b/bindings/swift/Sources/LDKNode/LDKNode.swift @@ -51,9 +51,11 @@ fileprivate extension ForeignBytes { fileprivate extension Data { init(rustBuffer: RustBuffer) { - // TODO: This copies the buffer. Can we read directly from a - // Rust buffer? - self.init(bytes: rustBuffer.data!, count: Int(rustBuffer.len)) + self.init( + bytesNoCopy: rustBuffer.data!, + count: Int(rustBuffer.len), + deallocator: .none + ) } } @@ -154,7 +156,7 @@ fileprivate func writeDouble(_ writer: inout [UInt8], _ value: Double) { } // Protocol for types that transfer other types across the FFI. This is -// analogous go the Rust trait of the same name. +// analogous to the Rust trait of the same name. fileprivate protocol FfiConverter { associatedtype FfiType associatedtype SwiftType @@ -169,10 +171,16 @@ fileprivate protocol FfiConverter { fileprivate protocol FfiConverterPrimitive: FfiConverter where FfiType == SwiftType { } extension FfiConverterPrimitive { +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public static func lift(_ value: FfiType) throws -> SwiftType { return value } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public static func lower(_ value: SwiftType) -> FfiType { return value } @@ -183,6 +191,9 @@ extension FfiConverterPrimitive { fileprivate protocol FfiConverterRustBuffer: FfiConverter where FfiType == RustBuffer {} extension FfiConverterRustBuffer { +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public static func lift(_ buf: RustBuffer) throws -> SwiftType { var reader = createReader(data: Data(rustBuffer: buf)) let value = try read(from: &reader) @@ -193,6 +204,9 @@ extension FfiConverterRustBuffer { return value } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public static func lower(_ value: SwiftType) -> RustBuffer { var writer = createWriter() write(value, into: &writer) @@ -254,18 +268,19 @@ fileprivate extension RustCallStatus { } private func rustCall(_ callback: (UnsafeMutablePointer) -> T) throws -> T { - try makeRustCall(callback, errorHandler: nil) + let neverThrow: ((RustBuffer) throws -> Never)? 
= nil + return try makeRustCall(callback, errorHandler: neverThrow) } -private func rustCallWithError( - _ errorHandler: @escaping (RustBuffer) throws -> Error, +private func rustCallWithError( + _ errorHandler: @escaping (RustBuffer) throws -> E, _ callback: (UnsafeMutablePointer) -> T) throws -> T { try makeRustCall(callback, errorHandler: errorHandler) } -private func makeRustCall( +private func makeRustCall( _ callback: (UnsafeMutablePointer) -> T, - errorHandler: ((RustBuffer) throws -> Error)? + errorHandler: ((RustBuffer) throws -> E)? ) throws -> T { uniffiEnsureInitialized() var callStatus = RustCallStatus.init() @@ -274,9 +289,9 @@ private func makeRustCall( return returnedVal } -private func uniffiCheckCallStatus( +private func uniffiCheckCallStatus( callStatus: RustCallStatus, - errorHandler: ((RustBuffer) throws -> Error)? + errorHandler: ((RustBuffer) throws -> E)? ) throws { switch callStatus.code { case CALL_SUCCESS: @@ -382,6 +397,9 @@ fileprivate class UniffiHandleMap { // Public interface members begin here. +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterUInt8: FfiConverterPrimitive { typealias FfiType = UInt8 typealias SwiftType = UInt8 @@ -395,6 +413,9 @@ fileprivate struct FfiConverterUInt8: FfiConverterPrimitive { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterUInt16: FfiConverterPrimitive { typealias FfiType = UInt16 typealias SwiftType = UInt16 @@ -408,6 +429,9 @@ fileprivate struct FfiConverterUInt16: FfiConverterPrimitive { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterUInt32: FfiConverterPrimitive { typealias FfiType = UInt32 typealias SwiftType = UInt32 @@ -421,6 +445,9 @@ fileprivate struct FfiConverterUInt32: FfiConverterPrimitive { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterUInt64: FfiConverterPrimitive { typealias FfiType = UInt64 typealias SwiftType = UInt64 @@ -434,6 +461,9 @@ fileprivate struct FfiConverterUInt64: FfiConverterPrimitive { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterBool : FfiConverter { typealias FfiType = Int8 typealias SwiftType = Bool @@ -455,6 +485,9 @@ fileprivate struct FfiConverterBool : FfiConverter { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterString: FfiConverter { typealias SwiftType = String typealias FfiType = RustBuffer @@ -493,6 +526,9 @@ fileprivate struct FfiConverterString: FfiConverter { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterData: FfiConverterRustBuffer { typealias SwiftType = Data @@ -555,6 +591,9 @@ open class Bolt11Invoice: fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -566,15 +605,21 @@ open class Bolt11Invoice: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. 
Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_bolt11invoice(self.pointer, $0) } } @@ -740,6 +785,9 @@ open func wouldExpire(atTimeSeconds: UInt64) -> Bool { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeBolt11Invoice: FfiConverter { typealias FfiType = UnsafeMutableRawPointer @@ -774,10 +822,16 @@ public struct FfiConverterTypeBolt11Invoice: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBolt11Invoice_lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt11Invoice { return try FfiConverterTypeBolt11Invoice.lift(pointer) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBolt11Invoice_lower(_ value: Bolt11Invoice) -> UnsafeMutableRawPointer { return FfiConverterTypeBolt11Invoice.lower(value) } @@ -801,15 +855,19 @@ public protocol Bolt11PaymentProtocol : AnyObject { func receiveVariableAmountViaJitChannel(description: Bolt11InvoiceDescription, expirySecs: UInt32, maxProportionalLspFeeLimitPpmMsat: UInt64?) throws -> Bolt11Invoice + func receiveVariableAmountViaJitChannelForHash(description: Bolt11InvoiceDescription, expirySecs: UInt32, maxProportionalLspFeeLimitPpmMsat: UInt64?, paymentHash: PaymentHash) throws -> Bolt11Invoice + func receiveViaJitChannel(amountMsat: UInt64, description: Bolt11InvoiceDescription, expirySecs: UInt32, maxLspFeeLimitMsat: UInt64?) throws -> Bolt11Invoice - func send(invoice: Bolt11Invoice, sendingParameters: SendingParameters?) throws -> PaymentId + func receiveViaJitChannelForHash(amountMsat: UInt64, description: Bolt11InvoiceDescription, expirySecs: UInt32, maxLspFeeLimitMsat: UInt64?, paymentHash: PaymentHash) throws -> Bolt11Invoice - func sendProbes(invoice: Bolt11Invoice) throws + func send(invoice: Bolt11Invoice, routeParameters: RouteParametersConfig?) throws -> PaymentId - func sendProbesUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64) throws + func sendProbes(invoice: Bolt11Invoice, routeParameters: RouteParametersConfig?) throws - func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, sendingParameters: SendingParameters?) throws -> PaymentId + func sendProbesUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, routeParameters: RouteParametersConfig?) throws + + func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, routeParameters: RouteParametersConfig?) throws -> PaymentId } @@ -818,6 +876,9 @@ open class Bolt11Payment: fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. 
+#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -829,15 +890,21 @@ open class Bolt11Payment: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_bolt11payment(self.pointer, $0) } } @@ -920,6 +987,17 @@ open func receiveVariableAmountViaJitChannel(description: Bolt11InvoiceDescripti }) } +open func receiveVariableAmountViaJitChannelForHash(description: Bolt11InvoiceDescription, expirySecs: UInt32, maxProportionalLspFeeLimitPpmMsat: UInt64?, paymentHash: PaymentHash)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount_via_jit_channel_for_hash(self.uniffiClonePointer(), + FfiConverterTypeBolt11InvoiceDescription.lower(description), + FfiConverterUInt32.lower(expirySecs), + FfiConverterOptionUInt64.lower(maxProportionalLspFeeLimitPpmMsat), + FfiConverterTypePaymentHash.lower(paymentHash),$0 + ) +}) +} + open func receiveViaJitChannel(amountMsat: UInt64, description: Bolt11InvoiceDescription, expirySecs: UInt32, maxLspFeeLimitMsat: UInt64?)throws -> Bolt11Invoice { return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { uniffi_ldk_node_fn_method_bolt11payment_receive_via_jit_channel(self.uniffiClonePointer(), @@ -931,36 +1009,50 @@ open func receiveViaJitChannel(amountMsat: UInt64, description: Bolt11InvoiceDes }) } -open func send(invoice: Bolt11Invoice, sendingParameters: SendingParameters?)throws -> PaymentId { +open func receiveViaJitChannelForHash(amountMsat: UInt64, description: Bolt11InvoiceDescription, expirySecs: UInt32, maxLspFeeLimitMsat: UInt64?, paymentHash: PaymentHash)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_via_jit_channel_for_hash(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountMsat), + FfiConverterTypeBolt11InvoiceDescription.lower(description), + FfiConverterUInt32.lower(expirySecs), + FfiConverterOptionUInt64.lower(maxLspFeeLimitMsat), + FfiConverterTypePaymentHash.lower(paymentHash),$0 + ) +}) +} + +open func send(invoice: Bolt11Invoice, routeParameters: RouteParametersConfig?)throws -> PaymentId { return try FfiConverterTypePaymentId.lift(try 
rustCallWithError(FfiConverterTypeNodeError.lift) { uniffi_ldk_node_fn_method_bolt11payment_send(self.uniffiClonePointer(), FfiConverterTypeBolt11Invoice.lower(invoice), - FfiConverterOptionTypeSendingParameters.lower(sendingParameters),$0 + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 ) }) } -open func sendProbes(invoice: Bolt11Invoice)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { +open func sendProbes(invoice: Bolt11Invoice, routeParameters: RouteParametersConfig?)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { uniffi_ldk_node_fn_method_bolt11payment_send_probes(self.uniffiClonePointer(), - FfiConverterTypeBolt11Invoice.lower(invoice),$0 + FfiConverterTypeBolt11Invoice.lower(invoice), + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 ) } } -open func sendProbesUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { +open func sendProbesUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, routeParameters: RouteParametersConfig?)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { uniffi_ldk_node_fn_method_bolt11payment_send_probes_using_amount(self.uniffiClonePointer(), FfiConverterTypeBolt11Invoice.lower(invoice), - FfiConverterUInt64.lower(amountMsat),$0 + FfiConverterUInt64.lower(amountMsat), + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 ) } } -open func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, sendingParameters: SendingParameters?)throws -> PaymentId { +open func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, routeParameters: RouteParametersConfig?)throws -> PaymentId { return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { uniffi_ldk_node_fn_method_bolt11payment_send_using_amount(self.uniffiClonePointer(), FfiConverterTypeBolt11Invoice.lower(invoice), FfiConverterUInt64.lower(amountMsat), - FfiConverterOptionTypeSendingParameters.lower(sendingParameters),$0 + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 ) }) } @@ -968,6 +1060,9 @@ open func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, sendingPar } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeBolt11Payment: FfiConverter { typealias FfiType = UnsafeMutableRawPointer @@ -1002,10 +1097,16 @@ public struct FfiConverterTypeBolt11Payment: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBolt11Payment_lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt11Payment { return try FfiConverterTypeBolt11Payment.lift(pointer) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBolt11Payment_lower(_ value: Bolt11Payment) -> UnsafeMutableRawPointer { return FfiConverterTypeBolt11Payment.lower(value) } @@ -1013,27 +1114,58 @@ public func FfiConverterTypeBolt11Payment_lower(_ value: Bolt11Payment) -> Unsaf -public protocol Bolt12PaymentProtocol : AnyObject { +public protocol Bolt12InvoiceProtocol : AnyObject { - func initiateRefund(amountMsat: UInt64, expirySecs: UInt32, quantity: UInt64?, payerNote: String?) throws -> Refund + func absoluteExpirySeconds() -> UInt64? - func receive(amountMsat: UInt64, description: String, expirySecs: UInt32?, quantity: UInt64?) throws -> Offer + func amount() -> OfferAmount? - func receiveVariableAmount(description: String, expirySecs: UInt32?) 
throws -> Offer + func amountMsats() -> UInt64 - func requestRefundPayment(refund: Refund) throws -> Bolt12Invoice + func chain() -> [UInt8] + + func createdAt() -> UInt64 + + func encode() -> [UInt8] + + func fallbackAddresses() -> [Address] + + func invoiceDescription() -> String? + + func isExpired() -> Bool + + func issuer() -> String? + + func issuerSigningPubkey() -> PublicKey? + + func metadata() -> [UInt8]? + + func offerChains() -> [[UInt8]]? + + func payerNote() -> String? + + func payerSigningPubkey() -> PublicKey + + func paymentHash() -> PaymentHash + + func quantity() -> UInt64? - func send(offer: Offer, quantity: UInt64?, payerNote: String?) throws -> PaymentId + func relativeExpiry() -> UInt64 + + func signableHash() -> [UInt8] - func sendUsingAmount(offer: Offer, amountMsat: UInt64, quantity: UInt64?, payerNote: String?) throws -> PaymentId + func signingPubkey() -> PublicKey } -open class Bolt12Payment: - Bolt12PaymentProtocol { +open class Bolt12Invoice: + Bolt12InvoiceProtocol { fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -1045,17 +1177,23 @@ open class Bolt12Payment: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { - return try! rustCall { uniffi_ldk_node_fn_clone_bolt12payment(self.pointer, $0) } + return try! rustCall { uniffi_ldk_node_fn_clone_bolt12invoice(self.pointer, $0) } } // No primary constructor declared for this class. @@ -1064,68 +1202,156 @@ open class Bolt12Payment: return } - try! rustCall { uniffi_ldk_node_fn_free_bolt12payment(pointer, $0) } + try! 
rustCall { uniffi_ldk_node_fn_free_bolt12invoice(pointer, $0) } } +public static func fromStr(invoiceStr: String)throws -> Bolt12Invoice { + return try FfiConverterTypeBolt12Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_constructor_bolt12invoice_from_str( + FfiConverterString.lower(invoiceStr),$0 + ) +}) +} + -open func initiateRefund(amountMsat: UInt64, expirySecs: UInt32, quantity: UInt64?, payerNote: String?)throws -> Refund { - return try FfiConverterTypeRefund.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_initiate_refund(self.uniffiClonePointer(), - FfiConverterUInt64.lower(amountMsat), - FfiConverterUInt32.lower(expirySecs), - FfiConverterOptionUInt64.lower(quantity), - FfiConverterOptionString.lower(payerNote),$0 +open func absoluteExpirySeconds() -> UInt64? { + return try! FfiConverterOptionUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_absolute_expiry_seconds(self.uniffiClonePointer(),$0 ) }) } -open func receive(amountMsat: UInt64, description: String, expirySecs: UInt32?, quantity: UInt64?)throws -> Offer { - return try FfiConverterTypeOffer.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_receive(self.uniffiClonePointer(), - FfiConverterUInt64.lower(amountMsat), - FfiConverterString.lower(description), - FfiConverterOptionUInt32.lower(expirySecs), - FfiConverterOptionUInt64.lower(quantity),$0 +open func amount() -> OfferAmount? { + return try! FfiConverterOptionTypeOfferAmount.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_amount(self.uniffiClonePointer(),$0 ) }) } -open func receiveVariableAmount(description: String, expirySecs: UInt32?)throws -> Offer { - return try FfiConverterTypeOffer.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_receive_variable_amount(self.uniffiClonePointer(), - FfiConverterString.lower(description), - FfiConverterOptionUInt32.lower(expirySecs),$0 +open func amountMsats() -> UInt64 { + return try! FfiConverterUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_amount_msats(self.uniffiClonePointer(),$0 ) }) } -open func requestRefundPayment(refund: Refund)throws -> Bolt12Invoice { - return try FfiConverterTypeBolt12Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_request_refund_payment(self.uniffiClonePointer(), - FfiConverterTypeRefund.lower(refund),$0 +open func chain() -> [UInt8] { + return try! FfiConverterSequenceUInt8.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_chain(self.uniffiClonePointer(),$0 ) }) } -open func send(offer: Offer, quantity: UInt64?, payerNote: String?)throws -> PaymentId { - return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_send(self.uniffiClonePointer(), - FfiConverterTypeOffer.lower(offer), - FfiConverterOptionUInt64.lower(quantity), - FfiConverterOptionString.lower(payerNote),$0 +open func createdAt() -> UInt64 { + return try! FfiConverterUInt64.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_created_at(self.uniffiClonePointer(),$0 ) }) } -open func sendUsingAmount(offer: Offer, amountMsat: UInt64, quantity: UInt64?, payerNote: String?)throws -> PaymentId { - return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_send_using_amount(self.uniffiClonePointer(), - FfiConverterTypeOffer.lower(offer), - FfiConverterUInt64.lower(amountMsat), - FfiConverterOptionUInt64.lower(quantity), - FfiConverterOptionString.lower(payerNote),$0 +open func encode() -> [UInt8] { + return try! FfiConverterSequenceUInt8.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_encode(self.uniffiClonePointer(),$0 + ) +}) +} + +open func fallbackAddresses() -> [Address] { + return try! FfiConverterSequenceTypeAddress.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_fallback_addresses(self.uniffiClonePointer(),$0 + ) +}) +} + +open func invoiceDescription() -> String? { + return try! FfiConverterOptionString.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_invoice_description(self.uniffiClonePointer(),$0 + ) +}) +} + +open func isExpired() -> Bool { + return try! FfiConverterBool.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_is_expired(self.uniffiClonePointer(),$0 + ) +}) +} + +open func issuer() -> String? { + return try! FfiConverterOptionString.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_issuer(self.uniffiClonePointer(),$0 + ) +}) +} + +open func issuerSigningPubkey() -> PublicKey? { + return try! FfiConverterOptionTypePublicKey.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_issuer_signing_pubkey(self.uniffiClonePointer(),$0 + ) +}) +} + +open func metadata() -> [UInt8]? { + return try! FfiConverterOptionSequenceUInt8.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_metadata(self.uniffiClonePointer(),$0 + ) +}) +} + +open func offerChains() -> [[UInt8]]? { + return try! FfiConverterOptionSequenceSequenceUInt8.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_offer_chains(self.uniffiClonePointer(),$0 + ) +}) +} + +open func payerNote() -> String? { + return try! FfiConverterOptionString.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_payer_note(self.uniffiClonePointer(),$0 + ) +}) +} + +open func payerSigningPubkey() -> PublicKey { + return try! FfiConverterTypePublicKey.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_payer_signing_pubkey(self.uniffiClonePointer(),$0 + ) +}) +} + +open func paymentHash() -> PaymentHash { + return try! FfiConverterTypePaymentHash.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_payment_hash(self.uniffiClonePointer(),$0 + ) +}) +} + +open func quantity() -> UInt64? { + return try! FfiConverterOptionUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_quantity(self.uniffiClonePointer(),$0 + ) +}) +} + +open func relativeExpiry() -> UInt64 { + return try! FfiConverterUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_relative_expiry(self.uniffiClonePointer(),$0 + ) +}) +} + +open func signableHash() -> [UInt8] { + return try! FfiConverterSequenceUInt8.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_signable_hash(self.uniffiClonePointer(),$0 + ) +}) +} + +open func signingPubkey() -> PublicKey { + return try! FfiConverterTypePublicKey.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_bolt12invoice_signing_pubkey(self.uniffiClonePointer(),$0 ) }) } @@ -1133,20 +1359,23 @@ open func sendUsingAmount(offer: Offer, amountMsat: UInt64, quantity: UInt64?, p } -public struct FfiConverterTypeBolt12Payment: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeBolt12Invoice: FfiConverter { typealias FfiType = UnsafeMutableRawPointer - typealias SwiftType = Bolt12Payment + typealias SwiftType = Bolt12Invoice - public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt12Payment { - return Bolt12Payment(unsafeFromRawPointer: pointer) + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt12Invoice { + return Bolt12Invoice(unsafeFromRawPointer: pointer) } - public static func lower(_ value: Bolt12Payment) -> UnsafeMutableRawPointer { + public static func lower(_ value: Bolt12Invoice) -> UnsafeMutableRawPointer { return value.uniffiClonePointer() } - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Bolt12Payment { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Bolt12Invoice { let v: UInt64 = try readInt(&buf) // The Rust code won't compile if a pointer won't fit in a UInt64. // We have to go via `UInt` because that's the thing that's the size of a pointer. @@ -1157,7 +1386,7 @@ public struct FfiConverterTypeBolt12Payment: FfiConverter { return try lift(ptr!) } - public static func write(_ value: Bolt12Payment, into buf: inout [UInt8]) { + public static func write(_ value: Bolt12Invoice, into buf: inout [UInt8]) { // This fiddling is because `Int` is the thing that's the same size as a pointer. // The Rust code won't compile if a pointer won't fit in a `UInt64`. writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) @@ -1167,72 +1396,53 @@ public struct FfiConverterTypeBolt12Payment: FfiConverter { -public func FfiConverterTypeBolt12Payment_lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt12Payment { - return try FfiConverterTypeBolt12Payment.lift(pointer) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeBolt12Invoice_lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt12Invoice { + return try FfiConverterTypeBolt12Invoice.lift(pointer) } -public func FfiConverterTypeBolt12Payment_lower(_ value: Bolt12Payment) -> UnsafeMutableRawPointer { - return FfiConverterTypeBolt12Payment.lower(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeBolt12Invoice_lower(_ value: Bolt12Invoice) -> UnsafeMutableRawPointer { + return FfiConverterTypeBolt12Invoice.lower(value) } -public protocol BuilderProtocol : AnyObject { - - func build() throws -> Node - - func buildWithFsStore() throws -> Node - - func buildWithVssStore(vssUrl: String, storeId: String, lnurlAuthServerUrl: String, fixedHeaders: [String: String]) throws -> Node - - func buildWithVssStoreAndFixedHeaders(vssUrl: String, storeId: String, fixedHeaders: [String: String]) throws -> Node - - func buildWithVssStoreAndHeaderProvider(vssUrl: String, storeId: String, headerProvider: VssHeaderProvider) throws -> Node - - func setAnnouncementAddresses(announcementAddresses: [SocketAddress]) throws - - func setChainSourceBitcoindRpc(rpcHost: String, rpcPort: UInt16, rpcUser: String, rpcPassword: String) - - func setChainSourceElectrum(serverUrl: String, config: ElectrumSyncConfig?) 
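The new `Bolt12Invoice` object above is constructed from its string encoding and then queried through plain getters. A short sketch using only the constructor and accessors introduced in this hunk (`invoiceStr` is a placeholder for a received BOLT12 invoice string):

    // Sketch only: `invoiceStr` stands in for a received invoice string.
    let invoice = try Bolt12Invoice.fromStr(invoiceStr: invoiceStr)
    let amountMsat: UInt64 = invoice.amountMsats()
    let hash: PaymentHash = invoice.paymentHash()
    if invoice.isExpired() {
        // Handle the expired invoice.
    }
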
- - func setChainSourceEsplora(serverUrl: String, config: EsploraSyncConfig?) - - func setCustomLogger(logWriter: LogWriter) - - func setEntropyBip39Mnemonic(mnemonic: Mnemonic, passphrase: String?) - - func setEntropySeedBytes(seedBytes: [UInt8]) throws - - func setEntropySeedPath(seedPath: String) - - func setFilesystemLogger(logFilePath: String?, maxLogLevel: LogLevel?) +public protocol Bolt12PaymentProtocol : AnyObject { - func setGossipSourceP2p() + func blindedPathsForAsyncRecipient(recipientId: Data) throws -> Data - func setGossipSourceRgs(rgsServerUrl: String) + func initiateRefund(amountMsat: UInt64, expirySecs: UInt32, quantity: UInt64?, payerNote: String?, routeParameters: RouteParametersConfig?) throws -> Refund - func setLiquiditySourceLsps1(nodeId: PublicKey, address: SocketAddress, token: String?) + func receive(amountMsat: UInt64, description: String, expirySecs: UInt32?, quantity: UInt64?) throws -> Offer - func setLiquiditySourceLsps2(nodeId: PublicKey, address: SocketAddress, token: String?) + func receiveAsync() throws -> Offer - func setListeningAddresses(listeningAddresses: [SocketAddress]) throws + func receiveVariableAmount(description: String, expirySecs: UInt32?) throws -> Offer - func setLogFacadeLogger() + func requestRefundPayment(refund: Refund) throws -> Bolt12Invoice - func setNetwork(network: Network) + func send(offer: Offer, quantity: UInt64?, payerNote: String?, routeParameters: RouteParametersConfig?) throws -> PaymentId - func setNodeAlias(nodeAlias: String) throws + func sendUsingAmount(offer: Offer, amountMsat: UInt64, quantity: UInt64?, payerNote: String?, routeParameters: RouteParametersConfig?) throws -> PaymentId - func setStorageDirPath(storageDirPath: String) + func setPathsToStaticInvoiceServer(paths: Data) throws } -open class Builder: - BuilderProtocol { +open class Bolt12Payment: + Bolt12PaymentProtocol { fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -1244,21 +1454,275 @@ open class Builder: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { - return try! rustCall { uniffi_ldk_node_fn_clone_builder(self.pointer, $0) } + return try! rustCall { uniffi_ldk_node_fn_clone_bolt12payment(self.pointer, $0) } } -public convenience init() { - let pointer = - try! 
rustCall() { + // No primary constructor declared for this class. + + deinit { + guard let pointer = pointer else { + return + } + + try! rustCall { uniffi_ldk_node_fn_free_bolt12payment(pointer, $0) } + } + + + + +open func blindedPathsForAsyncRecipient(recipientId: Data)throws -> Data { + return try FfiConverterData.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_blinded_paths_for_async_recipient(self.uniffiClonePointer(), + FfiConverterData.lower(recipientId),$0 + ) +}) +} + +open func initiateRefund(amountMsat: UInt64, expirySecs: UInt32, quantity: UInt64?, payerNote: String?, routeParameters: RouteParametersConfig?)throws -> Refund { + return try FfiConverterTypeRefund.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_initiate_refund(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountMsat), + FfiConverterUInt32.lower(expirySecs), + FfiConverterOptionUInt64.lower(quantity), + FfiConverterOptionString.lower(payerNote), + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 + ) +}) +} + +open func receive(amountMsat: UInt64, description: String, expirySecs: UInt32?, quantity: UInt64?)throws -> Offer { + return try FfiConverterTypeOffer.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_receive(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountMsat), + FfiConverterString.lower(description), + FfiConverterOptionUInt32.lower(expirySecs), + FfiConverterOptionUInt64.lower(quantity),$0 + ) +}) +} + +open func receiveAsync()throws -> Offer { + return try FfiConverterTypeOffer.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_receive_async(self.uniffiClonePointer(),$0 + ) +}) +} + +open func receiveVariableAmount(description: String, expirySecs: UInt32?)throws -> Offer { + return try FfiConverterTypeOffer.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_receive_variable_amount(self.uniffiClonePointer(), + FfiConverterString.lower(description), + FfiConverterOptionUInt32.lower(expirySecs),$0 + ) +}) +} + +open func requestRefundPayment(refund: Refund)throws -> Bolt12Invoice { + return try FfiConverterTypeBolt12Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_request_refund_payment(self.uniffiClonePointer(), + FfiConverterTypeRefund.lower(refund),$0 + ) +}) +} + +open func send(offer: Offer, quantity: UInt64?, payerNote: String?, routeParameters: RouteParametersConfig?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_send(self.uniffiClonePointer(), + FfiConverterTypeOffer.lower(offer), + FfiConverterOptionUInt64.lower(quantity), + FfiConverterOptionString.lower(payerNote), + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 + ) +}) +} + +open func sendUsingAmount(offer: Offer, amountMsat: UInt64, quantity: UInt64?, payerNote: String?, routeParameters: RouteParametersConfig?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_send_using_amount(self.uniffiClonePointer(), + FfiConverterTypeOffer.lower(offer), + FfiConverterUInt64.lower(amountMsat), + FfiConverterOptionUInt64.lower(quantity), + 
FfiConverterOptionString.lower(payerNote), + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 + ) +}) +} + +open func setPathsToStaticInvoiceServer(paths: Data)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_set_paths_to_static_invoice_server(self.uniffiClonePointer(), + FfiConverterData.lower(paths),$0 + ) +} +} + + +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeBolt12Payment: FfiConverter { + + typealias FfiType = UnsafeMutableRawPointer + typealias SwiftType = Bolt12Payment + + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt12Payment { + return Bolt12Payment(unsafeFromRawPointer: pointer) + } + + public static func lower(_ value: Bolt12Payment) -> UnsafeMutableRawPointer { + return value.uniffiClonePointer() + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Bolt12Payment { + let v: UInt64 = try readInt(&buf) + // The Rust code won't compile if a pointer won't fit in a UInt64. + // We have to go via `UInt` because that's the thing that's the size of a pointer. + let ptr = UnsafeMutableRawPointer(bitPattern: UInt(truncatingIfNeeded: v)) + if (ptr == nil) { + throw UniffiInternalError.unexpectedNullPointer + } + return try lift(ptr!) + } + + public static func write(_ value: Bolt12Payment, into buf: inout [UInt8]) { + // This fiddling is because `Int` is the thing that's the same size as a pointer. + // The Rust code won't compile if a pointer won't fit in a `UInt64`. + writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) + } +} + + + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeBolt12Payment_lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt12Payment { + return try FfiConverterTypeBolt12Payment.lift(pointer) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeBolt12Payment_lower(_ value: Bolt12Payment) -> UnsafeMutableRawPointer { + return FfiConverterTypeBolt12Payment.lower(value) +} + + + + +public protocol BuilderProtocol : AnyObject { + + func build() throws -> Node + + func buildWithFsStore() throws -> Node + + func buildWithVssStore(vssUrl: String, storeId: String, lnurlAuthServerUrl: String, fixedHeaders: [String: String]) throws -> Node + + func buildWithVssStoreAndFixedHeaders(vssUrl: String, storeId: String, fixedHeaders: [String: String]) throws -> Node + + func buildWithVssStoreAndHeaderProvider(vssUrl: String, storeId: String, headerProvider: VssHeaderProvider) throws -> Node + + func setAnnouncementAddresses(announcementAddresses: [SocketAddress]) throws + + func setAsyncPaymentsRole(role: AsyncPaymentsRole?) throws + + func setChainSourceBitcoindRest(restHost: String, restPort: UInt16, rpcHost: String, rpcPort: UInt16, rpcUser: String, rpcPassword: String) + + func setChainSourceBitcoindRpc(rpcHost: String, rpcPort: UInt16, rpcUser: String, rpcPassword: String) + + func setChainSourceElectrum(serverUrl: String, config: ElectrumSyncConfig?) + + func setChainSourceEsplora(serverUrl: String, config: EsploraSyncConfig?) + + func setCustomLogger(logWriter: LogWriter) + + func setEntropyBip39Mnemonic(mnemonic: Mnemonic, passphrase: String?) + + func setEntropySeedBytes(seedBytes: [UInt8]) throws + + func setEntropySeedPath(seedPath: String) + + func setFilesystemLogger(logFilePath: String?, maxLogLevel: LogLevel?) 
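Taken together, `Builder` grows three hooks in this patch: an optional async-payments role, a bitcoind REST chain source, and an external pathfinding-scores source. A configuration sketch using the signatures declared above, with placeholder endpoints and credentials:

    // Sketch only: hosts, ports, credentials, and URL are placeholders.
    let builder = Builder()
    builder.setChainSourceBitcoindRest(restHost: "127.0.0.1", restPort: 8332,
                                       rpcHost: "127.0.0.1", rpcPort: 8332,
                                       rpcUser: "rpcuser", rpcPassword: "rpcpass")
    builder.setPathfindingScoresSource(url: "https://scores.example.org")
    let node = try builder.build()
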
+ + func setGossipSourceP2p() + + func setGossipSourceRgs(rgsServerUrl: String) + + func setLiquiditySourceLsps1(nodeId: PublicKey, address: SocketAddress, token: String?) + + func setLiquiditySourceLsps2(nodeId: PublicKey, address: SocketAddress, token: String?) + + func setListeningAddresses(listeningAddresses: [SocketAddress]) throws + + func setLogFacadeLogger() + + func setNetwork(network: Network) + + func setNodeAlias(nodeAlias: String) throws + + func setPathfindingScoresSource(url: String) + + func setStorageDirPath(storageDirPath: String) + +} + +open class Builder: + BuilderProtocol { + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif + public struct NoPointer { + public init() {} + } + + // TODO: We'd like this to be `private` but for Swifty reasons, + // we can't implement `FfiConverter` without making this `required` and we can't + // make it `required` without making it `public`. + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + self.pointer = pointer + } + + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif + public init(noPointer: NoPointer) { + self.pointer = nil + } + +#if swift(>=5.8) + @_documentation(visibility: private) +#endif + public func uniffiClonePointer() -> UnsafeMutableRawPointer { + return try! rustCall { uniffi_ldk_node_fn_clone_builder(self.pointer, $0) } + } +public convenience init() { + let pointer = + try! rustCall() { uniffi_ldk_node_fn_constructor_builder_new($0 ) } @@ -1336,6 +1800,25 @@ open func setAnnouncementAddresses(announcementAddresses: [SocketAddress])throws } } +open func setAsyncPaymentsRole(role: AsyncPaymentsRole?)throws {try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_set_async_payments_role(self.uniffiClonePointer(), + FfiConverterOptionTypeAsyncPaymentsRole.lower(role),$0 + ) +} +} + +open func setChainSourceBitcoindRest(restHost: String, restPort: UInt16, rpcHost: String, rpcPort: UInt16, rpcUser: String, rpcPassword: String) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_chain_source_bitcoind_rest(self.uniffiClonePointer(), + FfiConverterString.lower(restHost), + FfiConverterUInt16.lower(restPort), + FfiConverterString.lower(rpcHost), + FfiConverterUInt16.lower(rpcPort), + FfiConverterString.lower(rpcUser), + FfiConverterString.lower(rpcPassword),$0 + ) +} +} + open func setChainSourceBitcoindRpc(rpcHost: String, rpcPort: UInt16, rpcUser: String, rpcPassword: String) {try! rustCall() { uniffi_ldk_node_fn_method_builder_set_chain_source_bitcoind_rpc(self.uniffiClonePointer(), FfiConverterString.lower(rpcHost), @@ -1457,6 +1940,13 @@ open func setNodeAlias(nodeAlias: String)throws {try rustCallWithError(FfiConve } } +open func setPathfindingScoresSource(url: String) {try! 
rustCall() { + uniffi_ldk_node_fn_method_builder_set_pathfinding_scores_source(self.uniffiClonePointer(), + FfiConverterString.lower(url),$0 + ) +} +} + open func setStorageDirPath(storageDirPath: String) {try! rustCall() { uniffi_ldk_node_fn_method_builder_set_storage_dir_path(self.uniffiClonePointer(), FfiConverterString.lower(storageDirPath),$0 @@ -1467,6 +1957,9 @@ open func setStorageDirPath(storageDirPath: String) {try! rustCall() { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeBuilder: FfiConverter { typealias FfiType = UnsafeMutableRawPointer @@ -1501,10 +1994,16 @@ public struct FfiConverterTypeBuilder: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBuilder_lift(_ pointer: UnsafeMutableRawPointer) throws -> Builder { return try FfiConverterTypeBuilder.lift(pointer) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBuilder_lower(_ value: Builder) -> UnsafeMutableRawPointer { return FfiConverterTypeBuilder.lower(value) } @@ -1527,6 +2026,9 @@ open class FeeRate: fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -1538,15 +2040,21 @@ open class FeeRate: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! 
rustCall { uniffi_ldk_node_fn_clone_feerate(self.pointer, $0) } } @@ -1603,6 +2111,9 @@ open func toSatPerVbFloor() -> UInt64 { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeFeeRate: FfiConverter { typealias FfiType = UnsafeMutableRawPointer @@ -1637,10 +2148,16 @@ public struct FfiConverterTypeFeeRate: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeFeeRate_lift(_ pointer: UnsafeMutableRawPointer) throws -> FeeRate { return try FfiConverterTypeFeeRate.lift(pointer) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeFeeRate_lower(_ value: FeeRate) -> UnsafeMutableRawPointer { return FfiConverterTypeFeeRate.lower(value) } @@ -1650,7 +2167,7 @@ public func FfiConverterTypeFeeRate_lower(_ value: FeeRate) -> UnsafeMutableRawP public protocol Lsps1LiquidityProtocol : AnyObject { - func checkOrderStatus(orderId: OrderId) throws -> Lsps1OrderStatus + func checkOrderStatus(orderId: Lsps1OrderId) throws -> Lsps1OrderStatus func requestChannel(lspBalanceSat: UInt64, clientBalanceSat: UInt64, channelExpiryBlocks: UInt32, announceChannel: Bool) throws -> Lsps1OrderStatus @@ -1661,6 +2178,9 @@ open class Lsps1Liquidity: fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -1672,15 +2192,21 @@ open class Lsps1Liquidity: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! 
rustCall { uniffi_ldk_node_fn_clone_lsps1liquidity(self.pointer, $0) } } @@ -1697,10 +2223,10 @@ open class Lsps1Liquidity: -open func checkOrderStatus(orderId: OrderId)throws -> Lsps1OrderStatus { +open func checkOrderStatus(orderId: Lsps1OrderId)throws -> Lsps1OrderStatus { return try FfiConverterTypeLSPS1OrderStatus.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { uniffi_ldk_node_fn_method_lsps1liquidity_check_order_status(self.uniffiClonePointer(), - FfiConverterTypeOrderId.lower(orderId),$0 + FfiConverterTypeLSPS1OrderId.lower(orderId),$0 ) }) } @@ -1719,6 +2245,9 @@ open func requestChannel(lspBalanceSat: UInt64, clientBalanceSat: UInt64, channe } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeLSPS1Liquidity: FfiConverter { typealias FfiType = UnsafeMutableRawPointer @@ -1753,10 +2282,16 @@ public struct FfiConverterTypeLSPS1Liquidity: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLSPS1Liquidity_lift(_ pointer: UnsafeMutableRawPointer) throws -> Lsps1Liquidity { return try FfiConverterTypeLSPS1Liquidity.lift(pointer) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLSPS1Liquidity_lower(_ value: Lsps1Liquidity) -> UnsafeMutableRawPointer { return FfiConverterTypeLSPS1Liquidity.lower(value) } @@ -1775,6 +2310,9 @@ open class LogWriterImpl: fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -1786,15 +2324,21 @@ open class LogWriterImpl: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! 
rustCall { uniffi_ldk_node_fn_clone_logwriter(self.pointer, $0) } } @@ -1871,6 +2415,9 @@ private func uniffiCallbackInitLogWriter() { uniffi_ldk_node_fn_init_callback_vtable_logwriter(&UniffiCallbackInterfaceLogWriter.vtable) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeLogWriter: FfiConverter { fileprivate static var handleMap = UniffiHandleMap() @@ -1909,10 +2456,16 @@ public struct FfiConverterTypeLogWriter: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLogWriter_lift(_ pointer: UnsafeMutableRawPointer) throws -> LogWriter { return try FfiConverterTypeLogWriter.lift(pointer) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLogWriter_lower(_ value: LogWriter) -> UnsafeMutableRawPointer { return FfiConverterTypeLogWriter.lower(value) } @@ -1937,6 +2490,9 @@ open class NetworkGraph: fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -1948,15 +2504,21 @@ open class NetworkGraph: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_networkgraph(self.pointer, $0) } } @@ -2006,6 +2568,9 @@ open func node(nodeId: NodeId) -> NodeInfo? 
{ } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeNetworkGraph: FfiConverter { typealias FfiType = UnsafeMutableRawPointer @@ -2040,10 +2605,16 @@ public struct FfiConverterTypeNetworkGraph: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNetworkGraph_lift(_ pointer: UnsafeMutableRawPointer) throws -> NetworkGraph { return try FfiConverterTypeNetworkGraph.lift(pointer) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNetworkGraph_lower(_ value: NetworkGraph) -> UnsafeMutableRawPointer { return FfiConverterTypeNetworkGraph.lower(value) } @@ -2107,6 +2678,10 @@ public protocol NodeProtocol : AnyObject { func signMessage(msg: [UInt8]) -> String + func spliceIn(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, spliceAmountSats: UInt64) throws + + func spliceOut(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, address: Address, spliceAmountSats: UInt64) throws + func spontaneousPayment() -> SpontaneousPayment func start() throws @@ -2132,6 +2707,9 @@ open class Node: fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -2143,15 +2721,21 @@ open class Node: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! 
rustCall { uniffi_ldk_node_fn_clone_node(self.pointer, $0) } } @@ -2384,6 +2968,25 @@ open func signMessage(msg: [UInt8]) -> String { }) } +open func spliceIn(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, spliceAmountSats: UInt64)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_splice_in(self.uniffiClonePointer(), + FfiConverterTypeUserChannelId.lower(userChannelId), + FfiConverterTypePublicKey.lower(counterpartyNodeId), + FfiConverterUInt64.lower(spliceAmountSats),$0 + ) +} +} + +open func spliceOut(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, address: Address, spliceAmountSats: UInt64)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_splice_out(self.uniffiClonePointer(), + FfiConverterTypeUserChannelId.lower(userChannelId), + FfiConverterTypePublicKey.lower(counterpartyNodeId), + FfiConverterTypeAddress.lower(address), + FfiConverterUInt64.lower(spliceAmountSats),$0 + ) +} +} + open func spontaneousPayment() -> SpontaneousPayment { return try! FfiConverterTypeSpontaneousPayment.lift(try! rustCall() { uniffi_ldk_node_fn_method_node_spontaneous_payment(self.uniffiClonePointer(),$0 @@ -2452,6 +3055,9 @@ open func waitNextEvent() -> Event { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeNode: FfiConverter { typealias FfiType = UnsafeMutableRawPointer @@ -2486,10 +3092,16 @@ public struct FfiConverterTypeNode: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNode_lift(_ pointer: UnsafeMutableRawPointer) throws -> Node { return try FfiConverterTypeNode.lift(pointer) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNode_lower(_ value: Node) -> UnsafeMutableRawPointer { return FfiConverterTypeNode.lower(value) } @@ -2497,21 +3109,45 @@ public func FfiConverterTypeNode_lower(_ value: Node) -> UnsafeMutableRawPointer -public protocol OnchainPaymentProtocol : AnyObject { +public protocol OfferProtocol : AnyObject { - func newAddress() throws -> Address + func absoluteExpirySeconds() -> UInt64? - func sendAllToAddress(address: Address, retainReserve: Bool, feeRate: FeeRate?) throws -> Txid + func amount() -> OfferAmount? - func sendToAddress(address: Address, amountSats: UInt64, feeRate: FeeRate?) throws -> Txid + func chains() -> [Network] + + func expectsQuantity() -> Bool + + func id() -> OfferId + + func isExpired() -> Bool + + func isValidQuantity(quantity: UInt64) -> Bool + + func issuer() -> String? + + func issuerSigningPubkey() -> PublicKey? + + func metadata() -> [UInt8]? + + func offerDescription() -> String? + + func supportsChain(chain: Network) -> Bool } -open class OnchainPayment: - OnchainPaymentProtocol { +open class Offer: + CustomDebugStringConvertible, + CustomStringConvertible, + Equatable, + OfferProtocol { fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -2523,17 +3159,23 @@ open class OnchainPayment: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. 
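The `Node` object also gains channel-splicing entry points in the hunk above. A sketch with placeholder identifiers; `channelId`, `peerId`, and `refundAddress` are assumed to come from an existing open channel and the on-chain wallet:

    // Sketch only: all three identifiers are placeholders.
    try node.spliceIn(userChannelId: channelId, counterpartyNodeId: peerId,
                      spliceAmountSats: 50_000)
    try node.spliceOut(userChannelId: channelId, counterpartyNodeId: peerId,
                       address: refundAddress, spliceAmountSats: 25_000)
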
- /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { - return try! rustCall { uniffi_ldk_node_fn_clone_onchainpayment(self.pointer, $0) } + return try! rustCall { uniffi_ldk_node_fn_clone_offer(self.pointer, $0) } } // No primary constructor declared for this class. @@ -2542,56 +3184,151 @@ open class OnchainPayment: return } - try! rustCall { uniffi_ldk_node_fn_free_onchainpayment(pointer, $0) } + try! rustCall { uniffi_ldk_node_fn_free_offer(pointer, $0) } } +public static func fromStr(offerStr: String)throws -> Offer { + return try FfiConverterTypeOffer.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_constructor_offer_from_str( + FfiConverterString.lower(offerStr),$0 + ) +}) +} + -open func newAddress()throws -> Address { - return try FfiConverterTypeAddress.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_onchainpayment_new_address(self.uniffiClonePointer(),$0 +open func absoluteExpirySeconds() -> UInt64? { + return try! FfiConverterOptionUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_absolute_expiry_seconds(self.uniffiClonePointer(),$0 ) }) } -open func sendAllToAddress(address: Address, retainReserve: Bool, feeRate: FeeRate?)throws -> Txid { - return try FfiConverterTypeTxid.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_onchainpayment_send_all_to_address(self.uniffiClonePointer(), - FfiConverterTypeAddress.lower(address), - FfiConverterBool.lower(retainReserve), - FfiConverterOptionTypeFeeRate.lower(feeRate),$0 +open func amount() -> OfferAmount? { + return try! FfiConverterOptionTypeOfferAmount.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_amount(self.uniffiClonePointer(),$0 ) }) } -open func sendToAddress(address: Address, amountSats: UInt64, feeRate: FeeRate?)throws -> Txid { - return try FfiConverterTypeTxid.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_onchainpayment_send_to_address(self.uniffiClonePointer(), - FfiConverterTypeAddress.lower(address), - FfiConverterUInt64.lower(amountSats), - FfiConverterOptionTypeFeeRate.lower(feeRate),$0 +open func chains() -> [Network] { + return try! FfiConverterSequenceTypeNetwork.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_chains(self.uniffiClonePointer(),$0 + ) +}) +} + +open func expectsQuantity() -> Bool { + return try! FfiConverterBool.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_expects_quantity(self.uniffiClonePointer(),$0 + ) +}) +} + +open func id() -> OfferId { + return try! FfiConverterTypeOfferId.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_offer_id(self.uniffiClonePointer(),$0 + ) +}) +} + +open func isExpired() -> Bool { + return try! FfiConverterBool.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_is_expired(self.uniffiClonePointer(),$0 + ) +}) +} + +open func isValidQuantity(quantity: UInt64) -> Bool { + return try! FfiConverterBool.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_is_valid_quantity(self.uniffiClonePointer(), + FfiConverterUInt64.lower(quantity),$0 + ) +}) +} + +open func issuer() -> String? { + return try! FfiConverterOptionString.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_issuer(self.uniffiClonePointer(),$0 + ) +}) +} + +open func issuerSigningPubkey() -> PublicKey? { + return try! FfiConverterOptionTypePublicKey.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_issuer_signing_pubkey(self.uniffiClonePointer(),$0 + ) +}) +} + +open func metadata() -> [UInt8]? { + return try! FfiConverterOptionSequenceUInt8.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_metadata(self.uniffiClonePointer(),$0 + ) +}) +} + +open func offerDescription() -> String? { + return try! FfiConverterOptionString.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_offer_description(self.uniffiClonePointer(),$0 + ) +}) +} + +open func supportsChain(chain: Network) -> Bool { + return try! FfiConverterBool.lift(try! rustCall() { + uniffi_ldk_node_fn_method_offer_supports_chain(self.uniffiClonePointer(), + FfiConverterTypeNetwork.lower(chain),$0 ) }) } + open var debugDescription: String { + return try! FfiConverterString.lift( + try! rustCall() { + uniffi_ldk_node_fn_method_offer_uniffi_trait_debug(self.uniffiClonePointer(),$0 + ) +} + ) + } + open var description: String { + return try! FfiConverterString.lift( + try! rustCall() { + uniffi_ldk_node_fn_method_offer_uniffi_trait_display(self.uniffiClonePointer(),$0 + ) +} + ) + } + public static func == (self: Offer, other: Offer) -> Bool { + return try! FfiConverterBool.lift( + try! rustCall() { + uniffi_ldk_node_fn_method_offer_uniffi_trait_eq_eq(self.uniffiClonePointer(), + FfiConverterTypeOffer.lower(other),$0 + ) +} + ) + } } -public struct FfiConverterTypeOnchainPayment: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeOffer: FfiConverter { typealias FfiType = UnsafeMutableRawPointer - typealias SwiftType = OnchainPayment + typealias SwiftType = Offer - public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> OnchainPayment { - return OnchainPayment(unsafeFromRawPointer: pointer) + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> Offer { + return Offer(unsafeFromRawPointer: pointer) } - public static func lower(_ value: OnchainPayment) -> UnsafeMutableRawPointer { + public static func lower(_ value: Offer) -> UnsafeMutableRawPointer { return value.uniffiClonePointer() } - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> OnchainPayment { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Offer { let v: UInt64 = try readInt(&buf) // The Rust code won't compile if a pointer won't fit in a UInt64. // We have to go via `UInt` because that's the thing that's the size of a pointer. @@ -2602,7 +3339,7 @@ public struct FfiConverterTypeOnchainPayment: FfiConverter { return try lift(ptr!) 
} - public static func write(_ value: OnchainPayment, into buf: inout [UInt8]) { + public static func write(_ value: Offer, into buf: inout [UInt8]) { // This fiddling is because `Int` is the thing that's the same size as a pointer. // The Rust code won't compile if a pointer won't fit in a `UInt64`. writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) @@ -2612,32 +3349,41 @@ public struct FfiConverterTypeOnchainPayment: FfiConverter { -public func FfiConverterTypeOnchainPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> OnchainPayment { - return try FfiConverterTypeOnchainPayment.lift(pointer) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeOffer_lift(_ pointer: UnsafeMutableRawPointer) throws -> Offer { + return try FfiConverterTypeOffer.lift(pointer) } -public func FfiConverterTypeOnchainPayment_lower(_ value: OnchainPayment) -> UnsafeMutableRawPointer { - return FfiConverterTypeOnchainPayment.lower(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeOffer_lower(_ value: Offer) -> UnsafeMutableRawPointer { + return FfiConverterTypeOffer.lower(value) } -public protocol SpontaneousPaymentProtocol : AnyObject { +public protocol OnchainPaymentProtocol : AnyObject { - func send(amountMsat: UInt64, nodeId: PublicKey, sendingParameters: SendingParameters?) throws -> PaymentId + func newAddress() throws -> Address - func sendProbes(amountMsat: UInt64, nodeId: PublicKey) throws + func sendAllToAddress(address: Address, retainReserve: Bool, feeRate: FeeRate?) throws -> Txid - func sendWithCustomTlvs(amountMsat: UInt64, nodeId: PublicKey, sendingParameters: SendingParameters?, customTlvs: [CustomTlvRecord]) throws -> PaymentId + func sendToAddress(address: Address, amountSats: UInt64, feeRate: FeeRate?) throws -> Txid } -open class SpontaneousPayment: - SpontaneousPaymentProtocol { +open class OnchainPayment: + OnchainPaymentProtocol { fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -2649,17 +3395,23 @@ open class SpontaneousPayment: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { - return try! rustCall { uniffi_ldk_node_fn_clone_spontaneouspayment(self.pointer, $0) } + return try! 
rustCall { uniffi_ldk_node_fn_clone_onchainpayment(self.pointer, $0) } } // No primary constructor declared for this class. @@ -2668,37 +3420,35 @@ open class SpontaneousPayment: return } - try! rustCall { uniffi_ldk_node_fn_free_spontaneouspayment(pointer, $0) } + try! rustCall { uniffi_ldk_node_fn_free_onchainpayment(pointer, $0) } } -open func send(amountMsat: UInt64, nodeId: PublicKey, sendingParameters: SendingParameters?)throws -> PaymentId { - return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_spontaneouspayment_send(self.uniffiClonePointer(), - FfiConverterUInt64.lower(amountMsat), - FfiConverterTypePublicKey.lower(nodeId), - FfiConverterOptionTypeSendingParameters.lower(sendingParameters),$0 +open func newAddress()throws -> Address { + return try FfiConverterTypeAddress.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_onchainpayment_new_address(self.uniffiClonePointer(),$0 ) }) } -open func sendProbes(amountMsat: UInt64, nodeId: PublicKey)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_spontaneouspayment_send_probes(self.uniffiClonePointer(), - FfiConverterUInt64.lower(amountMsat), - FfiConverterTypePublicKey.lower(nodeId),$0 +open func sendAllToAddress(address: Address, retainReserve: Bool, feeRate: FeeRate?)throws -> Txid { + return try FfiConverterTypeTxid.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_onchainpayment_send_all_to_address(self.uniffiClonePointer(), + FfiConverterTypeAddress.lower(address), + FfiConverterBool.lower(retainReserve), + FfiConverterOptionTypeFeeRate.lower(feeRate),$0 ) -} +}) } -open func sendWithCustomTlvs(amountMsat: UInt64, nodeId: PublicKey, sendingParameters: SendingParameters?, customTlvs: [CustomTlvRecord])throws -> PaymentId { - return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_spontaneouspayment_send_with_custom_tlvs(self.uniffiClonePointer(), - FfiConverterUInt64.lower(amountMsat), - FfiConverterTypePublicKey.lower(nodeId), - FfiConverterOptionTypeSendingParameters.lower(sendingParameters), - FfiConverterSequenceTypeCustomTlvRecord.lower(customTlvs),$0 +open func sendToAddress(address: Address, amountSats: UInt64, feeRate: FeeRate?)throws -> Txid { + return try FfiConverterTypeTxid.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_onchainpayment_send_to_address(self.uniffiClonePointer(), + FfiConverterTypeAddress.lower(address), + FfiConverterUInt64.lower(amountSats), + FfiConverterOptionTypeFeeRate.lower(feeRate),$0 ) }) } @@ -2706,20 +3456,23 @@ open func sendWithCustomTlvs(amountMsat: UInt64, nodeId: PublicKey, sendingParam } -public struct FfiConverterTypeSpontaneousPayment: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeOnchainPayment: FfiConverter { typealias FfiType = UnsafeMutableRawPointer - typealias SwiftType = SpontaneousPayment + typealias SwiftType = OnchainPayment - public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> SpontaneousPayment { - return SpontaneousPayment(unsafeFromRawPointer: pointer) + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> OnchainPayment { + return OnchainPayment(unsafeFromRawPointer: pointer) } - public static func lower(_ value: SpontaneousPayment) -> UnsafeMutableRawPointer { + public static 
func lower(_ value: OnchainPayment) -> UnsafeMutableRawPointer { return value.uniffiClonePointer() } - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SpontaneousPayment { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> OnchainPayment { let v: UInt64 = try readInt(&buf) // The Rust code won't compile if a pointer won't fit in a UInt64. // We have to go via `UInt` because that's the thing that's the size of a pointer. @@ -2730,7 +3483,7 @@ public struct FfiConverterTypeSpontaneousPayment: FfiConverter { return try lift(ptr!) } - public static func write(_ value: SpontaneousPayment, into buf: inout [UInt8]) { + public static func write(_ value: OnchainPayment, into buf: inout [UInt8]) { // This fiddling is because `Int` is the thing that's the same size as a pointer. // The Rust code won't compile if a pointer won't fit in a `UInt64`. writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) @@ -2740,30 +3493,58 @@ public struct FfiConverterTypeSpontaneousPayment: FfiConverter { -public func FfiConverterTypeSpontaneousPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> SpontaneousPayment { - return try FfiConverterTypeSpontaneousPayment.lift(pointer) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeOnchainPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> OnchainPayment { + return try FfiConverterTypeOnchainPayment.lift(pointer) } -public func FfiConverterTypeSpontaneousPayment_lower(_ value: SpontaneousPayment) -> UnsafeMutableRawPointer { - return FfiConverterTypeSpontaneousPayment.lower(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeOnchainPayment_lower(_ value: OnchainPayment) -> UnsafeMutableRawPointer { + return FfiConverterTypeOnchainPayment.lower(value) } -public protocol UnifiedQrPaymentProtocol : AnyObject { +public protocol RefundProtocol : AnyObject { - func receive(amountSats: UInt64, message: String, expirySec: UInt32) throws -> String + func absoluteExpirySeconds() -> UInt64? + + func amountMsats() -> UInt64 + + func chain() -> Network? + + func isExpired() -> Bool - func send(uriStr: String) throws -> QrPaymentResult + func issuer() -> String? + + func payerMetadata() -> [UInt8] + + func payerNote() -> String? + + func payerSigningPubkey() -> PublicKey + + func quantity() -> UInt64? + + func refundDescription() -> String } -open class UnifiedQrPayment: - UnifiedQrPaymentProtocol { +open class Refund: + CustomDebugStringConvertible, + CustomStringConvertible, + Equatable, + RefundProtocol { fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -2775,17 +3556,23 @@ open class UnifiedQrPayment: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. 
+ // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { - return try! rustCall { uniffi_ldk_node_fn_clone_unifiedqrpayment(self.pointer, $0) } + return try! rustCall { uniffi_ldk_node_fn_clone_refund(self.pointer, $0) } } // No primary constructor declared for this class. @@ -2794,33 +3581,428 @@ open class UnifiedQrPayment: return } - try! rustCall { uniffi_ldk_node_fn_free_unifiedqrpayment(pointer, $0) } + try! rustCall { uniffi_ldk_node_fn_free_refund(pointer, $0) } } +public static func fromStr(refundStr: String)throws -> Refund { + return try FfiConverterTypeRefund.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_constructor_refund_from_str( + FfiConverterString.lower(refundStr),$0 + ) +}) +} + -open func receive(amountSats: UInt64, message: String, expirySec: UInt32)throws -> String { - return try FfiConverterString.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_unifiedqrpayment_receive(self.uniffiClonePointer(), - FfiConverterUInt64.lower(amountSats), - FfiConverterString.lower(message), - FfiConverterUInt32.lower(expirySec),$0 +open func absoluteExpirySeconds() -> UInt64? { + return try! FfiConverterOptionUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_refund_absolute_expiry_seconds(self.uniffiClonePointer(),$0 ) }) } -open func send(uriStr: String)throws -> QrPaymentResult { - return try FfiConverterTypeQrPaymentResult.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_unifiedqrpayment_send(self.uniffiClonePointer(), - FfiConverterString.lower(uriStr),$0 +open func amountMsats() -> UInt64 { + return try! FfiConverterUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_refund_amount_msats(self.uniffiClonePointer(),$0 ) }) } +open func chain() -> Network? { + return try! FfiConverterOptionTypeNetwork.lift(try! rustCall() { + uniffi_ldk_node_fn_method_refund_chain(self.uniffiClonePointer(),$0 + ) +}) +} + +open func isExpired() -> Bool { + return try! FfiConverterBool.lift(try! rustCall() { + uniffi_ldk_node_fn_method_refund_is_expired(self.uniffiClonePointer(),$0 + ) +}) +} + +open func issuer() -> String? { + return try! FfiConverterOptionString.lift(try! rustCall() { + uniffi_ldk_node_fn_method_refund_issuer(self.uniffiClonePointer(),$0 + ) +}) +} + +open func payerMetadata() -> [UInt8] { + return try! FfiConverterSequenceUInt8.lift(try! rustCall() { + uniffi_ldk_node_fn_method_refund_payer_metadata(self.uniffiClonePointer(),$0 + ) +}) +} + +open func payerNote() -> String? { + return try! FfiConverterOptionString.lift(try! rustCall() { + uniffi_ldk_node_fn_method_refund_payer_note(self.uniffiClonePointer(),$0 + ) +}) +} + +open func payerSigningPubkey() -> PublicKey { + return try! FfiConverterTypePublicKey.lift(try! rustCall() { + uniffi_ldk_node_fn_method_refund_payer_signing_pubkey(self.uniffiClonePointer(),$0 + ) +}) +} + +open func quantity() -> UInt64? { + return try! 
FfiConverterOptionUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_refund_quantity(self.uniffiClonePointer(),$0 + ) +}) +} + +open func refundDescription() -> String { + return try! FfiConverterString.lift(try! rustCall() { + uniffi_ldk_node_fn_method_refund_refund_description(self.uniffiClonePointer(),$0 + ) +}) +} + + open var debugDescription: String { + return try! FfiConverterString.lift( + try! rustCall() { + uniffi_ldk_node_fn_method_refund_uniffi_trait_debug(self.uniffiClonePointer(),$0 + ) +} + ) + } + open var description: String { + return try! FfiConverterString.lift( + try! rustCall() { + uniffi_ldk_node_fn_method_refund_uniffi_trait_display(self.uniffiClonePointer(),$0 + ) +} + ) + } + public static func == (self: Refund, other: Refund) -> Bool { + return try! FfiConverterBool.lift( + try! rustCall() { + uniffi_ldk_node_fn_method_refund_uniffi_trait_eq_eq(self.uniffiClonePointer(), + FfiConverterTypeRefund.lower(other),$0 + ) +} + ) + } + +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeRefund: FfiConverter { + + typealias FfiType = UnsafeMutableRawPointer + typealias SwiftType = Refund + + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> Refund { + return Refund(unsafeFromRawPointer: pointer) + } + + public static func lower(_ value: Refund) -> UnsafeMutableRawPointer { + return value.uniffiClonePointer() + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Refund { + let v: UInt64 = try readInt(&buf) + // The Rust code won't compile if a pointer won't fit in a UInt64. + // We have to go via `UInt` because that's the thing that's the size of a pointer. + let ptr = UnsafeMutableRawPointer(bitPattern: UInt(truncatingIfNeeded: v)) + if (ptr == nil) { + throw UniffiInternalError.unexpectedNullPointer + } + return try lift(ptr!) + } + + public static func write(_ value: Refund, into buf: inout [UInt8]) { + // This fiddling is because `Int` is the thing that's the same size as a pointer. + // The Rust code won't compile if a pointer won't fit in a `UInt64`. + writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) + } +} + + + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeRefund_lift(_ pointer: UnsafeMutableRawPointer) throws -> Refund { + return try FfiConverterTypeRefund.lift(pointer) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeRefund_lower(_ value: Refund) -> UnsafeMutableRawPointer { + return FfiConverterTypeRefund.lower(value) +} + + + + +public protocol SpontaneousPaymentProtocol : AnyObject { + + func send(amountMsat: UInt64, nodeId: PublicKey, routeParameters: RouteParametersConfig?) throws -> PaymentId + + func sendProbes(amountMsat: UInt64, nodeId: PublicKey) throws + + func sendWithCustomTlvs(amountMsat: UInt64, nodeId: PublicKey, routeParameters: RouteParametersConfig?, customTlvs: [CustomTlvRecord]) throws -> PaymentId + + func sendWithPreimage(amountMsat: UInt64, nodeId: PublicKey, preimage: PaymentPreimage, routeParameters: RouteParametersConfig?) throws -> PaymentId + + func sendWithPreimageAndCustomTlvs(amountMsat: UInt64, nodeId: PublicKey, customTlvs: [CustomTlvRecord], preimage: PaymentPreimage, routeParameters: RouteParametersConfig?) throws -> PaymentId + +} + +open class SpontaneousPayment: + SpontaneousPaymentProtocol { + fileprivate let pointer: UnsafeMutableRawPointer! 
+ + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif + public struct NoPointer { + public init() {} + } + + // TODO: We'd like this to be `private` but for Swifty reasons, + // we can't implement `FfiConverter` without making this `required` and we can't + // make it `required` without making it `public`. + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + self.pointer = pointer + } + + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif + public init(noPointer: NoPointer) { + self.pointer = nil + } + +#if swift(>=5.8) + @_documentation(visibility: private) +#endif + public func uniffiClonePointer() -> UnsafeMutableRawPointer { + return try! rustCall { uniffi_ldk_node_fn_clone_spontaneouspayment(self.pointer, $0) } + } + // No primary constructor declared for this class. + + deinit { + guard let pointer = pointer else { + return + } + + try! rustCall { uniffi_ldk_node_fn_free_spontaneouspayment(pointer, $0) } + } + + + + +open func send(amountMsat: UInt64, nodeId: PublicKey, routeParameters: RouteParametersConfig?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_spontaneouspayment_send(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountMsat), + FfiConverterTypePublicKey.lower(nodeId), + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 + ) +}) +} + +open func sendProbes(amountMsat: UInt64, nodeId: PublicKey)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_spontaneouspayment_send_probes(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountMsat), + FfiConverterTypePublicKey.lower(nodeId),$0 + ) +} +} + +open func sendWithCustomTlvs(amountMsat: UInt64, nodeId: PublicKey, routeParameters: RouteParametersConfig?, customTlvs: [CustomTlvRecord])throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_spontaneouspayment_send_with_custom_tlvs(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountMsat), + FfiConverterTypePublicKey.lower(nodeId), + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters), + FfiConverterSequenceTypeCustomTlvRecord.lower(customTlvs),$0 + ) +}) +} + +open func sendWithPreimage(amountMsat: UInt64, nodeId: PublicKey, preimage: PaymentPreimage, routeParameters: RouteParametersConfig?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_spontaneouspayment_send_with_preimage(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountMsat), + FfiConverterTypePublicKey.lower(nodeId), + FfiConverterTypePaymentPreimage.lower(preimage), + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 + ) +}) +} + +open func sendWithPreimageAndCustomTlvs(amountMsat: UInt64, nodeId: PublicKey, customTlvs: [CustomTlvRecord], 
preimage: PaymentPreimage, routeParameters: RouteParametersConfig?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_spontaneouspayment_send_with_preimage_and_custom_tlvs(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountMsat), + FfiConverterTypePublicKey.lower(nodeId), + FfiConverterSequenceTypeCustomTlvRecord.lower(customTlvs), + FfiConverterTypePaymentPreimage.lower(preimage), + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 + ) +}) +} + + +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeSpontaneousPayment: FfiConverter { + + typealias FfiType = UnsafeMutableRawPointer + typealias SwiftType = SpontaneousPayment + + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> SpontaneousPayment { + return SpontaneousPayment(unsafeFromRawPointer: pointer) + } + + public static func lower(_ value: SpontaneousPayment) -> UnsafeMutableRawPointer { + return value.uniffiClonePointer() + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SpontaneousPayment { + let v: UInt64 = try readInt(&buf) + // The Rust code won't compile if a pointer won't fit in a UInt64. + // We have to go via `UInt` because that's the thing that's the size of a pointer. + let ptr = UnsafeMutableRawPointer(bitPattern: UInt(truncatingIfNeeded: v)) + if (ptr == nil) { + throw UniffiInternalError.unexpectedNullPointer + } + return try lift(ptr!) + } + + public static func write(_ value: SpontaneousPayment, into buf: inout [UInt8]) { + // This fiddling is because `Int` is the thing that's the same size as a pointer. + // The Rust code won't compile if a pointer won't fit in a `UInt64`. + writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) + } +} + + + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeSpontaneousPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> SpontaneousPayment { + return try FfiConverterTypeSpontaneousPayment.lift(pointer) +} -} - +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeSpontaneousPayment_lower(_ value: SpontaneousPayment) -> UnsafeMutableRawPointer { + return FfiConverterTypeSpontaneousPayment.lower(value) +} + + + + +public protocol UnifiedQrPaymentProtocol : AnyObject { + + func receive(amountSats: UInt64, message: String, expirySec: UInt32) throws -> String + + func send(uriStr: String, routeParameters: RouteParametersConfig?) throws -> QrPaymentResult + +} + +open class UnifiedQrPayment: + UnifiedQrPaymentProtocol { + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif + public struct NoPointer { + public init() {} + } + + // TODO: We'd like this to be `private` but for Swifty reasons, + // we can't implement `FfiConverter` without making this `required` and we can't + // make it `required` without making it `public`. + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + self.pointer = pointer + } + + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. 
+ // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif + public init(noPointer: NoPointer) { + self.pointer = nil + } + +#if swift(>=5.8) + @_documentation(visibility: private) +#endif + public func uniffiClonePointer() -> UnsafeMutableRawPointer { + return try! rustCall { uniffi_ldk_node_fn_clone_unifiedqrpayment(self.pointer, $0) } + } + // No primary constructor declared for this class. + + deinit { + guard let pointer = pointer else { + return + } + + try! rustCall { uniffi_ldk_node_fn_free_unifiedqrpayment(pointer, $0) } + } + + + + +open func receive(amountSats: UInt64, message: String, expirySec: UInt32)throws -> String { + return try FfiConverterString.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_unifiedqrpayment_receive(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountSats), + FfiConverterString.lower(message), + FfiConverterUInt32.lower(expirySec),$0 + ) +}) +} + +open func send(uriStr: String, routeParameters: RouteParametersConfig?)throws -> QrPaymentResult { + return try FfiConverterTypeQrPaymentResult.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_unifiedqrpayment_send(self.uniffiClonePointer(), + FfiConverterString.lower(uriStr), + FfiConverterOptionTypeRouteParametersConfig.lower(routeParameters),$0 + ) +}) +} + + +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeUnifiedQrPayment: FfiConverter { typealias FfiType = UnsafeMutableRawPointer @@ -2855,10 +4037,16 @@ public struct FfiConverterTypeUnifiedQrPayment: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeUnifiedQrPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> UnifiedQrPayment { return try FfiConverterTypeUnifiedQrPayment.lift(pointer) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeUnifiedQrPayment_lower(_ value: UnifiedQrPayment) -> UnsafeMutableRawPointer { return FfiConverterTypeUnifiedQrPayment.lower(value) } @@ -2877,6 +4065,9 @@ open class VssHeaderProvider: fileprivate let pointer: UnsafeMutableRawPointer! /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public struct NoPointer { public init() {} } @@ -2888,15 +4079,21 @@ open class VssHeaderProvider: self.pointer = pointer } - /// This constructor can be used to instantiate a fake object. - /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. - /// - /// - Warning: - /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + // This constructor can be used to instantiate a fake object. + // - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + // + // - Warning: + // Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. 
+#if swift(>=5.8) + @_documentation(visibility: private) +#endif public init(noPointer: NoPointer) { self.pointer = nil } +#if swift(>=5.8) + @_documentation(visibility: private) +#endif public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_vssheaderprovider(self.pointer, $0) } } @@ -2933,6 +4130,9 @@ open func getHeaders(request: [UInt8])async throws -> [String: String] { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeVssHeaderProvider: FfiConverter { typealias FfiType = UnsafeMutableRawPointer @@ -2967,10 +4167,16 @@ public struct FfiConverterTypeVssHeaderProvider: FfiConverter { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeVssHeaderProvider_lift(_ pointer: UnsafeMutableRawPointer) throws -> VssHeaderProvider { return try FfiConverterTypeVssHeaderProvider.lift(pointer) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeVssHeaderProvider_lower(_ value: VssHeaderProvider) -> UnsafeMutableRawPointer { return FfiConverterTypeVssHeaderProvider.lower(value) } @@ -3008,6 +4214,9 @@ extension AnchorChannelsConfig: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeAnchorChannelsConfig: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> AnchorChannelsConfig { return @@ -3024,10 +4233,16 @@ public struct FfiConverterTypeAnchorChannelsConfig: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeAnchorChannelsConfig_lift(_ buf: RustBuffer) throws -> AnchorChannelsConfig { return try FfiConverterTypeAnchorChannelsConfig.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeAnchorChannelsConfig_lower(_ value: AnchorChannelsConfig) -> RustBuffer { return FfiConverterTypeAnchorChannelsConfig.lower(value) } @@ -3071,6 +4286,9 @@ extension BackgroundSyncConfig: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeBackgroundSyncConfig: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> BackgroundSyncConfig { return @@ -3089,10 +4307,16 @@ public struct FfiConverterTypeBackgroundSyncConfig: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBackgroundSyncConfig_lift(_ buf: RustBuffer) throws -> BackgroundSyncConfig { return try FfiConverterTypeBackgroundSyncConfig.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBackgroundSyncConfig_lower(_ value: BackgroundSyncConfig) -> RustBuffer { return FfiConverterTypeBackgroundSyncConfig.lower(value) } @@ -3154,6 +4378,9 @@ extension BalanceDetails: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeBalanceDetails: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> BalanceDetails { return @@ -3178,10 +4405,16 @@ public struct FfiConverterTypeBalanceDetails: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBalanceDetails_lift(_ buf: RustBuffer) throws -> BalanceDetails { return try 
FfiConverterTypeBalanceDetails.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBalanceDetails_lower(_ value: BalanceDetails) -> RustBuffer { return FfiConverterTypeBalanceDetails.lower(value) } @@ -3219,6 +4452,9 @@ extension BestBlock: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeBestBlock: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> BestBlock { return @@ -3235,66 +4471,21 @@ public struct FfiConverterTypeBestBlock: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBestBlock_lift(_ buf: RustBuffer) throws -> BestBlock { return try FfiConverterTypeBestBlock.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBestBlock_lower(_ value: BestBlock) -> RustBuffer { return FfiConverterTypeBestBlock.lower(value) } -public struct Bolt11PaymentInfo { - public var state: PaymentState - public var expiresAt: DateTime - public var feeTotalSat: UInt64 - public var orderTotalSat: UInt64 - public var invoice: Bolt11Invoice - - // Default memberwise initializers are never public by default, so we - // declare one manually. - public init(state: PaymentState, expiresAt: DateTime, feeTotalSat: UInt64, orderTotalSat: UInt64, invoice: Bolt11Invoice) { - self.state = state - self.expiresAt = expiresAt - self.feeTotalSat = feeTotalSat - self.orderTotalSat = orderTotalSat - self.invoice = invoice - } -} - - - -public struct FfiConverterTypeBolt11PaymentInfo: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Bolt11PaymentInfo { - return - try Bolt11PaymentInfo( - state: FfiConverterTypePaymentState.read(from: &buf), - expiresAt: FfiConverterTypeDateTime.read(from: &buf), - feeTotalSat: FfiConverterUInt64.read(from: &buf), - orderTotalSat: FfiConverterUInt64.read(from: &buf), - invoice: FfiConverterTypeBolt11Invoice.read(from: &buf) - ) - } - - public static func write(_ value: Bolt11PaymentInfo, into buf: inout [UInt8]) { - FfiConverterTypePaymentState.write(value.state, into: &buf) - FfiConverterTypeDateTime.write(value.expiresAt, into: &buf) - FfiConverterUInt64.write(value.feeTotalSat, into: &buf) - FfiConverterUInt64.write(value.orderTotalSat, into: &buf) - FfiConverterTypeBolt11Invoice.write(value.invoice, into: &buf) - } -} - - -public func FfiConverterTypeBolt11PaymentInfo_lift(_ buf: RustBuffer) throws -> Bolt11PaymentInfo { - return try FfiConverterTypeBolt11PaymentInfo.lift(buf) -} - -public func FfiConverterTypeBolt11PaymentInfo_lower(_ value: Bolt11PaymentInfo) -> RustBuffer { - return FfiConverterTypeBolt11PaymentInfo.lower(value) -} - - public struct ChannelConfig { public var forwardingFeeProportionalMillionths: UInt32 public var forwardingFeeBaseMsat: UInt32 @@ -3351,6 +4542,9 @@ extension ChannelConfig: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeChannelConfig: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelConfig { return @@ -3375,10 +4569,16 @@ public struct FfiConverterTypeChannelConfig: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeChannelConfig_lift(_ buf: RustBuffer) throws -> ChannelConfig { return try 
FfiConverterTypeChannelConfig.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeChannelConfig_lower(_ value: ChannelConfig) -> RustBuffer { return FfiConverterTypeChannelConfig.lower(value) } @@ -3590,6 +4790,9 @@ extension ChannelDetails: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeChannelDetails: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelDetails { return @@ -3664,10 +4867,16 @@ public struct FfiConverterTypeChannelDetails: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeChannelDetails_lift(_ buf: RustBuffer) throws -> ChannelDetails { return try FfiConverterTypeChannelDetails.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeChannelDetails_lower(_ value: ChannelDetails) -> RustBuffer { return FfiConverterTypeChannelDetails.lower(value) } @@ -3698,124 +4907,68 @@ extension ChannelInfo: Equatable, Hashable { if lhs.nodeOne != rhs.nodeOne { return false } - if lhs.oneToTwo != rhs.oneToTwo { - return false - } - if lhs.nodeTwo != rhs.nodeTwo { - return false - } - if lhs.twoToOne != rhs.twoToOne { - return false - } - if lhs.capacitySats != rhs.capacitySats { - return false - } - return true - } - - public func hash(into hasher: inout Hasher) { - hasher.combine(nodeOne) - hasher.combine(oneToTwo) - hasher.combine(nodeTwo) - hasher.combine(twoToOne) - hasher.combine(capacitySats) - } -} - - -public struct FfiConverterTypeChannelInfo: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelInfo { - return - try ChannelInfo( - nodeOne: FfiConverterTypeNodeId.read(from: &buf), - oneToTwo: FfiConverterOptionTypeChannelUpdateInfo.read(from: &buf), - nodeTwo: FfiConverterTypeNodeId.read(from: &buf), - twoToOne: FfiConverterOptionTypeChannelUpdateInfo.read(from: &buf), - capacitySats: FfiConverterOptionUInt64.read(from: &buf) - ) - } - - public static func write(_ value: ChannelInfo, into buf: inout [UInt8]) { - FfiConverterTypeNodeId.write(value.nodeOne, into: &buf) - FfiConverterOptionTypeChannelUpdateInfo.write(value.oneToTwo, into: &buf) - FfiConverterTypeNodeId.write(value.nodeTwo, into: &buf) - FfiConverterOptionTypeChannelUpdateInfo.write(value.twoToOne, into: &buf) - FfiConverterOptionUInt64.write(value.capacitySats, into: &buf) - } -} - - -public func FfiConverterTypeChannelInfo_lift(_ buf: RustBuffer) throws -> ChannelInfo { - return try FfiConverterTypeChannelInfo.lift(buf) -} - -public func FfiConverterTypeChannelInfo_lower(_ value: ChannelInfo) -> RustBuffer { - return FfiConverterTypeChannelInfo.lower(value) -} - - -public struct ChannelOrderInfo { - public var fundedAt: DateTime - public var fundingOutpoint: OutPoint - public var expiresAt: DateTime - - // Default memberwise initializers are never public by default, so we - // declare one manually. 
- public init(fundedAt: DateTime, fundingOutpoint: OutPoint, expiresAt: DateTime) { - self.fundedAt = fundedAt - self.fundingOutpoint = fundingOutpoint - self.expiresAt = expiresAt - } -} - - - -extension ChannelOrderInfo: Equatable, Hashable { - public static func ==(lhs: ChannelOrderInfo, rhs: ChannelOrderInfo) -> Bool { - if lhs.fundedAt != rhs.fundedAt { + if lhs.oneToTwo != rhs.oneToTwo { return false } - if lhs.fundingOutpoint != rhs.fundingOutpoint { + if lhs.nodeTwo != rhs.nodeTwo { return false } - if lhs.expiresAt != rhs.expiresAt { + if lhs.twoToOne != rhs.twoToOne { + return false + } + if lhs.capacitySats != rhs.capacitySats { return false } return true } public func hash(into hasher: inout Hasher) { - hasher.combine(fundedAt) - hasher.combine(fundingOutpoint) - hasher.combine(expiresAt) + hasher.combine(nodeOne) + hasher.combine(oneToTwo) + hasher.combine(nodeTwo) + hasher.combine(twoToOne) + hasher.combine(capacitySats) } } -public struct FfiConverterTypeChannelOrderInfo: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelOrderInfo { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeChannelInfo: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelInfo { return - try ChannelOrderInfo( - fundedAt: FfiConverterTypeDateTime.read(from: &buf), - fundingOutpoint: FfiConverterTypeOutPoint.read(from: &buf), - expiresAt: FfiConverterTypeDateTime.read(from: &buf) + try ChannelInfo( + nodeOne: FfiConverterTypeNodeId.read(from: &buf), + oneToTwo: FfiConverterOptionTypeChannelUpdateInfo.read(from: &buf), + nodeTwo: FfiConverterTypeNodeId.read(from: &buf), + twoToOne: FfiConverterOptionTypeChannelUpdateInfo.read(from: &buf), + capacitySats: FfiConverterOptionUInt64.read(from: &buf) ) } - public static func write(_ value: ChannelOrderInfo, into buf: inout [UInt8]) { - FfiConverterTypeDateTime.write(value.fundedAt, into: &buf) - FfiConverterTypeOutPoint.write(value.fundingOutpoint, into: &buf) - FfiConverterTypeDateTime.write(value.expiresAt, into: &buf) + public static func write(_ value: ChannelInfo, into buf: inout [UInt8]) { + FfiConverterTypeNodeId.write(value.nodeOne, into: &buf) + FfiConverterOptionTypeChannelUpdateInfo.write(value.oneToTwo, into: &buf) + FfiConverterTypeNodeId.write(value.nodeTwo, into: &buf) + FfiConverterOptionTypeChannelUpdateInfo.write(value.twoToOne, into: &buf) + FfiConverterOptionUInt64.write(value.capacitySats, into: &buf) } } -public func FfiConverterTypeChannelOrderInfo_lift(_ buf: RustBuffer) throws -> ChannelOrderInfo { - return try FfiConverterTypeChannelOrderInfo.lift(buf) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeChannelInfo_lift(_ buf: RustBuffer) throws -> ChannelInfo { + return try FfiConverterTypeChannelInfo.lift(buf) } -public func FfiConverterTypeChannelOrderInfo_lower(_ value: ChannelOrderInfo) -> RustBuffer { - return FfiConverterTypeChannelOrderInfo.lower(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeChannelInfo_lower(_ value: ChannelInfo) -> RustBuffer { + return FfiConverterTypeChannelInfo.lower(value) } @@ -3875,6 +5028,9 @@ extension ChannelUpdateInfo: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeChannelUpdateInfo: FfiConverterRustBuffer { public static func read(from buf: inout (data: 
Data, offset: Data.Index)) throws -> ChannelUpdateInfo { return @@ -3899,10 +5055,16 @@ public struct FfiConverterTypeChannelUpdateInfo: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeChannelUpdateInfo_lift(_ buf: RustBuffer) throws -> ChannelUpdateInfo { return try FfiConverterTypeChannelUpdateInfo.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeChannelUpdateInfo_lower(_ value: ChannelUpdateInfo) -> RustBuffer { return FfiConverterTypeChannelUpdateInfo.lower(value) } @@ -3917,11 +5079,11 @@ public struct Config { public var trustedPeers0conf: [PublicKey] public var probingLiquidityLimitMultiplier: UInt64 public var anchorChannelsConfig: AnchorChannelsConfig? - public var sendingParameters: SendingParameters? + public var routeParameters: RouteParametersConfig? // Default memberwise initializers are never public by default, so we // declare one manually. - public init(storageDirPath: String, network: Network, listeningAddresses: [SocketAddress]?, announcementAddresses: [SocketAddress]?, nodeAlias: NodeAlias?, trustedPeers0conf: [PublicKey], probingLiquidityLimitMultiplier: UInt64, anchorChannelsConfig: AnchorChannelsConfig?, sendingParameters: SendingParameters?) { + public init(storageDirPath: String, network: Network, listeningAddresses: [SocketAddress]?, announcementAddresses: [SocketAddress]?, nodeAlias: NodeAlias?, trustedPeers0conf: [PublicKey], probingLiquidityLimitMultiplier: UInt64, anchorChannelsConfig: AnchorChannelsConfig?, routeParameters: RouteParametersConfig?) { self.storageDirPath = storageDirPath self.network = network self.listeningAddresses = listeningAddresses @@ -3930,7 +5092,7 @@ public struct Config { self.trustedPeers0conf = trustedPeers0conf self.probingLiquidityLimitMultiplier = probingLiquidityLimitMultiplier self.anchorChannelsConfig = anchorChannelsConfig - self.sendingParameters = sendingParameters + self.routeParameters = routeParameters } } @@ -3962,7 +5124,7 @@ extension Config: Equatable, Hashable { if lhs.anchorChannelsConfig != rhs.anchorChannelsConfig { return false } - if lhs.sendingParameters != rhs.sendingParameters { + if lhs.routeParameters != rhs.routeParameters { return false } return true @@ -3977,11 +5139,14 @@ extension Config: Equatable, Hashable { hasher.combine(trustedPeers0conf) hasher.combine(probingLiquidityLimitMultiplier) hasher.combine(anchorChannelsConfig) - hasher.combine(sendingParameters) + hasher.combine(routeParameters) } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeConfig: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Config { return @@ -3994,7 +5159,7 @@ public struct FfiConverterTypeConfig: FfiConverterRustBuffer { trustedPeers0conf: FfiConverterSequenceTypePublicKey.read(from: &buf), probingLiquidityLimitMultiplier: FfiConverterUInt64.read(from: &buf), anchorChannelsConfig: FfiConverterOptionTypeAnchorChannelsConfig.read(from: &buf), - sendingParameters: FfiConverterOptionTypeSendingParameters.read(from: &buf) + routeParameters: FfiConverterOptionTypeRouteParametersConfig.read(from: &buf) ) } @@ -4007,15 +5172,21 @@ public struct FfiConverterTypeConfig: FfiConverterRustBuffer { FfiConverterSequenceTypePublicKey.write(value.trustedPeers0conf, into: &buf) FfiConverterUInt64.write(value.probingLiquidityLimitMultiplier, into: &buf) 
FfiConverterOptionTypeAnchorChannelsConfig.write(value.anchorChannelsConfig, into: &buf) - FfiConverterOptionTypeSendingParameters.write(value.sendingParameters, into: &buf) + FfiConverterOptionTypeRouteParametersConfig.write(value.routeParameters, into: &buf) } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeConfig_lift(_ buf: RustBuffer) throws -> Config { return try FfiConverterTypeConfig.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeConfig_lower(_ value: Config) -> RustBuffer { return FfiConverterTypeConfig.lower(value) } @@ -4053,6 +5224,9 @@ extension CustomTlvRecord: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeCustomTlvRecord: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> CustomTlvRecord { return @@ -4069,10 +5243,16 @@ public struct FfiConverterTypeCustomTlvRecord: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeCustomTlvRecord_lift(_ buf: RustBuffer) throws -> CustomTlvRecord { return try FfiConverterTypeCustomTlvRecord.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeCustomTlvRecord_lower(_ value: CustomTlvRecord) -> RustBuffer { return FfiConverterTypeCustomTlvRecord.lower(value) } @@ -4104,6 +5284,9 @@ extension ElectrumSyncConfig: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeElectrumSyncConfig: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ElectrumSyncConfig { return @@ -4118,10 +5301,16 @@ public struct FfiConverterTypeElectrumSyncConfig: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeElectrumSyncConfig_lift(_ buf: RustBuffer) throws -> ElectrumSyncConfig { return try FfiConverterTypeElectrumSyncConfig.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeElectrumSyncConfig_lower(_ value: ElectrumSyncConfig) -> RustBuffer { return FfiConverterTypeElectrumSyncConfig.lower(value) } @@ -4153,6 +5342,9 @@ extension EsploraSyncConfig: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeEsploraSyncConfig: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> EsploraSyncConfig { return @@ -4167,10 +5359,16 @@ public struct FfiConverterTypeEsploraSyncConfig: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeEsploraSyncConfig_lift(_ buf: RustBuffer) throws -> EsploraSyncConfig { return try FfiConverterTypeEsploraSyncConfig.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeEsploraSyncConfig_lower(_ value: EsploraSyncConfig) -> RustBuffer { return FfiConverterTypeEsploraSyncConfig.lower(value) } @@ -4182,66 +5380,387 @@ public struct LspFeeLimits { // Default memberwise initializers are never public by default, so we // declare one manually. - public init(maxTotalOpeningFeeMsat: UInt64?, maxProportionalOpeningFeePpmMsat: UInt64?) 
{ - self.maxTotalOpeningFeeMsat = maxTotalOpeningFeeMsat - self.maxProportionalOpeningFeePpmMsat = maxProportionalOpeningFeePpmMsat + public init(maxTotalOpeningFeeMsat: UInt64?, maxProportionalOpeningFeePpmMsat: UInt64?) { + self.maxTotalOpeningFeeMsat = maxTotalOpeningFeeMsat + self.maxProportionalOpeningFeePpmMsat = maxProportionalOpeningFeePpmMsat + } +} + + + +extension LspFeeLimits: Equatable, Hashable { + public static func ==(lhs: LspFeeLimits, rhs: LspFeeLimits) -> Bool { + if lhs.maxTotalOpeningFeeMsat != rhs.maxTotalOpeningFeeMsat { + return false + } + if lhs.maxProportionalOpeningFeePpmMsat != rhs.maxProportionalOpeningFeePpmMsat { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(maxTotalOpeningFeeMsat) + hasher.combine(maxProportionalOpeningFeePpmMsat) + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeLSPFeeLimits: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> LspFeeLimits { + return + try LspFeeLimits( + maxTotalOpeningFeeMsat: FfiConverterOptionUInt64.read(from: &buf), + maxProportionalOpeningFeePpmMsat: FfiConverterOptionUInt64.read(from: &buf) + ) + } + + public static func write(_ value: LspFeeLimits, into buf: inout [UInt8]) { + FfiConverterOptionUInt64.write(value.maxTotalOpeningFeeMsat, into: &buf) + FfiConverterOptionUInt64.write(value.maxProportionalOpeningFeePpmMsat, into: &buf) + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPFeeLimits_lift(_ buf: RustBuffer) throws -> LspFeeLimits { + return try FfiConverterTypeLSPFeeLimits.lift(buf) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPFeeLimits_lower(_ value: LspFeeLimits) -> RustBuffer { + return FfiConverterTypeLSPFeeLimits.lower(value) +} + + +public struct Lsps1Bolt11PaymentInfo { + public var state: Lsps1PaymentState + public var expiresAt: LspsDateTime + public var feeTotalSat: UInt64 + public var orderTotalSat: UInt64 + public var invoice: Bolt11Invoice + + // Default memberwise initializers are never public by default, so we + // declare one manually. 
+ public init(state: Lsps1PaymentState, expiresAt: LspsDateTime, feeTotalSat: UInt64, orderTotalSat: UInt64, invoice: Bolt11Invoice) { + self.state = state + self.expiresAt = expiresAt + self.feeTotalSat = feeTotalSat + self.orderTotalSat = orderTotalSat + self.invoice = invoice + } +} + + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeLSPS1Bolt11PaymentInfo: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Lsps1Bolt11PaymentInfo { + return + try Lsps1Bolt11PaymentInfo( + state: FfiConverterTypeLSPS1PaymentState.read(from: &buf), + expiresAt: FfiConverterTypeLSPSDateTime.read(from: &buf), + feeTotalSat: FfiConverterUInt64.read(from: &buf), + orderTotalSat: FfiConverterUInt64.read(from: &buf), + invoice: FfiConverterTypeBolt11Invoice.read(from: &buf) + ) + } + + public static func write(_ value: Lsps1Bolt11PaymentInfo, into buf: inout [UInt8]) { + FfiConverterTypeLSPS1PaymentState.write(value.state, into: &buf) + FfiConverterTypeLSPSDateTime.write(value.expiresAt, into: &buf) + FfiConverterUInt64.write(value.feeTotalSat, into: &buf) + FfiConverterUInt64.write(value.orderTotalSat, into: &buf) + FfiConverterTypeBolt11Invoice.write(value.invoice, into: &buf) + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1Bolt11PaymentInfo_lift(_ buf: RustBuffer) throws -> Lsps1Bolt11PaymentInfo { + return try FfiConverterTypeLSPS1Bolt11PaymentInfo.lift(buf) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1Bolt11PaymentInfo_lower(_ value: Lsps1Bolt11PaymentInfo) -> RustBuffer { + return FfiConverterTypeLSPS1Bolt11PaymentInfo.lower(value) +} + + +public struct Lsps1ChannelInfo { + public var fundedAt: LspsDateTime + public var fundingOutpoint: OutPoint + public var expiresAt: LspsDateTime + + // Default memberwise initializers are never public by default, so we + // declare one manually. 
+ public init(fundedAt: LspsDateTime, fundingOutpoint: OutPoint, expiresAt: LspsDateTime) { + self.fundedAt = fundedAt + self.fundingOutpoint = fundingOutpoint + self.expiresAt = expiresAt + } +} + + + +extension Lsps1ChannelInfo: Equatable, Hashable { + public static func ==(lhs: Lsps1ChannelInfo, rhs: Lsps1ChannelInfo) -> Bool { + if lhs.fundedAt != rhs.fundedAt { + return false + } + if lhs.fundingOutpoint != rhs.fundingOutpoint { + return false + } + if lhs.expiresAt != rhs.expiresAt { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(fundedAt) + hasher.combine(fundingOutpoint) + hasher.combine(expiresAt) + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeLSPS1ChannelInfo: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Lsps1ChannelInfo { + return + try Lsps1ChannelInfo( + fundedAt: FfiConverterTypeLSPSDateTime.read(from: &buf), + fundingOutpoint: FfiConverterTypeOutPoint.read(from: &buf), + expiresAt: FfiConverterTypeLSPSDateTime.read(from: &buf) + ) + } + + public static func write(_ value: Lsps1ChannelInfo, into buf: inout [UInt8]) { + FfiConverterTypeLSPSDateTime.write(value.fundedAt, into: &buf) + FfiConverterTypeOutPoint.write(value.fundingOutpoint, into: &buf) + FfiConverterTypeLSPSDateTime.write(value.expiresAt, into: &buf) + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1ChannelInfo_lift(_ buf: RustBuffer) throws -> Lsps1ChannelInfo { + return try FfiConverterTypeLSPS1ChannelInfo.lift(buf) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1ChannelInfo_lower(_ value: Lsps1ChannelInfo) -> RustBuffer { + return FfiConverterTypeLSPS1ChannelInfo.lower(value) +} + + +public struct Lsps1OnchainPaymentInfo { + public var state: Lsps1PaymentState + public var expiresAt: LspsDateTime + public var feeTotalSat: UInt64 + public var orderTotalSat: UInt64 + public var address: Address + public var minOnchainPaymentConfirmations: UInt16? + public var minFeeFor0conf: FeeRate + public var refundOnchainAddress: Address? + + // Default memberwise initializers are never public by default, so we + // declare one manually. + public init(state: Lsps1PaymentState, expiresAt: LspsDateTime, feeTotalSat: UInt64, orderTotalSat: UInt64, address: Address, minOnchainPaymentConfirmations: UInt16?, minFeeFor0conf: FeeRate, refundOnchainAddress: Address?) 
{ + self.state = state + self.expiresAt = expiresAt + self.feeTotalSat = feeTotalSat + self.orderTotalSat = orderTotalSat + self.address = address + self.minOnchainPaymentConfirmations = minOnchainPaymentConfirmations + self.minFeeFor0conf = minFeeFor0conf + self.refundOnchainAddress = refundOnchainAddress + } +} + + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeLSPS1OnchainPaymentInfo: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Lsps1OnchainPaymentInfo { + return + try Lsps1OnchainPaymentInfo( + state: FfiConverterTypeLSPS1PaymentState.read(from: &buf), + expiresAt: FfiConverterTypeLSPSDateTime.read(from: &buf), + feeTotalSat: FfiConverterUInt64.read(from: &buf), + orderTotalSat: FfiConverterUInt64.read(from: &buf), + address: FfiConverterTypeAddress.read(from: &buf), + minOnchainPaymentConfirmations: FfiConverterOptionUInt16.read(from: &buf), + minFeeFor0conf: FfiConverterTypeFeeRate.read(from: &buf), + refundOnchainAddress: FfiConverterOptionTypeAddress.read(from: &buf) + ) + } + + public static func write(_ value: Lsps1OnchainPaymentInfo, into buf: inout [UInt8]) { + FfiConverterTypeLSPS1PaymentState.write(value.state, into: &buf) + FfiConverterTypeLSPSDateTime.write(value.expiresAt, into: &buf) + FfiConverterUInt64.write(value.feeTotalSat, into: &buf) + FfiConverterUInt64.write(value.orderTotalSat, into: &buf) + FfiConverterTypeAddress.write(value.address, into: &buf) + FfiConverterOptionUInt16.write(value.minOnchainPaymentConfirmations, into: &buf) + FfiConverterTypeFeeRate.write(value.minFeeFor0conf, into: &buf) + FfiConverterOptionTypeAddress.write(value.refundOnchainAddress, into: &buf) + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1OnchainPaymentInfo_lift(_ buf: RustBuffer) throws -> Lsps1OnchainPaymentInfo { + return try FfiConverterTypeLSPS1OnchainPaymentInfo.lift(buf) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1OnchainPaymentInfo_lower(_ value: Lsps1OnchainPaymentInfo) -> RustBuffer { + return FfiConverterTypeLSPS1OnchainPaymentInfo.lower(value) +} + + +public struct Lsps1OrderParams { + public var lspBalanceSat: UInt64 + public var clientBalanceSat: UInt64 + public var requiredChannelConfirmations: UInt16 + public var fundingConfirmsWithinBlocks: UInt16 + public var channelExpiryBlocks: UInt32 + public var token: String? + public var announceChannel: Bool + + // Default memberwise initializers are never public by default, so we + // declare one manually. 
+ public init(lspBalanceSat: UInt64, clientBalanceSat: UInt64, requiredChannelConfirmations: UInt16, fundingConfirmsWithinBlocks: UInt16, channelExpiryBlocks: UInt32, token: String?, announceChannel: Bool) { + self.lspBalanceSat = lspBalanceSat + self.clientBalanceSat = clientBalanceSat + self.requiredChannelConfirmations = requiredChannelConfirmations + self.fundingConfirmsWithinBlocks = fundingConfirmsWithinBlocks + self.channelExpiryBlocks = channelExpiryBlocks + self.token = token + self.announceChannel = announceChannel } } -extension LspFeeLimits: Equatable, Hashable { - public static func ==(lhs: LspFeeLimits, rhs: LspFeeLimits) -> Bool { - if lhs.maxTotalOpeningFeeMsat != rhs.maxTotalOpeningFeeMsat { +extension Lsps1OrderParams: Equatable, Hashable { + public static func ==(lhs: Lsps1OrderParams, rhs: Lsps1OrderParams) -> Bool { + if lhs.lspBalanceSat != rhs.lspBalanceSat { return false } - if lhs.maxProportionalOpeningFeePpmMsat != rhs.maxProportionalOpeningFeePpmMsat { + if lhs.clientBalanceSat != rhs.clientBalanceSat { + return false + } + if lhs.requiredChannelConfirmations != rhs.requiredChannelConfirmations { + return false + } + if lhs.fundingConfirmsWithinBlocks != rhs.fundingConfirmsWithinBlocks { + return false + } + if lhs.channelExpiryBlocks != rhs.channelExpiryBlocks { + return false + } + if lhs.token != rhs.token { + return false + } + if lhs.announceChannel != rhs.announceChannel { return false } return true } public func hash(into hasher: inout Hasher) { - hasher.combine(maxTotalOpeningFeeMsat) - hasher.combine(maxProportionalOpeningFeePpmMsat) + hasher.combine(lspBalanceSat) + hasher.combine(clientBalanceSat) + hasher.combine(requiredChannelConfirmations) + hasher.combine(fundingConfirmsWithinBlocks) + hasher.combine(channelExpiryBlocks) + hasher.combine(token) + hasher.combine(announceChannel) } } -public struct FfiConverterTypeLSPFeeLimits: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> LspFeeLimits { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeLSPS1OrderParams: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Lsps1OrderParams { return - try LspFeeLimits( - maxTotalOpeningFeeMsat: FfiConverterOptionUInt64.read(from: &buf), - maxProportionalOpeningFeePpmMsat: FfiConverterOptionUInt64.read(from: &buf) + try Lsps1OrderParams( + lspBalanceSat: FfiConverterUInt64.read(from: &buf), + clientBalanceSat: FfiConverterUInt64.read(from: &buf), + requiredChannelConfirmations: FfiConverterUInt16.read(from: &buf), + fundingConfirmsWithinBlocks: FfiConverterUInt16.read(from: &buf), + channelExpiryBlocks: FfiConverterUInt32.read(from: &buf), + token: FfiConverterOptionString.read(from: &buf), + announceChannel: FfiConverterBool.read(from: &buf) ) } - public static func write(_ value: LspFeeLimits, into buf: inout [UInt8]) { - FfiConverterOptionUInt64.write(value.maxTotalOpeningFeeMsat, into: &buf) - FfiConverterOptionUInt64.write(value.maxProportionalOpeningFeePpmMsat, into: &buf) + public static func write(_ value: Lsps1OrderParams, into buf: inout [UInt8]) { + FfiConverterUInt64.write(value.lspBalanceSat, into: &buf) + FfiConverterUInt64.write(value.clientBalanceSat, into: &buf) + FfiConverterUInt16.write(value.requiredChannelConfirmations, into: &buf) + FfiConverterUInt16.write(value.fundingConfirmsWithinBlocks, into: &buf) + FfiConverterUInt32.write(value.channelExpiryBlocks, into: &buf) + 
FfiConverterOptionString.write(value.token, into: &buf) + FfiConverterBool.write(value.announceChannel, into: &buf) } } -public func FfiConverterTypeLSPFeeLimits_lift(_ buf: RustBuffer) throws -> LspFeeLimits { - return try FfiConverterTypeLSPFeeLimits.lift(buf) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1OrderParams_lift(_ buf: RustBuffer) throws -> Lsps1OrderParams { + return try FfiConverterTypeLSPS1OrderParams.lift(buf) } -public func FfiConverterTypeLSPFeeLimits_lower(_ value: LspFeeLimits) -> RustBuffer { - return FfiConverterTypeLSPFeeLimits.lower(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1OrderParams_lower(_ value: Lsps1OrderParams) -> RustBuffer { + return FfiConverterTypeLSPS1OrderParams.lower(value) } public struct Lsps1OrderStatus { - public var orderId: OrderId - public var orderParams: OrderParameters - public var paymentOptions: PaymentInfo - public var channelState: ChannelOrderInfo? + public var orderId: Lsps1OrderId + public var orderParams: Lsps1OrderParams + public var paymentOptions: Lsps1PaymentInfo + public var channelState: Lsps1ChannelInfo? // Default memberwise initializers are never public by default, so we // declare one manually. - public init(orderId: OrderId, orderParams: OrderParameters, paymentOptions: PaymentInfo, channelState: ChannelOrderInfo?) { + public init(orderId: Lsps1OrderId, orderParams: Lsps1OrderParams, paymentOptions: Lsps1PaymentInfo, channelState: Lsps1ChannelInfo?) { self.orderId = orderId self.orderParams = orderParams self.paymentOptions = paymentOptions @@ -4251,35 +5770,92 @@ public struct Lsps1OrderStatus { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeLSPS1OrderStatus: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Lsps1OrderStatus { return try Lsps1OrderStatus( - orderId: FfiConverterTypeOrderId.read(from: &buf), - orderParams: FfiConverterTypeOrderParameters.read(from: &buf), - paymentOptions: FfiConverterTypePaymentInfo.read(from: &buf), - channelState: FfiConverterOptionTypeChannelOrderInfo.read(from: &buf) + orderId: FfiConverterTypeLSPS1OrderId.read(from: &buf), + orderParams: FfiConverterTypeLSPS1OrderParams.read(from: &buf), + paymentOptions: FfiConverterTypeLSPS1PaymentInfo.read(from: &buf), + channelState: FfiConverterOptionTypeLSPS1ChannelInfo.read(from: &buf) ) } public static func write(_ value: Lsps1OrderStatus, into buf: inout [UInt8]) { - FfiConverterTypeOrderId.write(value.orderId, into: &buf) - FfiConverterTypeOrderParameters.write(value.orderParams, into: &buf) - FfiConverterTypePaymentInfo.write(value.paymentOptions, into: &buf) - FfiConverterOptionTypeChannelOrderInfo.write(value.channelState, into: &buf) + FfiConverterTypeLSPS1OrderId.write(value.orderId, into: &buf) + FfiConverterTypeLSPS1OrderParams.write(value.orderParams, into: &buf) + FfiConverterTypeLSPS1PaymentInfo.write(value.paymentOptions, into: &buf) + FfiConverterOptionTypeLSPS1ChannelInfo.write(value.channelState, into: &buf) } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLSPS1OrderStatus_lift(_ buf: RustBuffer) throws -> Lsps1OrderStatus { return try FfiConverterTypeLSPS1OrderStatus.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLSPS1OrderStatus_lower(_ value: Lsps1OrderStatus) -> RustBuffer { return 
FfiConverterTypeLSPS1OrderStatus.lower(value) } +public struct Lsps1PaymentInfo { + public var bolt11: Lsps1Bolt11PaymentInfo? + public var onchain: Lsps1OnchainPaymentInfo? + + // Default memberwise initializers are never public by default, so we + // declare one manually. + public init(bolt11: Lsps1Bolt11PaymentInfo?, onchain: Lsps1OnchainPaymentInfo?) { + self.bolt11 = bolt11 + self.onchain = onchain + } +} + + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeLSPS1PaymentInfo: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Lsps1PaymentInfo { + return + try Lsps1PaymentInfo( + bolt11: FfiConverterOptionTypeLSPS1Bolt11PaymentInfo.read(from: &buf), + onchain: FfiConverterOptionTypeLSPS1OnchainPaymentInfo.read(from: &buf) + ) + } + + public static func write(_ value: Lsps1PaymentInfo, into buf: inout [UInt8]) { + FfiConverterOptionTypeLSPS1Bolt11PaymentInfo.write(value.bolt11, into: &buf) + FfiConverterOptionTypeLSPS1OnchainPaymentInfo.write(value.onchain, into: &buf) + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1PaymentInfo_lift(_ buf: RustBuffer) throws -> Lsps1PaymentInfo { + return try FfiConverterTypeLSPS1PaymentInfo.lift(buf) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1PaymentInfo_lower(_ value: Lsps1PaymentInfo) -> RustBuffer { + return FfiConverterTypeLSPS1PaymentInfo.lower(value) +} + + public struct Lsps2ServiceConfig { public var requireToken: String? public var advertiseService: Bool @@ -4290,10 +5866,11 @@ public struct Lsps2ServiceConfig { public var maxClientToSelfDelay: UInt32 public var minPaymentSizeMsat: UInt64 public var maxPaymentSizeMsat: UInt64 + public var clientTrustsLsp: Bool // Default memberwise initializers are never public by default, so we // declare one manually. 
- public init(requireToken: String?, advertiseService: Bool, channelOpeningFeePpm: UInt32, channelOverProvisioningPpm: UInt32, minChannelOpeningFeeMsat: UInt64, minChannelLifetime: UInt32, maxClientToSelfDelay: UInt32, minPaymentSizeMsat: UInt64, maxPaymentSizeMsat: UInt64) { + public init(requireToken: String?, advertiseService: Bool, channelOpeningFeePpm: UInt32, channelOverProvisioningPpm: UInt32, minChannelOpeningFeeMsat: UInt64, minChannelLifetime: UInt32, maxClientToSelfDelay: UInt32, minPaymentSizeMsat: UInt64, maxPaymentSizeMsat: UInt64, clientTrustsLsp: Bool) { self.requireToken = requireToken self.advertiseService = advertiseService self.channelOpeningFeePpm = channelOpeningFeePpm @@ -4303,6 +5880,7 @@ public struct Lsps2ServiceConfig { self.maxClientToSelfDelay = maxClientToSelfDelay self.minPaymentSizeMsat = minPaymentSizeMsat self.maxPaymentSizeMsat = maxPaymentSizeMsat + self.clientTrustsLsp = clientTrustsLsp } } @@ -4337,6 +5915,9 @@ extension Lsps2ServiceConfig: Equatable, Hashable { if lhs.maxPaymentSizeMsat != rhs.maxPaymentSizeMsat { return false } + if lhs.clientTrustsLsp != rhs.clientTrustsLsp { + return false + } return true } @@ -4350,10 +5931,14 @@ extension Lsps2ServiceConfig: Equatable, Hashable { hasher.combine(maxClientToSelfDelay) hasher.combine(minPaymentSizeMsat) hasher.combine(maxPaymentSizeMsat) + hasher.combine(clientTrustsLsp) } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeLSPS2ServiceConfig: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Lsps2ServiceConfig { return @@ -4366,7 +5951,8 @@ public struct FfiConverterTypeLSPS2ServiceConfig: FfiConverterRustBuffer { minChannelLifetime: FfiConverterUInt32.read(from: &buf), maxClientToSelfDelay: FfiConverterUInt32.read(from: &buf), minPaymentSizeMsat: FfiConverterUInt64.read(from: &buf), - maxPaymentSizeMsat: FfiConverterUInt64.read(from: &buf) + maxPaymentSizeMsat: FfiConverterUInt64.read(from: &buf), + clientTrustsLsp: FfiConverterBool.read(from: &buf) ) } @@ -4380,14 +5966,21 @@ public struct FfiConverterTypeLSPS2ServiceConfig: FfiConverterRustBuffer { FfiConverterUInt32.write(value.maxClientToSelfDelay, into: &buf) FfiConverterUInt64.write(value.minPaymentSizeMsat, into: &buf) FfiConverterUInt64.write(value.maxPaymentSizeMsat, into: &buf) + FfiConverterBool.write(value.clientTrustsLsp, into: &buf) } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLSPS2ServiceConfig_lift(_ buf: RustBuffer) throws -> Lsps2ServiceConfig { return try FfiConverterTypeLSPS2ServiceConfig.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLSPS2ServiceConfig_lower(_ value: Lsps2ServiceConfig) -> RustBuffer { return FfiConverterTypeLSPS2ServiceConfig.lower(value) } @@ -4437,6 +6030,9 @@ extension LogRecord: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeLogRecord: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> LogRecord { return @@ -4457,10 +6053,16 @@ public struct FfiConverterTypeLogRecord: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLogRecord_lift(_ buf: RustBuffer) throws -> LogRecord { return try FfiConverterTypeLogRecord.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func 
FfiConverterTypeLogRecord_lower(_ value: LogRecord) -> RustBuffer { return FfiConverterTypeLogRecord.lower(value) } @@ -4504,6 +6106,9 @@ extension NodeAnnouncementInfo: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeNodeAnnouncementInfo: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> NodeAnnouncementInfo { return @@ -4522,10 +6127,16 @@ public struct FfiConverterTypeNodeAnnouncementInfo: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNodeAnnouncementInfo_lift(_ buf: RustBuffer) throws -> NodeAnnouncementInfo { return try FfiConverterTypeNodeAnnouncementInfo.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNodeAnnouncementInfo_lower(_ value: NodeAnnouncementInfo) -> RustBuffer { return FfiConverterTypeNodeAnnouncementInfo.lower(value) } @@ -4563,6 +6174,9 @@ extension NodeInfo: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeNodeInfo: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> NodeInfo { return @@ -4579,10 +6193,16 @@ public struct FfiConverterTypeNodeInfo: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNodeInfo_lift(_ buf: RustBuffer) throws -> NodeInfo { return try FfiConverterTypeNodeInfo.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNodeInfo_lower(_ value: NodeInfo) -> RustBuffer { return FfiConverterTypeNodeInfo.lower(value) } @@ -4590,25 +6210,25 @@ public func FfiConverterTypeNodeInfo_lower(_ value: NodeInfo) -> RustBuffer { public struct NodeStatus { public var isRunning: Bool - public var isListening: Bool public var currentBestBlock: BestBlock public var latestLightningWalletSyncTimestamp: UInt64? public var latestOnchainWalletSyncTimestamp: UInt64? public var latestFeeRateCacheUpdateTimestamp: UInt64? public var latestRgsSnapshotTimestamp: UInt64? + public var latestPathfindingScoresSyncTimestamp: UInt64? public var latestNodeAnnouncementBroadcastTimestamp: UInt64? public var latestChannelMonitorArchivalHeight: UInt32? // Default memberwise initializers are never public by default, so we // declare one manually. - public init(isRunning: Bool, isListening: Bool, currentBestBlock: BestBlock, latestLightningWalletSyncTimestamp: UInt64?, latestOnchainWalletSyncTimestamp: UInt64?, latestFeeRateCacheUpdateTimestamp: UInt64?, latestRgsSnapshotTimestamp: UInt64?, latestNodeAnnouncementBroadcastTimestamp: UInt64?, latestChannelMonitorArchivalHeight: UInt32?) { + public init(isRunning: Bool, currentBestBlock: BestBlock, latestLightningWalletSyncTimestamp: UInt64?, latestOnchainWalletSyncTimestamp: UInt64?, latestFeeRateCacheUpdateTimestamp: UInt64?, latestRgsSnapshotTimestamp: UInt64?, latestPathfindingScoresSyncTimestamp: UInt64?, latestNodeAnnouncementBroadcastTimestamp: UInt64?, latestChannelMonitorArchivalHeight: UInt32?) 
{ self.isRunning = isRunning - self.isListening = isListening self.currentBestBlock = currentBestBlock self.latestLightningWalletSyncTimestamp = latestLightningWalletSyncTimestamp self.latestOnchainWalletSyncTimestamp = latestOnchainWalletSyncTimestamp self.latestFeeRateCacheUpdateTimestamp = latestFeeRateCacheUpdateTimestamp self.latestRgsSnapshotTimestamp = latestRgsSnapshotTimestamp + self.latestPathfindingScoresSyncTimestamp = latestPathfindingScoresSyncTimestamp self.latestNodeAnnouncementBroadcastTimestamp = latestNodeAnnouncementBroadcastTimestamp self.latestChannelMonitorArchivalHeight = latestChannelMonitorArchivalHeight } @@ -4621,9 +6241,6 @@ extension NodeStatus: Equatable, Hashable { if lhs.isRunning != rhs.isRunning { return false } - if lhs.isListening != rhs.isListening { - return false - } if lhs.currentBestBlock != rhs.currentBestBlock { return false } @@ -4636,228 +6253,80 @@ extension NodeStatus: Equatable, Hashable { if lhs.latestFeeRateCacheUpdateTimestamp != rhs.latestFeeRateCacheUpdateTimestamp { return false } - if lhs.latestRgsSnapshotTimestamp != rhs.latestRgsSnapshotTimestamp { - return false - } - if lhs.latestNodeAnnouncementBroadcastTimestamp != rhs.latestNodeAnnouncementBroadcastTimestamp { - return false - } - if lhs.latestChannelMonitorArchivalHeight != rhs.latestChannelMonitorArchivalHeight { - return false - } - return true - } - - public func hash(into hasher: inout Hasher) { - hasher.combine(isRunning) - hasher.combine(isListening) - hasher.combine(currentBestBlock) - hasher.combine(latestLightningWalletSyncTimestamp) - hasher.combine(latestOnchainWalletSyncTimestamp) - hasher.combine(latestFeeRateCacheUpdateTimestamp) - hasher.combine(latestRgsSnapshotTimestamp) - hasher.combine(latestNodeAnnouncementBroadcastTimestamp) - hasher.combine(latestChannelMonitorArchivalHeight) - } -} - - -public struct FfiConverterTypeNodeStatus: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> NodeStatus { - return - try NodeStatus( - isRunning: FfiConverterBool.read(from: &buf), - isListening: FfiConverterBool.read(from: &buf), - currentBestBlock: FfiConverterTypeBestBlock.read(from: &buf), - latestLightningWalletSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), - latestOnchainWalletSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), - latestFeeRateCacheUpdateTimestamp: FfiConverterOptionUInt64.read(from: &buf), - latestRgsSnapshotTimestamp: FfiConverterOptionUInt64.read(from: &buf), - latestNodeAnnouncementBroadcastTimestamp: FfiConverterOptionUInt64.read(from: &buf), - latestChannelMonitorArchivalHeight: FfiConverterOptionUInt32.read(from: &buf) - ) - } - - public static func write(_ value: NodeStatus, into buf: inout [UInt8]) { - FfiConverterBool.write(value.isRunning, into: &buf) - FfiConverterBool.write(value.isListening, into: &buf) - FfiConverterTypeBestBlock.write(value.currentBestBlock, into: &buf) - FfiConverterOptionUInt64.write(value.latestLightningWalletSyncTimestamp, into: &buf) - FfiConverterOptionUInt64.write(value.latestOnchainWalletSyncTimestamp, into: &buf) - FfiConverterOptionUInt64.write(value.latestFeeRateCacheUpdateTimestamp, into: &buf) - FfiConverterOptionUInt64.write(value.latestRgsSnapshotTimestamp, into: &buf) - FfiConverterOptionUInt64.write(value.latestNodeAnnouncementBroadcastTimestamp, into: &buf) - FfiConverterOptionUInt32.write(value.latestChannelMonitorArchivalHeight, into: &buf) - } -} - - -public func FfiConverterTypeNodeStatus_lift(_ buf: RustBuffer) throws 
-> NodeStatus { - return try FfiConverterTypeNodeStatus.lift(buf) -} - -public func FfiConverterTypeNodeStatus_lower(_ value: NodeStatus) -> RustBuffer { - return FfiConverterTypeNodeStatus.lower(value) -} - - -public struct OnchainPaymentInfo { - public var state: PaymentState - public var expiresAt: DateTime - public var feeTotalSat: UInt64 - public var orderTotalSat: UInt64 - public var address: Address - public var minOnchainPaymentConfirmations: UInt16? - public var minFeeFor0conf: FeeRate - public var refundOnchainAddress: Address? - - // Default memberwise initializers are never public by default, so we - // declare one manually. - public init(state: PaymentState, expiresAt: DateTime, feeTotalSat: UInt64, orderTotalSat: UInt64, address: Address, minOnchainPaymentConfirmations: UInt16?, minFeeFor0conf: FeeRate, refundOnchainAddress: Address?) { - self.state = state - self.expiresAt = expiresAt - self.feeTotalSat = feeTotalSat - self.orderTotalSat = orderTotalSat - self.address = address - self.minOnchainPaymentConfirmations = minOnchainPaymentConfirmations - self.minFeeFor0conf = minFeeFor0conf - self.refundOnchainAddress = refundOnchainAddress - } -} - - - -public struct FfiConverterTypeOnchainPaymentInfo: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> OnchainPaymentInfo { - return - try OnchainPaymentInfo( - state: FfiConverterTypePaymentState.read(from: &buf), - expiresAt: FfiConverterTypeDateTime.read(from: &buf), - feeTotalSat: FfiConverterUInt64.read(from: &buf), - orderTotalSat: FfiConverterUInt64.read(from: &buf), - address: FfiConverterTypeAddress.read(from: &buf), - minOnchainPaymentConfirmations: FfiConverterOptionUInt16.read(from: &buf), - minFeeFor0conf: FfiConverterTypeFeeRate.read(from: &buf), - refundOnchainAddress: FfiConverterOptionTypeAddress.read(from: &buf) - ) - } - - public static func write(_ value: OnchainPaymentInfo, into buf: inout [UInt8]) { - FfiConverterTypePaymentState.write(value.state, into: &buf) - FfiConverterTypeDateTime.write(value.expiresAt, into: &buf) - FfiConverterUInt64.write(value.feeTotalSat, into: &buf) - FfiConverterUInt64.write(value.orderTotalSat, into: &buf) - FfiConverterTypeAddress.write(value.address, into: &buf) - FfiConverterOptionUInt16.write(value.minOnchainPaymentConfirmations, into: &buf) - FfiConverterTypeFeeRate.write(value.minFeeFor0conf, into: &buf) - FfiConverterOptionTypeAddress.write(value.refundOnchainAddress, into: &buf) - } -} - - -public func FfiConverterTypeOnchainPaymentInfo_lift(_ buf: RustBuffer) throws -> OnchainPaymentInfo { - return try FfiConverterTypeOnchainPaymentInfo.lift(buf) -} - -public func FfiConverterTypeOnchainPaymentInfo_lower(_ value: OnchainPaymentInfo) -> RustBuffer { - return FfiConverterTypeOnchainPaymentInfo.lower(value) -} - - -public struct OrderParameters { - public var lspBalanceSat: UInt64 - public var clientBalanceSat: UInt64 - public var requiredChannelConfirmations: UInt16 - public var fundingConfirmsWithinBlocks: UInt16 - public var channelExpiryBlocks: UInt32 - public var token: String? - public var announceChannel: Bool - - // Default memberwise initializers are never public by default, so we - // declare one manually. 
- public init(lspBalanceSat: UInt64, clientBalanceSat: UInt64, requiredChannelConfirmations: UInt16, fundingConfirmsWithinBlocks: UInt16, channelExpiryBlocks: UInt32, token: String?, announceChannel: Bool) { - self.lspBalanceSat = lspBalanceSat - self.clientBalanceSat = clientBalanceSat - self.requiredChannelConfirmations = requiredChannelConfirmations - self.fundingConfirmsWithinBlocks = fundingConfirmsWithinBlocks - self.channelExpiryBlocks = channelExpiryBlocks - self.token = token - self.announceChannel = announceChannel - } -} - - - -extension OrderParameters: Equatable, Hashable { - public static func ==(lhs: OrderParameters, rhs: OrderParameters) -> Bool { - if lhs.lspBalanceSat != rhs.lspBalanceSat { - return false - } - if lhs.clientBalanceSat != rhs.clientBalanceSat { - return false - } - if lhs.requiredChannelConfirmations != rhs.requiredChannelConfirmations { - return false - } - if lhs.fundingConfirmsWithinBlocks != rhs.fundingConfirmsWithinBlocks { + if lhs.latestRgsSnapshotTimestamp != rhs.latestRgsSnapshotTimestamp { return false } - if lhs.channelExpiryBlocks != rhs.channelExpiryBlocks { + if lhs.latestPathfindingScoresSyncTimestamp != rhs.latestPathfindingScoresSyncTimestamp { return false } - if lhs.token != rhs.token { + if lhs.latestNodeAnnouncementBroadcastTimestamp != rhs.latestNodeAnnouncementBroadcastTimestamp { return false } - if lhs.announceChannel != rhs.announceChannel { + if lhs.latestChannelMonitorArchivalHeight != rhs.latestChannelMonitorArchivalHeight { return false } return true } public func hash(into hasher: inout Hasher) { - hasher.combine(lspBalanceSat) - hasher.combine(clientBalanceSat) - hasher.combine(requiredChannelConfirmations) - hasher.combine(fundingConfirmsWithinBlocks) - hasher.combine(channelExpiryBlocks) - hasher.combine(token) - hasher.combine(announceChannel) + hasher.combine(isRunning) + hasher.combine(currentBestBlock) + hasher.combine(latestLightningWalletSyncTimestamp) + hasher.combine(latestOnchainWalletSyncTimestamp) + hasher.combine(latestFeeRateCacheUpdateTimestamp) + hasher.combine(latestRgsSnapshotTimestamp) + hasher.combine(latestPathfindingScoresSyncTimestamp) + hasher.combine(latestNodeAnnouncementBroadcastTimestamp) + hasher.combine(latestChannelMonitorArchivalHeight) } } -public struct FfiConverterTypeOrderParameters: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> OrderParameters { +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeNodeStatus: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> NodeStatus { return - try OrderParameters( - lspBalanceSat: FfiConverterUInt64.read(from: &buf), - clientBalanceSat: FfiConverterUInt64.read(from: &buf), - requiredChannelConfirmations: FfiConverterUInt16.read(from: &buf), - fundingConfirmsWithinBlocks: FfiConverterUInt16.read(from: &buf), - channelExpiryBlocks: FfiConverterUInt32.read(from: &buf), - token: FfiConverterOptionString.read(from: &buf), - announceChannel: FfiConverterBool.read(from: &buf) + try NodeStatus( + isRunning: FfiConverterBool.read(from: &buf), + currentBestBlock: FfiConverterTypeBestBlock.read(from: &buf), + latestLightningWalletSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), + latestOnchainWalletSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), + latestFeeRateCacheUpdateTimestamp: FfiConverterOptionUInt64.read(from: &buf), + latestRgsSnapshotTimestamp: 
FfiConverterOptionUInt64.read(from: &buf), + latestPathfindingScoresSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), + latestNodeAnnouncementBroadcastTimestamp: FfiConverterOptionUInt64.read(from: &buf), + latestChannelMonitorArchivalHeight: FfiConverterOptionUInt32.read(from: &buf) ) } - public static func write(_ value: OrderParameters, into buf: inout [UInt8]) { - FfiConverterUInt64.write(value.lspBalanceSat, into: &buf) - FfiConverterUInt64.write(value.clientBalanceSat, into: &buf) - FfiConverterUInt16.write(value.requiredChannelConfirmations, into: &buf) - FfiConverterUInt16.write(value.fundingConfirmsWithinBlocks, into: &buf) - FfiConverterUInt32.write(value.channelExpiryBlocks, into: &buf) - FfiConverterOptionString.write(value.token, into: &buf) - FfiConverterBool.write(value.announceChannel, into: &buf) + public static func write(_ value: NodeStatus, into buf: inout [UInt8]) { + FfiConverterBool.write(value.isRunning, into: &buf) + FfiConverterTypeBestBlock.write(value.currentBestBlock, into: &buf) + FfiConverterOptionUInt64.write(value.latestLightningWalletSyncTimestamp, into: &buf) + FfiConverterOptionUInt64.write(value.latestOnchainWalletSyncTimestamp, into: &buf) + FfiConverterOptionUInt64.write(value.latestFeeRateCacheUpdateTimestamp, into: &buf) + FfiConverterOptionUInt64.write(value.latestRgsSnapshotTimestamp, into: &buf) + FfiConverterOptionUInt64.write(value.latestPathfindingScoresSyncTimestamp, into: &buf) + FfiConverterOptionUInt64.write(value.latestNodeAnnouncementBroadcastTimestamp, into: &buf) + FfiConverterOptionUInt32.write(value.latestChannelMonitorArchivalHeight, into: &buf) } } -public func FfiConverterTypeOrderParameters_lift(_ buf: RustBuffer) throws -> OrderParameters { - return try FfiConverterTypeOrderParameters.lift(buf) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeNodeStatus_lift(_ buf: RustBuffer) throws -> NodeStatus { + return try FfiConverterTypeNodeStatus.lift(buf) } -public func FfiConverterTypeOrderParameters_lower(_ value: OrderParameters) -> RustBuffer { - return FfiConverterTypeOrderParameters.lower(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeNodeStatus_lower(_ value: NodeStatus) -> RustBuffer { + return FfiConverterTypeNodeStatus.lower(value) } @@ -4893,6 +6362,9 @@ extension OutPoint: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeOutPoint: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> OutPoint { return @@ -4909,10 +6381,16 @@ public struct FfiConverterTypeOutPoint: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeOutPoint_lift(_ buf: RustBuffer) throws -> OutPoint { return try FfiConverterTypeOutPoint.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeOutPoint_lower(_ value: OutPoint) -> RustBuffer { return FfiConverterTypeOutPoint.lower(value) } @@ -4980,6 +6458,9 @@ extension PaymentDetails: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePaymentDetails: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> PaymentDetails { return @@ -5006,54 +6487,21 @@ public struct FfiConverterTypePaymentDetails: FfiConverterRustBuffer { } +#if swift(>=5.8) 
+@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentDetails_lift(_ buf: RustBuffer) throws -> PaymentDetails { return try FfiConverterTypePaymentDetails.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentDetails_lower(_ value: PaymentDetails) -> RustBuffer { return FfiConverterTypePaymentDetails.lower(value) } -public struct PaymentInfo { - public var bolt11: Bolt11PaymentInfo? - public var onchain: OnchainPaymentInfo? - - // Default memberwise initializers are never public by default, so we - // declare one manually. - public init(bolt11: Bolt11PaymentInfo?, onchain: OnchainPaymentInfo?) { - self.bolt11 = bolt11 - self.onchain = onchain - } -} - - - -public struct FfiConverterTypePaymentInfo: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> PaymentInfo { - return - try PaymentInfo( - bolt11: FfiConverterOptionTypeBolt11PaymentInfo.read(from: &buf), - onchain: FfiConverterOptionTypeOnchainPaymentInfo.read(from: &buf) - ) - } - - public static func write(_ value: PaymentInfo, into buf: inout [UInt8]) { - FfiConverterOptionTypeBolt11PaymentInfo.write(value.bolt11, into: &buf) - FfiConverterOptionTypeOnchainPaymentInfo.write(value.onchain, into: &buf) - } -} - - -public func FfiConverterTypePaymentInfo_lift(_ buf: RustBuffer) throws -> PaymentInfo { - return try FfiConverterTypePaymentInfo.lift(buf) -} - -public func FfiConverterTypePaymentInfo_lower(_ value: PaymentInfo) -> RustBuffer { - return FfiConverterTypePaymentInfo.lower(value) -} - - public struct PeerDetails { public var nodeId: PublicKey public var address: SocketAddress @@ -5098,6 +6546,9 @@ extension PeerDetails: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePeerDetails: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> PeerDetails { return @@ -5118,10 +6569,16 @@ public struct FfiConverterTypePeerDetails: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePeerDetails_lift(_ buf: RustBuffer) throws -> PeerDetails { return try FfiConverterTypePeerDetails.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePeerDetails_lower(_ value: PeerDetails) -> RustBuffer { return FfiConverterTypePeerDetails.lower(value) } @@ -5183,6 +6640,9 @@ extension RouteHintHop: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeRouteHintHop: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> RouteHintHop { return @@ -5207,15 +6667,103 @@ public struct FfiConverterTypeRouteHintHop: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeRouteHintHop_lift(_ buf: RustBuffer) throws -> RouteHintHop { return try FfiConverterTypeRouteHintHop.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeRouteHintHop_lower(_ value: RouteHintHop) -> RustBuffer { return FfiConverterTypeRouteHintHop.lower(value) } +public struct RouteParametersConfig { + public var maxTotalRoutingFeeMsat: UInt64? 
+ public var maxTotalCltvExpiryDelta: UInt32 + public var maxPathCount: UInt8 + public var maxChannelSaturationPowerOfHalf: UInt8 + + // Default memberwise initializers are never public by default, so we + // declare one manually. + public init(maxTotalRoutingFeeMsat: UInt64?, maxTotalCltvExpiryDelta: UInt32, maxPathCount: UInt8, maxChannelSaturationPowerOfHalf: UInt8) { + self.maxTotalRoutingFeeMsat = maxTotalRoutingFeeMsat + self.maxTotalCltvExpiryDelta = maxTotalCltvExpiryDelta + self.maxPathCount = maxPathCount + self.maxChannelSaturationPowerOfHalf = maxChannelSaturationPowerOfHalf + } +} + + + +extension RouteParametersConfig: Equatable, Hashable { + public static func ==(lhs: RouteParametersConfig, rhs: RouteParametersConfig) -> Bool { + if lhs.maxTotalRoutingFeeMsat != rhs.maxTotalRoutingFeeMsat { + return false + } + if lhs.maxTotalCltvExpiryDelta != rhs.maxTotalCltvExpiryDelta { + return false + } + if lhs.maxPathCount != rhs.maxPathCount { + return false + } + if lhs.maxChannelSaturationPowerOfHalf != rhs.maxChannelSaturationPowerOfHalf { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(maxTotalRoutingFeeMsat) + hasher.combine(maxTotalCltvExpiryDelta) + hasher.combine(maxPathCount) + hasher.combine(maxChannelSaturationPowerOfHalf) + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeRouteParametersConfig: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> RouteParametersConfig { + return + try RouteParametersConfig( + maxTotalRoutingFeeMsat: FfiConverterOptionUInt64.read(from: &buf), + maxTotalCltvExpiryDelta: FfiConverterUInt32.read(from: &buf), + maxPathCount: FfiConverterUInt8.read(from: &buf), + maxChannelSaturationPowerOfHalf: FfiConverterUInt8.read(from: &buf) + ) + } + + public static func write(_ value: RouteParametersConfig, into buf: inout [UInt8]) { + FfiConverterOptionUInt64.write(value.maxTotalRoutingFeeMsat, into: &buf) + FfiConverterUInt32.write(value.maxTotalCltvExpiryDelta, into: &buf) + FfiConverterUInt8.write(value.maxPathCount, into: &buf) + FfiConverterUInt8.write(value.maxChannelSaturationPowerOfHalf, into: &buf) + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeRouteParametersConfig_lift(_ buf: RustBuffer) throws -> RouteParametersConfig { + return try FfiConverterTypeRouteParametersConfig.lift(buf) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeRouteParametersConfig_lower(_ value: RouteParametersConfig) -> RustBuffer { + return FfiConverterTypeRouteParametersConfig.lower(value) +} + + public struct RoutingFees { public var baseMsat: UInt32 public var proportionalMillionths: UInt32 @@ -5248,6 +6796,9 @@ extension RoutingFees: Equatable, Hashable { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeRoutingFees: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> RoutingFees { return @@ -5264,86 +6815,83 @@ public struct FfiConverterTypeRoutingFees: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeRoutingFees_lift(_ buf: RustBuffer) throws -> RoutingFees { return try FfiConverterTypeRoutingFees.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeRoutingFees_lower(_ 
value: RoutingFees) -> RustBuffer { return FfiConverterTypeRoutingFees.lower(value) } +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. -public struct SendingParameters { - public var maxTotalRoutingFeeMsat: MaxTotalRoutingFeeLimit? - public var maxTotalCltvExpiryDelta: UInt32? - public var maxPathCount: UInt8? - public var maxChannelSaturationPowerOfHalf: UInt8? - - // Default memberwise initializers are never public by default, so we - // declare one manually. - public init(maxTotalRoutingFeeMsat: MaxTotalRoutingFeeLimit?, maxTotalCltvExpiryDelta: UInt32?, maxPathCount: UInt8?, maxChannelSaturationPowerOfHalf: UInt8?) { - self.maxTotalRoutingFeeMsat = maxTotalRoutingFeeMsat - self.maxTotalCltvExpiryDelta = maxTotalCltvExpiryDelta - self.maxPathCount = maxPathCount - self.maxChannelSaturationPowerOfHalf = maxChannelSaturationPowerOfHalf - } +public enum AsyncPaymentsRole { + + case client + case server } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeAsyncPaymentsRole: FfiConverterRustBuffer { + typealias SwiftType = AsyncPaymentsRole -extension SendingParameters: Equatable, Hashable { - public static func ==(lhs: SendingParameters, rhs: SendingParameters) -> Bool { - if lhs.maxTotalRoutingFeeMsat != rhs.maxTotalRoutingFeeMsat { - return false - } - if lhs.maxTotalCltvExpiryDelta != rhs.maxTotalCltvExpiryDelta { - return false - } - if lhs.maxPathCount != rhs.maxPathCount { - return false - } - if lhs.maxChannelSaturationPowerOfHalf != rhs.maxChannelSaturationPowerOfHalf { - return false + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> AsyncPaymentsRole { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .client + + case 2: return .server + + default: throw UniffiInternalError.unexpectedEnumCase } - return true } - public func hash(into hasher: inout Hasher) { - hasher.combine(maxTotalRoutingFeeMsat) - hasher.combine(maxTotalCltvExpiryDelta) - hasher.combine(maxPathCount) - hasher.combine(maxChannelSaturationPowerOfHalf) + public static func write(_ value: AsyncPaymentsRole, into buf: inout [UInt8]) { + switch value { + + + case .client: + writeInt(&buf, Int32(1)) + + + case .server: + writeInt(&buf, Int32(2)) + + } } } -public struct FfiConverterTypeSendingParameters: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SendingParameters { - return - try SendingParameters( - maxTotalRoutingFeeMsat: FfiConverterOptionTypeMaxTotalRoutingFeeLimit.read(from: &buf), - maxTotalCltvExpiryDelta: FfiConverterOptionUInt32.read(from: &buf), - maxPathCount: FfiConverterOptionUInt8.read(from: &buf), - maxChannelSaturationPowerOfHalf: FfiConverterOptionUInt8.read(from: &buf) - ) - } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeAsyncPaymentsRole_lift(_ buf: RustBuffer) throws -> AsyncPaymentsRole { + return try FfiConverterTypeAsyncPaymentsRole.lift(buf) +} - public static func write(_ value: SendingParameters, into buf: inout [UInt8]) { - FfiConverterOptionTypeMaxTotalRoutingFeeLimit.write(value.maxTotalRoutingFeeMsat, into: &buf) - FfiConverterOptionUInt32.write(value.maxTotalCltvExpiryDelta, into: &buf) - FfiConverterOptionUInt8.write(value.maxPathCount, into: &buf) - FfiConverterOptionUInt8.write(value.maxChannelSaturationPowerOfHalf, into: &buf) - } +#if swift(>=5.8) +@_documentation(visibility: 
private) +#endif +public func FfiConverterTypeAsyncPaymentsRole_lower(_ value: AsyncPaymentsRole) -> RustBuffer { + return FfiConverterTypeAsyncPaymentsRole.lower(value) } -public func FfiConverterTypeSendingParameters_lift(_ buf: RustBuffer) throws -> SendingParameters { - return try FfiConverterTypeSendingParameters.lift(buf) -} -public func FfiConverterTypeSendingParameters_lower(_ value: SendingParameters) -> RustBuffer { - return FfiConverterTypeSendingParameters.lower(value) -} +extension AsyncPaymentsRole: Equatable, Hashable {} + + // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. @@ -5357,6 +6905,9 @@ public enum BalanceSource { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeBalanceSource: FfiConverterRustBuffer { typealias SwiftType = BalanceSource @@ -5400,10 +6951,16 @@ public struct FfiConverterTypeBalanceSource: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBalanceSource_lift(_ buf: RustBuffer) throws -> BalanceSource { return try FfiConverterTypeBalanceSource.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBalanceSource_lower(_ value: BalanceSource) -> RustBuffer { return FfiConverterTypeBalanceSource.lower(value) } @@ -5426,6 +6983,9 @@ public enum Bolt11InvoiceDescription { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeBolt11InvoiceDescription: FfiConverterRustBuffer { typealias SwiftType = Bolt11InvoiceDescription @@ -5461,10 +7021,16 @@ public struct FfiConverterTypeBolt11InvoiceDescription: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBolt11InvoiceDescription_lift(_ buf: RustBuffer) throws -> Bolt11InvoiceDescription { return try FfiConverterTypeBolt11InvoiceDescription.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBolt11InvoiceDescription_lower(_ value: Bolt11InvoiceDescription) -> RustBuffer { return FfiConverterTypeBolt11InvoiceDescription.lower(value) } @@ -5494,6 +7060,8 @@ public enum BuildError { case InvalidNodeAlias(message: String) + case RuntimeSetupFailed(message: String) + case ReadFailed(message: String) case WriteFailed(message: String) @@ -5508,9 +7076,14 @@ public enum BuildError { case NetworkMismatch(message: String) + case AsyncPaymentsConfigMismatch(message: String) + } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeBuildError: FfiConverterRustBuffer { typealias SwiftType = BuildError @@ -5549,31 +7122,39 @@ public struct FfiConverterTypeBuildError: FfiConverterRustBuffer { message: try FfiConverterString.read(from: &buf) ) - case 8: return .ReadFailed( + case 8: return .RuntimeSetupFailed( + message: try FfiConverterString.read(from: &buf) + ) + + case 9: return .ReadFailed( message: try FfiConverterString.read(from: &buf) ) - case 9: return .WriteFailed( + case 10: return .WriteFailed( message: try FfiConverterString.read(from: &buf) ) - case 10: return .StoragePathAccessFailed( + case 11: return .StoragePathAccessFailed( message: try FfiConverterString.read(from: &buf) ) - case 11: return .KvStoreSetupFailed( + case 12: return .KvStoreSetupFailed( message: try FfiConverterString.read(from: &buf) ) - case 12: return .WalletSetupFailed( + case 13: return 
.WalletSetupFailed( message: try FfiConverterString.read(from: &buf) ) - case 13: return .LoggerSetupFailed( + case 14: return .LoggerSetupFailed( message: try FfiConverterString.read(from: &buf) ) - case 14: return .NetworkMismatch( + case 15: return .NetworkMismatch( + message: try FfiConverterString.read(from: &buf) + ) + + case 16: return .AsyncPaymentsConfigMismatch( message: try FfiConverterString.read(from: &buf) ) @@ -5602,20 +7183,24 @@ public struct FfiConverterTypeBuildError: FfiConverterRustBuffer { writeInt(&buf, Int32(6)) case .InvalidNodeAlias(_ /* message is ignored*/): writeInt(&buf, Int32(7)) - case .ReadFailed(_ /* message is ignored*/): + case .RuntimeSetupFailed(_ /* message is ignored*/): writeInt(&buf, Int32(8)) - case .WriteFailed(_ /* message is ignored*/): + case .ReadFailed(_ /* message is ignored*/): writeInt(&buf, Int32(9)) - case .StoragePathAccessFailed(_ /* message is ignored*/): + case .WriteFailed(_ /* message is ignored*/): writeInt(&buf, Int32(10)) - case .KvStoreSetupFailed(_ /* message is ignored*/): + case .StoragePathAccessFailed(_ /* message is ignored*/): writeInt(&buf, Int32(11)) - case .WalletSetupFailed(_ /* message is ignored*/): + case .KvStoreSetupFailed(_ /* message is ignored*/): writeInt(&buf, Int32(12)) - case .LoggerSetupFailed(_ /* message is ignored*/): + case .WalletSetupFailed(_ /* message is ignored*/): writeInt(&buf, Int32(13)) - case .NetworkMismatch(_ /* message is ignored*/): + case .LoggerSetupFailed(_ /* message is ignored*/): writeInt(&buf, Int32(14)) + case .NetworkMismatch(_ /* message is ignored*/): + writeInt(&buf, Int32(15)) + case .AsyncPaymentsConfigMismatch(_ /* message is ignored*/): + writeInt(&buf, Int32(16)) } @@ -5625,7 +7210,11 @@ public struct FfiConverterTypeBuildError: FfiConverterRustBuffer { extension BuildError: Equatable, Hashable {} -extension BuildError: Error { } +extension BuildError: Foundation.LocalizedError { + public var errorDescription: String? { + String(reflecting: self) + } +} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. @@ -5634,7 +7223,7 @@ public enum ClosureReason { case counterpartyForceClosed(peerMsg: UntrustedString ) - case holderForceClosed(broadcastedLatestTxn: Bool? + case holderForceClosed(broadcastedLatestTxn: Bool?, message: String ) case legacyCooperativeClosure case counterpartyInitiatedCooperativeClosure @@ -5646,13 +7235,18 @@ public enum ClosureReason { case disconnectedPeer case outdatedChannelManager case counterpartyCoopClosedUnfundedChannel + case locallyCoopClosedUnfundedChannel case fundingBatchClosure - case htlCsTimedOut + case htlCsTimedOut(paymentHash: PaymentHash? 
+ ) case peerFeerateTooLow(peerFeerateSatPerKw: UInt32, requiredFeerateSatPerKw: UInt32 ) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { typealias SwiftType = ClosureReason @@ -5663,7 +7257,7 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { case 1: return .counterpartyForceClosed(peerMsg: try FfiConverterTypeUntrustedString.read(from: &buf) ) - case 2: return .holderForceClosed(broadcastedLatestTxn: try FfiConverterOptionBool.read(from: &buf) + case 2: return .holderForceClosed(broadcastedLatestTxn: try FfiConverterOptionBool.read(from: &buf), message: try FfiConverterString.read(from: &buf) ) case 3: return .legacyCooperativeClosure @@ -5685,11 +7279,14 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { case 11: return .counterpartyCoopClosedUnfundedChannel - case 12: return .fundingBatchClosure + case 12: return .locallyCoopClosedUnfundedChannel - case 13: return .htlCsTimedOut + case 13: return .fundingBatchClosure - case 14: return .peerFeerateTooLow(peerFeerateSatPerKw: try FfiConverterUInt32.read(from: &buf), requiredFeerateSatPerKw: try FfiConverterUInt32.read(from: &buf) + case 14: return .htlCsTimedOut(paymentHash: try FfiConverterOptionTypePaymentHash.read(from: &buf) + ) + + case 15: return .peerFeerateTooLow(peerFeerateSatPerKw: try FfiConverterUInt32.read(from: &buf), requiredFeerateSatPerKw: try FfiConverterUInt32.read(from: &buf) ) default: throw UniffiInternalError.unexpectedEnumCase @@ -5705,9 +7302,10 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { FfiConverterTypeUntrustedString.write(peerMsg, into: &buf) - case let .holderForceClosed(broadcastedLatestTxn): + case let .holderForceClosed(broadcastedLatestTxn,message): writeInt(&buf, Int32(2)) FfiConverterOptionBool.write(broadcastedLatestTxn, into: &buf) + FfiConverterString.write(message, into: &buf) case .legacyCooperativeClosure: @@ -5747,16 +7345,21 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { writeInt(&buf, Int32(11)) - case .fundingBatchClosure: + case .locallyCoopClosedUnfundedChannel: writeInt(&buf, Int32(12)) - case .htlCsTimedOut: + case .fundingBatchClosure: writeInt(&buf, Int32(13)) - case let .peerFeerateTooLow(peerFeerateSatPerKw,requiredFeerateSatPerKw): + case let .htlCsTimedOut(paymentHash): writeInt(&buf, Int32(14)) + FfiConverterOptionTypePaymentHash.write(paymentHash, into: &buf) + + + case let .peerFeerateTooLow(peerFeerateSatPerKw,requiredFeerateSatPerKw): + writeInt(&buf, Int32(15)) FfiConverterUInt32.write(peerFeerateSatPerKw, into: &buf) FfiConverterUInt32.write(requiredFeerateSatPerKw, into: &buf) @@ -5765,10 +7368,16 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeClosureReason_lift(_ buf: RustBuffer) throws -> ClosureReason { return try FfiConverterTypeClosureReason.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeClosureReason_lower(_ value: ClosureReason) -> RustBuffer { return FfiConverterTypeClosureReason.lower(value) } @@ -5790,6 +7399,9 @@ public enum ConfirmationStatus { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeConfirmationStatus: FfiConverterRustBuffer { typealias SwiftType = ConfirmationStatus @@ -5825,10 +7437,16 @@ public struct FfiConverterTypeConfirmationStatus: 
FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeConfirmationStatus_lift(_ buf: RustBuffer) throws -> ConfirmationStatus { return try FfiConverterTypeConfirmationStatus.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeConfirmationStatus_lower(_ value: ConfirmationStatus) -> RustBuffer { return FfiConverterTypeConfirmationStatus.lower(value) } @@ -5852,6 +7470,9 @@ public enum Currency { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeCurrency: FfiConverterRustBuffer { typealias SwiftType = Currency @@ -5901,10 +7522,16 @@ public struct FfiConverterTypeCurrency: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeCurrency_lift(_ buf: RustBuffer) throws -> Currency { return try FfiConverterTypeCurrency.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeCurrency_lower(_ value: Currency) -> RustBuffer { return FfiConverterTypeCurrency.lower(value) } @@ -5932,13 +7559,20 @@ public enum Event { ) case channelPending(channelId: ChannelId, userChannelId: UserChannelId, formerTemporaryChannelId: ChannelId, counterpartyNodeId: PublicKey, fundingTxo: OutPoint ) - case channelReady(channelId: ChannelId, userChannelId: UserChannelId, counterpartyNodeId: PublicKey? + case channelReady(channelId: ChannelId, userChannelId: UserChannelId, counterpartyNodeId: PublicKey?, fundingTxo: OutPoint? ) case channelClosed(channelId: ChannelId, userChannelId: UserChannelId, counterpartyNodeId: PublicKey?, reason: ClosureReason? ) + case splicePending(channelId: ChannelId, userChannelId: UserChannelId, counterpartyNodeId: PublicKey, newFundingTxo: OutPoint + ) + case spliceFailed(channelId: ChannelId, userChannelId: UserChannelId, counterpartyNodeId: PublicKey, abandonedFundingTxo: OutPoint? 
+ ) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeEvent: FfiConverterRustBuffer { typealias SwiftType = Event @@ -5964,12 +7598,18 @@ public struct FfiConverterTypeEvent: FfiConverterRustBuffer { case 6: return .channelPending(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), formerTemporaryChannelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), fundingTxo: try FfiConverterTypeOutPoint.read(from: &buf) ) - case 7: return .channelReady(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf) + case 7: return .channelReady(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf), fundingTxo: try FfiConverterOptionTypeOutPoint.read(from: &buf) ) case 8: return .channelClosed(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf), reason: try FfiConverterOptionTypeClosureReason.read(from: &buf) ) + case 9: return .splicePending(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), newFundingTxo: try FfiConverterTypeOutPoint.read(from: &buf) + ) + + case 10: return .spliceFailed(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), abandonedFundingTxo: try FfiConverterOptionTypeOutPoint.read(from: &buf) + ) + default: throw UniffiInternalError.unexpectedEnumCase } } @@ -6033,11 +7673,12 @@ public struct FfiConverterTypeEvent: FfiConverterRustBuffer { FfiConverterTypeOutPoint.write(fundingTxo, into: &buf) - case let .channelReady(channelId,userChannelId,counterpartyNodeId): + case let .channelReady(channelId,userChannelId,counterpartyNodeId,fundingTxo): writeInt(&buf, Int32(7)) FfiConverterTypeChannelId.write(channelId, into: &buf) FfiConverterTypeUserChannelId.write(userChannelId, into: &buf) FfiConverterOptionTypePublicKey.write(counterpartyNodeId, into: &buf) + FfiConverterOptionTypeOutPoint.write(fundingTxo, into: &buf) case let .channelClosed(channelId,userChannelId,counterpartyNodeId,reason): @@ -6047,15 +7688,37 @@ public struct FfiConverterTypeEvent: FfiConverterRustBuffer { FfiConverterOptionTypePublicKey.write(counterpartyNodeId, into: &buf) FfiConverterOptionTypeClosureReason.write(reason, into: &buf) + + case let .splicePending(channelId,userChannelId,counterpartyNodeId,newFundingTxo): + writeInt(&buf, Int32(9)) + FfiConverterTypeChannelId.write(channelId, into: &buf) + FfiConverterTypeUserChannelId.write(userChannelId, into: &buf) + FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) + FfiConverterTypeOutPoint.write(newFundingTxo, into: &buf) + + + case let .spliceFailed(channelId,userChannelId,counterpartyNodeId,abandonedFundingTxo): + writeInt(&buf, Int32(10)) + FfiConverterTypeChannelId.write(channelId, into: &buf) + FfiConverterTypeUserChannelId.write(userChannelId, into: &buf) + 
FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) + FfiConverterOptionTypeOutPoint.write(abandonedFundingTxo, into: &buf) + } } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeEvent_lift(_ buf: RustBuffer) throws -> Event { return try FfiConverterTypeEvent.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeEvent_lower(_ value: Event) -> RustBuffer { return FfiConverterTypeEvent.lower(value) } @@ -6066,6 +7729,77 @@ extension Event: Equatable, Hashable {} +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + +public enum Lsps1PaymentState { + + case expectPayment + case paid + case refunded +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeLSPS1PaymentState: FfiConverterRustBuffer { + typealias SwiftType = Lsps1PaymentState + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Lsps1PaymentState { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .expectPayment + + case 2: return .paid + + case 3: return .refunded + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: Lsps1PaymentState, into buf: inout [UInt8]) { + switch value { + + + case .expectPayment: + writeInt(&buf, Int32(1)) + + + case .paid: + writeInt(&buf, Int32(2)) + + + case .refunded: + writeInt(&buf, Int32(3)) + + } + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1PaymentState_lift(_ buf: RustBuffer) throws -> Lsps1PaymentState { + return try FfiConverterTypeLSPS1PaymentState.lift(buf) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1PaymentState_lower(_ value: Lsps1PaymentState) -> RustBuffer { + return FfiConverterTypeLSPS1PaymentState.lower(value) +} + + + +extension Lsps1PaymentState: Equatable, Hashable {} + + + // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
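The hunks above rename the LSPS1 records (`OnchainPaymentInfo` to `Lsps1OnchainPaymentInfo`, `PaymentInfo` to `Lsps1PaymentInfo`, `OrderParameters` to `Lsps1OrderParams`, and so on) and introduce the `Lsps1PaymentState` enum. As a minimal sketch of how downstream Swift code might consume the renamed types, using only fields declared in this patch (the helper name `describeOnchainPayment` is illustrative and not part of the bindings):

func describeOnchainPayment(_ info: Lsps1OnchainPaymentInfo) -> String {
    switch info.state {
    case .expectPayment:
        // Order is awaiting payment: surface amount, fee, and target address.
        return "send \(info.orderTotalSat) sat to \(info.address) (fee: \(info.feeTotalSat) sat, expires: \(info.expiresAt))"
    case .paid:
        return "order paid"
    case .refunded:
        // `refundOnchainAddress` is optional; include it only when present.
        return "refunded" + (info.refundOnchainAddress.map { " to \($0)" } ?? "")
    }
}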
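The diff likewise replaces `SendingParameters` with `RouteParametersConfig`, dropping the `MaxTotalRoutingFeeLimit` wrapper enum in favor of a plain optional `UInt64` and making the remaining fields non-optional. A sketch of constructing the new record; the initializer signature is taken from this patch, while the concrete values are purely illustrative:

let routeParams = RouteParametersConfig(
    maxTotalRoutingFeeMsat: 5_000,       // plain `UInt64?` now, no wrapper enum
    maxTotalCltvExpiryDelta: 1008,
    maxPathCount: 10,
    maxChannelSaturationPowerOfHalf: 2
)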
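Finally, `Event` gains `splicePending` and `spliceFailed` variants, and `channelReady` now carries an optional `fundingTxo`. A sketch of pattern-matching the new shape, assuming a hypothetical `handleEvent` entry point (the `Event` cases and their associated values are exactly as declared above):

func handleEvent(_ event: Event) {
    switch event {
    case let .channelReady(channelId, _, _, fundingTxo):
        // `fundingTxo` is the newly added optional field.
        print("channel \(channelId) ready, funding: \(String(describing: fundingTxo))")
    case let .splicePending(channelId, _, _, newFundingTxo):
        print("splice pending on \(channelId), new funding: \(newFundingTxo)")
    case let .spliceFailed(channelId, _, _, abandonedFundingTxo):
        print("splice failed on \(channelId), abandoned: \(String(describing: abandonedFundingTxo))")
    default:
        // Remaining variants (payments, channel lifecycle) are unchanged here.
        break
    }
}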
@@ -6086,6 +7820,9 @@ public enum LightningBalance { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeLightningBalance: FfiConverterRustBuffer { typealias SwiftType = LightningBalance @@ -6180,10 +7917,16 @@ public struct FfiConverterTypeLightningBalance: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLightningBalance_lift(_ buf: RustBuffer) throws -> LightningBalance { return try FfiConverterTypeLightningBalance.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLightningBalance_lower(_ value: LightningBalance) -> RustBuffer { return FfiConverterTypeLightningBalance.lower(value) } @@ -6208,6 +7951,9 @@ public enum LogLevel { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeLogLevel: FfiConverterRustBuffer { typealias SwiftType = LogLevel @@ -6263,10 +8009,16 @@ public struct FfiConverterTypeLogLevel: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLogLevel_lift(_ buf: RustBuffer) throws -> LogLevel { return try FfiConverterTypeLogLevel.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeLogLevel_lower(_ value: LogLevel) -> RustBuffer { return FfiConverterTypeLogLevel.lower(value) } @@ -6289,6 +8041,9 @@ public enum MaxDustHtlcExposure { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeMaxDustHTLCExposure: FfiConverterRustBuffer { typealias SwiftType = MaxDustHtlcExposure @@ -6324,10 +8079,16 @@ public struct FfiConverterTypeMaxDustHTLCExposure: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeMaxDustHTLCExposure_lift(_ buf: RustBuffer) throws -> MaxDustHtlcExposure { return try FfiConverterTypeMaxDustHTLCExposure.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeMaxDustHTLCExposure_lower(_ value: MaxDustHtlcExposure) -> RustBuffer { return FfiConverterTypeMaxDustHTLCExposure.lower(value) } @@ -6338,64 +8099,6 @@ extension MaxDustHtlcExposure: Equatable, Hashable {} -// Note that we don't yet support `indirect` for enums. -// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
- -public enum MaxTotalRoutingFeeLimit { - - case none - case some(amountMsat: UInt64 - ) -} - - -public struct FfiConverterTypeMaxTotalRoutingFeeLimit: FfiConverterRustBuffer { - typealias SwiftType = MaxTotalRoutingFeeLimit - - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> MaxTotalRoutingFeeLimit { - let variant: Int32 = try readInt(&buf) - switch variant { - - case 1: return .none - - case 2: return .some(amountMsat: try FfiConverterUInt64.read(from: &buf) - ) - - default: throw UniffiInternalError.unexpectedEnumCase - } - } - - public static func write(_ value: MaxTotalRoutingFeeLimit, into buf: inout [UInt8]) { - switch value { - - - case .none: - writeInt(&buf, Int32(1)) - - - case let .some(amountMsat): - writeInt(&buf, Int32(2)) - FfiConverterUInt64.write(amountMsat, into: &buf) - - } - } -} - - -public func FfiConverterTypeMaxTotalRoutingFeeLimit_lift(_ buf: RustBuffer) throws -> MaxTotalRoutingFeeLimit { - return try FfiConverterTypeMaxTotalRoutingFeeLimit.lift(buf) -} - -public func FfiConverterTypeMaxTotalRoutingFeeLimit_lower(_ value: MaxTotalRoutingFeeLimit) -> RustBuffer { - return FfiConverterTypeMaxTotalRoutingFeeLimit.lower(value) -} - - - -extension MaxTotalRoutingFeeLimit: Equatable, Hashable {} - - - // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. @@ -6408,6 +8111,9 @@ public enum Network { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeNetwork: FfiConverterRustBuffer { typealias SwiftType = Network @@ -6451,10 +8157,16 @@ public struct FfiConverterTypeNetwork: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNetwork_lift(_ buf: RustBuffer) throws -> Network { return try FfiConverterTypeNetwork.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNetwork_lower(_ value: Network) -> RustBuffer { return FfiConverterTypeNetwork.lower(value) } @@ -6496,6 +8208,8 @@ public enum NodeError { case ChannelClosingFailed(message: String) + case ChannelSplicingFailed(message: String) + case ChannelConfigUpdateFailed(message: String) case PersistenceFailed(message: String) @@ -6574,9 +8288,16 @@ public enum NodeError { case LiquidityFeeTooHigh(message: String) + case InvalidBlindedPaths(message: String) + + case AsyncPaymentServicesDisabled(message: String) + } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { typealias SwiftType = NodeError @@ -6639,159 +8360,171 @@ public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { message: try FfiConverterString.read(from: &buf) ) - case 14: return .ChannelConfigUpdateFailed( + case 14: return .ChannelSplicingFailed( + message: try FfiConverterString.read(from: &buf) + ) + + case 15: return .ChannelConfigUpdateFailed( + message: try FfiConverterString.read(from: &buf) + ) + + case 16: return .PersistenceFailed( + message: try FfiConverterString.read(from: &buf) + ) + + case 17: return .FeerateEstimationUpdateFailed( message: try FfiConverterString.read(from: &buf) ) - case 15: return .PersistenceFailed( + case 18: return .FeerateEstimationUpdateTimeout( message: try FfiConverterString.read(from: &buf) ) - case 16: return .FeerateEstimationUpdateFailed( + case 19: return .WalletOperationFailed( message: try FfiConverterString.read(from: &buf) ) - case 17: return 
.FeerateEstimationUpdateTimeout( + case 20: return .WalletOperationTimeout( message: try FfiConverterString.read(from: &buf) ) - case 18: return .WalletOperationFailed( + case 21: return .OnchainTxSigningFailed( message: try FfiConverterString.read(from: &buf) ) - case 19: return .WalletOperationTimeout( + case 22: return .TxSyncFailed( message: try FfiConverterString.read(from: &buf) ) - case 20: return .OnchainTxSigningFailed( + case 23: return .TxSyncTimeout( message: try FfiConverterString.read(from: &buf) ) - case 21: return .TxSyncFailed( + case 24: return .GossipUpdateFailed( message: try FfiConverterString.read(from: &buf) ) - case 22: return .TxSyncTimeout( + case 25: return .GossipUpdateTimeout( message: try FfiConverterString.read(from: &buf) ) - case 23: return .GossipUpdateFailed( + case 26: return .LiquidityRequestFailed( message: try FfiConverterString.read(from: &buf) ) - case 24: return .GossipUpdateTimeout( + case 27: return .UriParameterParsingFailed( message: try FfiConverterString.read(from: &buf) ) - case 25: return .LiquidityRequestFailed( + case 28: return .InvalidAddress( message: try FfiConverterString.read(from: &buf) ) - case 26: return .UriParameterParsingFailed( + case 29: return .InvalidSocketAddress( message: try FfiConverterString.read(from: &buf) ) - case 27: return .InvalidAddress( + case 30: return .InvalidPublicKey( message: try FfiConverterString.read(from: &buf) ) - case 28: return .InvalidSocketAddress( + case 31: return .InvalidSecretKey( message: try FfiConverterString.read(from: &buf) ) - case 29: return .InvalidPublicKey( + case 32: return .InvalidOfferId( message: try FfiConverterString.read(from: &buf) ) - case 30: return .InvalidSecretKey( + case 33: return .InvalidNodeId( message: try FfiConverterString.read(from: &buf) ) - case 31: return .InvalidOfferId( + case 34: return .InvalidPaymentId( message: try FfiConverterString.read(from: &buf) ) - case 32: return .InvalidNodeId( + case 35: return .InvalidPaymentHash( message: try FfiConverterString.read(from: &buf) ) - case 33: return .InvalidPaymentId( + case 36: return .InvalidPaymentPreimage( message: try FfiConverterString.read(from: &buf) ) - case 34: return .InvalidPaymentHash( + case 37: return .InvalidPaymentSecret( message: try FfiConverterString.read(from: &buf) ) - case 35: return .InvalidPaymentPreimage( + case 38: return .InvalidAmount( message: try FfiConverterString.read(from: &buf) ) - case 36: return .InvalidPaymentSecret( + case 39: return .InvalidInvoice( message: try FfiConverterString.read(from: &buf) ) - case 37: return .InvalidAmount( + case 40: return .InvalidOffer( message: try FfiConverterString.read(from: &buf) ) - case 38: return .InvalidInvoice( + case 41: return .InvalidRefund( message: try FfiConverterString.read(from: &buf) ) - case 39: return .InvalidOffer( + case 42: return .InvalidChannelId( message: try FfiConverterString.read(from: &buf) ) - case 40: return .InvalidRefund( + case 43: return .InvalidNetwork( message: try FfiConverterString.read(from: &buf) ) - case 41: return .InvalidChannelId( + case 44: return .InvalidUri( message: try FfiConverterString.read(from: &buf) ) - case 42: return .InvalidNetwork( + case 45: return .InvalidQuantity( message: try FfiConverterString.read(from: &buf) ) - case 43: return .InvalidUri( + case 46: return .InvalidNodeAlias( message: try FfiConverterString.read(from: &buf) ) - case 44: return .InvalidQuantity( + case 47: return .InvalidDateTime( message: try FfiConverterString.read(from: &buf) ) - case 45: return 
.InvalidNodeAlias( + case 48: return .InvalidFeeRate( message: try FfiConverterString.read(from: &buf) ) - case 46: return .InvalidDateTime( + case 49: return .DuplicatePayment( message: try FfiConverterString.read(from: &buf) ) - case 47: return .InvalidFeeRate( + case 50: return .UnsupportedCurrency( message: try FfiConverterString.read(from: &buf) ) - case 48: return .DuplicatePayment( + case 51: return .InsufficientFunds( message: try FfiConverterString.read(from: &buf) ) - case 49: return .UnsupportedCurrency( + case 52: return .LiquiditySourceUnavailable( message: try FfiConverterString.read(from: &buf) ) - case 50: return .InsufficientFunds( + case 53: return .LiquidityFeeTooHigh( message: try FfiConverterString.read(from: &buf) ) - case 51: return .LiquiditySourceUnavailable( + case 54: return .InvalidBlindedPaths( message: try FfiConverterString.read(from: &buf) ) - case 52: return .LiquidityFeeTooHigh( + case 55: return .AsyncPaymentServicesDisabled( message: try FfiConverterString.read(from: &buf) ) @@ -6832,94 +8565,175 @@ public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { writeInt(&buf, Int32(12)) case .ChannelClosingFailed(_ /* message is ignored*/): writeInt(&buf, Int32(13)) - case .ChannelConfigUpdateFailed(_ /* message is ignored*/): + case .ChannelSplicingFailed(_ /* message is ignored*/): writeInt(&buf, Int32(14)) - case .PersistenceFailed(_ /* message is ignored*/): + case .ChannelConfigUpdateFailed(_ /* message is ignored*/): writeInt(&buf, Int32(15)) - case .FeerateEstimationUpdateFailed(_ /* message is ignored*/): + case .PersistenceFailed(_ /* message is ignored*/): writeInt(&buf, Int32(16)) - case .FeerateEstimationUpdateTimeout(_ /* message is ignored*/): + case .FeerateEstimationUpdateFailed(_ /* message is ignored*/): writeInt(&buf, Int32(17)) - case .WalletOperationFailed(_ /* message is ignored*/): + case .FeerateEstimationUpdateTimeout(_ /* message is ignored*/): writeInt(&buf, Int32(18)) - case .WalletOperationTimeout(_ /* message is ignored*/): + case .WalletOperationFailed(_ /* message is ignored*/): writeInt(&buf, Int32(19)) - case .OnchainTxSigningFailed(_ /* message is ignored*/): + case .WalletOperationTimeout(_ /* message is ignored*/): writeInt(&buf, Int32(20)) - case .TxSyncFailed(_ /* message is ignored*/): + case .OnchainTxSigningFailed(_ /* message is ignored*/): writeInt(&buf, Int32(21)) - case .TxSyncTimeout(_ /* message is ignored*/): + case .TxSyncFailed(_ /* message is ignored*/): writeInt(&buf, Int32(22)) - case .GossipUpdateFailed(_ /* message is ignored*/): + case .TxSyncTimeout(_ /* message is ignored*/): writeInt(&buf, Int32(23)) - case .GossipUpdateTimeout(_ /* message is ignored*/): + case .GossipUpdateFailed(_ /* message is ignored*/): writeInt(&buf, Int32(24)) - case .LiquidityRequestFailed(_ /* message is ignored*/): + case .GossipUpdateTimeout(_ /* message is ignored*/): writeInt(&buf, Int32(25)) - case .UriParameterParsingFailed(_ /* message is ignored*/): + case .LiquidityRequestFailed(_ /* message is ignored*/): writeInt(&buf, Int32(26)) - case .InvalidAddress(_ /* message is ignored*/): + case .UriParameterParsingFailed(_ /* message is ignored*/): writeInt(&buf, Int32(27)) - case .InvalidSocketAddress(_ /* message is ignored*/): + case .InvalidAddress(_ /* message is ignored*/): writeInt(&buf, Int32(28)) - case .InvalidPublicKey(_ /* message is ignored*/): + case .InvalidSocketAddress(_ /* message is ignored*/): writeInt(&buf, Int32(29)) - case .InvalidSecretKey(_ /* message is ignored*/): + case 
.InvalidPublicKey(_ /* message is ignored*/): writeInt(&buf, Int32(30)) - case .InvalidOfferId(_ /* message is ignored*/): + case .InvalidSecretKey(_ /* message is ignored*/): writeInt(&buf, Int32(31)) - case .InvalidNodeId(_ /* message is ignored*/): + case .InvalidOfferId(_ /* message is ignored*/): writeInt(&buf, Int32(32)) - case .InvalidPaymentId(_ /* message is ignored*/): + case .InvalidNodeId(_ /* message is ignored*/): writeInt(&buf, Int32(33)) - case .InvalidPaymentHash(_ /* message is ignored*/): + case .InvalidPaymentId(_ /* message is ignored*/): writeInt(&buf, Int32(34)) - case .InvalidPaymentPreimage(_ /* message is ignored*/): + case .InvalidPaymentHash(_ /* message is ignored*/): writeInt(&buf, Int32(35)) - case .InvalidPaymentSecret(_ /* message is ignored*/): + case .InvalidPaymentPreimage(_ /* message is ignored*/): writeInt(&buf, Int32(36)) - case .InvalidAmount(_ /* message is ignored*/): + case .InvalidPaymentSecret(_ /* message is ignored*/): writeInt(&buf, Int32(37)) - case .InvalidInvoice(_ /* message is ignored*/): + case .InvalidAmount(_ /* message is ignored*/): writeInt(&buf, Int32(38)) - case .InvalidOffer(_ /* message is ignored*/): + case .InvalidInvoice(_ /* message is ignored*/): writeInt(&buf, Int32(39)) - case .InvalidRefund(_ /* message is ignored*/): + case .InvalidOffer(_ /* message is ignored*/): writeInt(&buf, Int32(40)) - case .InvalidChannelId(_ /* message is ignored*/): + case .InvalidRefund(_ /* message is ignored*/): writeInt(&buf, Int32(41)) - case .InvalidNetwork(_ /* message is ignored*/): + case .InvalidChannelId(_ /* message is ignored*/): writeInt(&buf, Int32(42)) - case .InvalidUri(_ /* message is ignored*/): + case .InvalidNetwork(_ /* message is ignored*/): writeInt(&buf, Int32(43)) - case .InvalidQuantity(_ /* message is ignored*/): + case .InvalidUri(_ /* message is ignored*/): writeInt(&buf, Int32(44)) - case .InvalidNodeAlias(_ /* message is ignored*/): + case .InvalidQuantity(_ /* message is ignored*/): writeInt(&buf, Int32(45)) - case .InvalidDateTime(_ /* message is ignored*/): + case .InvalidNodeAlias(_ /* message is ignored*/): writeInt(&buf, Int32(46)) - case .InvalidFeeRate(_ /* message is ignored*/): + case .InvalidDateTime(_ /* message is ignored*/): writeInt(&buf, Int32(47)) - case .DuplicatePayment(_ /* message is ignored*/): + case .InvalidFeeRate(_ /* message is ignored*/): writeInt(&buf, Int32(48)) - case .UnsupportedCurrency(_ /* message is ignored*/): + case .DuplicatePayment(_ /* message is ignored*/): writeInt(&buf, Int32(49)) - case .InsufficientFunds(_ /* message is ignored*/): + case .UnsupportedCurrency(_ /* message is ignored*/): writeInt(&buf, Int32(50)) - case .LiquiditySourceUnavailable(_ /* message is ignored*/): + case .InsufficientFunds(_ /* message is ignored*/): writeInt(&buf, Int32(51)) - case .LiquidityFeeTooHigh(_ /* message is ignored*/): + case .LiquiditySourceUnavailable(_ /* message is ignored*/): writeInt(&buf, Int32(52)) + case .LiquidityFeeTooHigh(_ /* message is ignored*/): + writeInt(&buf, Int32(53)) + case .InvalidBlindedPaths(_ /* message is ignored*/): + writeInt(&buf, Int32(54)) + case .AsyncPaymentServicesDisabled(_ /* message is ignored*/): + writeInt(&buf, Int32(55)) + + + } + } +} + + +extension NodeError: Equatable, Hashable {} + +extension NodeError: Foundation.LocalizedError { + public var errorDescription: String? { + String(reflecting: self) + } +} + +// Note that we don't yet support `indirect` for enums. 
+// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + +public enum OfferAmount { + + case bitcoin(amountMsats: UInt64 + ) + case currency(iso4217Code: String, amount: UInt64 + ) +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeOfferAmount: FfiConverterRustBuffer { + typealias SwiftType = OfferAmount + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> OfferAmount { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .bitcoin(amountMsats: try FfiConverterUInt64.read(from: &buf) + ) + + case 2: return .currency(iso4217Code: try FfiConverterString.read(from: &buf), amount: try FfiConverterUInt64.read(from: &buf) + ) + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + public static func write(_ value: OfferAmount, into buf: inout [UInt8]) { + switch value { + + + case let .bitcoin(amountMsats): + writeInt(&buf, Int32(1)) + FfiConverterUInt64.write(amountMsats, into: &buf) + + case let .currency(iso4217Code,amount): + writeInt(&buf, Int32(2)) + FfiConverterString.write(iso4217Code, into: &buf) + FfiConverterUInt64.write(amount, into: &buf) + } } } -extension NodeError: Equatable, Hashable {} +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeOfferAmount_lift(_ buf: RustBuffer) throws -> OfferAmount { + return try FfiConverterTypeOfferAmount.lift(buf) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeOfferAmount_lower(_ value: OfferAmount) -> RustBuffer { + return FfiConverterTypeOfferAmount.lower(value) +} + + + +extension OfferAmount: Equatable, Hashable {} + -extension NodeError: Error { } // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
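Every enum converter in these hunks follows the same UniFFI wire scheme: a 1-based Int32 variant tag in declaration order, followed by each associated value written with that field's own converter. A minimal sketch of the round trip, using a hypothetical two-variant `Example` enum and the buffer helpers this file already defines (`writeInt`, `readInt`, `FfiConverterString`, `FfiConverterUInt64`, `UniffiInternalError`):

enum Example {
    case empty
    case labeled(name: String, count: UInt64)
}

func writeExample(_ value: Example, into buf: inout [UInt8]) {
    switch value {
    case .empty:
        // A payload-free variant writes only its 1-based tag.
        writeInt(&buf, Int32(1))
    case let .labeled(name, count):
        writeInt(&buf, Int32(2))
        // Associated values follow the tag, in declaration order.
        FfiConverterString.write(name, into: &buf)
        FfiConverterUInt64.write(count, into: &buf)
    }
}

func readExample(from buf: inout (data: Data, offset: Data.Index)) throws -> Example {
    let variant: Int32 = try readInt(&buf)
    switch variant {
    case 1: return .empty
    case 2: return .labeled(
        name: try FfiConverterString.read(from: &buf),
        count: try FfiConverterUInt64.read(from: &buf))
    // An unknown tag means the Swift and Rust sides disagree on the enum layout.
    default: throw UniffiInternalError.unexpectedEnumCase
    }
}

The tag ordering is also why the `NodeError` hunk above rewrites every `case N:` arm: inserting `ChannelSplicingFailed` as variant 14 shifts every later variant up by one, so the `read` and `write` sides must be regenerated together to stay consistent.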
@@ -6931,6 +8745,9 @@ public enum PaymentDirection { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePaymentDirection: FfiConverterRustBuffer { typealias SwiftType = PaymentDirection @@ -6962,10 +8779,16 @@ public struct FfiConverterTypePaymentDirection: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentDirection_lift(_ buf: RustBuffer) throws -> PaymentDirection { return try FfiConverterTypePaymentDirection.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentDirection_lower(_ value: PaymentDirection) -> RustBuffer { return FfiConverterTypePaymentDirection.lower(value) } @@ -6994,6 +8817,9 @@ public enum PaymentFailureReason { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePaymentFailureReason: FfiConverterRustBuffer { typealias SwiftType = PaymentFailureReason @@ -7073,10 +8899,16 @@ public struct FfiConverterTypePaymentFailureReason: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentFailureReason_lift(_ buf: RustBuffer) throws -> PaymentFailureReason { return try FfiConverterTypePaymentFailureReason.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentFailureReason_lower(_ value: PaymentFailureReason) -> RustBuffer { return FfiConverterTypePaymentFailureReason.lower(value) } @@ -7107,6 +8939,9 @@ public enum PaymentKind { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePaymentKind: FfiConverterRustBuffer { typealias SwiftType = PaymentKind @@ -7191,10 +9026,16 @@ public struct FfiConverterTypePaymentKind: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentKind_lift(_ buf: RustBuffer) throws -> PaymentKind { return try FfiConverterTypePaymentKind.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentKind_lower(_ value: PaymentKind) -> RustBuffer { return FfiConverterTypePaymentKind.lower(value) } @@ -7205,68 +9046,6 @@ extension PaymentKind: Equatable, Hashable {} -// Note that we don't yet support `indirect` for enums. -// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
- -public enum PaymentState { - - case expectPayment - case paid - case refunded -} - - -public struct FfiConverterTypePaymentState: FfiConverterRustBuffer { - typealias SwiftType = PaymentState - - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> PaymentState { - let variant: Int32 = try readInt(&buf) - switch variant { - - case 1: return .expectPayment - - case 2: return .paid - - case 3: return .refunded - - default: throw UniffiInternalError.unexpectedEnumCase - } - } - - public static func write(_ value: PaymentState, into buf: inout [UInt8]) { - switch value { - - - case .expectPayment: - writeInt(&buf, Int32(1)) - - - case .paid: - writeInt(&buf, Int32(2)) - - - case .refunded: - writeInt(&buf, Int32(3)) - - } - } -} - - -public func FfiConverterTypePaymentState_lift(_ buf: RustBuffer) throws -> PaymentState { - return try FfiConverterTypePaymentState.lift(buf) -} - -public func FfiConverterTypePaymentState_lower(_ value: PaymentState) -> RustBuffer { - return FfiConverterTypePaymentState.lower(value) -} - - - -extension PaymentState: Equatable, Hashable {} - - - // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. @@ -7278,6 +9057,9 @@ public enum PaymentStatus { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePaymentStatus: FfiConverterRustBuffer { typealias SwiftType = PaymentStatus @@ -7315,10 +9097,16 @@ public struct FfiConverterTypePaymentStatus: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentStatus_lift(_ buf: RustBuffer) throws -> PaymentStatus { return try FfiConverterTypePaymentStatus.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentStatus_lower(_ value: PaymentStatus) -> RustBuffer { return FfiConverterTypePaymentStatus.lower(value) } @@ -7343,6 +9131,9 @@ public enum PendingSweepBalance { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePendingSweepBalance: FfiConverterRustBuffer { typealias SwiftType = PendingSweepBalance @@ -7394,10 +9185,16 @@ public struct FfiConverterTypePendingSweepBalance: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePendingSweepBalance_lift(_ buf: RustBuffer) throws -> PendingSweepBalance { return try FfiConverterTypePendingSweepBalance.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePendingSweepBalance_lower(_ value: PendingSweepBalance) -> RustBuffer { return FfiConverterTypePendingSweepBalance.lower(value) } @@ -7422,6 +9219,9 @@ public enum QrPaymentResult { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeQrPaymentResult: FfiConverterRustBuffer { typealias SwiftType = QrPaymentResult @@ -7465,10 +9265,16 @@ public struct FfiConverterTypeQrPaymentResult: FfiConverterRustBuffer { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeQrPaymentResult_lift(_ buf: RustBuffer) throws -> QrPaymentResult { return try FfiConverterTypeQrPaymentResult.lift(buf) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeQrPaymentResult_lower(_ value: QrPaymentResult) -> RustBuffer { return FfiConverterTypeQrPaymentResult.lower(value) } @@ -7495,6 +9301,9 @@ 
public enum VssHeaderProviderError { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeVssHeaderProviderError: FfiConverterRustBuffer { typealias SwiftType = VssHeaderProviderError @@ -7549,10 +9358,102 @@ public struct FfiConverterTypeVssHeaderProviderError: FfiConverterRustBuffer { extension VssHeaderProviderError: Equatable, Hashable {} -extension VssHeaderProviderError: Error { } +extension VssHeaderProviderError: Foundation.LocalizedError { + public var errorDescription: String? { + String(reflecting: self) + } +} + +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + +public enum WordCount { + + case words12 + case words15 + case words18 + case words21 + case words24 +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeWordCount: FfiConverterRustBuffer { + typealias SwiftType = WordCount + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> WordCount { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .words12 + + case 2: return .words15 + + case 3: return .words18 + + case 4: return .words21 + + case 5: return .words24 + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: WordCount, into buf: inout [UInt8]) { + switch value { + + + case .words12: + writeInt(&buf, Int32(1)) + + + case .words15: + writeInt(&buf, Int32(2)) + + + case .words18: + writeInt(&buf, Int32(3)) + + + case .words21: + writeInt(&buf, Int32(4)) + + + case .words24: + writeInt(&buf, Int32(5)) + + } + } +} + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeWordCount_lift(_ buf: RustBuffer) throws -> WordCount { + return try FfiConverterTypeWordCount.lift(buf) +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeWordCount_lower(_ value: WordCount) -> RustBuffer { + return FfiConverterTypeWordCount.lower(value) +} + + + +extension WordCount: Equatable, Hashable {} -fileprivate struct FfiConverterOptionUInt8: FfiConverterRustBuffer { - typealias SwiftType = UInt8? + + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionUInt16: FfiConverterRustBuffer { + typealias SwiftType = UInt16? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7560,20 +9461,23 @@ fileprivate struct FfiConverterOptionUInt8: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterUInt8.write(value, into: &buf) + FfiConverterUInt16.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterUInt8.read(from: &buf) + case 1: return try FfiConverterUInt16.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionUInt16: FfiConverterRustBuffer { - typealias SwiftType = UInt16? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionUInt32: FfiConverterRustBuffer { + typealias SwiftType = UInt32? 
public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7581,20 +9485,119 @@ fileprivate struct FfiConverterOptionUInt16: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterUInt16.write(value, into: &buf) + FfiConverterUInt32.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterUInt16.read(from: &buf) + case 1: return try FfiConverterUInt32.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionUInt32: FfiConverterRustBuffer { - typealias SwiftType = UInt32? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionUInt64: FfiConverterRustBuffer { + typealias SwiftType = UInt64? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterUInt64.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterUInt64.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionBool: FfiConverterRustBuffer { + typealias SwiftType = Bool? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterBool.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterBool.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionString: FfiConverterRustBuffer { + typealias SwiftType = String? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterString.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterString.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeFeeRate: FfiConverterRustBuffer { + typealias SwiftType = FeeRate? 
+ + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeFeeRate.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeFeeRate.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeAnchorChannelsConfig: FfiConverterRustBuffer { + typealias SwiftType = AnchorChannelsConfig? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7602,20 +9605,23 @@ fileprivate struct FfiConverterOptionUInt32: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterUInt32.write(value, into: &buf) + FfiConverterTypeAnchorChannelsConfig.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterUInt32.read(from: &buf) + case 1: return try FfiConverterTypeAnchorChannelsConfig.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionUInt64: FfiConverterRustBuffer { - typealias SwiftType = UInt64? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeBackgroundSyncConfig: FfiConverterRustBuffer { + typealias SwiftType = BackgroundSyncConfig? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7623,20 +9629,23 @@ fileprivate struct FfiConverterOptionUInt64: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterUInt64.write(value, into: &buf) + FfiConverterTypeBackgroundSyncConfig.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterUInt64.read(from: &buf) + case 1: return try FfiConverterTypeBackgroundSyncConfig.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionBool: FfiConverterRustBuffer { - typealias SwiftType = Bool? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeChannelConfig: FfiConverterRustBuffer { + typealias SwiftType = ChannelConfig? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7644,20 +9653,23 @@ fileprivate struct FfiConverterOptionBool: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterBool.write(value, into: &buf) + FfiConverterTypeChannelConfig.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterBool.read(from: &buf) + case 1: return try FfiConverterTypeChannelConfig.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionString: FfiConverterRustBuffer { - typealias SwiftType = String? 
+#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeChannelInfo: FfiConverterRustBuffer { + typealias SwiftType = ChannelInfo? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7665,20 +9677,23 @@ fileprivate struct FfiConverterOptionString: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterString.write(value, into: &buf) + FfiConverterTypeChannelInfo.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterString.read(from: &buf) + case 1: return try FfiConverterTypeChannelInfo.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeFeeRate: FfiConverterRustBuffer { - typealias SwiftType = FeeRate? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeChannelUpdateInfo: FfiConverterRustBuffer { + typealias SwiftType = ChannelUpdateInfo? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7686,20 +9701,23 @@ fileprivate struct FfiConverterOptionTypeFeeRate: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTypeFeeRate.write(value, into: &buf) + FfiConverterTypeChannelUpdateInfo.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeFeeRate.read(from: &buf) + case 1: return try FfiConverterTypeChannelUpdateInfo.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeAnchorChannelsConfig: FfiConverterRustBuffer { - typealias SwiftType = AnchorChannelsConfig? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeElectrumSyncConfig: FfiConverterRustBuffer { + typealias SwiftType = ElectrumSyncConfig? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7707,20 +9725,23 @@ fileprivate struct FfiConverterOptionTypeAnchorChannelsConfig: FfiConverterRustB return } writeInt(&buf, Int8(1)) - FfiConverterTypeAnchorChannelsConfig.write(value, into: &buf) + FfiConverterTypeElectrumSyncConfig.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeAnchorChannelsConfig.read(from: &buf) + case 1: return try FfiConverterTypeElectrumSyncConfig.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeBackgroundSyncConfig: FfiConverterRustBuffer { - typealias SwiftType = BackgroundSyncConfig? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeEsploraSyncConfig: FfiConverterRustBuffer { + typealias SwiftType = EsploraSyncConfig? 
public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7728,20 +9749,23 @@ fileprivate struct FfiConverterOptionTypeBackgroundSyncConfig: FfiConverterRustB return } writeInt(&buf, Int8(1)) - FfiConverterTypeBackgroundSyncConfig.write(value, into: &buf) + FfiConverterTypeEsploraSyncConfig.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeBackgroundSyncConfig.read(from: &buf) + case 1: return try FfiConverterTypeEsploraSyncConfig.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeBolt11PaymentInfo: FfiConverterRustBuffer { - typealias SwiftType = Bolt11PaymentInfo? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeLSPS1Bolt11PaymentInfo: FfiConverterRustBuffer { + typealias SwiftType = Lsps1Bolt11PaymentInfo? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7749,20 +9773,23 @@ fileprivate struct FfiConverterOptionTypeBolt11PaymentInfo: FfiConverterRustBuff return } writeInt(&buf, Int8(1)) - FfiConverterTypeBolt11PaymentInfo.write(value, into: &buf) + FfiConverterTypeLSPS1Bolt11PaymentInfo.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeBolt11PaymentInfo.read(from: &buf) + case 1: return try FfiConverterTypeLSPS1Bolt11PaymentInfo.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeChannelConfig: FfiConverterRustBuffer { - typealias SwiftType = ChannelConfig? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeLSPS1ChannelInfo: FfiConverterRustBuffer { + typealias SwiftType = Lsps1ChannelInfo? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7770,20 +9797,23 @@ fileprivate struct FfiConverterOptionTypeChannelConfig: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTypeChannelConfig.write(value, into: &buf) + FfiConverterTypeLSPS1ChannelInfo.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeChannelConfig.read(from: &buf) + case 1: return try FfiConverterTypeLSPS1ChannelInfo.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeChannelInfo: FfiConverterRustBuffer { - typealias SwiftType = ChannelInfo? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeLSPS1OnchainPaymentInfo: FfiConverterRustBuffer { + typealias SwiftType = Lsps1OnchainPaymentInfo? 
public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7791,20 +9821,23 @@ fileprivate struct FfiConverterOptionTypeChannelInfo: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTypeChannelInfo.write(value, into: &buf) + FfiConverterTypeLSPS1OnchainPaymentInfo.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeChannelInfo.read(from: &buf) + case 1: return try FfiConverterTypeLSPS1OnchainPaymentInfo.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeChannelOrderInfo: FfiConverterRustBuffer { - typealias SwiftType = ChannelOrderInfo? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeNodeAnnouncementInfo: FfiConverterRustBuffer { + typealias SwiftType = NodeAnnouncementInfo? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7812,20 +9845,23 @@ fileprivate struct FfiConverterOptionTypeChannelOrderInfo: FfiConverterRustBuffe return } writeInt(&buf, Int8(1)) - FfiConverterTypeChannelOrderInfo.write(value, into: &buf) + FfiConverterTypeNodeAnnouncementInfo.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeChannelOrderInfo.read(from: &buf) + case 1: return try FfiConverterTypeNodeAnnouncementInfo.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeChannelUpdateInfo: FfiConverterRustBuffer { - typealias SwiftType = ChannelUpdateInfo? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeNodeInfo: FfiConverterRustBuffer { + typealias SwiftType = NodeInfo? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7833,20 +9869,23 @@ fileprivate struct FfiConverterOptionTypeChannelUpdateInfo: FfiConverterRustBuff return } writeInt(&buf, Int8(1)) - FfiConverterTypeChannelUpdateInfo.write(value, into: &buf) + FfiConverterTypeNodeInfo.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeChannelUpdateInfo.read(from: &buf) + case 1: return try FfiConverterTypeNodeInfo.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeElectrumSyncConfig: FfiConverterRustBuffer { - typealias SwiftType = ElectrumSyncConfig? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeOutPoint: FfiConverterRustBuffer { + typealias SwiftType = OutPoint? 
public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7854,20 +9893,23 @@ fileprivate struct FfiConverterOptionTypeElectrumSyncConfig: FfiConverterRustBuf return } writeInt(&buf, Int8(1)) - FfiConverterTypeElectrumSyncConfig.write(value, into: &buf) + FfiConverterTypeOutPoint.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeElectrumSyncConfig.read(from: &buf) + case 1: return try FfiConverterTypeOutPoint.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeEsploraSyncConfig: FfiConverterRustBuffer { - typealias SwiftType = EsploraSyncConfig? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypePaymentDetails: FfiConverterRustBuffer { + typealias SwiftType = PaymentDetails? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7875,20 +9917,23 @@ fileprivate struct FfiConverterOptionTypeEsploraSyncConfig: FfiConverterRustBuff return } writeInt(&buf, Int8(1)) - FfiConverterTypeEsploraSyncConfig.write(value, into: &buf) + FfiConverterTypePaymentDetails.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeEsploraSyncConfig.read(from: &buf) + case 1: return try FfiConverterTypePaymentDetails.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeNodeAnnouncementInfo: FfiConverterRustBuffer { - typealias SwiftType = NodeAnnouncementInfo? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeRouteParametersConfig: FfiConverterRustBuffer { + typealias SwiftType = RouteParametersConfig? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7896,20 +9941,23 @@ fileprivate struct FfiConverterOptionTypeNodeAnnouncementInfo: FfiConverterRustB return } writeInt(&buf, Int8(1)) - FfiConverterTypeNodeAnnouncementInfo.write(value, into: &buf) + FfiConverterTypeRouteParametersConfig.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeNodeAnnouncementInfo.read(from: &buf) + case 1: return try FfiConverterTypeRouteParametersConfig.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeNodeInfo: FfiConverterRustBuffer { - typealias SwiftType = NodeInfo? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeAsyncPaymentsRole: FfiConverterRustBuffer { + typealias SwiftType = AsyncPaymentsRole? 
public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7917,20 +9965,23 @@ fileprivate struct FfiConverterOptionTypeNodeInfo: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTypeNodeInfo.write(value, into: &buf) + FfiConverterTypeAsyncPaymentsRole.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeNodeInfo.read(from: &buf) + case 1: return try FfiConverterTypeAsyncPaymentsRole.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeOnchainPaymentInfo: FfiConverterRustBuffer { - typealias SwiftType = OnchainPaymentInfo? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeClosureReason: FfiConverterRustBuffer { + typealias SwiftType = ClosureReason? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7938,20 +9989,23 @@ fileprivate struct FfiConverterOptionTypeOnchainPaymentInfo: FfiConverterRustBuf return } writeInt(&buf, Int8(1)) - FfiConverterTypeOnchainPaymentInfo.write(value, into: &buf) + FfiConverterTypeClosureReason.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeOnchainPaymentInfo.read(from: &buf) + case 1: return try FfiConverterTypeClosureReason.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeOutPoint: FfiConverterRustBuffer { - typealias SwiftType = OutPoint? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeEvent: FfiConverterRustBuffer { + typealias SwiftType = Event? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7959,20 +10013,23 @@ fileprivate struct FfiConverterOptionTypeOutPoint: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTypeOutPoint.write(value, into: &buf) + FfiConverterTypeEvent.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeOutPoint.read(from: &buf) + case 1: return try FfiConverterTypeEvent.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypePaymentDetails: FfiConverterRustBuffer { - typealias SwiftType = PaymentDetails? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeLogLevel: FfiConverterRustBuffer { + typealias SwiftType = LogLevel? 
public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -7980,20 +10037,23 @@ fileprivate struct FfiConverterOptionTypePaymentDetails: FfiConverterRustBuffer return } writeInt(&buf, Int8(1)) - FfiConverterTypePaymentDetails.write(value, into: &buf) + FfiConverterTypeLogLevel.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypePaymentDetails.read(from: &buf) + case 1: return try FfiConverterTypeLogLevel.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeSendingParameters: FfiConverterRustBuffer { - typealias SwiftType = SendingParameters? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeNetwork: FfiConverterRustBuffer { + typealias SwiftType = Network? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -8001,20 +10061,23 @@ fileprivate struct FfiConverterOptionTypeSendingParameters: FfiConverterRustBuff return } writeInt(&buf, Int8(1)) - FfiConverterTypeSendingParameters.write(value, into: &buf) + FfiConverterTypeNetwork.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeSendingParameters.read(from: &buf) + case 1: return try FfiConverterTypeNetwork.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeClosureReason: FfiConverterRustBuffer { - typealias SwiftType = ClosureReason? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeOfferAmount: FfiConverterRustBuffer { + typealias SwiftType = OfferAmount? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -8022,20 +10085,23 @@ fileprivate struct FfiConverterOptionTypeClosureReason: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTypeClosureReason.write(value, into: &buf) + FfiConverterTypeOfferAmount.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeClosureReason.read(from: &buf) + case 1: return try FfiConverterTypeOfferAmount.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeEvent: FfiConverterRustBuffer { - typealias SwiftType = Event? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypePaymentFailureReason: FfiConverterRustBuffer { + typealias SwiftType = PaymentFailureReason? 
public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -8043,20 +10109,23 @@ fileprivate struct FfiConverterOptionTypeEvent: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTypeEvent.write(value, into: &buf) + FfiConverterTypePaymentFailureReason.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeEvent.read(from: &buf) + case 1: return try FfiConverterTypePaymentFailureReason.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeLogLevel: FfiConverterRustBuffer { - typealias SwiftType = LogLevel? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionTypeWordCount: FfiConverterRustBuffer { + typealias SwiftType = WordCount? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -8064,20 +10133,23 @@ fileprivate struct FfiConverterOptionTypeLogLevel: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTypeLogLevel.write(value, into: &buf) + FfiConverterTypeWordCount.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeLogLevel.read(from: &buf) + case 1: return try FfiConverterTypeWordCount.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeMaxTotalRoutingFeeLimit: FfiConverterRustBuffer { - typealias SwiftType = MaxTotalRoutingFeeLimit? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionSequenceUInt8: FfiConverterRustBuffer { + typealias SwiftType = [UInt8]? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -8085,20 +10157,23 @@ fileprivate struct FfiConverterOptionTypeMaxTotalRoutingFeeLimit: FfiConverterRu return } writeInt(&buf, Int8(1)) - FfiConverterTypeMaxTotalRoutingFeeLimit.write(value, into: &buf) + FfiConverterSequenceUInt8.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeMaxTotalRoutingFeeLimit.read(from: &buf) + case 1: return try FfiConverterSequenceUInt8.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypePaymentFailureReason: FfiConverterRustBuffer { - typealias SwiftType = PaymentFailureReason? +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterOptionSequenceSequenceUInt8: FfiConverterRustBuffer { + typealias SwiftType = [[UInt8]]? 
public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -8106,18 +10181,21 @@ fileprivate struct FfiConverterOptionTypePaymentFailureReason: FfiConverterRustB return } writeInt(&buf, Int8(1)) - FfiConverterTypePaymentFailureReason.write(value, into: &buf) + FfiConverterSequenceSequenceUInt8.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypePaymentFailureReason.read(from: &buf) + case 1: return try FfiConverterSequenceSequenceUInt8.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionSequenceTypeSocketAddress: FfiConverterRustBuffer { typealias SwiftType = [SocketAddress]? @@ -8139,6 +10217,9 @@ fileprivate struct FfiConverterOptionSequenceTypeSocketAddress: FfiConverterRust } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionTypeAddress: FfiConverterRustBuffer { typealias SwiftType = Address? @@ -8160,6 +10241,9 @@ fileprivate struct FfiConverterOptionTypeAddress: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionTypeChannelId: FfiConverterRustBuffer { typealias SwiftType = ChannelId? @@ -8181,6 +10265,9 @@ fileprivate struct FfiConverterOptionTypeChannelId: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionTypeNodeAlias: FfiConverterRustBuffer { typealias SwiftType = NodeAlias? @@ -8202,6 +10289,9 @@ fileprivate struct FfiConverterOptionTypeNodeAlias: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionTypePaymentHash: FfiConverterRustBuffer { typealias SwiftType = PaymentHash? @@ -8223,6 +10313,9 @@ fileprivate struct FfiConverterOptionTypePaymentHash: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionTypePaymentId: FfiConverterRustBuffer { typealias SwiftType = PaymentId? @@ -8244,6 +10337,9 @@ fileprivate struct FfiConverterOptionTypePaymentId: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionTypePaymentPreimage: FfiConverterRustBuffer { typealias SwiftType = PaymentPreimage? @@ -8265,6 +10361,9 @@ fileprivate struct FfiConverterOptionTypePaymentPreimage: FfiConverterRustBuffer } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionTypePaymentSecret: FfiConverterRustBuffer { typealias SwiftType = PaymentSecret? @@ -8286,6 +10385,9 @@ fileprivate struct FfiConverterOptionTypePaymentSecret: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionTypePublicKey: FfiConverterRustBuffer { typealias SwiftType = PublicKey? @@ -8307,6 +10409,9 @@ fileprivate struct FfiConverterOptionTypePublicKey: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionTypeUntrustedString: FfiConverterRustBuffer { typealias SwiftType = UntrustedString? 
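These option and sequence converters compose mechanically, which is how the new `[[UInt8]]?` support above falls out of existing pieces: the option layer writes an Int8 presence flag (0 for nil, 1 followed by the payload), and each sequence layer writes an Int32 element count and then delegates every element to the inner converter. A sketch of the resulting byte layout, assuming the big-endian integer helpers UniFFI generates earlier in this file:

var buf = [UInt8]()
FfiConverterOptionSequenceSequenceUInt8.write([[0xAA], []], into: &buf)
// buf now holds:
//   01             Int8 presence flag: value is non-nil
//   00 00 00 02    Int32 outer count: two inner lists
//   00 00 00 01    Int32 inner count: one element follows
//   AA             the single UInt8 element
//   00 00 00 00    Int32 inner count: empty list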
@@ -8328,6 +10433,9 @@ fileprivate struct FfiConverterOptionTypeUntrustedString: FfiConverterRustBuffer } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterOptionTypeUserChannelId: FfiConverterRustBuffer { typealias SwiftType = UserChannelId? @@ -8349,6 +10457,9 @@ fileprivate struct FfiConverterOptionTypeUserChannelId: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceUInt8: FfiConverterRustBuffer { typealias SwiftType = [UInt8] @@ -8371,6 +10482,9 @@ fileprivate struct FfiConverterSequenceUInt8: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceUInt64: FfiConverterRustBuffer { typealias SwiftType = [UInt64] @@ -8393,6 +10507,9 @@ fileprivate struct FfiConverterSequenceUInt64: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypeChannelDetails: FfiConverterRustBuffer { typealias SwiftType = [ChannelDetails] @@ -8415,6 +10532,9 @@ fileprivate struct FfiConverterSequenceTypeChannelDetails: FfiConverterRustBuffe } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypeCustomTlvRecord: FfiConverterRustBuffer { typealias SwiftType = [CustomTlvRecord] @@ -8437,6 +10557,9 @@ fileprivate struct FfiConverterSequenceTypeCustomTlvRecord: FfiConverterRustBuff } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypePaymentDetails: FfiConverterRustBuffer { typealias SwiftType = [PaymentDetails] @@ -8459,6 +10582,9 @@ fileprivate struct FfiConverterSequenceTypePaymentDetails: FfiConverterRustBuffe } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypePeerDetails: FfiConverterRustBuffer { typealias SwiftType = [PeerDetails] @@ -8481,6 +10607,9 @@ fileprivate struct FfiConverterSequenceTypePeerDetails: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypeRouteHintHop: FfiConverterRustBuffer { typealias SwiftType = [RouteHintHop] @@ -8503,6 +10632,9 @@ fileprivate struct FfiConverterSequenceTypeRouteHintHop: FfiConverterRustBuffer } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypeLightningBalance: FfiConverterRustBuffer { typealias SwiftType = [LightningBalance] @@ -8525,6 +10657,34 @@ fileprivate struct FfiConverterSequenceTypeLightningBalance: FfiConverterRustBuf } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterSequenceTypeNetwork: FfiConverterRustBuffer { + typealias SwiftType = [Network] + + public static func write(_ value: [Network], into buf: inout [UInt8]) { + let len = Int32(value.count) + writeInt(&buf, len) + for item in value { + FfiConverterTypeNetwork.write(item, into: &buf) + } + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> [Network] { + let len: Int32 = try readInt(&buf) + var seq = [Network]() + seq.reserveCapacity(Int(len)) + for _ in 0 ..< len { + seq.append(try FfiConverterTypeNetwork.read(from: &buf)) + } + return seq + } +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypePendingSweepBalance: FfiConverterRustBuffer { typealias SwiftType = 
[PendingSweepBalance] @@ -8547,6 +10707,34 @@ fileprivate struct FfiConverterSequenceTypePendingSweepBalance: FfiConverterRust } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +fileprivate struct FfiConverterSequenceSequenceUInt8: FfiConverterRustBuffer { + typealias SwiftType = [[UInt8]] + + public static func write(_ value: [[UInt8]], into buf: inout [UInt8]) { + let len = Int32(value.count) + writeInt(&buf, len) + for item in value { + FfiConverterSequenceUInt8.write(item, into: &buf) + } + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> [[UInt8]] { + let len: Int32 = try readInt(&buf) + var seq = [[UInt8]]() + seq.reserveCapacity(Int(len)) + for _ in 0 ..< len { + seq.append(try FfiConverterSequenceUInt8.read(from: &buf)) + } + return seq + } +} + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceSequenceTypeRouteHintHop: FfiConverterRustBuffer { typealias SwiftType = [[RouteHintHop]] @@ -8569,6 +10757,9 @@ fileprivate struct FfiConverterSequenceSequenceTypeRouteHintHop: FfiConverterRus } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypeAddress: FfiConverterRustBuffer { typealias SwiftType = [Address] @@ -8591,6 +10782,9 @@ fileprivate struct FfiConverterSequenceTypeAddress: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypeNodeId: FfiConverterRustBuffer { typealias SwiftType = [NodeId] @@ -8613,6 +10807,9 @@ fileprivate struct FfiConverterSequenceTypeNodeId: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypePublicKey: FfiConverterRustBuffer { typealias SwiftType = [PublicKey] @@ -8635,6 +10832,9 @@ fileprivate struct FfiConverterSequenceTypePublicKey: FfiConverterRustBuffer { } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterSequenceTypeSocketAddress: FfiConverterRustBuffer { typealias SwiftType = [SocketAddress] @@ -8657,6 +10857,9 @@ fileprivate struct FfiConverterSequenceTypeSocketAddress: FfiConverterRustBuffer } } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif fileprivate struct FfiConverterDictionaryStringString: FfiConverterRustBuffer { public static func write(_ value: [String: String], into buf: inout [UInt8]) { let len = Int32(value.count) @@ -8686,6 +10889,10 @@ fileprivate struct FfiConverterDictionaryStringString: FfiConverterRustBuffer { * is needed because the UDL type name is used in function/method signatures. 
*/ public typealias Address = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeAddress: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Address { return try FfiConverterString.read(from: &buf) @@ -8705,10 +10912,16 @@ public struct FfiConverterTypeAddress: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeAddress_lift(_ value: RustBuffer) throws -> Address { return try FfiConverterTypeAddress.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeAddress_lower(_ value: Address) -> RustBuffer { return FfiConverterTypeAddress.lower(value) } @@ -8720,6 +10933,10 @@ public func FfiConverterTypeAddress_lower(_ value: Address) -> RustBuffer { * is needed because the UDL type name is used in function/method signatures. */ public typealias BlockHash = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeBlockHash: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> BlockHash { return try FfiConverterString.read(from: &buf) @@ -8739,10 +10956,16 @@ public struct FfiConverterTypeBlockHash: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBlockHash_lift(_ value: RustBuffer) throws -> BlockHash { return try FfiConverterTypeBlockHash.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeBlockHash_lower(_ value: BlockHash) -> RustBuffer { return FfiConverterTypeBlockHash.lower(value) } @@ -8753,32 +10976,42 @@ public func FfiConverterTypeBlockHash_lower(_ value: BlockHash) -> RustBuffer { * Typealias from the type name used in the UDL file to the builtin type. This * is needed because the UDL type name is used in function/method signatures. 
*/ -public typealias Bolt12Invoice = String -public struct FfiConverterTypeBolt12Invoice: FfiConverter { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Bolt12Invoice { +public typealias ChannelId = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeChannelId: FfiConverter { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelId { return try FfiConverterString.read(from: &buf) } - public static func write(_ value: Bolt12Invoice, into buf: inout [UInt8]) { + public static func write(_ value: ChannelId, into buf: inout [UInt8]) { return FfiConverterString.write(value, into: &buf) } - public static func lift(_ value: RustBuffer) throws -> Bolt12Invoice { + public static func lift(_ value: RustBuffer) throws -> ChannelId { return try FfiConverterString.lift(value) } - public static func lower(_ value: Bolt12Invoice) -> RustBuffer { + public static func lower(_ value: ChannelId) -> RustBuffer { return FfiConverterString.lower(value) } } -public func FfiConverterTypeBolt12Invoice_lift(_ value: RustBuffer) throws -> Bolt12Invoice { - return try FfiConverterTypeBolt12Invoice.lift(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeChannelId_lift(_ value: RustBuffer) throws -> ChannelId { + return try FfiConverterTypeChannelId.lift(value) } -public func FfiConverterTypeBolt12Invoice_lower(_ value: Bolt12Invoice) -> RustBuffer { - return FfiConverterTypeBolt12Invoice.lower(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeChannelId_lower(_ value: ChannelId) -> RustBuffer { + return FfiConverterTypeChannelId.lower(value) } @@ -8787,32 +11020,42 @@ public func FfiConverterTypeBolt12Invoice_lower(_ value: Bolt12Invoice) -> RustB * Typealias from the type name used in the UDL file to the builtin type. This * is needed because the UDL type name is used in function/method signatures. 
*/ -public typealias ChannelId = String -public struct FfiConverterTypeChannelId: FfiConverter { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelId { +public typealias Lsps1OrderId = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeLSPS1OrderId: FfiConverter { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Lsps1OrderId { return try FfiConverterString.read(from: &buf) } - public static func write(_ value: ChannelId, into buf: inout [UInt8]) { + public static func write(_ value: Lsps1OrderId, into buf: inout [UInt8]) { return FfiConverterString.write(value, into: &buf) } - public static func lift(_ value: RustBuffer) throws -> ChannelId { + public static func lift(_ value: RustBuffer) throws -> Lsps1OrderId { return try FfiConverterString.lift(value) } - public static func lower(_ value: ChannelId) -> RustBuffer { + public static func lower(_ value: Lsps1OrderId) -> RustBuffer { return FfiConverterString.lower(value) } } - -public func FfiConverterTypeChannelId_lift(_ value: RustBuffer) throws -> ChannelId { - return try FfiConverterTypeChannelId.lift(value) + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1OrderId_lift(_ value: RustBuffer) throws -> Lsps1OrderId { + return try FfiConverterTypeLSPS1OrderId.lift(value) } -public func FfiConverterTypeChannelId_lower(_ value: ChannelId) -> RustBuffer { - return FfiConverterTypeChannelId.lower(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPS1OrderId_lower(_ value: Lsps1OrderId) -> RustBuffer { + return FfiConverterTypeLSPS1OrderId.lower(value) } @@ -8821,32 +11064,42 @@ public func FfiConverterTypeChannelId_lower(_ value: ChannelId) -> RustBuffer { * Typealias from the type name used in the UDL file to the builtin type. This * is needed because the UDL type name is used in function/method signatures. 
*/ -public typealias DateTime = String -public struct FfiConverterTypeDateTime: FfiConverter { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> DateTime { +public typealias LspsDateTime = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public struct FfiConverterTypeLSPSDateTime: FfiConverter { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> LspsDateTime { return try FfiConverterString.read(from: &buf) } - public static func write(_ value: DateTime, into buf: inout [UInt8]) { + public static func write(_ value: LspsDateTime, into buf: inout [UInt8]) { return FfiConverterString.write(value, into: &buf) } - public static func lift(_ value: RustBuffer) throws -> DateTime { + public static func lift(_ value: RustBuffer) throws -> LspsDateTime { return try FfiConverterString.lift(value) } - public static func lower(_ value: DateTime) -> RustBuffer { + public static func lower(_ value: LspsDateTime) -> RustBuffer { return FfiConverterString.lower(value) } } -public func FfiConverterTypeDateTime_lift(_ value: RustBuffer) throws -> DateTime { - return try FfiConverterTypeDateTime.lift(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPSDateTime_lift(_ value: RustBuffer) throws -> LspsDateTime { + return try FfiConverterTypeLSPSDateTime.lift(value) } -public func FfiConverterTypeDateTime_lower(_ value: DateTime) -> RustBuffer { - return FfiConverterTypeDateTime.lower(value) +#if swift(>=5.8) +@_documentation(visibility: private) +#endif +public func FfiConverterTypeLSPSDateTime_lower(_ value: LspsDateTime) -> RustBuffer { + return FfiConverterTypeLSPSDateTime.lower(value) } @@ -8856,6 +11109,10 @@ public func FfiConverterTypeDateTime_lower(_ value: DateTime) -> RustBuffer { * is needed because the UDL type name is used in function/method signatures. */ public typealias Mnemonic = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeMnemonic: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Mnemonic { return try FfiConverterString.read(from: &buf) @@ -8875,10 +11132,16 @@ public struct FfiConverterTypeMnemonic: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeMnemonic_lift(_ value: RustBuffer) throws -> Mnemonic { return try FfiConverterTypeMnemonic.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeMnemonic_lower(_ value: Mnemonic) -> RustBuffer { return FfiConverterTypeMnemonic.lower(value) } @@ -8890,6 +11153,10 @@ public func FfiConverterTypeMnemonic_lower(_ value: Mnemonic) -> RustBuffer { * is needed because the UDL type name is used in function/method signatures. 
*/ public typealias NodeAlias = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeNodeAlias: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> NodeAlias { return try FfiConverterString.read(from: &buf) @@ -8909,10 +11176,16 @@ public struct FfiConverterTypeNodeAlias: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNodeAlias_lift(_ value: RustBuffer) throws -> NodeAlias { return try FfiConverterTypeNodeAlias.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNodeAlias_lower(_ value: NodeAlias) -> RustBuffer { return FfiConverterTypeNodeAlias.lower(value) } @@ -8924,6 +11197,10 @@ public func FfiConverterTypeNodeAlias_lower(_ value: NodeAlias) -> RustBuffer { * is needed because the UDL type name is used in function/method signatures. */ public typealias NodeId = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeNodeId: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> NodeId { return try FfiConverterString.read(from: &buf) @@ -8943,55 +11220,31 @@ public struct FfiConverterTypeNodeId: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNodeId_lift(_ value: RustBuffer) throws -> NodeId { return try FfiConverterTypeNodeId.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeNodeId_lower(_ value: NodeId) -> RustBuffer { return FfiConverterTypeNodeId.lower(value) } -/** - * Typealias from the type name used in the UDL file to the builtin type. This - * is needed because the UDL type name is used in function/method signatures. - */ -public typealias Offer = String -public struct FfiConverterTypeOffer: FfiConverter { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Offer { - return try FfiConverterString.read(from: &buf) - } - - public static func write(_ value: Offer, into buf: inout [UInt8]) { - return FfiConverterString.write(value, into: &buf) - } - - public static func lift(_ value: RustBuffer) throws -> Offer { - return try FfiConverterString.lift(value) - } - - public static func lower(_ value: Offer) -> RustBuffer { - return FfiConverterString.lower(value) - } -} - - -public func FfiConverterTypeOffer_lift(_ value: RustBuffer) throws -> Offer { - return try FfiConverterTypeOffer.lift(value) -} - -public func FfiConverterTypeOffer_lower(_ value: Offer) -> RustBuffer { - return FfiConverterTypeOffer.lower(value) -} - - - /** * Typealias from the type name used in the UDL file to the builtin type. This * is needed because the UDL type name is used in function/method signatures. 
*/ public typealias OfferId = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeOfferId: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> OfferId { return try FfiConverterString.read(from: &buf) @@ -9011,55 +11264,31 @@ public struct FfiConverterTypeOfferId: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeOfferId_lift(_ value: RustBuffer) throws -> OfferId { return try FfiConverterTypeOfferId.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeOfferId_lower(_ value: OfferId) -> RustBuffer { return FfiConverterTypeOfferId.lower(value) } -/** - * Typealias from the type name used in the UDL file to the builtin type. This - * is needed because the UDL type name is used in function/method signatures. - */ -public typealias OrderId = String -public struct FfiConverterTypeOrderId: FfiConverter { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> OrderId { - return try FfiConverterString.read(from: &buf) - } - - public static func write(_ value: OrderId, into buf: inout [UInt8]) { - return FfiConverterString.write(value, into: &buf) - } - - public static func lift(_ value: RustBuffer) throws -> OrderId { - return try FfiConverterString.lift(value) - } - - public static func lower(_ value: OrderId) -> RustBuffer { - return FfiConverterString.lower(value) - } -} - - -public func FfiConverterTypeOrderId_lift(_ value: RustBuffer) throws -> OrderId { - return try FfiConverterTypeOrderId.lift(value) -} - -public func FfiConverterTypeOrderId_lower(_ value: OrderId) -> RustBuffer { - return FfiConverterTypeOrderId.lower(value) -} - - - /** * Typealias from the type name used in the UDL file to the builtin type. This * is needed because the UDL type name is used in function/method signatures. */ public typealias PaymentHash = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePaymentHash: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> PaymentHash { return try FfiConverterString.read(from: &buf) @@ -9079,10 +11308,16 @@ public struct FfiConverterTypePaymentHash: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentHash_lift(_ value: RustBuffer) throws -> PaymentHash { return try FfiConverterTypePaymentHash.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentHash_lower(_ value: PaymentHash) -> RustBuffer { return FfiConverterTypePaymentHash.lower(value) } @@ -9094,6 +11329,10 @@ public func FfiConverterTypePaymentHash_lower(_ value: PaymentHash) -> RustBuffe * is needed because the UDL type name is used in function/method signatures. 
*/ public typealias PaymentId = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePaymentId: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> PaymentId { return try FfiConverterString.read(from: &buf) @@ -9113,10 +11352,16 @@ public struct FfiConverterTypePaymentId: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentId_lift(_ value: RustBuffer) throws -> PaymentId { return try FfiConverterTypePaymentId.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentId_lower(_ value: PaymentId) -> RustBuffer { return FfiConverterTypePaymentId.lower(value) } @@ -9128,6 +11373,10 @@ public func FfiConverterTypePaymentId_lower(_ value: PaymentId) -> RustBuffer { * is needed because the UDL type name is used in function/method signatures. */ public typealias PaymentPreimage = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePaymentPreimage: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> PaymentPreimage { return try FfiConverterString.read(from: &buf) @@ -9147,10 +11396,16 @@ public struct FfiConverterTypePaymentPreimage: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentPreimage_lift(_ value: RustBuffer) throws -> PaymentPreimage { return try FfiConverterTypePaymentPreimage.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentPreimage_lower(_ value: PaymentPreimage) -> RustBuffer { return FfiConverterTypePaymentPreimage.lower(value) } @@ -9162,6 +11417,10 @@ public func FfiConverterTypePaymentPreimage_lower(_ value: PaymentPreimage) -> R * is needed because the UDL type name is used in function/method signatures. */ public typealias PaymentSecret = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePaymentSecret: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> PaymentSecret { return try FfiConverterString.read(from: &buf) @@ -9181,10 +11440,16 @@ public struct FfiConverterTypePaymentSecret: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentSecret_lift(_ value: RustBuffer) throws -> PaymentSecret { return try FfiConverterTypePaymentSecret.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePaymentSecret_lower(_ value: PaymentSecret) -> RustBuffer { return FfiConverterTypePaymentSecret.lower(value) } @@ -9196,6 +11461,10 @@ public func FfiConverterTypePaymentSecret_lower(_ value: PaymentSecret) -> RustB * is needed because the UDL type name is used in function/method signatures. 
*/ public typealias PublicKey = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypePublicKey: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> PublicKey { return try FfiConverterString.read(from: &buf) @@ -9215,55 +11484,31 @@ public struct FfiConverterTypePublicKey: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePublicKey_lift(_ value: RustBuffer) throws -> PublicKey { return try FfiConverterTypePublicKey.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypePublicKey_lower(_ value: PublicKey) -> RustBuffer { return FfiConverterTypePublicKey.lower(value) } -/** - * Typealias from the type name used in the UDL file to the builtin type. This - * is needed because the UDL type name is used in function/method signatures. - */ -public typealias Refund = String -public struct FfiConverterTypeRefund: FfiConverter { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Refund { - return try FfiConverterString.read(from: &buf) - } - - public static func write(_ value: Refund, into buf: inout [UInt8]) { - return FfiConverterString.write(value, into: &buf) - } - - public static func lift(_ value: RustBuffer) throws -> Refund { - return try FfiConverterString.lift(value) - } - - public static func lower(_ value: Refund) -> RustBuffer { - return FfiConverterString.lower(value) - } -} - - -public func FfiConverterTypeRefund_lift(_ value: RustBuffer) throws -> Refund { - return try FfiConverterTypeRefund.lift(value) -} - -public func FfiConverterTypeRefund_lower(_ value: Refund) -> RustBuffer { - return FfiConverterTypeRefund.lower(value) -} - - - /** * Typealias from the type name used in the UDL file to the builtin type. This * is needed because the UDL type name is used in function/method signatures. */ public typealias SocketAddress = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeSocketAddress: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SocketAddress { return try FfiConverterString.read(from: &buf) @@ -9283,10 +11528,16 @@ public struct FfiConverterTypeSocketAddress: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeSocketAddress_lift(_ value: RustBuffer) throws -> SocketAddress { return try FfiConverterTypeSocketAddress.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeSocketAddress_lower(_ value: SocketAddress) -> RustBuffer { return FfiConverterTypeSocketAddress.lower(value) } @@ -9298,6 +11549,10 @@ public func FfiConverterTypeSocketAddress_lower(_ value: SocketAddress) -> RustB * is needed because the UDL type name is used in function/method signatures. 
*/ public typealias Txid = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeTxid: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Txid { return try FfiConverterString.read(from: &buf) @@ -9317,10 +11572,16 @@ public struct FfiConverterTypeTxid: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeTxid_lift(_ value: RustBuffer) throws -> Txid { return try FfiConverterTypeTxid.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeTxid_lower(_ value: Txid) -> RustBuffer { return FfiConverterTypeTxid.lower(value) } @@ -9332,6 +11593,10 @@ public func FfiConverterTypeTxid_lower(_ value: Txid) -> RustBuffer { * is needed because the UDL type name is used in function/method signatures. */ public typealias UntrustedString = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeUntrustedString: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> UntrustedString { return try FfiConverterString.read(from: &buf) @@ -9351,10 +11616,16 @@ public struct FfiConverterTypeUntrustedString: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeUntrustedString_lift(_ value: RustBuffer) throws -> UntrustedString { return try FfiConverterTypeUntrustedString.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeUntrustedString_lower(_ value: UntrustedString) -> RustBuffer { return FfiConverterTypeUntrustedString.lower(value) } @@ -9366,6 +11637,10 @@ public func FfiConverterTypeUntrustedString_lower(_ value: UntrustedString) -> R * is needed because the UDL type name is used in function/method signatures. */ public typealias UserChannelId = String + +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public struct FfiConverterTypeUserChannelId: FfiConverter { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> UserChannelId { return try FfiConverterString.read(from: &buf) @@ -9385,10 +11660,16 @@ public struct FfiConverterTypeUserChannelId: FfiConverter { } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeUserChannelId_lift(_ value: RustBuffer) throws -> UserChannelId { return try FfiConverterTypeUserChannelId.lift(value) } +#if swift(>=5.8) +@_documentation(visibility: private) +#endif public func FfiConverterTypeUserChannelId_lower(_ value: UserChannelId) -> RustBuffer { return FfiConverterTypeUserChannelId.lower(value) } @@ -9404,7 +11685,7 @@ fileprivate func uniffiRustCallAsync( completeFunc: (UInt64, UnsafeMutablePointer) -> F, freeFunc: (UInt64) -> (), liftFunc: (F) throws -> T, - errorHandler: ((RustBuffer) throws -> Error)? + errorHandler: ((RustBuffer) throws -> Swift.Error)? ) async throws -> T { // Make sure to call uniffiEnsureInitialized() since future creation doesn't have a // RustCallStatus param, so doesn't use makeRustCall() @@ -9445,9 +11726,10 @@ public func defaultConfig() -> Config { ) }) } -public func generateEntropyMnemonic() -> Mnemonic { +public func generateEntropyMnemonic(wordCount: WordCount?) -> Mnemonic { return try! FfiConverterTypeMnemonic.lift(try! 
rustCall() { - uniffi_ldk_node_fn_func_generate_entropy_mnemonic($0 + uniffi_ldk_node_fn_func_generate_entropy_mnemonic( + FfiConverterOptionTypeWordCount.lower(wordCount),$0 ) }) } @@ -9457,9 +11739,9 @@ private enum InitializationResult { case contractVersionMismatch case apiChecksumMismatch } -// Use a global variables to perform the versioning checks. Swift ensures that +// Use a global variable to perform the versioning checks. Swift ensures that // the code inside is only computed once. -private var initializationResult: InitializationResult { +private var initializationResult: InitializationResult = { // Get the bindings contract version from our ComponentInterface let bindings_contract_version = 26 // Get the scaffolding contract version by calling the into the dylib @@ -9470,7 +11752,7 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_func_default_config() != 55381) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_func_generate_entropy_mnemonic() != 59926) { + if (uniffi_ldk_node_checksum_func_generate_entropy_mnemonic() != 48014) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_bolt11invoice_amount_milli_satoshis() != 50823) { @@ -9542,37 +11824,112 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_bolt11payment_receive_variable_amount_via_jit_channel() != 24506) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_bolt11payment_receive_variable_amount_via_jit_channel_for_hash() != 38025) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_bolt11payment_receive_via_jit_channel() != 16532) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send() != 63952) { + if (uniffi_ldk_node_checksum_method_bolt11payment_receive_via_jit_channel_for_hash() != 1143) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11payment_send() != 12953) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes() != 19286) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes_using_amount() != 5976) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11payment_send_using_amount() != 42793) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_absolute_expiry_seconds() != 28589) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_amount() != 5213) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_amount_msats() != 9297) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_chain() != 3308) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_created_at() != 56866) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_encode() != 13200) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_fallback_addresses() != 7925) { + return InitializationResult.apiChecksumMismatch + } + if 
(uniffi_ldk_node_checksum_method_bolt12invoice_invoice_description() != 1713) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_is_expired() != 39560) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_issuer() != 65270) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_issuer_signing_pubkey() != 55411) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_metadata() != 37374) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_offer_chains() != 39622) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_payer_note() != 28018) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_payer_signing_pubkey() != 12798) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_payment_hash() != 63778) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_quantity() != 43105) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_relative_expiry() != 14024) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_signable_hash() != 39303) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt12invoice_signing_pubkey() != 35202) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes() != 969) { + if (uniffi_ldk_node_checksum_method_bolt12payment_blinded_paths_for_async_recipient() != 14695) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes_using_amount() != 50136) { + if (uniffi_ldk_node_checksum_method_bolt12payment_initiate_refund() != 15019) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send_using_amount() != 36530) { + if (uniffi_ldk_node_checksum_method_bolt12payment_receive() != 59252) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_initiate_refund() != 38039) { + if (uniffi_ldk_node_checksum_method_bolt12payment_receive_async() != 23867) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_receive() != 15049) { + if (uniffi_ldk_node_checksum_method_bolt12payment_receive_variable_amount() != 35484) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_receive_variable_amount() != 7279) { + if (uniffi_ldk_node_checksum_method_bolt12payment_request_refund_payment() != 43248) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_request_refund_payment() != 61945) { + if (uniffi_ldk_node_checksum_method_bolt12payment_send() != 27679) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_send() != 56449) { + if (uniffi_ldk_node_checksum_method_bolt12payment_send_using_amount() != 33255) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_send_using_amount() != 26006) { + if 
(uniffi_ldk_node_checksum_method_bolt12payment_set_paths_to_static_invoice_server() != 20921) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_builder_build() != 785) { @@ -9593,6 +11950,12 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_builder_set_announcement_addresses() != 39271) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_builder_set_async_payments_role() != 16463) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_builder_set_chain_source_bitcoind_rest() != 37382) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_builder_set_chain_source_bitcoind_rpc() != 2111) { return InitializationResult.apiChecksumMismatch } @@ -9641,6 +12004,9 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_builder_set_node_alias() != 18342) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_builder_set_pathfinding_scores_source() != 63501) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_builder_set_storage_dir_path() != 59019) { return InitializationResult.apiChecksumMismatch } @@ -9653,7 +12019,7 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_feerate_to_sat_per_vb_floor() != 59617) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_lsps1liquidity_check_order_status() != 64731) { + if (uniffi_ldk_node_checksum_method_lsps1liquidity_check_order_status() != 57147) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_lsps1liquidity_request_channel() != 18153) { @@ -9755,6 +12121,12 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_node_sign_message() != 49319) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_node_splice_in() != 46431) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_node_splice_out() != 22115) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_node_spontaneous_payment() != 37403) { return InitializationResult.apiChecksumMismatch } @@ -9782,6 +12154,42 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_node_wait_next_event() != 55101) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_offer_absolute_expiry_seconds() != 22836) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_amount() != 59890) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_chains() != 59522) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_expects_quantity() != 58457) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_id() != 8391) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_is_expired() != 22651) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_is_valid_quantity() != 58469) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_issuer() != 41632) { + return 
InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_issuer_signing_pubkey() != 38162) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_metadata() != 18979) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_offer_description() != 11122) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_offer_supports_chain() != 2135) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_onchainpayment_new_address() != 37251) { return InitializationResult.apiChecksumMismatch } @@ -9791,19 +12199,55 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_onchainpayment_send_to_address() != 55646) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_spontaneouspayment_send() != 48210) { + if (uniffi_ldk_node_checksum_method_refund_absolute_expiry_seconds() != 43722) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_refund_amount_msats() != 26467) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_refund_chain() != 36565) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_refund_is_expired() != 10232) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_refund_issuer() != 40306) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_refund_payer_metadata() != 23501) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_refund_payer_note() != 47799) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_refund_payer_signing_pubkey() != 40880) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_refund_quantity() != 15192) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_refund_refund_description() != 39295) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_spontaneouspayment_send() != 27905) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_spontaneouspayment_send_probes() != 25937) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_spontaneouspayment_send_with_custom_tlvs() != 2376) { + if (uniffi_ldk_node_checksum_method_spontaneouspayment_send_with_custom_tlvs() != 17876) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_spontaneouspayment_send_with_preimage() != 30854) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_spontaneouspayment_send_with_preimage_and_custom_tlvs() != 12104) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_unifiedqrpayment_receive() != 913) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_unifiedqrpayment_send() != 53900) { + if (uniffi_ldk_node_checksum_method_unifiedqrpayment_send() != 28285) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_vssheaderprovider_get_headers() != 7788) { @@ -9812,6 +12256,9 @@ private var initializationResult: InitializationResult { if 
(uniffi_ldk_node_checksum_constructor_bolt11invoice_from_str() != 349) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_constructor_bolt12invoice_from_str() != 22276) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_constructor_builder_from_config() != 994) { return InitializationResult.apiChecksumMismatch } @@ -9824,10 +12271,16 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_constructor_feerate_from_sat_per_vb_unchecked() != 41808) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_constructor_offer_from_str() != 37070) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_constructor_refund_from_str() != 64884) { + return InitializationResult.apiChecksumMismatch + } uniffiCallbackInitLogWriter() return InitializationResult.ok -} +}() private func uniffiEnsureInitialized() { switch initializationResult { From c9a0381d2e1247e116c32a50f45bf74fc38c8c2b Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 3 Dec 2025 10:41:56 +0100 Subject: [PATCH 57/60] Update version numbers on `main` post v0.7.0 release --- Cargo.toml | 24 +++++++++---------- .../kotlin/ldk-node-android/gradle.properties | 2 +- .../kotlin/ldk-node-jvm/gradle.properties | 2 +- bindings/python/pyproject.toml | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index bb5b1d86c..e1459c77d 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ldk-node" -version = "0.7.0+git" +version = "0.8.0+git" authors = ["Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" @@ -27,17 +27,17 @@ panic = 'abort' # Abort on panic default = [] [dependencies] -#lightning = { version = "0.2.0-rc1", features = ["std"] } -#lightning-types = { version = "0.3.0-rc1" } -#lightning-invoice = { version = "0.34.0-rc1", features = ["std"] } -#lightning-net-tokio = { version = "0.2.0-rc1" } -#lightning-persister = { version = "0.2.0-rc1", features = ["tokio"] } -#lightning-background-processor = { version = "0.2.0-rc1" } -#lightning-rapid-gossip-sync = { version = "0.2.0-rc1" } -#lightning-block-sync = { version = "0.2.0-rc1", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { version = "0.2.0-rc1", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -#lightning-liquidity = { version = "0.2.0-rc1", features = ["std"] } -#lightning-macros = { version = "0.2.0-rc1" } +#lightning = { version = "0.2.0", features = ["std"] } +#lightning-types = { version = "0.3.0" } +#lightning-invoice = { version = "0.34.0", features = ["std"] } +#lightning-net-tokio = { version = "0.2.0" } +#lightning-persister = { version = "0.2.0", features = ["tokio"] } +#lightning-background-processor = { version = "0.2.0" } +#lightning-rapid-gossip-sync = { version = "0.2.0" } +#lightning-block-sync = { version = "0.2.0", features = ["rest-client", "rpc-client", "tokio"] } +#lightning-transaction-sync = { version = "0.2.0", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +#lightning-liquidity = { version = "0.2.0", features = ["std"] } +#lightning-macros = { version = "0.2.0" } lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370", features = ["std"] } lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "bb5504ec62d4b7e9d5626d8b1a6de60d71e8d370" } 
diff --git a/bindings/kotlin/ldk-node-android/gradle.properties b/bindings/kotlin/ldk-node-android/gradle.properties index 578c3308b..fa772eb70 100644 --- a/bindings/kotlin/ldk-node-android/gradle.properties +++ b/bindings/kotlin/ldk-node-android/gradle.properties @@ -2,4 +2,4 @@ org.gradle.jvmargs=-Xmx1536m android.useAndroidX=true android.enableJetifier=true kotlin.code.style=official -libraryVersion=0.6.0 +libraryVersion=0.7.0 diff --git a/bindings/kotlin/ldk-node-jvm/gradle.properties b/bindings/kotlin/ldk-node-jvm/gradle.properties index 913b5caea..d24edea2e 100644 --- a/bindings/kotlin/ldk-node-jvm/gradle.properties +++ b/bindings/kotlin/ldk-node-jvm/gradle.properties @@ -1,3 +1,3 @@ org.gradle.jvmargs=-Xmx1536m kotlin.code.style=official -libraryVersion=0.6.0 +libraryVersion=0.7.0 diff --git a/bindings/python/pyproject.toml b/bindings/python/pyproject.toml index 496781a6a..18ba319c4 100644 --- a/bindings/python/pyproject.toml +++ b/bindings/python/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ldk_node" -version = "0.6.0" +version = "0.7.0" authors = [ { name="Elias Rohrer", email="dev@tnull.de" }, ] From 90f719d255c6dfea3fddbbff6ab57650ab74441b Mon Sep 17 00:00:00 2001 From: Enigbe Date: Mon, 6 Oct 2025 08:04:45 +0100 Subject: [PATCH 58/60] feat: introduce and configure node with tiered KVStore Introduces TierStore, a KVStore implementation that manages data across three storage layers: - Primary: Main/remote data store - Ephemeral: Secondary store for non-critical, easily-rebuildable data (e.g., network graph) with fast local access - Backup: Tertiary store for disaster recovery with async/lazy operations to avoid blocking primary store Adds four configuration methods to NodeBuilder: - set_tier_store_backup: Configure backup data store - set_tier_store_ephemeral: Configure ephemeral data store - set_tier_store_retry_config: Configure retry parameters with exponential backoff - build_with_tier_store: Build node with primary data store These methods are exposed to the foreign interface via additions in ffi/types.rs: - ffi::SyncAndAsyncKVStore: Composed of KVStore and KVStoreSync methods to handle the types::SyncAndAsyncKVStore supertrait across FFI - ffi::ForeignKVStoreAdapter and ffi::DynStore: Adapt/translate between foreign language store and native Rust store - Conditional compilation for DynStore: ffi::DynStore with uniffi, types::DynStore without, with selection aided by the wrap_store!() macro --- Cargo.toml | 1 + bindings/ldk_node.udl | 61 + src/builder.rs | 227 ++- src/chain/bitcoind.rs | 4 +- src/chain/electrum.rs | 3 +- src/chain/esplora.rs | 4 +- src/chain/mod.rs | 4 +- src/data_store.rs | 7 +- src/event.rs | 9 +- src/ffi/mod.rs | 16 + src/ffi/types.rs | 326 +++++ src/io/mod.rs | 1 + src/io/tier_store.rs | 1270 +++++++++++++++++ src/io/utils.rs | 17 +- src/lib.rs | 14 +- src/liquidity.rs | 4 +- .../asynchronous/static_invoice_store.rs | 6 +- src/peer_store.rs | 6 +- src/types.rs | 3 + src/wallet/persist.rs | 3 +- tests/common/mod.rs | 7 +- tests/integration_tests_rust.rs | 18 +- 22 files changed, 1960 insertions(+), 51 deletions(-) create mode 100644 src/io/tier_store.rs diff --git a/Cargo.toml b/Cargo.toml index e1459c77d..355e77a41 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,7 @@ bitcoin = "0.32.7" bip39 = { version = "2.0.0", features = ["rand"] } bip21 = { version = "0.5", features = ["std"], default-features = false } +async-trait = {version = "0.1.89"} base64 = { version = "0.22.1", default-features = false, features = ["std"] } rand = { version = 
"0.9.2", default-features = false, features = ["std", "thread_rng", "os_rng"] } chrono = { version = "0.4", default-features = false, features = ["clock"] } diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index c4ebf56a6..cb2297c1d 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -69,6 +69,12 @@ enum WordCount { "Words24", }; +dictionary RetryConfig { + u16 initial_retry_delay_ms; + u16 maximum_delay_secs; + f32 backoff_multiplier; +}; + enum LogLevel { "Gossip", "Trace", @@ -90,6 +96,56 @@ interface LogWriter { void log(LogRecord record); }; +interface DynStore { + [Name=from_store] + constructor(SyncAndAsyncKVStore store); +}; + +[Trait, WithForeign] +interface SyncAndAsyncKVStore { + // KVStoreSync versions + [Throws=IOError] + sequence read_sync(string primary_namespace, string secondary_namespace, string key); + [Throws=IOError] + void write_sync(string primary_namespace, string secondary_namespace, string key, sequence buf); + [Throws=IOError] + void remove_sync(string primary_namespace, string secondary_namespace, string key, boolean lazy); + [Throws=IOError] + sequence list_sync(string primary_namespace, string secondary_namespace); + + // KVStore versions + [Throws=IOError, Async] + sequence read_async(string primary_namespace, string secondary_namespace, string key); + [Throws=IOError, Async] + void write_async(string primary_namespace, string secondary_namespace, string key, sequence buf); + [Throws=IOError, Async] + void remove_async(string primary_namespace, string secondary_namespace, string key, boolean lazy); + [Throws=IOError, Async] + sequence list_async(string primary_namespace, string secondary_namespace); +}; + +[Error] +enum IOError { + "NotFound", + "PermissionDenied", + "ConnectionRefused", + "ConnectionReset", + "ConnectionAborted", + "NotConnected", + "AddrInUse", + "AddrNotAvailable", + "BrokenPipe", + "AlreadyExists", + "WouldBlock", + "InvalidInput", + "InvalidData", + "TimedOut", + "WriteZero", + "Interrupted", + "UnexpectedEof", + "Other", +}; + interface Builder { constructor(); [Name=from_config] @@ -114,6 +170,9 @@ interface Builder { void set_announcement_addresses(sequence announcement_addresses); [Throws=BuildError] void set_node_alias(string node_alias); + void set_tier_store_retry_config(RetryConfig retry_config); + void set_tier_store_backup(DynStore backup_store); + void set_tier_store_ephemeral(DynStore ephemeral_store); [Throws=BuildError] void set_async_payments_role(AsyncPaymentsRole? 
role); [Throws=BuildError] @@ -126,6 +185,8 @@ interface Builder { Node build_with_vss_store_and_fixed_headers(NodeEntropy node_entropy, string vss_url, string store_id, record fixed_headers); [Throws=BuildError] Node build_with_vss_store_and_header_provider(NodeEntropy node_entropy, string vss_url, string store_id, VssHeaderProvider header_provider); + [Throws=BuildError] + Node build_with_tier_store(NodeEntropy node_entropy, DynStore primary_store); }; interface Node { diff --git a/src/builder.rs b/src/builder.rs index 13a7567b7..4031a5619 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -52,6 +52,7 @@ use crate::event::EventQueue; use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; use crate::io::sqlite_store::SqliteStore; +use crate::io::tier_store::{RetryConfig, TierStore}; use crate::io::utils::{ read_external_pathfinding_scores_from_cache, read_node_metrics, write_node_metrics, }; @@ -68,13 +69,14 @@ use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; use crate::peer_store::PeerStore; use crate::runtime::Runtime; use crate::tx_broadcaster::TransactionBroadcaster; + use crate::types::{ - ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeysManager, MessageRouter, - OnionMessenger, PaymentStore, PeerManager, Persister, + ChainMonitor, ChannelManager, GossipSync, Graph, KeysManager, MessageRouter, OnionMessenger, + PaymentStore, PeerManager, Persister, }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; -use crate::{Node, NodeMetrics}; +use crate::{wrap_store, DynStore, Node, NodeMetrics}; const VSS_HARDENED_CHILD_INDEX: u32 = 877; const VSS_LNURL_AUTH_HARDENED_CHILD_INDEX: u32 = 138; @@ -145,6 +147,23 @@ impl std::fmt::Debug for LogWriterConfig { } } +#[derive(Default)] +struct TierStoreConfig { + ephemeral: Option>, + backup: Option>, + retry: Option, +} + +impl std::fmt::Debug for TierStoreConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TierStoreConfig") + .field("ephemeral", &self.ephemeral.as_ref().map(|_| "Arc")) + .field("backup", &self.backup.as_ref().map(|_| "Arc")) + .field("retry", &self.retry) + .finish() + } +} + /// An error encountered during building a [`Node`]. /// /// [`Node`]: crate::Node @@ -237,6 +256,7 @@ pub struct NodeBuilder { liquidity_source_config: Option, log_writer_config: Option, async_payments_role: Option, + tier_store_config: Option, runtime_handle: Option, pathfinding_scores_sync_config: Option, } @@ -254,6 +274,7 @@ impl NodeBuilder { let gossip_source_config = None; let liquidity_source_config = None; let log_writer_config = None; + let tier_store_config = None; let runtime_handle = None; let pathfinding_scores_sync_config = None; Self { @@ -262,6 +283,7 @@ impl NodeBuilder { gossip_source_config, liquidity_source_config, log_writer_config, + tier_store_config, runtime_handle, async_payments_role: None, pathfinding_scores_sync_config, @@ -539,21 +561,67 @@ impl NodeBuilder { Ok(self) } + /// Configures retry behavior for transient errors when accessing the primary store. + /// + /// When building with [`build_with_tier_store`], controls the exponential backoff parameters + /// used when retrying failed operations on the primary store due to transient errors + /// (network issues, timeouts, etc.). + /// + /// If not set, default retry parameters are used. See [`RetryConfig`] for details. 
+ /// + /// [`build_with_tier_store`]: Self::build_with_tier_store + pub fn set_tier_store_retry_config(&mut self, config: RetryConfig) -> &mut Self { + let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default()); + tier_store_config.retry = Some(config); + self + } + + /// Configures the backup store for local disaster recovery. + /// + /// When building with [`build_with_tier_store`], this store receives asynchronous copies + /// of all critical data written to the primary store. If the primary store becomes + /// unavailable, reads will fall back to this backup store. + /// + /// Backup writes are non-blocking and do not affect primary store operation performance. + /// + /// [`build_with_tier_store`]: Self::build_with_tier_store + pub fn set_tier_store_backup(&mut self, backup_store: Arc) -> &mut Self { + let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default()); + tier_store_config.backup = Some(backup_store); + self + } + + /// Configures the ephemeral store for non-critical, frequently-accessed data. + /// + /// When building with [`build_with_tier_store`], this store is used for data like + /// the network graph and scorer data to reduce latency for reads. Data stored here + /// can be rebuilt if lost. + /// + /// If not set, non-critical data will be stored in the primary store. + /// + /// [`build_with_tier_store`]: Self::build_with_tier_store + pub fn set_tier_store_ephemeral(&mut self, ephemeral_store: Arc) -> &mut Self { + let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default()); + tier_store_config.ephemeral = Some(ephemeral_store); + self + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. pub fn build(&self, node_entropy: NodeEntropy) -> Result { let storage_dir_path = self.config.storage_dir_path.clone(); fs::create_dir_all(storage_dir_path.clone()) .map_err(|_| BuildError::StoragePathAccessFailed)?; - let kv_store = Arc::new( - SqliteStore::new( - storage_dir_path.into(), - Some(io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()), - Some(io::sqlite_store::KV_TABLE_NAME.to_string()), - ) - .map_err(|_| BuildError::KVStoreSetupFailed)?, - ); - self.build_with_store(node_entropy, kv_store) + let kv_store = SqliteStore::new( + storage_dir_path.into(), + Some(io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()), + Some(io::sqlite_store::KV_TABLE_NAME.to_string()), + ) + .map_err(|_| BuildError::KVStoreSetupFailed)?; + + let store = wrap_store!(Arc::new(kv_store)); + + self.build_with_store(node_entropy, store) } /// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options @@ -564,8 +632,11 @@ impl NodeBuilder { fs::create_dir_all(storage_dir_path.clone()) .map_err(|_| BuildError::StoragePathAccessFailed)?; - let kv_store = Arc::new(FilesystemStore::new(storage_dir_path)); - self.build_with_store(node_entropy, kv_store) + let kv_store = FilesystemStore::new(storage_dir_path); + + let store = wrap_store!(Arc::new(kv_store)); + + self.build_with_store(node_entropy, store) } /// Builds a [`Node`] instance with a [VSS] backend and according to the options @@ -687,7 +758,88 @@ impl NodeBuilder { BuildError::KVStoreSetupFailed })?; - self.build_with_store(node_entropy, Arc::new(vss_store)) + let store = wrap_store!(Arc::new(vss_store)); + + self.build_with_store(node_entropy, store) + } + + /// Builds a [`Node`] instance with tiered storage for managing data across multiple storage layers. 
+ /// + /// This build method enables a three-tier storage architecture optimized for different data types + /// and access patterns: + /// + /// ### Storage Tiers + /// + /// - **Primary Store** (required): The authoritative store for critical channel state and payment data. + /// Typically a remote/cloud storage service for durability and accessibility across devices. + /// + /// - **Ephemeral Store** (optional): Local storage for non-critical, frequently-accessed data like + /// the network graph and scorer. Improves performance by reducing latency for data that can be + /// rebuilt if lost. Configure with [`set_tier_store_ephemeral`]. + /// + /// - **Backup Store** (optional): Local backup of critical data for disaster recovery scenarios. + /// Provides a safety net if the primary store becomes temporarily unavailable. Writes are + /// asynchronous to avoid blocking primary operations. Configure with [`set_tier_store_backup`]. + /// + /// ## Configuration + /// + /// Use the setter methods to configure optional stores and retry behavior: + /// - [`set_tier_store_ephemeral`] - Set local store for network graph and scorer + /// - [`set_tier_store_backup`] - Set local backup store for disaster recovery + /// - [`set_tier_store_retry_config`] - Configure retry delays and backoff for transient errors + /// + /// ## Example + /// + /// ```ignore + /// # use ldk_node::{Builder, Config}; + /// # use ldk_node::io::tier_store::RetryConfig; + /// # use std::sync::Arc; + /// let config = Config::default(); + /// let mut builder = NodeBuilder::from_config(config); + /// + /// let primary = Arc::new(VssStore::new(...)); + /// let ephemeral = Arc::new(FilesystemStore::new(...)); + /// let backup = Arc::new(SqliteStore::new(...)); + /// let retry_config = RetryConfig::default(); + /// + /// builder + /// .set_tier_store_ephemeral(ephemeral) + /// .set_tier_store_backup(backup) + /// .set_tier_store_retry_config(retry_config); + /// + /// let node = builder.build_with_tier_store(primary)?; + /// # Ok::<(), ldk_node::BuildError>(()) + /// ``` + /// + /// [`set_tier_store_ephemeral`]: Self::set_tier_store_ephemeral + /// [`set_tier_store_backup`]: Self::set_tier_store_backup + /// [`set_tier_store_retry_config`]: Self::set_tier_store_retry_config + pub fn build_with_tier_store( + &self, node_entropy: NodeEntropy, primary_store: Arc, + ) -> Result { + let logger = setup_logger(&self.log_writer_config, &self.config)?; + let runtime = if let Some(handle) = self.runtime_handle.as_ref() { + Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger))) + } else { + Arc::new(Runtime::new(Arc::clone(&logger)).map_err(|e| { + log_error!(logger, "Failed to setup tokio runtime: {}", e); + BuildError::RuntimeSetupFailed + })?) + }; + + let ts_config = self.tier_store_config.as_ref(); + let retry_config = ts_config.and_then(|c| c.retry).unwrap_or_default(); + + let mut tier_store = + TierStore::new(primary_store, Arc::clone(&runtime), Arc::clone(&logger), retry_config); + + if let Some(config) = ts_config { + config.ephemeral.as_ref().map(|s| tier_store.set_ephemeral_store(Arc::clone(s))); + config.backup.as_ref().map(|s| tier_store.set_backup_store(Arc::clone(s))); + } + + let store = wrap_store!(Arc::new(tier_store)); + self.build_with_store(node_entropy, store) } /// Builds a [`Node`] instance according to the options previously configured. 
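The tiering described in the commit message and doc comments above can be pictured as a read path that prefers the local ephemeral store for rebuildable data and falls back from the primary to the backup store on failure. The following is an illustrative sketch only, not the actual `TierStore` implementation in `src/io/tier_store.rs`; the `is_ephemeral_ns` flag stands in for whatever namespace classification the real store performs, and the `read` signature is assumed from `lightning::util::persist::KVStoreSync`:

```rust
use lightning::util::persist::KVStoreSync;

/// Route a read across the three tiers. `is_ephemeral_ns` is a placeholder
/// for the namespace classification `TierStore` performs internally.
fn tiered_read(
	primary: &dyn KVStoreSync, backup: Option<&dyn KVStoreSync>,
	ephemeral: Option<&dyn KVStoreSync>, is_ephemeral_ns: bool,
	primary_namespace: &str, secondary_namespace: &str, key: &str,
) -> Result<Vec<u8>, lightning::io::Error> {
	// Non-critical, rebuildable data (e.g., network graph, scorer) is served
	// from the fast local ephemeral store when one is configured.
	if is_ephemeral_ns {
		if let Some(store) = ephemeral {
			return store.read(primary_namespace, secondary_namespace, key);
		}
	}
	// Critical data is read from the authoritative primary store, falling
	// back to the local backup copy if the primary is unavailable.
	primary.read(primary_namespace, secondary_namespace, key).or_else(|e| match backup {
		Some(store) => store.read(primary_namespace, secondary_namespace, key),
		None => Err(e),
	})
}
```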
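The `RetryConfig` fields declared in the UDL above (`initial_retry_delay_ms`, `maximum_delay_secs`, `backoff_multiplier`) imply a capped exponential backoff schedule for transient primary-store errors. Below is a rough sketch of how such a schedule plays out, under the assumption that the delay is multiplied per attempt and clamped at the configured maximum; the exact retry loop in `src/io/tier_store.rs` may differ:

```rust
use std::time::Duration;

// Mirrors the `RetryConfig` dictionary from the UDL above.
#[derive(Clone, Copy, Debug)]
pub struct RetryConfig {
	pub initial_retry_delay_ms: u16,
	pub maximum_delay_secs: u16,
	pub backoff_multiplier: f32,
}

/// Compute the first `attempts` retry delays: exponential growth from the
/// initial delay, clamped at the configured maximum.
fn backoff_delays(cfg: RetryConfig, attempts: usize) -> Vec<Duration> {
	let max = Duration::from_secs(cfg.maximum_delay_secs as u64);
	let mut delay = Duration::from_millis(cfg.initial_retry_delay_ms as u64);
	let mut out = Vec::with_capacity(attempts);
	for _ in 0..attempts {
		out.push(delay.min(max));
		delay = delay.mul_f32(cfg.backoff_multiplier).min(max);
	}
	out
}
```

For example, with an initial delay of 100ms, a multiplier of 2.0, and a 10s cap, the schedule would run 100ms, 200ms, 400ms, and so on until it saturates at 10s.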
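The `wrap_store!()` macro mentioned in the commit message selects between the FFI-facing and native store type at compile time. A hypothetical sketch of that selection follows; the actual macro in the crate may be shaped differently, and `DynStore::from_store` is assumed here from the `[Name=from_store]` constructor in the UDL above:

```rust
// With the `uniffi` feature, adapt the concrete store into the FFI-facing
// `ffi::DynStore`; without it, coerce to the native trait-object alias
// (`types::DynStore`, i.e. the `SyncAndAsyncKVStore` supertrait object).
#[cfg(feature = "uniffi")]
macro_rules! wrap_store {
	($store:expr) => {
		std::sync::Arc::new(crate::ffi::DynStore::from_store($store))
	};
}

#[cfg(not(feature = "uniffi"))]
macro_rules! wrap_store {
	($store:expr) => {{
		let store: std::sync::Arc<crate::types::DynStore> = $store;
		store
	}};
}
```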
@@ -955,6 +1107,45 @@ impl ArcedNodeBuilder {
 self.inner.write().unwrap().set_async_payments_role(role).map(|_| ())
 }
 
+ /// Configures retry behavior for transient errors when accessing the primary store.
+ ///
+ /// When building with [`build_with_tier_store`], controls the exponential backoff parameters
+ /// used when retrying failed operations on the primary store due to transient errors
+ /// (network issues, timeouts, etc.).
+ ///
+ /// If not set, default retry parameters are used. See [`RetryConfig`] for details.
+ ///
+ /// [`build_with_tier_store`]: Self::build_with_tier_store
+ pub fn set_tier_store_retry_config(&self, config: RetryConfig) {
+ self.inner.write().unwrap().set_tier_store_retry_config(config);
+ }
+
+ /// Configures the backup store for local disaster recovery.
+ ///
+ /// When building with [`build_with_tier_store`], this store receives asynchronous copies
+ /// of all critical data written to the primary store. If the primary store becomes
+ /// unavailable, reads will fall back to this backup store.
+ ///
+ /// Backup writes are non-blocking and do not affect primary store operation performance.
+ ///
+ /// [`build_with_tier_store`]: Self::build_with_tier_store
+ pub fn set_tier_store_backup(&self, backup_store: Arc<DynStore>) {
+ self.inner.write().unwrap().set_tier_store_backup(backup_store);
+ }
+
+ /// Configures the ephemeral store for non-critical, frequently-accessed data.
+ ///
+ /// When building with [`build_with_tier_store`], this store holds data such as the
+ /// network graph and the scorer, reducing read latency. Data stored here
+ /// can be rebuilt if lost.
+ ///
+ /// If not set, non-critical data will be stored in the primary store.
+ ///
+ /// [`build_with_tier_store`]: Self::build_with_tier_store
+ pub fn set_tier_store_ephemeral(&self, ephemeral_store: Arc<DynStore>) {
+ self.inner.write().unwrap().set_tier_store_ephemeral(ephemeral_store);
+ }
+
 /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
 /// previously configured.
 pub fn build(&self, node_entropy: Arc<NodeEntropy>) -> Result<Arc<Node>, BuildError> {
@@ -1053,6 +1244,12 @@ impl ArcedNodeBuilder {
 .map(Arc::new)
 }
 
+ /// Builds a [`Node`] instance with tiered storage and according to the options
+ /// previously configured.
+ ///
+ /// See [`NodeBuilder::build_with_tier_store`] for details on the individual storage tiers.
+ pub fn build_with_tier_store(
+ &self, node_entropy: Arc<NodeEntropy>, primary_store: Arc<DynStore>,
+ ) -> Result<Arc<Node>, BuildError> {
+ self.inner.read().unwrap().build_with_tier_store(*node_entropy, primary_store).map(Arc::new)
+ }
+
 /// Builds a [`Node`] instance according to the options previously configured.
pub fn build_with_store( &self, node_entropy: Arc, kv_store: Arc, diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index b3d7880d6..4ff6b2e9a 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -38,8 +38,8 @@ use crate::fee_estimator::{ }; use crate::io::utils::write_node_metrics; use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; -use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; -use crate::{Error, NodeMetrics}; +use crate::types::{ChainMonitor, ChannelManager, Sweeper, Wallet}; +use crate::{DynStore, Error, NodeMetrics}; const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; const CHAIN_POLLING_TIMEOUT_SECS: u64 = 10; diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 9e05dfaee..82ac20cf5 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -36,7 +36,8 @@ use crate::fee_estimator::{ use crate::io::utils::write_node_metrics; use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; use crate::runtime::Runtime; -use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::types::{ChainMonitor, ChannelManager, Sweeper, Wallet}; +use crate::DynStore; use crate::NodeMetrics; const BDK_ELECTRUM_CLIENT_BATCH_SIZE: usize = 5; diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index f6f313955..dd628703b 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -28,8 +28,8 @@ use crate::fee_estimator::{ }; use crate::io::utils::write_node_metrics; use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; -use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; -use crate::{Error, NodeMetrics}; +use crate::types::{ChainMonitor, ChannelManager, Sweeper, Wallet}; +use crate::{DynStore, Error, NodeMetrics}; pub(super) struct EsploraChainSource { pub(super) sync_config: EsploraSyncConfig, diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 2cd98e20d..e6167b1e3 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -28,8 +28,8 @@ use crate::fee_estimator::OnchainFeeEstimator; use crate::io::utils::write_node_metrics; use crate::logger::{log_debug, log_info, log_trace, LdkLogger, Logger}; use crate::runtime::Runtime; -use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; -use crate::{Error, NodeMetrics}; +use crate::types::{Broadcaster, ChainMonitor, ChannelManager, Sweeper, Wallet}; +use crate::{DynStore, Error, NodeMetrics}; pub(crate) enum WalletSyncStatus { Completed, diff --git a/src/data_store.rs b/src/data_store.rs index 87bd831c9..97fbdd3b3 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -13,8 +13,7 @@ use lightning::util::persist::KVStoreSync; use lightning::util::ser::{Readable, Writeable}; use crate::logger::{log_error, LdkLogger}; -use crate::types::DynStore; -use crate::Error; +use crate::{DynStore, Error}; pub(crate) trait StorableObject: Clone + Readable + Writeable { type Id: StorableObjectId; @@ -175,8 +174,8 @@ mod tests { use lightning::util::test_utils::TestLogger; use super::*; - use crate::hex_utils; use crate::io::test_utils::InMemoryStore; + use crate::{hex_utils, wrap_store}; #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] struct TestObjectId { @@ -235,7 +234,7 @@ mod tests { #[test] fn data_is_persisted() { - let store: Arc = Arc::new(InMemoryStore::new()); + let store: Arc = wrap_store!(Arc::new(InMemoryStore::new())); let logger = Arc::new(TestLogger::new()); let primary_namespace = 
"datastore_test_primary".to_string(); let secondary_namespace = "datastore_test_secondary".to_string(); diff --git a/src/event.rs b/src/event.rs index 41f76f216..e5d971fff 100644 --- a/src/event.rs +++ b/src/event.rs @@ -48,7 +48,8 @@ use crate::payment::store::{ PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, }; use crate::runtime::Runtime; -use crate::types::{CustomTlvRecord, DynStore, OnionMessenger, PaymentStore, Sweeper, Wallet}; +use crate::types::{CustomTlvRecord, OnionMessenger, PaymentStore, Sweeper, Wallet}; +use crate::DynStore; use crate::{ hex_utils, BumpTransactionEventHandler, ChannelManager, Error, Graph, PeerInfo, PeerStore, UserChannelId, @@ -1799,12 +1800,14 @@ mod tests { use lightning::util::test_utils::TestLogger; + use crate::wrap_store; + use super::*; use crate::io::test_utils::InMemoryStore; #[tokio::test] async fn event_queue_persistence() { - let store: Arc = Arc::new(InMemoryStore::new()); + let store: Arc = wrap_store!(Arc::new(InMemoryStore::new())); let logger = Arc::new(TestLogger::new()); let event_queue = Arc::new(EventQueue::new(Arc::clone(&store), Arc::clone(&logger))); assert_eq!(event_queue.next_event(), None); @@ -1842,7 +1845,7 @@ mod tests { #[tokio::test] async fn event_queue_concurrency() { - let store: Arc = Arc::new(InMemoryStore::new()); + let store: Arc = wrap_store!(Arc::new(InMemoryStore::new())); let logger = Arc::new(TestLogger::new()); let event_queue = Arc::new(EventQueue::new(Arc::clone(&store), Arc::clone(&logger))); assert_eq!(event_queue.next_event(), None); diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs index 32464d044..f66b3c56e 100644 --- a/src/ffi/mod.rs +++ b/src/ffi/mod.rs @@ -45,3 +45,19 @@ pub fn maybe_try_convert_enum(value: &T) -> Result<&T, crate::error::Error> { pub fn maybe_wrap(value: T) -> T { value } + +/// KVStore* wrapper +#[macro_export] +macro_rules! wrap_store { + ($store:expr) => {{ + #[cfg(feature = "uniffi")] + { + $crate::DynStore::from_ldk_store($store) + } + + #[cfg(not(feature = "uniffi"))] + { + $store + } + }}; +} diff --git a/src/ffi/types.rs b/src/ffi/types.rs index c69987c96..7a2606387 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -11,11 +11,14 @@ // Make sure to add any re-exported items that need to be used in uniffi below. 
use std::convert::TryInto; +use std::future::Future; use std::ops::Deref; +use std::pin::Pin; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; +use async_trait::async_trait; pub use bip39::Mnemonic; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; @@ -31,6 +34,7 @@ use lightning::offers::offer::{Amount as LdkAmount, Offer as LdkOffer}; use lightning::offers::refund::Refund as LdkRefund; pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; pub use lightning::routing::router::RouteParametersConfig; +use lightning::util::persist::{KVStore, KVStoreSync}; use lightning::util::ser::Writeable; use lightning_invoice::{Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescriptionRef}; pub use lightning_invoice::{Description, SignedRawBolt11Invoice}; @@ -56,8 +60,330 @@ pub use crate::payment::store::{ ConfirmationStatus, LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus, }; pub use crate::payment::QrPaymentResult; +use crate::types::SyncAndAsyncKVStore as LdkSyncAndAsyncKVStore; use crate::{hex_utils, SocketAddress, UniffiCustomTypeConverter, UserChannelId}; +#[derive(Debug)] +pub enum IOError { + NotFound, + PermissionDenied, + ConnectionRefused, + ConnectionReset, + ConnectionAborted, + NotConnected, + AddrInUse, + AddrNotAvailable, + BrokenPipe, + AlreadyExists, + WouldBlock, + InvalidInput, + InvalidData, + TimedOut, + WriteZero, + Interrupted, + UnexpectedEof, + Other, +} + +impl From for IOError { + fn from(error: lightning::io::Error) -> Self { + match error.kind() { + lightning::io::ErrorKind::NotFound => IOError::NotFound, + lightning::io::ErrorKind::PermissionDenied => IOError::PermissionDenied, + lightning::io::ErrorKind::ConnectionRefused => IOError::ConnectionRefused, + lightning::io::ErrorKind::ConnectionReset => IOError::ConnectionReset, + lightning::io::ErrorKind::ConnectionAborted => IOError::ConnectionAborted, + lightning::io::ErrorKind::NotConnected => IOError::NotConnected, + lightning::io::ErrorKind::AddrInUse => IOError::AddrInUse, + lightning::io::ErrorKind::AddrNotAvailable => IOError::AddrNotAvailable, + lightning::io::ErrorKind::BrokenPipe => IOError::BrokenPipe, + lightning::io::ErrorKind::AlreadyExists => IOError::AlreadyExists, + lightning::io::ErrorKind::WouldBlock => IOError::WouldBlock, + lightning::io::ErrorKind::InvalidInput => IOError::InvalidInput, + lightning::io::ErrorKind::InvalidData => IOError::InvalidData, + lightning::io::ErrorKind::TimedOut => IOError::TimedOut, + lightning::io::ErrorKind::WriteZero => IOError::WriteZero, + lightning::io::ErrorKind::Interrupted => IOError::Interrupted, + lightning::io::ErrorKind::UnexpectedEof => IOError::UnexpectedEof, + lightning::io::ErrorKind::Other => IOError::Other, + } + } +} + +impl From for lightning::io::Error { + fn from(error: IOError) -> Self { + match error { + IOError::NotFound => lightning::io::ErrorKind::NotFound.into(), + IOError::PermissionDenied => lightning::io::ErrorKind::PermissionDenied.into(), + IOError::ConnectionRefused => lightning::io::ErrorKind::ConnectionRefused.into(), + IOError::ConnectionReset => lightning::io::ErrorKind::ConnectionReset.into(), + IOError::ConnectionAborted => lightning::io::ErrorKind::ConnectionAborted.into(), + IOError::NotConnected => lightning::io::ErrorKind::NotConnected.into(), + IOError::AddrInUse => lightning::io::ErrorKind::AddrInUse.into(), + IOError::AddrNotAvailable => lightning::io::ErrorKind::AddrNotAvailable.into(), + IOError::BrokenPipe => lightning::io::ErrorKind::BrokenPipe.into(), + 
IOError::AlreadyExists => lightning::io::ErrorKind::AlreadyExists.into(), + IOError::WouldBlock => lightning::io::ErrorKind::WouldBlock.into(), + IOError::InvalidInput => lightning::io::ErrorKind::InvalidInput.into(), + IOError::InvalidData => lightning::io::ErrorKind::InvalidData.into(), + IOError::TimedOut => lightning::io::ErrorKind::TimedOut.into(), + IOError::WriteZero => lightning::io::ErrorKind::WriteZero.into(), + IOError::Interrupted => lightning::io::ErrorKind::Interrupted.into(), + IOError::UnexpectedEof => lightning::io::ErrorKind::UnexpectedEof.into(), + IOError::Other => lightning::io::ErrorKind::Other.into(), + } + } +} + +impl std::fmt::Display for IOError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + IOError::NotFound => write!(f, "NotFound"), + IOError::PermissionDenied => write!(f, "PermissionDenied"), + IOError::ConnectionRefused => write!(f, "ConnectionRefused"), + IOError::ConnectionReset => write!(f, "ConnectionReset"), + IOError::ConnectionAborted => write!(f, "ConnectionAborted"), + IOError::NotConnected => write!(f, "NotConnected"), + IOError::AddrInUse => write!(f, "AddrInUse"), + IOError::AddrNotAvailable => write!(f, "AddrNotAvailable"), + IOError::BrokenPipe => write!(f, "BrokenPipe"), + IOError::AlreadyExists => write!(f, "AlreadyExists"), + IOError::WouldBlock => write!(f, "WouldBlock"), + IOError::InvalidInput => write!(f, "InvalidInput"), + IOError::InvalidData => write!(f, "InvalidData"), + IOError::TimedOut => write!(f, "TimedOut"), + IOError::WriteZero => write!(f, "WriteZero"), + IOError::Interrupted => write!(f, "Interrupted"), + IOError::UnexpectedEof => write!(f, "UnexpectedEof"), + IOError::Other => write!(f, "Other"), + } + } +} + +#[async_trait] +pub trait SyncAndAsyncKVStore: Send + Sync { + // KVStoreSync methods + fn read_sync( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> Result, IOError>; + fn write_sync( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> Result<(), IOError>; + fn remove_sync( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> Result<(), IOError>; + fn list_sync( + &self, primary_namespace: String, secondary_namespace: String, + ) -> Result, IOError>; + + // KVStore methods + async fn read_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> Result, IOError>; + async fn write_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> Result<(), IOError>; + async fn remove_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> Result<(), IOError>; + async fn list_async( + &self, primary_namespace: String, secondary_namespace: String, + ) -> Result, IOError>; +} + +pub struct ForeignKVStoreAdapter { + pub(crate) inner: Arc, +} + +impl KVStore for ForeignKVStoreAdapter { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Pin, lightning::io::Error>> + Send>> { + let inner = self.inner.clone(); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + Box::pin(async move { + inner + .read_async(primary_namespace, secondary_namespace, key) + .await + .map_err(|e| e.into()) + }) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Pin> + Send>> { + 
let inner = self.inner.clone(); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + Box::pin(async move { + inner + .write_async(primary_namespace, secondary_namespace, key, buf) + .await + .map_err(|e| e.into()) + }) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> Pin> + Send>> { + let inner = self.inner.clone(); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + Box::pin(async move { + inner + .remove_async(primary_namespace, secondary_namespace, key, lazy) + .await + .map_err(|e| e.into()) + }) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Pin, lightning::io::Error>> + Send>> { + let inner = self.inner.clone(); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + + Box::pin(async move { + inner.list_async(primary_namespace, secondary_namespace).await.map_err(|e| e.into()) + }) + } +} + +impl KVStoreSync for ForeignKVStoreAdapter { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Result, lightning::io::Error> { + self.inner + .read_sync( + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + ) + .map_err(|e| e.into()) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Result<(), lightning::io::Error> { + self.inner + .write_sync( + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + buf, + ) + .map_err(|e| e.into()) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> Result<(), lightning::io::Error> { + self.inner + .remove_sync( + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + lazy, + ) + .map_err(|e| e.into()) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Result, lightning::io::Error> { + self.inner + .list_sync(primary_namespace.to_string(), secondary_namespace.to_string()) + .map_err(|e| e.into()) + } +} + +pub struct DynStore { + pub(crate) inner: Arc, +} + +impl DynStore { + pub fn from_store(store: Arc) -> Self { + let adapter = ForeignKVStoreAdapter { inner: store }; + Self { inner: Arc::new(adapter) } + } + + pub fn from_ldk_store(store: Arc) -> Arc { + Arc::new(Self { inner: store }) + } +} + +impl Deref for DynStore { + type Target = Arc; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl KVStore for DynStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Pin, lightning::io::Error>> + Send>> { + KVStore::read(self.inner.as_ref(), primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Pin> + Send>> { + KVStore::write(self.inner.as_ref(), primary_namespace, secondary_namespace, key, buf) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> Pin> + Send>> { + KVStore::remove(self.inner.as_ref(), primary_namespace, secondary_namespace, key, lazy) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Pin, lightning::io::Error>> + Send>> { + KVStore::list(self.inner.as_ref(), 
primary_namespace, secondary_namespace)
+ }
+}
+
+impl KVStoreSync for DynStore {
+ fn read(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+ ) -> Result<Vec<u8>, lightning::io::Error> {
+ KVStoreSync::read(self.inner.as_ref(), primary_namespace, secondary_namespace, key)
+ }
+
+ fn write(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+ ) -> Result<(), lightning::io::Error> {
+ KVStoreSync::write(self.inner.as_ref(), primary_namespace, secondary_namespace, key, buf)
+ }
+
+ fn remove(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+ ) -> Result<(), lightning::io::Error> {
+ KVStoreSync::remove(self.inner.as_ref(), primary_namespace, secondary_namespace, key, lazy)
+ }
+
+ fn list(
+ &self, primary_namespace: &str, secondary_namespace: &str,
+ ) -> Result<Vec<String>, lightning::io::Error> {
+ KVStoreSync::list(self.inner.as_ref(), primary_namespace, secondary_namespace)
+ }
+}
+
 impl UniffiCustomTypeConverter for PublicKey {
 type Builtin = String;
 
diff --git a/src/io/mod.rs b/src/io/mod.rs
index 38fba5114..23a73183e 100644
--- a/src/io/mod.rs
+++ b/src/io/mod.rs
@@ -10,6 +10,7 @@ pub mod sqlite_store;
 #[cfg(test)]
 pub(crate) mod test_utils;
+pub(crate) mod tier_store;
 pub(crate) mod utils;
 pub(crate) mod vss_store;
 
diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs
new file mode 100644
index 000000000..0337ff19a
--- /dev/null
+++ b/src/io/tier_store.rs
@@ -0,0 +1,1270 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+use crate::io::utils::{check_namespace_key_validity, is_possibly_transient};
+use crate::logger::{LdkLogger, Logger};
+use crate::runtime::Runtime;
+use crate::DynStore;
+
+use lightning::util::persist::{
+ KVStore, KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY,
+ NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+ SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
+};
+use lightning::{io, log_trace};
+use lightning::{log_debug, log_error, log_info, log_warn};
+
+use tokio::sync::mpsc::{self, error::TrySendError};
+
+use std::collections::HashMap;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+// todo(enigbe): Uncertain about appropriate queue size and if this would need
+// configuring.
+const BACKUP_QUEUE_CAPACITY: usize = 100;
+
+const DEFAULT_INITIAL_RETRY_DELAY_MS: u16 = 50;
+const DEFAULT_MAXIMUM_RETRY_DELAY_SECS: u16 = 5;
+const DEFAULT_BACKOFF_MULTIPLIER: f32 = 1.5;
+
+/// Configuration for exponential backoff retry behavior.
+#[derive(Debug, Copy, Clone)]
+pub struct RetryConfig {
+ /// The initial delay before the first retry attempt, in milliseconds.
+ pub initial_retry_delay_ms: u16,
+ /// The maximum delay between retry attempts, in seconds.
+ pub maximum_delay_secs: u16,
+ /// The multiplier applied to the delay after each retry attempt.
+ ///
+ /// For example, a value of `2.0` doubles the delay after each failed retry.
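+ ///
+ /// As an illustration, an initial delay of 50 ms with a multiplier of `1.5` yields
+ /// successive delays of roughly 50 ms, 75 ms, and 112 ms, with each delay capped at
+ /// the configured maximum.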
+ pub backoff_multiplier: f32,
+}
+
+impl Default for RetryConfig {
+ fn default() -> Self {
+ Self {
+ initial_retry_delay_ms: DEFAULT_INITIAL_RETRY_DELAY_MS,
+ maximum_delay_secs: DEFAULT_MAXIMUM_RETRY_DELAY_SECS,
+ backoff_multiplier: DEFAULT_BACKOFF_MULTIPLIER,
+ }
+ }
+}
+
+/// A 3-tiered [`KVStoreSync`] implementation that manages data across
+/// three distinct storage locations: a primary (preferably remote)
+/// store for all critical data, an optional ephemeral (local) store
+/// for non-critical and easily rebuildable data, and an optional
+/// backup (preferably local) store that lazily backs up the primary
+/// store for disaster recovery scenarios.
+pub(crate) struct TierStore {
+ inner: Arc<TierStoreInner>,
+ next_version: AtomicU64,
+ runtime: Arc<Runtime>,
+ logger: Arc<Logger>,
+}
+
+impl TierStore {
+ pub fn new(
+ primary_store: Arc<DynStore>, runtime: Arc<Runtime>, logger: Arc<Logger>,
+ retry_config: RetryConfig,
+ ) -> Self {
+ let inner = Arc::new(TierStoreInner::new(primary_store, Arc::clone(&logger), retry_config));
+
+ Self { inner, next_version: AtomicU64::new(1), runtime, logger }
+ }
+
+ /// Configures the local backup store for disaster recovery.
+ ///
+ /// This store serves as a local copy of the critical data for disaster
+ /// recovery scenarios. When configured, this method also spawns a background
+ /// task that asynchronously processes backup writes and removals to avoid
+ /// blocking primary store operations.
+ ///
+ /// The backup operates on a best-effort basis:
+ /// - Writes are queued asynchronously (non-blocking).
+ /// - No retry logic (we assume the local store is unlikely to see transient failures).
+ /// - Failures are logged but don't propagate all the way to the caller.
+ pub fn set_backup_store(&mut self, backup: Arc<DynStore>) {
+ let (tx, rx) = mpsc::channel::<BackupOp>(BACKUP_QUEUE_CAPACITY);
+
+ let backup_clone = Arc::clone(&backup);
+ let logger = Arc::clone(&self.logger);
+
+ self.runtime.spawn_background_task(Self::process_backup_operation(
+ rx,
+ backup_clone,
+ logger,
+ ));
+
+ debug_assert_eq!(Arc::strong_count(&self.inner), 1);
+
+ let inner = Arc::get_mut(&mut self.inner).expect(
+ "TierStore should not be shared during configuration. No other references should exist",
+ );
+
+ inner.backup_store = Some(backup);
+ inner.backup_sender = Some(tx);
+ }
+
+ async fn process_backup_operation(
+ mut receiver: mpsc::Receiver<BackupOp>, backup_store: Arc<DynStore>, logger: Arc<Logger>,
+ ) {
+ while let Some(op) = receiver.recv().await {
+ match Self::apply_backup_operation(&op, &backup_store) {
+ Ok(_) => {
+ log_trace!(
+ logger,
+ "Backup succeeded for key {}/{}/{}",
+ op.primary_namespace(),
+ op.secondary_namespace(),
+ op.key()
+ );
+ },
+ Err(e) => {
+ log_error!(
+ logger,
+ "Backup failed permanently for key {}/{}/{}: {}",
+ op.primary_namespace(),
+ op.secondary_namespace(),
+ op.key(),
+ e
+ );
+ },
+ }
+ }
+ }
+
+ fn apply_backup_operation(op: &BackupOp, store: &Arc<DynStore>) -> io::Result<()> {
+ match op {
+ BackupOp::Write { primary_namespace, secondary_namespace, key, data } => {
+ KVStoreSync::write(
+ store.as_ref(),
+ primary_namespace,
+ secondary_namespace,
+ key,
+ data.clone(),
+ )
+ },
+ BackupOp::Remove { primary_namespace, secondary_namespace, key, lazy } => {
+ KVStoreSync::remove(
+ store.as_ref(),
+ primary_namespace,
+ secondary_namespace,
+ key,
+ *lazy,
+ )
+ },
+ }
+ }
+
+ /// Configures the local store for non-critical data storage.
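+ ///
+ /// Currently this covers the network graph and scorer data, both of which can be
+ /// rebuilt if lost.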
+ pub fn set_ephemeral_store(&mut self, ephemeral: Arc) { + debug_assert_eq!(Arc::strong_count(&self.inner), 1); + + let inner = Arc::get_mut(&mut self.inner).expect( + "TierStore should not be shared during configuration. No other references should exist", + ); + + inner.ephemeral_store = Some(ephemeral); + } + + fn build_locking_key( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> String { + if primary_namespace.is_empty() { + key.to_owned() + } else { + format!("{}#{}#{}", primary_namespace, secondary_namespace, key) + } + } + + fn get_new_version_and_lock_ref( + &self, locking_key: String, + ) -> (Arc>, u64) { + let version = self.next_version.fetch_add(1, Ordering::Relaxed); + if version == u64::MAX { + panic!("TierStore version counter overflowed"); + } + + // Get a reference to the inner lock. We do this early so that the arc can double as an in-flight counter for + // cleaning up unused locks. + let inner_lock_ref = self.inner.get_inner_lock_ref(locking_key); + + (inner_lock_ref, version) + } +} + +impl KVStore for TierStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Pin, io::Error>> + Send>> { + let inner = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + Box::pin( + async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }, + ) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Pin> + Send>> { + let inner = Arc::clone(&self.inner); + + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + Box::pin(async move { + inner + .write_internal( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + buf, + ) + .await + }) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> Pin> + Send>> { + let inner = Arc::clone(&self.inner); + + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + Box::pin(async move { + inner + .remove_internal( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + lazy, + ) + .await + }) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Pin, io::Error>> + Send>> { + let inner = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + + Box::pin(async move { inner.list_internal(primary_namespace, secondary_namespace).await }) + } +} + +impl KVStoreSync for TierStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + self.runtime.block_on(self.inner.read_internal( + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + )) + } + + fn write( + &self, primary_namespace: &str, 
secondary_namespace: &str, key: &str, buf: Vec<u8>,
+ ) -> io::Result<()> {
+ let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+ let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+
+ let primary_namespace = primary_namespace.to_string();
+ let secondary_namespace = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ self.runtime.block_on(self.inner.write_internal(
+ inner_lock_ref,
+ locking_key,
+ version,
+ primary_namespace,
+ secondary_namespace,
+ key,
+ buf,
+ ))
+ }
+
+ fn remove(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+ ) -> io::Result<()> {
+ let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+ let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+
+ let primary_namespace = primary_namespace.to_string();
+ let secondary_namespace = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ self.runtime.block_on(self.inner.remove_internal(
+ inner_lock_ref,
+ locking_key,
+ version,
+ primary_namespace,
+ secondary_namespace,
+ key,
+ lazy,
+ ))
+ }
+
+ fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> {
+ self.runtime.block_on(
+ self.inner
+ .list_internal(primary_namespace.to_string(), secondary_namespace.to_string()),
+ )
+ }
+}
+
+pub struct TierStoreInner {
+ /// For remote data.
+ primary_store: Arc<DynStore>,
+ /// For local non-critical/ephemeral data.
+ ephemeral_store: Option<Arc<DynStore>>,
+ /// For redundancy (disaster recovery).
+ backup_store: Option<Arc<DynStore>>,
+ backup_sender: Option<mpsc::Sender<BackupOp>>,
+ logger: Arc<Logger>,
+ retry_config: RetryConfig,
+ /// Per-key locks for the available data tiers (primary, backup, ephemeral)
+ /// that ensure we don't have concurrent writes to the same namespace/key.
+ locks: Mutex<HashMap<String, Arc<tokio::sync::Mutex<u64>>>>,
+}
+
+impl TierStoreInner {
+ /// Creates a tier store with the primary (remote) data store.
+ pub fn new(
+ primary_store: Arc<DynStore>, logger: Arc<Logger>, retry_config: RetryConfig,
+ ) -> Self {
+ Self {
+ primary_store,
+ ephemeral_store: None,
+ backup_store: None,
+ backup_sender: None,
+ logger,
+ retry_config,
+ locks: Mutex::new(HashMap::new()),
+ }
+ }
+
+ /// Queues data for asynchronous backup/write to the configured backup store.
+ ///
+ /// We perform a non-blocking send to avoid impacting primary storage operations.
+ /// This is a no-op if no backup store is configured.
+ ///
+ /// ## Returns
+ /// - `Ok(())`: Backup was successfully queued or no backup is configured
+ /// - `Err(WouldBlock)`: Backup queue is full - data was not queued
+ /// - `Err(BrokenPipe)`: Backup queue is no longer available
+ fn enqueue_backup_write(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+ ) -> io::Result<()> {
+ if let Some(backup_sender) = &self.backup_sender {
+ let backup_res = backup_sender.try_send(BackupOp::Write {
+ primary_namespace: primary_namespace.to_string(),
+ secondary_namespace: secondary_namespace.to_string(),
+ key: key.to_string(),
+ data: buf,
+ });
+ if let Err(e) = backup_res {
+ match e {
+ // Assuming the channel is only full for a short time, should we explore
+ // retrying here to add some resiliency?
+ TrySendError::Full(op) => {
+ log_warn!(
+ self.logger,
+ "Backup queue is full. 
Cannot write data for key: {}/{}/{}",
+ op.primary_namespace(),
+ op.secondary_namespace(),
+ op.key()
+ );
+ let e = io::Error::new(
+ io::ErrorKind::WouldBlock,
+ "Backup queue is currently full.",
+ );
+ return Err(e);
+ },
+ TrySendError::Closed(op) => {
+ log_error!(
+ self.logger,
+ "Backup queue is closed. Cannot write data for key: {}/{}/{}",
+ op.primary_namespace(),
+ op.secondary_namespace(),
+ op.key()
+ );
+ let e =
+ io::Error::new(io::ErrorKind::BrokenPipe, "Backup queue is closed.");
+ return Err(e);
+ },
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Queues the removal of data from the configured backup store.
+ ///
+ /// We perform a non-blocking send to avoid impacting primary storage operations.
+ /// This is a no-op if no backup store is configured.
+ ///
+ /// ## Returns
+ /// - `Ok(())`: Backup was successfully queued or no backup is configured
+ /// - `Err(WouldBlock)`: Backup queue is full - data was not queued
+ /// - `Err(BrokenPipe)`: Backup queue is no longer available
+ fn enqueue_backup_remove(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+ ) -> io::Result<()> {
+ if let Some(backup_sender) = &self.backup_sender {
+ let removal_res = backup_sender.try_send(BackupOp::Remove {
+ primary_namespace: primary_namespace.to_string(),
+ secondary_namespace: secondary_namespace.to_string(),
+ key: key.to_string(),
+ lazy,
+ });
+ if let Err(e) = removal_res {
+ match e {
+ TrySendError::Full(op) => {
+ log_warn!(
+ self.logger,
+ "Backup queue is full. Cannot remove data for key: {}/{}/{}",
+ op.primary_namespace(),
+ op.secondary_namespace(),
+ op.key()
+ );
+ let e = io::Error::new(
+ io::ErrorKind::WouldBlock,
+ "Backup queue is currently full.",
+ );
+ return Err(e);
+ },
+ TrySendError::Closed(op) => {
+ log_error!(
+ self.logger,
+ "Backup queue is closed. Cannot remove data for key: {}/{}/{}",
+ op.primary_namespace(),
+ op.secondary_namespace(),
+ op.key()
+ );
+ let e =
+ io::Error::new(io::ErrorKind::BrokenPipe, "Backup queue is closed.");
+ return Err(e);
+ },
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Reads data from the backup store (if configured).
+ fn read_from_backup(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+ ) -> io::Result<Vec<u8>> {
+ if let Some(backup) = self.backup_store.as_ref() {
+ KVStoreSync::read(backup.as_ref(), primary_namespace, secondary_namespace, key)
+ } else {
+ Err(io::Error::new(io::ErrorKind::NotFound, "Backup store not previously configured."))
+ }
+ }
+
+ /// Lists keys from the given primary and secondary namespace pair from the backup
+ /// store (if configured).
+ fn list_from_backup(
+ &self, primary_namespace: &str, secondary_namespace: &str,
+ ) -> io::Result<Vec<String>> {
+ if let Some(backup) = &self.backup_store {
+ KVStoreSync::list(backup.as_ref(), primary_namespace, secondary_namespace)
+ } else {
+ Err(io::Error::new(io::ErrorKind::NotFound, "Backup store not previously configured."))
+ }
+ }
+
+ /// Reads from the primary data store with basic retry logic, or falls back to backup.
+ ///
+ /// For transient errors, retries up to a maximum delay time with exponential
+ /// backoff. For any error (transient after exhaustion or non-transient), falls back
+ /// to the backup store (if configured).
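+ ///
+ /// Note that data served from the backup store may slightly lag the primary store,
+ /// as backup writes are applied asynchronously.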
+ async fn read_primary_or_backup( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64); + let maximum_delay = Duration::from_secs(self.retry_config.maximum_delay_secs as u64); + let mut tries = 0_u16; + + loop { + match KVStore::read( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + ) + .await + { + Ok(data) => { + log_info!( + self.logger, + "Read succeeded after {} retries for key: {}/{}/{}", + tries, + primary_namespace, + secondary_namespace, + key + ); + return Ok(data); + }, + + Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => { + log_warn!( + self.logger, + "Possible transient error reading key {}/{}/{} (attempt {}): {}. Retrying...", + primary_namespace, + secondary_namespace, + key, + tries + 1, + e + ); + tries += 1; + tokio::time::sleep(delay).await; + delay = std::cmp::min( + delay.mul_f32(self.retry_config.backoff_multiplier), + maximum_delay, + ); + }, + + Err(e) => { + log_error!(self.logger, "Failed to read from primary store for key {}/{}/{}: {}. Falling back to backup.", + primary_namespace, secondary_namespace, key, e); + return self.read_from_backup(primary_namespace, secondary_namespace, key); + }, + } + } + } + + /// Lists keys from the primary data store with retry logic, or falls back to backup. + /// + /// For transient errors, retries up to a maximum delay time with exponential + /// backoff. For any error (transient after exhaustion or non-transient), falls + /// back to the backup store (if configured) for disaster recovery. + async fn list_primary_or_backup( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> io::Result> { + let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64); + let maximum_delay = Duration::from_secs(self.retry_config.maximum_delay_secs as u64); + let mut tries = 0_u16; + + loop { + match KVStore::list(self.primary_store.as_ref(), primary_namespace, secondary_namespace) + .await + { + Ok(keys) => { + log_info!( + self.logger, + "List succeeded after {} retries for namespace: {}/{}", + tries, + primary_namespace, + secondary_namespace + ); + return Ok(keys); + }, + Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => { + log_warn!( + self.logger, + "Possible transient error listing namespace {}/{} (attempt {}): {}. Retrying...", + primary_namespace, + secondary_namespace, + tries + 1, + e + ); + tries += 1; + tokio::time::sleep(delay).await; + delay = std::cmp::min( + delay.mul_f32(self.retry_config.backoff_multiplier), + maximum_delay, + ); + }, + Err(e) => { + log_error!(self.logger, "Failed to list from primary store for namespace {}/{}: {}. Falling back to backup.", + primary_namespace, secondary_namespace, e); + return self.list_from_backup(primary_namespace, secondary_namespace); + }, + } + } + } + + /// Writes data to the primary store with retry logic. + /// + /// For transient errors, retries up to a maximum delay time with exponential + /// backoff. 
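+ ///
+ /// Unlike reads, writes have no backup fallback: once retries are exhausted or a
+ /// non-transient error occurs, the error is returned to the caller.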
+ async fn retry_write_with_backoff( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64); + let maximum_delay = Duration::from_secs(self.retry_config.maximum_delay_secs as u64); + let mut tries = 0_u16; + + loop { + match KVStore::write( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ) + .await + { + Ok(res) => { + log_info!( + self.logger, + "Write succeeded after {} retries for key: {}/{}/{}", + tries, + primary_namespace, + secondary_namespace, + key + ); + return Ok(res); + }, + Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => { + log_warn!( + self.logger, + "Possible transient error writing key {}/{}/{} (attempt {}): {}. Retrying...", + primary_namespace, + secondary_namespace, + key, + tries + 1, + e + ); + tries += 1; + tokio::time::sleep(delay).await; + delay = std::cmp::min( + delay.mul_f32(self.retry_config.backoff_multiplier), + maximum_delay, + ); + }, + Err(e) => { + log_error!( + self.logger, + "Failed to write to primary store for key {}/{}/{}: {}", + primary_namespace, + secondary_namespace, + key, + e + ); + return Err(e); + }, + } + } + } + + /// Removes data from the primary store with retry logic. + /// + /// For transient errors, retries up to a maximum delay time with exponential + /// backoff. + async fn retry_remove_with_backoff( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64); + let maximum_delay = Duration::from_secs(self.retry_config.maximum_delay_secs as u64); + let mut tries = 0_u16; + + loop { + match KVStore::remove( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + lazy, + ) + .await + { + Ok(res) => { + log_info!( + self.logger, + "Successfully removed data from primary store after {} retries for key: {}/{}/{}", + tries, + primary_namespace, + secondary_namespace, + key + ); + return Ok(res); + }, + Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => { + log_warn!( + self.logger, + "Possible transient error removing key {}/{}/{} from primary store (attempt {}): {}. Retrying...", + primary_namespace, + secondary_namespace, + key, + tries + 1, + e + ); + tries += 1; + tokio::time::sleep(delay).await; + delay = std::cmp::min( + delay.mul_f32(self.retry_config.backoff_multiplier), + maximum_delay, + ); + }, + Err(e) => { + log_error!( + self.logger, + "Failed to remove data from primary store for key {}/{}/{}: {}", + primary_namespace, + secondary_namespace, + key, + e + ); + return Err(e); + }, + } + } + } + + async fn primary_write_then_schedule_backup( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + let primary_write_res = match KVStore::write( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ) + .await + { + Ok(res) => Ok(res), + Err(e) if is_possibly_transient(&e) => { + self.retry_write_with_backoff( + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ) + .await + }, + Err(e) => Err(e), + }; + + match primary_write_res { + Ok(res) => { + // We enqueue for backup only what we successfully write to primary. In doing + // this we avoid data inconsistencies across stores. 
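+ // Note that the backup may still briefly lag the primary, as queued
+ // operations are applied by the background task asynchronously.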
+ if let Err(e) = + self.enqueue_backup_write(primary_namespace, secondary_namespace, key, buf) + { + // We don't propagate backup errors here, opting to log only. + log_warn!( + self.logger, + "Failed to queue backup write for key: {}/{}/{}. Error: {}", + primary_namespace, + secondary_namespace, + key, + e + ) + } + + Ok(res) + }, + Err(e) => { + log_debug!( + self.logger, + "Skipping backup write due to primary write failure for key: {}/{}/{}.", + primary_namespace, + secondary_namespace, + key + ); + Err(e) + }, + } + } + + async fn primary_remove_then_schedule_backup( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + let primary_remove_res = match KVStore::remove( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + lazy, + ) + .await + { + Ok(res) => Ok(res), + Err(e) if is_possibly_transient(&e) => { + self.retry_remove_with_backoff(primary_namespace, secondary_namespace, key, lazy) + .await + }, + Err(e) => Err(e), + }; + + match primary_remove_res { + Ok(res) => { + if let Err(e) = + self.enqueue_backup_remove(primary_namespace, secondary_namespace, key, lazy) + { + // We don't propagate backup errors here, opting to silently log. + log_warn!( + self.logger, + "Failed to queue backup removal for key: {}/{}/{}. Error: {}", + primary_namespace, + secondary_namespace, + key, + e + ) + } + + Ok(res) + }, + Err(e) => { + log_debug!( + self.logger, + "Skipping backup removal due to primary removal failure for key: {}/{}/{}.", + primary_namespace, + secondary_namespace, + key + ); + Err(e) + }, + } + } + + async fn read_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> io::Result> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + Some(key.as_str()), + "read", + )?; + + match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) { + (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY) + | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => { + if let Some(eph_store) = self.ephemeral_store.as_ref() { + // We only try once here (without retry logic) because local failure might be indicative + // of a more serious issue (e.g. full memory, memory corruption, permissions change) that + // do not self-resolve such that retrying would negate the latency benefits. + + // The following questions remain: + // 1. Are there situations where local transient errors may warrant a retry? + // 2. Can we reliably identify/detect these transient errors? + // 3. Should we fall back to the primary or backup stores in the event of any error? + KVStoreSync::read( + eph_store.as_ref(), + &primary_namespace, + &secondary_namespace, + &key, + ) + } else { + log_debug!(self.logger, "Ephemeral store not configured. 
Reading non-critical data from primary or backup stores."); + self.read_primary_or_backup(&primary_namespace, &secondary_namespace, &key) + .await + } + }, + _ => self.read_primary_or_backup(&primary_namespace, &secondary_namespace, &key).await, + } + } + + async fn write_internal( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> io::Result<()> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + Some(key.as_str()), + "write", + )?; + + match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) { + (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY) + | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => { + if let Some(eph_store) = &self.ephemeral_store { + self.execute_locked_write( + inner_lock_ref, + locking_key, + version, + async move || { + KVStoreSync::write( + eph_store.as_ref(), + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, + ) + }, + ) + .await + } else { + log_debug!(self.logger, "Ephemeral store not configured. Writing non-critical data to primary and backup stores."); + + self.execute_locked_write( + inner_lock_ref, + locking_key, + version, + async move || { + self.primary_write_then_schedule_backup( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, + ) + .await + }, + ) + .await + } + }, + _ => { + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { + self.primary_write_then_schedule_backup( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, + ) + .await + }) + .await + }, + } + } + + async fn remove_internal( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> io::Result<()> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + Some(key.as_str()), + "remove", + )?; + + match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) { + (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY) + | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => { + if let Some(eph_store) = &self.ephemeral_store { + self.execute_locked_write( + inner_lock_ref, + locking_key, + version, + async move || { + KVStoreSync::remove( + eph_store.as_ref(), + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + lazy, + ) + }, + ) + .await + } else { + log_debug!(self.logger, "Ephemeral store not configured. 
Removing non-critical data from primary and backup stores.");
+
+ self.execute_locked_write(
+ inner_lock_ref,
+ locking_key,
+ version,
+ async move || {
+ self.primary_remove_then_schedule_backup(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ key.as_str(),
+ lazy,
+ )
+ .await
+ },
+ )
+ .await
+ }
+ },
+ _ => {
+ self.execute_locked_write(inner_lock_ref, locking_key, version, async move || {
+ self.primary_remove_then_schedule_backup(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ key.as_str(),
+ lazy,
+ )
+ .await
+ })
+ .await
+ },
+ }
+ }
+
+ async fn list_internal(
+ &self, primary_namespace: String, secondary_namespace: String,
+ ) -> io::Result<Vec<String>> {
+ check_namespace_key_validity(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ None,
+ "list",
+ )?;
+
+ match (primary_namespace.as_str(), secondary_namespace.as_str()) {
+ (
+ NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
+ NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+ )
+ | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _) => {
+ if let Some(eph_store) = self.ephemeral_store.as_ref() {
+ KVStoreSync::list(eph_store.as_ref(), &primary_namespace, &secondary_namespace)
+ } else {
+ log_debug!(
+ self.logger,
+ "Ephemeral store not configured. Listing from primary and backup stores."
+ );
+ self.list_primary_or_backup(&primary_namespace, &secondary_namespace).await
+ }
+ },
+ _ => self.list_primary_or_backup(&primary_namespace, &secondary_namespace).await,
+ }
+ }
+
+ fn get_inner_lock_ref(&self, locking_key: String) -> Arc<tokio::sync::Mutex<u64>> {
+ let mut outer_lock = self.locks.lock().unwrap();
+ Arc::clone(&outer_lock.entry(locking_key).or_default())
+ }
+
+ async fn execute_locked_write<
+ F: Future<Output = Result<(), lightning::io::Error>>,
+ FN: FnOnce() -> F,
+ >(
+ &self, inner_lock_ref: Arc<tokio::sync::Mutex<u64>>, locking_key: String, version: u64,
+ callback: FN,
+ ) -> Result<(), lightning::io::Error> {
+ let res = {
+ let mut last_written_version = inner_lock_ref.lock().await;
+
+ // Check if we already have a newer version written. This ensures eventual consistency.
+ let is_stale_version = version <= *last_written_version;
+
+ if is_stale_version {
+ Ok(())
+ } else {
+ callback().await.map(|_| {
+ *last_written_version = version;
+ })
+ }
+ };
+
+ self.clean_locks(&inner_lock_ref, locking_key);
+ res
+ }
+
+ fn clean_locks(&self, inner_lock_ref: &Arc<tokio::sync::Mutex<u64>>, locking_key: String) {
+ // If there are no arcs in use elsewhere, there are no in-flight writes. We can remove the map entry
+ // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in
+ // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already
+ // counted.
+ let mut outer_lock = self.locks.lock().unwrap();
+
+ let strong_count = Arc::strong_count(&inner_lock_ref);
+ debug_assert!(strong_count >= 2, "Unexpected TierStore strong count");
+
+ if strong_count == 2 {
+ outer_lock.remove(&locking_key);
+ }
+ }
+}
+
+enum BackupOp {
+ Write { primary_namespace: String, secondary_namespace: String, key: String, data: Vec<u8> },
+ Remove { primary_namespace: String, secondary_namespace: String, key: String, lazy: bool },
+}
+
+impl BackupOp {
+ fn primary_namespace(&self) -> &str {
+ match self {
+ BackupOp::Write { primary_namespace, .. }
+ | BackupOp::Remove { primary_namespace, .. } => primary_namespace,
+ }
+ }
+
+ fn secondary_namespace(&self) -> &str {
+ match self {
+ BackupOp::Write { secondary_namespace, .. }
+ | BackupOp::Remove { secondary_namespace, .. 
} => secondary_namespace, + } + } + + fn key(&self) -> &str { + match self { + BackupOp::Write { key, .. } | BackupOp::Remove { key, .. } => key, + } + } +} + +#[cfg(test)] +mod tests { + use crate::io::test_utils::random_storage_path; + use crate::io::tier_store::{RetryConfig, TierStore}; + use crate::logger::Logger; + use crate::runtime::Runtime; + #[cfg(not(feature = "uniffi"))] + use crate::types::DynStore; + use crate::wrap_store; + #[cfg(feature = "uniffi")] + use crate::DynStore; + + use lightning::util::logger::Level; + use lightning::util::persist::{ + KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, + }; + use lightning_persister::fs_store::FilesystemStore; + + use std::path::PathBuf; + use std::sync::Arc; + // use std::time::Duration; + + struct StorageFixture { + tier: TierStore, + primary: Arc, + ephemeral: Option>, + backup: Option>, + base_dir: PathBuf, + } + + impl Drop for StorageFixture { + fn drop(&mut self) { + drop(self.backup.take()); + drop(self.ephemeral.take()); + + if let Err(e) = std::fs::remove_dir_all(&self.base_dir) { + eprintln!("Failed to clean up test directory {:?}: {}", self.base_dir, e); + } + } + } + + fn setup_tier_store(ephemeral: bool, backup: bool) -> StorageFixture { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + + let primary: Arc = + wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("primary_store")))); + let logger = Arc::new( + Logger::new_fs_writer(log_path, Level::Debug) + .expect("Failed to create filesystem logger"), + ); + let runtime = + Arc::new(Runtime::new(Arc::clone(&logger)).expect("Failed to create new runtime.")); + let retry_config = RetryConfig::default(); + let mut tier = + TierStore::new(Arc::clone(&primary), Arc::clone(&runtime), logger, retry_config); + + let ephemeral = if ephemeral { + let eph_store: Arc = + wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("eph_store")))); + tier.set_ephemeral_store(Arc::clone(&eph_store)); + Some(eph_store) + } else { + None + }; + + let backup = if backup { + let backup: Arc = + wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("backup_store")))); + tier.set_backup_store(Arc::clone(&backup)); + Some(backup) + } else { + None + }; + + StorageFixture { tier, primary, ephemeral, backup, base_dir } + } + + #[test] + fn writes_to_ephemeral_if_configured() { + let tier = setup_tier_store(true, false); + assert!(tier.ephemeral.is_some()); + + let primary_namespace = NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE; + let secondary_namespace = NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE; + let data = [42u8; 32].to_vec(); + + KVStoreSync::write( + &tier.tier, + primary_namespace, + secondary_namespace, + NETWORK_GRAPH_PERSISTENCE_KEY, + data.clone(), + ) + .unwrap(); + + KVStoreSync::write( + &tier.tier, + primary_namespace, + secondary_namespace, + SCORER_PERSISTENCE_KEY, + data.clone(), + ) + .unwrap(); + + let eph_store = tier.ephemeral.clone().unwrap(); + let ng_read = KVStoreSync::read( + &*eph_store, + primary_namespace, + secondary_namespace, + NETWORK_GRAPH_PERSISTENCE_KEY, + ) + .unwrap(); + + let sc_read = KVStoreSync::read( + &*eph_store, + primary_namespace, + secondary_namespace, + SCORER_PERSISTENCE_KEY, + ) + .unwrap(); + + assert_eq!(ng_read, data); + assert!(KVStoreSync::read( + &*tier.primary, + primary_namespace, + secondary_namespace, + NETWORK_GRAPH_PERSISTENCE_KEY + ) + 
.is_err()); + + assert_eq!(sc_read, data); + assert!(KVStoreSync::read( + &*tier.primary, + primary_namespace, + secondary_namespace, + SCORER_PERSISTENCE_KEY + ) + .is_err()); + } +} diff --git a/src/io/utils.rs b/src/io/utils.rs index 928d4031b..07c86e866 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -19,6 +19,7 @@ use bdk_chain::ConfirmationBlockTime; use bdk_wallet::ChangeSet as BdkWalletChangeSet; use bitcoin::Network; use lightning::io::Cursor; +use lightning::io::ErrorKind; use lightning::ln::msgs::DecodeError; use lightning::routing::gossip::NetworkGraph; use lightning::routing::scoring::{ @@ -46,9 +47,9 @@ use crate::io::{ }; use crate::logger::{log_error, LdkLogger, Logger}; use crate::peer_store::PeerStore; -use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; +use crate::types::{Broadcaster, KeysManager, Sweeper}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; -use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; +use crate::{DynStore, Error, EventQueue, NodeMetrics, PaymentDetails}; pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_scores_cache"; @@ -576,6 +577,18 @@ pub(crate) fn read_bdk_wallet_change_set( Ok(Some(change_set)) } +/// Checks if an error kind is possibly transient. +pub(crate) fn is_possibly_transient(error: &lightning::io::Error) -> bool { + match error.kind() { + ErrorKind::ConnectionRefused + | ErrorKind::ConnectionAborted + | ErrorKind::ConnectionReset + | ErrorKind::TimedOut + | ErrorKind::Interrupted + | ErrorKind::NotConnected => true, + _ => false, + } +} #[cfg(test)] mod tests { use super::read_or_generate_seed_file; diff --git a/src/lib.rs b/src/lib.rs index bbae8ac72..b8b3911ac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -134,6 +134,7 @@ use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; use ffi::*; use gossip::GossipSource; use graph::NetworkGraph; +pub use io::tier_store::RetryConfig; use io::utils::write_node_metrics; use lightning::chain::BestBlock; use lightning::events::bump_transaction::{Input, Wallet as LdkWallet}; @@ -161,9 +162,16 @@ use types::{ Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, Graph, KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet, }; -pub use types::{ - ChannelDetails, CustomTlvRecord, DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId, -}; +pub use types::{ChannelDetails, CustomTlvRecord, PeerDetails, UserChannelId}; + +#[cfg(feature = "uniffi")] +pub use crate::ffi::DynStore; +#[cfg(not(feature = "uniffi"))] +pub use crate::types::DynStore; + +#[cfg(not(feature = "uniffi"))] +pub use types::SyncAndAsyncKVStore; + pub use { bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, vss_client, diff --git a/src/liquidity.rs b/src/liquidity.rs index 74e6098dd..4a8e24a82 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -45,9 +45,9 @@ use crate::connection::ConnectionManager; use crate::logger::{log_debug, log_error, log_info, LdkLogger, Logger}; use crate::runtime::Runtime; use crate::types::{ - Broadcaster, ChannelManager, DynStore, KeysManager, LiquidityManager, PeerManager, Wallet, + Broadcaster, ChannelManager, KeysManager, LiquidityManager, PeerManager, Wallet, }; -use crate::{total_anchor_channels_reserve_sats, Config, Error}; +use crate::{total_anchor_channels_reserve_sats, Config, DynStore, Error}; const LIQUIDITY_REQUEST_TIMEOUT_SECS: u64 = 5; diff --git a/src/payment/asynchronous/static_invoice_store.rs 
b/src/payment/asynchronous/static_invoice_store.rs
index 45125cfee..0b9a40664 100644
--- a/src/payment/asynchronous/static_invoice_store.rs
+++ b/src/payment/asynchronous/static_invoice_store.rs
@@ -21,7 +21,7 @@ use lightning::util::ser::{Readable, Writeable};
 use crate::hex_utils;
 use crate::io::STATIC_INVOICE_STORE_PRIMARY_NAMESPACE;
 use crate::payment::asynchronous::rate_limiter::RateLimiter;
-use crate::types::DynStore;
+use crate::DynStore;
 
 struct PersistedStaticInvoice {
 	invoice: StaticInvoice,
@@ -161,11 +161,11 @@ mod tests {
 
 	use crate::io::test_utils::InMemoryStore;
 	use crate::payment::asynchronous::static_invoice_store::StaticInvoiceStore;
-	use crate::types::DynStore;
+	use crate::{wrap_store, DynStore};
 
 	#[tokio::test]
 	async fn static_invoice_store_test() {
-		let store: Arc<DynStore> = Arc::new(InMemoryStore::new());
+		let store: Arc<DynStore> = wrap_store!(Arc::new(InMemoryStore::new()));
 		let static_invoice_store = StaticInvoiceStore::new(Arc::clone(&store));
 
 		let static_invoice = invoice();
diff --git a/src/peer_store.rs b/src/peer_store.rs
index 59cd3d94f..e30a51141 100644
--- a/src/peer_store.rs
+++ b/src/peer_store.rs
@@ -19,7 +19,7 @@ use crate::io::{
 	PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE,
 };
 use crate::logger::{log_error, LdkLogger};
-use crate::types::DynStore;
+use crate::DynStore;
 use crate::{Error, SocketAddress};
 
 pub struct PeerStore<L: Deref>
@@ -154,12 +154,14 @@ mod tests {
 
 	use lightning::util::test_utils::TestLogger;
 
+	use crate::wrap_store;
+
 	use super::*;
 	use crate::io::test_utils::InMemoryStore;
 
 	#[test]
 	fn peer_info_persistence() {
-		let store: Arc<DynStore> = Arc::new(InMemoryStore::new());
+		let store: Arc<DynStore> = wrap_store!(Arc::new(InMemoryStore::new()));
 		let logger = Arc::new(TestLogger::new());
 		let peer_store = PeerStore::new(Arc::clone(&store), Arc::clone(&logger));
 
diff --git a/src/types.rs b/src/types.rs
index 38519eca7..d16d2ae95 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -47,8 +47,11 @@ where
 {
 }
 
+#[cfg(not(feature = "uniffi"))]
 /// A type alias for [`SyncAndAsyncKVStore`] with `Sync`/`Send` markers.
 pub type DynStore = dyn SyncAndAsyncKVStore + Sync + Send;
+#[cfg(feature = "uniffi")]
+pub(crate) use crate::DynStore;
 
 pub type Persister = MonitorUpdatingPersister<
 	Arc<DynStore>,
diff --git a/src/wallet/persist.rs b/src/wallet/persist.rs
index 5c8668937..952ce8115 100644
--- a/src/wallet/persist.rs
+++ b/src/wallet/persist.rs
@@ -16,7 +16,8 @@ use crate::io::utils::{
 	write_bdk_wallet_tx_graph,
 };
 use crate::logger::{log_error, LdkLogger, Logger};
-use crate::types::DynStore;
+use crate::DynStore;
+
 pub(crate) struct KVStoreWalletPersister {
 	latest_change_set: Option<BdkWalletChangeSet>,
 	kv_store: Arc<DynStore>,
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 1783ee1af..f83a0da2d 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -33,7 +33,8 @@ use ldk_node::entropy::{generate_entropy_mnemonic, NodeEntropy};
 use ldk_node::io::sqlite_store::SqliteStore;
 use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus};
 use ldk_node::{
-	Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError, PendingSweepBalance,
+	wrap_store, Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError,
+	PendingSweepBalance,
 };
 use lightning::io;
 use lightning::ln::msgs::SocketAddress;
@@ -423,7 +424,9 @@ pub(crate) fn setup_node_for_async_payments(
 
 	let node = match config.store_type {
 		TestStoreType::TestSyncStore => {
-			let kv_store = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.into()));
+			let kv_store = wrap_store!(Arc::new(TestSyncStore::new(
+				config.node_config.storage_dir_path.into()
+			)));
 			builder.build_with_store(config.node_entropy.into(), kv_store).unwrap()
 		},
 		TestStoreType::Sqlite => builder.build(config.node_entropy.into()).unwrap(),
diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs
index 7c1ed8344..571d31a6f 100644
--- a/tests/integration_tests_rust.rs
+++ b/tests/integration_tests_rust.rs
@@ -31,7 +31,7 @@ use ldk_node::payment::{
 	ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus,
 	QrPaymentResult,
 };
-use ldk_node::{Builder, DynStore, Event, NodeError};
+use ldk_node::{wrap_store, Builder, DynStore, Event, NodeError};
 use lightning::ln::channelmanager::PaymentId;
 use lightning::routing::gossip::{NodeAlias, NodeId};
 use lightning::routing::router::RouteParametersConfig;
@@ -252,15 +252,17 @@ async fn start_stop_reinit() {
 
 	let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
 
-	let test_sync_store: Arc<DynStore> =
-		Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.clone().into()));
+	let test_sync_store: Arc<DynStore> = wrap_store!(Arc::new(TestSyncStore::new(
+		config.node_config.storage_dir_path.clone().into()
+	)));
 
 	let sync_config = EsploraSyncConfig { background_sync_config: None };
 	setup_builder!(builder, config.node_config);
 	builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config));
 
-	let node =
-		builder.build_with_store(config.node_entropy.into(), Arc::clone(&test_sync_store)).unwrap();
+	let node = builder
+		.build_with_store(config.node_entropy.into(), wrap_store!(Arc::clone(&test_sync_store)))
+		.unwrap();
 	node.start().unwrap();
 
 	let expected_node_id = node.node_id();
@@ -298,8 +300,10 @@ async fn start_stop_reinit() {
 	setup_builder!(builder, config.node_config);
 	builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config));
 
-	let reinitialized_node =
-		builder.build_with_store(config.node_entropy.into(), Arc::clone(&test_sync_store)).unwrap();
+	let reinitialized_node = builder
+		.build_with_store(config.node_entropy.into(), wrap_store!(Arc::clone(&test_sync_store)))
+		.unwrap();
+
 	reinitialized_node.start().unwrap();
 
 	assert_eq!(reinitialized_node.node_id(), expected_node_id);

From 178b7a0d34003975865002b398cc3296407a9e3f Mon Sep 17 00:00:00 2001
From: Enigbe
Date: Mon, 20 Oct 2025 21:50:37 +0100
Subject: [PATCH 59/60] refactor: set retry config maximum delay in milliseconds
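
Expressing the cap in the same unit as the initial delay avoids mixing
milliseconds and seconds in one config and allows sub-second caps such
as the new 500ms default. As a rough sketch of the resulting backoff
(assuming the retry loop multiplies the delay by `backoff_multiplier`
and clamps it to the cap, which is what the hunks below suggest):

    use std::time::Duration;

    let cfg = RetryConfig::default(); // 10ms initial, 500ms cap, 1.5x multiplier
    let mut delay = Duration::from_millis(cfg.initial_retry_delay_ms as u64);
    let maximum_delay = Duration::from_millis(cfg.maximum_delay_ms as u64);
    // Successive retries: 10ms -> 15ms -> 22.5ms -> ... -> clamped at 500ms.
    delay = delay.mul_f32(cfg.backoff_multiplier).min(maximum_delay);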
---
 bindings/ldk_node.udl |  2 +-
 src/io/tier_store.rs  | 18 +++++++++---------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl
index cb2297c1d..db29df83f 100644
--- a/bindings/ldk_node.udl
+++ b/bindings/ldk_node.udl
@@ -71,7 +71,7 @@ enum WordCount {
 
 dictionary RetryConfig {
 	u16 initial_retry_delay_ms;
-	u16 maximum_delay_secs;
+	u16 maximum_delay_ms;
 	f32 backoff_multiplier;
 };
 
diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs
index 0337ff19a..7cb0963c8 100644
--- a/src/io/tier_store.rs
+++ b/src/io/tier_store.rs
@@ -31,8 +31,8 @@ use std::time::Duration;
 // configuring.
 const BACKUP_QUEUE_CAPACITY: usize = 100;
 
-const DEFAULT_INITIAL_RETRY_DELAY_MS: u16 = 50;
-const DEFAULT_MAXIMUM_RETRY_DELAY_SECS: u16 = 5;
+const DEFAULT_INITIAL_RETRY_DELAY_MS: u16 = 10;
+const DEFAULT_MAXIMUM_RETRY_DELAY_MS: u16 = 500;
 const DEFAULT_BACKOFF_MULTIPLIER: f32 = 1.5;
 
 /// Configuration for exponential backoff retry behavior.
@@ -40,8 +40,8 @@ pub struct RetryConfig {
 	/// The initial delay before the first retry attempt, in milliseconds.
 	pub initial_retry_delay_ms: u16,
-	/// The maximum delay between retry attempts, in seconds.
-	pub maximum_delay_secs: u16,
+	/// The maximum delay between retry attempts, in milliseconds.
+	pub maximum_delay_ms: u16,
 	/// The multiplier applied to the delay after each retry attempt.
 	///
 	/// For example, a value of `2.0` doubles the delay after each failed retry.
@@ -52,7 +52,7 @@ impl Default for RetryConfig {
 	fn default() -> Self {
 		Self {
 			initial_retry_delay_ms: DEFAULT_INITIAL_RETRY_DELAY_MS,
-			maximum_delay_secs: DEFAULT_MAXIMUM_RETRY_DELAY_SECS,
+			maximum_delay_ms: DEFAULT_MAXIMUM_RETRY_DELAY_MS,
 			backoff_multiplier: DEFAULT_BACKOFF_MULTIPLIER,
 		}
 	}
@@ -515,7 +515,7 @@ impl TierStoreInner {
 		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
 	) -> io::Result<Vec<u8>> {
 		let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64);
-		let maximum_delay = Duration::from_secs(self.retry_config.maximum_delay_secs as u64);
+		let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64);
 		let mut tries = 0_u16;
 
 		loop {
@@ -575,7 +575,7 @@ impl TierStoreInner {
 		&self, primary_namespace: &str, secondary_namespace: &str,
 	) -> io::Result<Vec<String>> {
 		let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64);
-		let maximum_delay = Duration::from_secs(self.retry_config.maximum_delay_secs as u64);
+		let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64);
 		let mut tries = 0_u16;
 
 		loop {
@@ -625,7 +625,7 @@ impl TierStoreInner {
 		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
 	) -> io::Result<()> {
 		let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64);
-		let maximum_delay = Duration::from_secs(self.retry_config.maximum_delay_secs as u64);
+		let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64);
 		let mut tries = 0_u16;
 
 		loop {
@@ -689,7 +689,7 @@ impl TierStoreInner {
 		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
 	) -> io::Result<()> {
 		let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64);
-		let maximum_delay = Duration::from_secs(self.retry_config.maximum_delay_secs as u64);
+		let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64);
 		let mut tries = 0_u16;
 
 		loop {

From 1e7bdbc0c4e08fa96bd2aefee43e93fc2826aea6 Mon Sep 17 00:00:00 2001
From: Enigbe
Date: Mon, 20 Oct 2025 22:00:58 +0100
Subject: [PATCH 60/60] test: add comprehensive testing for TierStore

This commit adds unit, integration, and FFI tests for the TierStore
implementation:

- Unit tests for TierStore core functionality
- Integration tests for nodes built with tiered storage
- Python FFI tests for foreign key-value store
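
For reference, a node is wired up with tiered storage in the Rust
tests roughly as follows (sketch; store construction and entropy setup
elided, mirroring the builder calls used in the diff below):

    builder.set_tier_store_backup(backup);
    builder.set_tier_store_ephemeral(ephemeral);
    builder.set_tier_store_retry_config(RetryConfig::default());
    let node = builder.build_with_tier_store(node_entropy, primary).unwrap();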
---
 benches/payments.rs                           |   1 +
 bindings/python/src/ldk_node/kv_store.py      | 118 ++++++
 bindings/python/src/ldk_node/test_ldk_node.py | 329 ++++++++++-------
 src/io/test_utils.rs                          | 170 ++++++++-
 src/io/tier_store.rs                          | 344 +++++++++++++-----
 tests/common/mod.rs                           |  44 ++-
 tests/integration_tests_rust.rs               |  88 +++++
 7 files changed, 862 insertions(+), 232 deletions(-)
 create mode 100644 bindings/python/src/ldk_node/kv_store.py

diff --git a/benches/payments.rs b/benches/payments.rs
index ba69e046d..21bca8d72 100644
--- a/benches/payments.rs
+++ b/benches/payments.rs
@@ -127,6 +127,7 @@ fn payment_benchmark(c: &mut Criterion) {
 		true,
 		false,
 		common::TestStoreType::Sqlite,
+		common::TestStoreType::Sqlite,
 	);
 
 	let runtime =
diff --git a/bindings/python/src/ldk_node/kv_store.py b/bindings/python/src/ldk_node/kv_store.py
new file mode 100644
index 000000000..6d4eb9bde
--- /dev/null
+++ b/bindings/python/src/ldk_node/kv_store.py
@@ -0,0 +1,118 @@
+import threading
+
+from abc import ABC, abstractmethod
+from typing import List
+
+from ldk_node import IoError
+
+class AbstractKvStore(ABC):
+    @abstractmethod
+    async def list_async(self, primary_namespace: str, secondary_namespace: str) -> List[str]:
+        pass
+
+    @abstractmethod
+    def list_sync(self, primary_namespace: str, secondary_namespace: str) -> List[str]:
+        pass
+
+    @abstractmethod
+    async def read_async(self, primary_namespace: str, secondary_namespace: str, key: str) -> List[int]:
+        pass
+
+    @abstractmethod
+    def read_sync(self, primary_namespace: str, secondary_namespace: str, key: str) -> List[int]:
+        pass
+
+    @abstractmethod
+    async def remove_async(self, primary_namespace: str, secondary_namespace: str, key: str, lazy: bool) -> None:
+        pass
+
+    @abstractmethod
+    def remove_sync(self, primary_namespace: str, secondary_namespace: str, key: str, lazy: bool) -> None:
+        pass
+
+    @abstractmethod
+    async def write_async(self, primary_namespace: str, secondary_namespace: str, key: str, buf: List[int]) -> None:
+        pass
+
+    @abstractmethod
+    def write_sync(self, primary_namespace: str, secondary_namespace: str, key: str, buf: List[int]) -> None:
+        pass
+
+class TestKvStore(AbstractKvStore):
+    def __init__(self, name: str):
+        self.name = name
+        # Storage structure: {(primary_ns, secondary_ns): {key: [bytes]}}
+        self.storage = {}
+        self._lock = threading.Lock()
+
+    def dump(self):
+        print(f"\n[{self.name}] Store contents:")
+        for (primary_ns, secondary_ns), keys_dict in self.storage.items():
+            print(f"  Namespace: ({primary_ns!r}, {secondary_ns!r})")
+            for key, data in keys_dict.items():
+                print(f"    Key: {key!r} -> {len(data)} bytes")
+                # Optionally show first few bytes
+                preview = data[:20] if len(data) > 20 else data
+                print(f"      Data preview: {preview}...")
+
+    # KVStoreSync methods
+    def list_sync(self, primary_namespace: str, secondary_namespace: str) -> List[str]:
+        with self._lock:
+            namespace_key = (primary_namespace, secondary_namespace)
+            if namespace_key in self.storage:
+                return list(self.storage[namespace_key].keys())
+            return []
+
+    def read_sync(self, primary_namespace: str, secondary_namespace: str, key: str) -> List[int]:
+        with self._lock:
+            print(f"[{self.name}] READ: {primary_namespace}/{secondary_namespace}/{key}")
+            namespace_key = (primary_namespace, secondary_namespace)
+
+            if namespace_key not in self.storage:
+                print(f"  -> namespace not found, keys: {list(self.storage.keys())}")
+                raise IoError.NotFound(f"Namespace not found: {primary_namespace}/{secondary_namespace}")
+
+            if key not in self.storage[namespace_key]:
+                print(f"  -> key not found, keys: {list(self.storage[namespace_key].keys())}")
+                raise IoError.NotFound(f"Key not found: {key}")
+
+            data = self.storage[namespace_key][key]
+            print(f"  -> returning {len(data)} bytes")
+            return data
+
+    def write_sync(self, primary_namespace: str, secondary_namespace: str, key: str, buf: List[int]) -> None:
+        with self._lock:
+            namespace_key = (primary_namespace, secondary_namespace)
+            if namespace_key not in self.storage:
+                self.storage[namespace_key] = {}
+
+            self.storage[namespace_key][key] = buf.copy()
+
+    def remove_sync(self, primary_namespace: str, secondary_namespace: str, key: str, lazy: bool) -> None:
+        with self._lock:
+ namespace_key = (primary_namespace, secondary_namespace) + if namespace_key not in self.storage: + raise IoError.NotFound(f"Namespace not found: {primary_namespace}/{secondary_namespace}") + + if key not in self.storage[namespace_key]: + raise IoError.NotFound(f"Key not found: {key}") + + del self.storage[namespace_key][key] + + if not self.storage[namespace_key]: + del self.storage[namespace_key] + + # KVStore methods + async def list_async(self, primary_namespace: str, secondary_namespace: str) -> List[str]: + return self.list_sync(primary_namespace, secondary_namespace) + + async def read_async(self, primary_namespace: str, secondary_namespace: str, key: str) -> List[int]: + return self.read_sync(primary_namespace, secondary_namespace, key) + + async def write_async(self, primary_namespace: str, secondary_namespace: str, key: str, buf: List[int]) -> None: + self.write_sync(primary_namespace, secondary_namespace, key, buf) + + async def remove_async(self, primary_namespace: str, secondary_namespace: str, key: str, lazy: bool) -> None: + self.remove_sync(primary_namespace, secondary_namespace, key, lazy) + + \ No newline at end of file diff --git a/bindings/python/src/ldk_node/test_ldk_node.py b/bindings/python/src/ldk_node/test_ldk_node.py index 0b73e6a47..1da3d5582 100644 --- a/bindings/python/src/ldk_node/test_ldk_node.py +++ b/bindings/python/src/ldk_node/test_ldk_node.py @@ -5,13 +5,67 @@ import os import re import requests +import asyncio +import threading +import ldk_node from ldk_node import * +from kv_store import TestKvStore DEFAULT_ESPLORA_SERVER_URL = "http://127.0.0.1:3002" DEFAULT_TEST_NETWORK = Network.REGTEST DEFAULT_BITCOIN_CLI_BIN = "bitcoin-cli" +class NodeSetup: + def __init__(self, node, node_id, tmp_dir, listening_addresses, stores=None): + self.node = node + self.node_id = node_id + self.tmp_dir = tmp_dir + self.listening_addresses = listening_addresses + self.stores = stores # (primary, backup, ephemeral) or None + + def cleanup(self): + self.node.stop() + time.sleep(1) + self.tmp_dir.cleanup() + +def setup_two_nodes(esplora_endpoint, port_1=2323, port_2=2324, use_tier_store=False): + # Setup Node 1 + tmp_dir_1 = tempfile.TemporaryDirectory("_ldk_node_1") + print("TMP DIR 1:", tmp_dir_1.name) + + listening_addresses_1 = [f"127.0.0.1:{port_1}"] + if use_tier_store: + node_1, stores_1 = setup_node_with_tier_store(tmp_dir_1.name, esplora_endpoint, listening_addresses_1) + else: + node_1 = setup_node(tmp_dir_1.name, esplora_endpoint, listening_addresses_1) + stores_1 = None + + node_1.start() + node_id_1 = node_1.node_id() + print("Node ID 1:", node_id_1) + + setup_1 = NodeSetup(node_1, node_id_1, tmp_dir_1, listening_addresses_1, stores_1) + + # Setup Node 2 + tmp_dir_2 = tempfile.TemporaryDirectory("_ldk_node_2") + print("TMP DIR 2:", tmp_dir_2.name) + + listening_addresses_2 = [f"127.0.0.1:{port_2}"] + if use_tier_store: + node_2, stores_2 = setup_node_with_tier_store(tmp_dir_2.name, esplora_endpoint, listening_addresses_2) + else: + node_2 = setup_node(tmp_dir_2.name, esplora_endpoint, listening_addresses_2) + stores_2 = None + + node_2.start() + node_id_2 = node_2.node_id() + print("Node ID 2:", node_id_2) + + setup_2 = NodeSetup(node_2, node_id_2, tmp_dir_2, listening_addresses_2, stores_2) + + return setup_1, setup_2 + def bitcoin_cli(cmd): args = [] @@ -95,7 +149,6 @@ def send_to_address(address, amount_sats): print("SEND TX:", res) return res - def setup_node(tmp_dir, esplora_endpoint, listening_addresses): mnemonic = generate_entropy_mnemonic(None) 
node_entropy = NodeEntropy.from_bip39_mnemonic(mnemonic, None) @@ -107,6 +160,124 @@ def setup_node(tmp_dir, esplora_endpoint, listening_addresses): builder.set_listening_addresses(listening_addresses) return builder.build(node_entropy) +def setup_node_with_tier_store(tmp_dir, esplora_endpoint, listening_addresses): + mnemonic = generate_entropy_mnemonic(None) + node_entropy = NodeEntropy.from_bip39_mnemonic(mnemonic, None) + config = default_config() + + primary = TestKvStore("primary") + backup = TestKvStore("backup") + ephemeral = TestKvStore("ephemeral") + retry_config = RetryConfig( + initial_retry_delay_ms=10, + maximum_delay_ms=100, + backoff_multiplier=2.0 + ) + + # Set event loop for async Python callbacks from Rust + # (https://mozilla.github.io/uniffi-rs/0.27/futures.html#python-uniffi_set_event_loop) + loop = asyncio.new_event_loop() + + def run_loop(): + asyncio.set_event_loop(loop) + loop.run_forever() + + loop_thread = threading.Thread(target=run_loop, daemon=True) + loop_thread.start() + ldk_node.uniffi_set_event_loop(loop) + + builder = Builder.from_config(config) + builder.set_storage_dir_path(tmp_dir) + builder.set_chain_source_esplora(esplora_endpoint, None) + builder.set_network(DEFAULT_TEST_NETWORK) + builder.set_listening_addresses(listening_addresses) + builder.set_tier_store_retry_config(retry_config) + builder.set_tier_store_backup(DynStore.from_store(backup)) + builder.set_tier_store_ephemeral(DynStore.from_store(ephemeral)) + + return builder.build_with_tier_store(node_entropy, DynStore.from_store(primary)), (primary, backup, ephemeral) + +def do_channel_full_cycle(setup_1, setup_2, esplora_endpoint): + # Fund both nodes + address_1 = setup_1.node.onchain_payment().new_address() + txid_1 = send_to_address(address_1, 100000) + address_2 = setup_2.node.onchain_payment().new_address() + txid_2 = send_to_address(address_2, 100000) + + wait_for_tx(esplora_endpoint, txid_1) + wait_for_tx(esplora_endpoint, txid_2) + mine_and_wait(esplora_endpoint, 6) + + setup_1.node.sync_wallets() + setup_2.node.sync_wallets() + + # Verify balances + spendable_balance_1 = setup_1.node.list_balances().spendable_onchain_balance_sats + spendable_balance_2 = setup_2.node.list_balances().spendable_onchain_balance_sats + assert spendable_balance_1 == 100000 + assert spendable_balance_2 == 100000 + + # Open channel + setup_1.node.open_channel(setup_2.node_id, setup_2.listening_addresses[0], 50000, None, None) + + channel_pending_event_1 = setup_1.node.wait_next_event() + assert isinstance(channel_pending_event_1, Event.CHANNEL_PENDING) + setup_1.node.event_handled() + + channel_pending_event_2 = setup_2.node.wait_next_event() + assert isinstance(channel_pending_event_2, Event.CHANNEL_PENDING) + setup_2.node.event_handled() + + funding_txid = channel_pending_event_1.funding_txo.txid + wait_for_tx(esplora_endpoint, funding_txid) + mine_and_wait(esplora_endpoint, 6) + + setup_1.node.sync_wallets() + setup_2.node.sync_wallets() + + channel_ready_event_1 = setup_1.node.wait_next_event() + assert isinstance(channel_ready_event_1, Event.CHANNEL_READY) + setup_1.node.event_handled() + + channel_ready_event_2 = setup_2.node.wait_next_event() + assert isinstance(channel_ready_event_2, Event.CHANNEL_READY) + setup_2.node.event_handled() + + # Make payment + description = Bolt11InvoiceDescription.DIRECT("asdf") + invoice = setup_2.node.bolt11_payment().receive(2500000, description, 9217) + setup_1.node.bolt11_payment().send(invoice, None) + + payment_successful_event_1 = setup_1.node.wait_next_event() + 
assert isinstance(payment_successful_event_1, Event.PAYMENT_SUCCESSFUL) + setup_1.node.event_handled() + + payment_received_event_2 = setup_2.node.wait_next_event() + assert isinstance(payment_received_event_2, Event.PAYMENT_RECEIVED) + setup_2.node.event_handled() + + # Close channel + setup_2.node.close_channel(channel_ready_event_2.user_channel_id, setup_1.node_id) + + channel_closed_event_1 = setup_1.node.wait_next_event() + assert isinstance(channel_closed_event_1, Event.CHANNEL_CLOSED) + setup_1.node.event_handled() + + channel_closed_event_2 = setup_2.node.wait_next_event() + assert isinstance(channel_closed_event_2, Event.CHANNEL_CLOSED) + setup_2.node.event_handled() + + mine_and_wait(esplora_endpoint, 1) + setup_1.node.sync_wallets() + setup_2.node.sync_wallets() + + # Verify final balances + spendable_balance_after_close_1 = setup_1.node.list_balances().spendable_onchain_balance_sats + assert spendable_balance_after_close_1 > 95000 + assert spendable_balance_after_close_1 < 100000 + spendable_balance_after_close_2 = setup_2.node.list_balances().spendable_onchain_balance_sats + assert spendable_balance_after_close_2 == 102500 + def get_esplora_endpoint(): if os.environ.get('ESPLORA_ENDPOINT'): return str(os.environ['ESPLORA_ENDPOINT']) @@ -122,132 +293,36 @@ def setUp(self): def test_channel_full_cycle(self): esplora_endpoint = get_esplora_endpoint() - - ## Setup Node 1 - tmp_dir_1 = tempfile.TemporaryDirectory("_ldk_node_1") - print("TMP DIR 1:", tmp_dir_1.name) - - listening_addresses_1 = ["127.0.0.1:2323"] - node_1 = setup_node(tmp_dir_1.name, esplora_endpoint, listening_addresses_1) - node_1.start() - node_id_1 = node_1.node_id() - print("Node ID 1:", node_id_1) - - # Setup Node 2 - tmp_dir_2 = tempfile.TemporaryDirectory("_ldk_node_2") - print("TMP DIR 2:", tmp_dir_2.name) - - listening_addresses_2 = ["127.0.0.1:2324"] - node_2 = setup_node(tmp_dir_2.name, esplora_endpoint, listening_addresses_2) - node_2.start() - node_id_2 = node_2.node_id() - print("Node ID 2:", node_id_2) - - address_1 = node_1.onchain_payment().new_address() - txid_1 = send_to_address(address_1, 100000) - address_2 = node_2.onchain_payment().new_address() - txid_2 = send_to_address(address_2, 100000) - - wait_for_tx(esplora_endpoint, txid_1) - wait_for_tx(esplora_endpoint, txid_2) - - mine_and_wait(esplora_endpoint, 6) - - node_1.sync_wallets() - node_2.sync_wallets() - - spendable_balance_1 = node_1.list_balances().spendable_onchain_balance_sats - spendable_balance_2 = node_2.list_balances().spendable_onchain_balance_sats - total_balance_1 = node_1.list_balances().total_onchain_balance_sats - total_balance_2 = node_2.list_balances().total_onchain_balance_sats - - print("SPENDABLE 1:", spendable_balance_1) - self.assertEqual(spendable_balance_1, 100000) - - print("SPENDABLE 2:", spendable_balance_2) - self.assertEqual(spendable_balance_2, 100000) - - print("TOTAL 1:", total_balance_1) - self.assertEqual(total_balance_1, 100000) - - print("TOTAL 2:", total_balance_2) - self.assertEqual(total_balance_2, 100000) - - node_1.open_channel(node_id_2, listening_addresses_2[0], 50000, None, None) - - channel_pending_event_1 = node_1.wait_next_event() - assert isinstance(channel_pending_event_1, Event.CHANNEL_PENDING) - print("EVENT:", channel_pending_event_1) - node_1.event_handled() - - channel_pending_event_2 = node_2.wait_next_event() - assert isinstance(channel_pending_event_2, Event.CHANNEL_PENDING) - print("EVENT:", channel_pending_event_2) - node_2.event_handled() - - funding_txid = 
channel_pending_event_1.funding_txo.txid
-        wait_for_tx(esplora_endpoint, funding_txid)
-        mine_and_wait(esplora_endpoint, 6)
-
-        node_1.sync_wallets()
-        node_2.sync_wallets()
-
-        channel_ready_event_1 = node_1.wait_next_event()
-        assert isinstance(channel_ready_event_1, Event.CHANNEL_READY)
-        print("EVENT:", channel_ready_event_1)
-        print("funding_txo:", funding_txid)
-        node_1.event_handled()
-
-        channel_ready_event_2 = node_2.wait_next_event()
-        assert isinstance(channel_ready_event_2, Event.CHANNEL_READY)
-        print("EVENT:", channel_ready_event_2)
-        node_2.event_handled()
-
-        description = Bolt11InvoiceDescription.DIRECT("asdf")
-        invoice = node_2.bolt11_payment().receive(2500000, description, 9217)
-        node_1.bolt11_payment().send(invoice, None)
-
-        payment_successful_event_1 = node_1.wait_next_event()
-        assert isinstance(payment_successful_event_1, Event.PAYMENT_SUCCESSFUL)
-        print("EVENT:", payment_successful_event_1)
-        node_1.event_handled()
-
-        payment_received_event_2 = node_2.wait_next_event()
-        assert isinstance(payment_received_event_2, Event.PAYMENT_RECEIVED)
-        print("EVENT:", payment_received_event_2)
-        node_2.event_handled()
-
-        node_2.close_channel(channel_ready_event_2.user_channel_id, node_id_1)
-
-        channel_closed_event_1 = node_1.wait_next_event()
-        assert isinstance(channel_closed_event_1, Event.CHANNEL_CLOSED)
-        print("EVENT:", channel_closed_event_1)
-        node_1.event_handled()
-
-        channel_closed_event_2 = node_2.wait_next_event()
-        assert isinstance(channel_closed_event_2, Event.CHANNEL_CLOSED)
-        print("EVENT:", channel_closed_event_2)
-        node_2.event_handled()
-
-        mine_and_wait(esplora_endpoint, 1)
-
-        node_1.sync_wallets()
-        node_2.sync_wallets()
-
-        spendable_balance_after_close_1 = node_1.list_balances().spendable_onchain_balance_sats
-        assert spendable_balance_after_close_1 > 95000
-        assert spendable_balance_after_close_1 < 100000
-        spendable_balance_after_close_2 = node_2.list_balances().spendable_onchain_balance_sats
-        self.assertEqual(spendable_balance_after_close_2, 102500)
-
-        # Stop nodes
-        node_1.stop()
-        node_2.stop()
-
-        # Cleanup
-        time.sleep(1) # Wait a sec so our logs can finish writing
-        tmp_dir_1.cleanup()
-        tmp_dir_2.cleanup()
+        setup_1, setup_2 = setup_two_nodes(esplora_endpoint)
+
+        do_channel_full_cycle(setup_1, setup_2, esplora_endpoint)
+
+        setup_1.cleanup()
+        setup_2.cleanup()
+
+    def test_tier_store(self):
+        esplora_endpoint = get_esplora_endpoint()
+        setup_1, setup_2 = setup_two_nodes(esplora_endpoint, port_1=2325, port_2=2326, use_tier_store=True)
+
+        do_channel_full_cycle(setup_1, setup_2, esplora_endpoint)
+
+        primary, backup, ephemeral = setup_1.stores
+
+        # Wait for async backup
+        time.sleep(2)
+
+        self.assertGreater(len(primary.storage), 0, "Primary should have data")
+        self.assertGreater(len(backup.storage), 0, "Backup should have data")
+        self.assertEqual(set(primary.storage.keys()), set(backup.storage.keys()),
+                         "Backup should mirror primary")
+
+        self.assertGreater(len(ephemeral.storage), 0, "Ephemeral should have data")
+        ephemeral_keys = [key for namespace in ephemeral.storage.values() for key in namespace.keys()]
+        has_scorer_or_graph = any(key in ['scorer', 'network_graph'] for key in ephemeral_keys)
+        self.assertTrue(has_scorer_or_graph, "Ephemeral should contain scorer or network_graph data")
+
+        setup_1.cleanup()
+        setup_2.cleanup()
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs
index a360b443b..622e29528 100644
--- a/src/io/test_utils.rs
+++ b/src/io/test_utils.rs
@@ -11,7 +11,8 @@ use std::future::Future;
 use std::panic::RefUnwindSafe;
 use std::path::PathBuf;
 use std::pin::Pin;
-use std::sync::Mutex;
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
 
 use lightning::events::ClosureReason;
 use lightning::ln::functional_test_utils::{
@@ -27,6 +28,8 @@ use lightning::{check_added_monitors, check_closed_broadcast, io};
 use rand::distr::Alphanumeric;
 use rand::{rng, Rng};
 
+use crate::runtime::Runtime;
+
 type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister<
 	&'a K,
 	&'a test_utils::TestLogger,
@@ -352,3 +355,168 @@ pub(crate) fn do_test_store<K: KVStoreSync>(store_0: &K, store_1: &K) {
 	// Make sure everything is persisted as expected after close.
 	check_persisted_data!(persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1);
 }
+
+struct DelayedStoreInner {
+	storage: Mutex<HashMap<String, Vec<u8>>>,
+	delay: Duration,
+}
+
+impl DelayedStoreInner {
+	fn new(delay: Duration) -> Self {
+		Self { storage: Mutex::new(HashMap::new()), delay }
+	}
+
+	fn make_key(pn: &str, sn: &str, key: &str) -> String {
+		format!("{}/{}/{}", pn, sn, key)
+	}
+
+	async fn read_internal(
+		&self, primary_namespace: String, secondary_namespace: String, key: String,
+	) -> Result<Vec<u8>, io::Error> {
+		tokio::time::sleep(self.delay).await;
+
+		let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key);
+		let storage = self.storage.lock().unwrap();
+		storage
+			.get(&full_key)
+			.cloned()
+			.ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
+	}
+
+	async fn write_internal(
+		&self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec<u8>,
+	) -> Result<(), io::Error> {
+		tokio::time::sleep(self.delay).await;
+
+		let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key);
+		let mut storage = self.storage.lock().unwrap();
+		storage.insert(full_key, buf);
+		Ok(())
+	}
+
+	async fn remove_internal(
+		&self, primary_namespace: String, secondary_namespace: String, key: String,
+	) -> Result<(), io::Error> {
+		tokio::time::sleep(self.delay).await;
+
+		let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key);
+		let mut storage = self.storage.lock().unwrap();
+		storage.remove(&full_key);
+		Ok(())
+	}
+
+	async fn list_internal(
+		&self, primary_namespace: String, secondary_namespace: String,
+	) -> Result<Vec<String>, io::Error> {
+		tokio::time::sleep(self.delay).await;
+
+		let prefix = format!("{}/{}/", primary_namespace, secondary_namespace);
+		let storage = self.storage.lock().unwrap();
+		Ok(storage
+			.keys()
+			.filter(|k| k.starts_with(&prefix))
+			.map(|k| k.strip_prefix(&prefix).unwrap().to_string())
+			.collect())
+	}
+}
+
+pub struct DelayedStore {
+	inner: Arc<DelayedStoreInner>,
+	runtime: Arc<Runtime>,
+}
+
+impl DelayedStore {
+	pub fn new(delay_ms: u64, runtime: Arc<Runtime>) -> Self {
+		Self { inner: Arc::new(DelayedStoreInner::new(Duration::from_millis(delay_ms))), runtime }
+	}
+}
+
+impl KVStore for DelayedStore {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, io::Error>> + Send>> {
+		let inner = Arc::clone(&self.inner);
+		let pn = primary_namespace.to_string();
+		let sn = secondary_namespace.to_string();
+		let key = key.to_string();
+
+		Box::pin(async move { inner.read_internal(pn, sn, key).await })
+	}
+
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send>> {
+		let inner = Arc::clone(&self.inner);
+		let pn = primary_namespace.to_string();
+		let sn = secondary_namespace.to_string();
+		let key = key.to_string();
+
+		Box::pin(async move { inner.write_internal(pn, sn, key, buf).await })
+	}
+
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool,
+	) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send>> {
+		let inner = Arc::clone(&self.inner);
+		let pn = primary_namespace.to_string();
+		let sn = secondary_namespace.to_string();
+		let key = key.to_string();
+
+		Box::pin(async move { inner.remove_internal(pn, sn, key).await })
+	}
+
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> Pin<Box<dyn Future<Output = Result<Vec<String>, io::Error>> + Send>> {
+		let inner = Arc::clone(&self.inner);
+		let pn = primary_namespace.to_string();
+		let sn = secondary_namespace.to_string();
+
+		Box::pin(async move { inner.list_internal(pn, sn).await })
+	}
+}
+
+impl KVStoreSync for DelayedStore {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> Result<Vec<u8>, io::Error> {
+		let inner = Arc::clone(&self.inner);
+		let pn = primary_namespace.to_string();
+		let sn = secondary_namespace.to_string();
+		let key = key.to_string();
+
+		self.runtime.block_on(async move { inner.read_internal(pn, sn, key).await })
+	}
+
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> Result<(), io::Error> {
+		let inner = Arc::clone(&self.inner);
+		let pn = primary_namespace.to_string();
+		let sn = secondary_namespace.to_string();
+		let key = key.to_string();
+
+		self.runtime.block_on(async move { inner.write_internal(pn, sn, key, buf).await })
+	}
+
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool,
+	) -> Result<(), io::Error> {
+		let inner = Arc::clone(&self.inner);
+		let pn = primary_namespace.to_string();
+		let sn = secondary_namespace.to_string();
+		let key = key.to_string();
+
+		self.runtime.block_on(async move { inner.remove_internal(pn, sn, key).await })
+	}
+
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> Result<Vec<String>, io::Error> {
+		let inner = Arc::clone(&self.inner);
+		let pn = primary_namespace.to_string();
+		let sn = secondary_namespace.to_string();
+
+		self.runtime.block_on(async move { inner.list_internal(pn, sn).await })
+	}
+}
diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs
index 7cb0963c8..b471fdb62 100644
--- a/src/io/tier_store.rs
+++ b/src/io/tier_store.rs
@@ -29,7 +29,10 @@ use std::time::Duration;
 
 // todo(enigbe): Uncertain about appropriate queue size and if this would need
 // configuring.
+#[cfg(not(test))]
 const BACKUP_QUEUE_CAPACITY: usize = 100;
+#[cfg(test)]
+const BACKUP_QUEUE_CAPACITY: usize = 5;
 
 const DEFAULT_INITIAL_RETRY_DELAY_MS: u16 = 10;
 const DEFAULT_MAXIMUM_RETRY_DELAY_MS: u16 = 500;
@@ -1128,143 +1131,288 @@ impl BackupOp {
 
 #[cfg(test)]
 mod tests {
-	use crate::io::test_utils::random_storage_path;
-	use crate::io::tier_store::{RetryConfig, TierStore};
-	use crate::logger::Logger;
-	use crate::runtime::Runtime;
-	#[cfg(not(feature = "uniffi"))]
-	use crate::types::DynStore;
-	use crate::wrap_store;
-	#[cfg(feature = "uniffi")]
-	use crate::DynStore;
+	use std::panic::RefUnwindSafe;
+	use std::path::PathBuf;
+	use std::sync::Arc;
+	use std::thread;
 
 	use lightning::util::logger::Level;
 	use lightning::util::persist::{
-		KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
-		NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY,
+		CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+		CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
 	};
 	use lightning_persister::fs_store::FilesystemStore;
-	use std::path::PathBuf;
-	use std::sync::Arc;
-	// use std::time::Duration;
-
-	struct StorageFixture {
-		tier: TierStore,
-		primary: Arc<DynStore>,
-		ephemeral: Option<Arc<DynStore>>,
-		backup: Option<Arc<DynStore>>,
-		base_dir: PathBuf,
-	}
+	use crate::io::test_utils::{
+		do_read_write_remove_list_persist, random_storage_path, DelayedStore,
+	};
+	use crate::logger::Logger;
+	use crate::runtime::Runtime;
+	use crate::{wrap_store, RetryConfig};
 
-	impl Drop for StorageFixture {
-		fn drop(&mut self) {
-			drop(self.backup.take());
-			drop(self.ephemeral.take());
+	use super::*;
 
-			if let Err(e) = std::fs::remove_dir_all(&self.base_dir) {
-				eprintln!("Failed to clean up test directory {:?}: {}", self.base_dir, e);
-			}
+	impl RefUnwindSafe for TierStore {}
+
+	struct CleanupDir(PathBuf);
+	impl Drop for CleanupDir {
+		fn drop(&mut self) {
+			let _ = std::fs::remove_dir_all(&self.0);
 		}
 	}
 
-	fn setup_tier_store(ephemeral: bool, backup: bool) -> StorageFixture {
+	fn setup_tier_store(
+		primary_store: Arc<DynStore>, logger: Arc<Logger>, runtime: Arc<Runtime>,
+	) -> TierStore {
+		let retry_config = RetryConfig::default();
+		TierStore::new(primary_store, runtime, logger, retry_config)
+	}
+
+	#[test]
+	fn write_read_list_remove() {
 		let base_dir = random_storage_path();
 		let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned();
+		let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap());
 
-		let primary: Arc<DynStore> =
-			wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("primary_store"))));
-		let logger = Arc::new(
-			Logger::new_fs_writer(log_path, Level::Debug)
-				.expect("Failed to create filesystem logger"),
-		);
-		let runtime =
-			Arc::new(Runtime::new(Arc::clone(&logger)).expect("Failed to create new runtime."));
-		let retry_config = RetryConfig::default();
-		let mut tier =
-			TierStore::new(Arc::clone(&primary), Arc::clone(&runtime), logger, retry_config);
-
-		let ephemeral = if ephemeral {
-			let eph_store: Arc<DynStore> =
-				wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("eph_store"))));
-			tier.set_ephemeral_store(Arc::clone(&eph_store));
-			Some(eph_store)
-		} else {
-			None
-		};
+		let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap());
+		let _cleanup = CleanupDir(base_dir.clone());
 
-		let backup = if backup {
-			let backup: Arc<DynStore> =
-				wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("backup_store"))));
-			tier.set_backup_store(Arc::clone(&backup));
-			Some(backup)
-		} else {
-			None
-		};
+		let primary_store =
+			wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("primary"))));
+		let tier = setup_tier_store(primary_store, logger, runtime);
 
-		StorageFixture { tier, primary, ephemeral, backup, base_dir }
+		do_read_write_remove_list_persist(&tier);
 	}
 
 	#[test]
-	fn writes_to_ephemeral_if_configured() {
-		let tier = setup_tier_store(true, false);
-		assert!(tier.ephemeral.is_some());
+	fn ephemeral_routing() {
+		let base_dir = random_storage_path();
+		let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned();
+		let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap());
+
+		let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap());
+
+		let _cleanup = CleanupDir(base_dir.clone());
+
+		let primary_store: Arc<DynStore> =
+			wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("primary"))));
+		let mut tier = setup_tier_store(Arc::clone(&primary_store), logger, runtime);
 
-		let primary_namespace = NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE;
-		let secondary_namespace = NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE;
-		let data = [42u8; 32].to_vec();
+		let ephemeral_store: Arc<DynStore> =
+			wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("ephemeral"))));
+		tier.set_ephemeral_store(Arc::clone(&ephemeral_store));
 
+		let data = vec![42u8; 32];
+
+		// Non-critical
 		KVStoreSync::write(
-			&tier.tier,
-			primary_namespace,
-			secondary_namespace,
+			&tier,
+			NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
+			NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
 			NETWORK_GRAPH_PERSISTENCE_KEY,
 			data.clone(),
 		)
 		.unwrap();
 
+		// Critical
 		KVStoreSync::write(
-			&tier.tier,
-			primary_namespace,
-			secondary_namespace,
-			SCORER_PERSISTENCE_KEY,
+			&tier,
+			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_KEY,
 			data.clone(),
 		)
 		.unwrap();
 
-		let eph_store = tier.ephemeral.clone().unwrap();
-		let ng_read = KVStoreSync::read(
-			&*eph_store,
-			primary_namespace,
-			secondary_namespace,
+		let primary_read_ng = KVStoreSync::read(
+			&*primary_store,
+			NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
+			NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
 			NETWORK_GRAPH_PERSISTENCE_KEY,
+		);
+		let ephemeral_read_ng = KVStoreSync::read(
+			&*ephemeral_store,
+			NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
+			NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+			NETWORK_GRAPH_PERSISTENCE_KEY,
+		);
+
+		let primary_read_cm = KVStoreSync::read(
+			&*primary_store,
+			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_KEY,
+		);
+		let ephemeral_read_cm = KVStoreSync::read(
+			&*ephemeral_store,
+			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_KEY,
+		);
+
+		assert!(primary_read_ng.is_err());
+		assert_eq!(ephemeral_read_ng.unwrap(), data);
+
+		assert!(ephemeral_read_cm.is_err());
+		assert_eq!(primary_read_cm.unwrap(), data);
+	}
+
+	#[test]
+	fn lazy_backup() {
+		let base_dir = random_storage_path();
+		let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned();
+		let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap());
+		let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap());
+
+		let _cleanup = CleanupDir(base_dir.clone());
+
+		let primary_store: Arc<DynStore> =
+			wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("primary"))));
+		let mut tier = setup_tier_store(Arc::clone(&primary_store), logger, runtime);
+
+		let backup_store: Arc<DynStore> =
+			wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("backup"))));
+		tier.set_backup_store(Arc::clone(&backup_store));
+
+		let data = vec![42u8; 32];
+
+		KVStoreSync::write(
+			&tier,
+			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_KEY,
+			data.clone(),
+		)
+		.unwrap();
+
+		// Immediate read from backup should fail
+		let backup_read_cm = KVStoreSync::read(
+			&*backup_store,
+			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_KEY,
+		);
+		assert!(backup_read_cm.is_err());
+
+		// The primary is not blocked by the backup, so an immediate read should succeed
+		let primary_read_cm = KVStoreSync::read(
+			&*primary_store,
+			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_KEY,
+		);
+		assert_eq!(primary_read_cm.unwrap(), data);
+
+		// Delayed read from backup should succeed
+		thread::sleep(Duration::from_millis(50));
+		let backup_read_cm = KVStoreSync::read(
+			&*backup_store,
+			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_KEY,
+		);
+		assert_eq!(backup_read_cm.unwrap(), data);
+	}
+
+	#[test]
+	fn backup_overflow_doesnt_fail_writes() {
+		let base_dir = random_storage_path();
+		let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned();
+		let logger = Arc::new(Logger::new_fs_writer(log_path.clone(), Level::Trace).unwrap());
+		let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap());
+
+		let _cleanup = CleanupDir(base_dir.clone());
+
+		let primary_store: Arc<DynStore> =
+			wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("primary"))));
+		let mut tier =
+			setup_tier_store(Arc::clone(&primary_store), Arc::clone(&logger), Arc::clone(&runtime));
+
+		let backup_store: Arc<DynStore> = wrap_store!(Arc::new(DelayedStore::new(100, runtime)));
+		tier.set_backup_store(Arc::clone(&backup_store));
+
+		let data = vec![42u8; 32];
+
+		let key = CHANNEL_MANAGER_PERSISTENCE_KEY;
+		for i in 0..=10 {
+			let result = KVStoreSync::write(
+				&tier,
+				CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+				CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+				&format!("{}_{}", key, i),
+				data.clone(),
+			);
+
+			assert!(result.is_ok(), "Write {} should succeed", i);
+		}
+
+		// Check logs for backup queue overflow message
+		let log_contents = std::fs::read_to_string(&log_path).unwrap();
+		assert!(
+			log_contents.contains("Backup queue is full"),
+			"Logs should contain backup queue overflow message"
+		);
+	}
+
+	#[test]
+	fn lazy_removal() {
+		let base_dir = random_storage_path();
+		let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned();
+		let logger = Arc::new(Logger::new_fs_writer(log_path.clone(), Level::Trace).unwrap());
+		let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap());
+
+		let _cleanup = CleanupDir(base_dir.clone());
+
+		let primary_store: Arc<DynStore> =
+			wrap_store!(Arc::new(FilesystemStore::new(base_dir.join("primary"))));
+		let mut tier =
+			setup_tier_store(Arc::clone(&primary_store), Arc::clone(&logger), Arc::clone(&runtime));
+
+		let backup_store: Arc<DynStore> = wrap_store!(Arc::new(DelayedStore::new(100, runtime)));
+		tier.set_backup_store(Arc::clone(&backup_store));
+
+		let data = vec![42u8; 32];
+
+		let key = CHANNEL_MANAGER_PERSISTENCE_KEY;
+		let write_result = KVStoreSync::write(
+			&tier,
+			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+			key,
+			data.clone(),
+		);
+		assert!(write_result.is_ok(), "Write should succeed");
+
+		thread::sleep(Duration::from_millis(10));
+
+		assert_eq!(
+			KVStoreSync::read(
+				&*backup_store,
+				CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+				CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+				key,
+			)
+			.unwrap(),
+			data
+		);
+
+		KVStoreSync::remove(
+			&tier,
+			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+			key,
+			true,
 		)
 		.unwrap();
 
-		assert_eq!(ng_read, data);
-		assert!(KVStoreSync::read(
-			&*tier.primary,
-			primary_namespace,
-			secondary_namespace,
-			NETWORK_GRAPH_PERSISTENCE_KEY
-		)
-		.is_err());
+		thread::sleep(Duration::from_millis(10));
 
-		assert_eq!(sc_read, data);
-		assert!(KVStoreSync::read(
-			&*tier.primary,
-			primary_namespace,
-			secondary_namespace,
-			SCORER_PERSISTENCE_KEY
-		)
-		.is_err());
+		let res = KVStoreSync::read(
+			&*backup_store,
+			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+			key,
+		);
+
+		assert!(res.is_err());
 	}
 }
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index f83a0da2d..b2b3e4b1b 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -33,8 +33,8 @@ use ldk_node::entropy::{generate_entropy_mnemonic, NodeEntropy};
 use ldk_node::io::sqlite_store::SqliteStore;
 use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus};
 use ldk_node::{
-	wrap_store, Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError,
-	PendingSweepBalance,
+	wrap_store, Builder, CustomTlvRecord, DynStore, Event, LightningBalance, Node, NodeError,
+	PendingSweepBalance, RetryConfig,
 };
 use lightning::io;
 use lightning::ln::msgs::SocketAddress;
@@ -277,10 +277,15 @@ pub(crate) enum TestChainSource<'a> {
 	BitcoindRestSync(&'a BitcoinD),
 }
 
-#[derive(Clone, Copy)]
+#[derive(Clone)]
 pub(crate) enum TestStoreType {
 	TestSyncStore,
 	Sqlite,
+	TierStore {
+		primary: Arc<DynStore>,
+		backup: Option<Arc<DynStore>>,
+		ephemeral: Option<Arc<DynStore>>,
+	},
 }
 
 impl Default for TestStoreType {
@@ -320,6 +325,22 @@ macro_rules! setup_builder {
 pub(crate) use setup_builder;
 
+pub(crate) fn create_tier_stores(
+	base_path: PathBuf,
+) -> (Arc<DynStore>, Arc<DynStore>, Arc<DynStore>) {
+	let primary = wrap_store!(Arc::new(
+		SqliteStore::new(
+			base_path.join("primary"),
+			Some("primary_db".to_string()),
+			Some("primary_kv".to_string()),
+		)
+		.unwrap(),
+	));
+	let backup = wrap_store!(Arc::new(FilesystemStore::new(base_path.join("backup"))));
+	let ephemeral = wrap_store!(Arc::new(TestStore::new(false)));
+	(primary, backup, ephemeral)
+}
+
 pub(crate) fn setup_two_nodes(
 	chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool,
 	anchors_trusted_no_reserve: bool,
@@ -330,21 +351,22 @@ pub(crate) fn setup_two_nodes(
 		anchor_channels,
 		anchors_trusted_no_reserve,
 		TestStoreType::TestSyncStore,
+		TestStoreType::TestSyncStore,
 	)
 }
 
 pub(crate) fn setup_two_nodes_with_store(
 	chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool,
-	anchors_trusted_no_reserve: bool, store_type: TestStoreType,
+	anchors_trusted_no_reserve: bool, store_type_a: TestStoreType, store_type_b: TestStoreType,
 ) -> (TestNode, TestNode) {
 	println!("== Node A ==");
 	let mut config_a = random_config(anchor_channels);
-	config_a.store_type = store_type;
+	config_a.store_type = store_type_a;
 	let node_a = setup_node(chain_source, config_a);
 
 	println!("\n== Node B ==");
 	let mut config_b = random_config(anchor_channels);
-	config_b.store_type = store_type;
+	config_b.store_type = store_type_b;
 	if allow_0conf {
 		config_b.node_config.trusted_peers_0conf.push(node_a.node_id());
 	}
@@ -430,6 +452,16 @@ pub(crate) fn setup_node_for_async_payments(
 			builder.build_with_store(config.node_entropy.into(), kv_store).unwrap()
 		},
 		TestStoreType::Sqlite => builder.build(config.node_entropy.into()).unwrap(),
+		TestStoreType::TierStore { primary, backup, ephemeral } => {
+			if let Some(backup) = backup {
+				builder.set_tier_store_backup(backup);
+			}
+			if let Some(ephemeral) = ephemeral {
+				builder.set_tier_store_ephemeral(ephemeral);
+			}
+			builder.set_tier_store_retry_config(RetryConfig::default());
+			builder.build_with_tier_store(config.node_entropy.into(), primary).unwrap()
+		},
 	};
 
 	node.start().unwrap();
diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs
index 571d31a6f..2efdee000 100644
--- a/tests/integration_tests_rust.rs
+++ b/tests/integration_tests_rust.rs
@@ -35,10 +35,19 @@ use ldk_node::{wrap_store, Builder, DynStore, Event, NodeError};
 use lightning::ln::channelmanager::PaymentId;
 use lightning::routing::gossip::{NodeAlias, NodeId};
 use lightning::routing::router::RouteParametersConfig;
+use lightning::util::persist::{
+	KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+	CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
+	NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+};
 use lightning_invoice::{Bolt11InvoiceDescription, Description};
 use lightning_types::payment::{PaymentHash, PaymentPreimage};
 use log::LevelFilter;
 
+use crate::common::{
+	create_tier_stores, random_storage_path, setup_two_nodes_with_store, TestStoreType,
+};
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
 async fn channel_full_cycle() {
 	let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
@@ -48,6 +57,85 @@ async fn channel_full_cycle() {
 		.await;
 }
 
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn channel_full_cycle_tier_store() {
+	let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+	let chain_source = TestChainSource::Esplora(&electrsd);
+	let (primary_a, backup_a, ephemeral_a) = create_tier_stores(random_storage_path());
+	let (primary_b, backup_b, ephemeral_b) = create_tier_stores(random_storage_path());
+
+	let (node_a, node_b) = setup_two_nodes_with_store(
+		&chain_source,
+		false,
+		true,
+		false,
+		TestStoreType::TierStore {
+			primary: Arc::clone(&primary_a),
+			backup: Some(Arc::clone(&backup_a)),
+			ephemeral: Some(Arc::clone(&ephemeral_a)),
+		},
+		TestStoreType::TierStore {
+			primary: Arc::clone(&primary_b),
+			backup: Some(Arc::clone(&backup_b)),
+			ephemeral: Some(Arc::clone(&ephemeral_b)),
+		},
+	);
+	do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false)
+		.await;
+
+	// Verify Primary store contains channel manager data
+	let primary_channel_manager = KVStoreSync::read(
+		primary_a.as_ref(),
+		CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+		CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+		CHANNEL_MANAGER_PERSISTENCE_KEY,
+	);
+	assert!(primary_channel_manager.is_ok(), "Primary should have channel manager data");
+
+	// Verify Primary store contains payment info
+	let primary_payments = KVStoreSync::list(primary_a.as_ref(), "payments", "");
+	assert!(primary_payments.is_ok(), "Primary should have payment data");
+	assert!(!primary_payments.unwrap().is_empty(), "Primary should have payment entries");
+
+	// Verify Backup store synced critical data
+	let backup_channel_manager = KVStoreSync::read(
+		backup_a.as_ref(),
+		CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+		CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+		CHANNEL_MANAGER_PERSISTENCE_KEY,
+	);
+	assert!(backup_channel_manager.is_ok(), "Backup should have synced channel manager");
+
+	// Verify backup is not empty
+	let backup_all_keys = KVStoreSync::list(backup_a.as_ref(), "", "").unwrap();
+	assert!(!backup_all_keys.is_empty(), "Backup store should not be empty");
+
+	// Verify Ephemeral does NOT have channel manager
+	let ephemeral_channel_manager = KVStoreSync::read(
+		ephemeral_a.as_ref(),
+		CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+		CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+		CHANNEL_MANAGER_PERSISTENCE_KEY,
+	);
+	assert!(ephemeral_channel_manager.is_err(), "Ephemeral should NOT have channel manager");
+
+	// Verify Ephemeral does NOT have payment info
+	let ephemeral_payments = KVStoreSync::list(ephemeral_a.as_ref(), "payments", "");
+	assert!(
+		ephemeral_payments.is_err() || ephemeral_payments.unwrap().is_empty(),
+		"Ephemeral should NOT have payment data"
+	);
+
+	// Verify Ephemeral does have network graph
+	let ephemeral_network_graph = KVStoreSync::read(
+		ephemeral_a.as_ref(),
+		NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
+		NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+		NETWORK_GRAPH_PERSISTENCE_KEY,
+	);
+	assert!(ephemeral_network_graph.is_ok(), "Ephemeral should have network graph");
+}
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
 async fn channel_full_cycle_electrum() {
 	let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();