Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
50 changes: 45 additions & 5 deletions src/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,13 @@ use lightning::routing::scoring::{
use lightning::sign::{EntropySource, NodeSigner};
use lightning::util::config::HTLCInterceptionFlags;
use lightning::util::persist::{
KVStore, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
migrate_kv_store_data, KVStore, CHANNEL_MANAGER_PERSISTENCE_KEY,
CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
};
use lightning::util::ser::ReadableArgs;
use lightning::util::sweep::OutputSweeper;
use lightning_persister::fs_store::v1::FilesystemStore;
use lightning_persister::fs_store::v2::FilesystemStoreV2;
use vss_client::headers::VssHeaderProvider;

use crate::chain::ChainSource;
Expand Down Expand Up @@ -629,15 +630,20 @@ impl NodeBuilder {
self.build_with_store(node_entropy, kv_store)
}

/// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options
/// Builds a [`Node`] instance with a [`FilesystemStoreV2`] backend and according to the options
/// previously configured.
///
/// If the storage directory contains data from a v1 filesystem store, it will be
/// automatically migrated to the v2 format.
pub fn build_with_fs_store(&self, node_entropy: NodeEntropy) -> Result<Node, BuildError> {
let mut storage_dir_path: PathBuf = self.config.storage_dir_path.clone().into();
storage_dir_path.push("fs_store");

fs::create_dir_all(storage_dir_path.clone())
.map_err(|_| BuildError::StoragePathAccessFailed)?;
let kv_store = FilesystemStore::new(storage_dir_path);

let kv_store = open_or_migrate_fs_store(storage_dir_path)?;

self.build_with_store(node_entropy, kv_store)
}

Expand Down Expand Up @@ -1087,7 +1093,7 @@ impl ArcedNodeBuilder {
self.inner.read().unwrap().build(*node_entropy).map(Arc::new)
}

/// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options
/// Builds a [`Node`] instance with a [`FilesystemStoreV2`] backend and according to the options
/// previously configured.
pub fn build_with_fs_store(
&self, node_entropy: Arc<NodeEntropy>,
Expand Down Expand Up @@ -1998,6 +2004,40 @@ fn build_with_store_internal(
})
}

/// Opens a [`FilesystemStoreV2`], automatically migrating from v1 format if necessary.
///
/// If the directory contains v1 data (files at the top level), the data is migrated to v2 format
/// in a temporary sibling directory (`fs_store_v2_migrating`), the original directory is renamed
/// to `fs_store_v1_backup`, and the migrated directory is moved into place.
///
/// If the process crashed mid-migration, a stale `fs_store_v2_migrating` directory may be left
/// behind; it is removed and the migration is restarted from the (still intact) v1 data.
fn open_or_migrate_fs_store(storage_dir_path: PathBuf) -> Result<FilesystemStoreV2, BuildError> {
	match FilesystemStoreV2::new(storage_dir_path.clone()) {
		Ok(store) => Ok(store),
		Err(e) if e.kind() == std::io::ErrorKind::InvalidData => {
			// The directory contains v1 data, migrate to v2.
			let mut v1_store = FilesystemStore::new(storage_dir_path.clone());

			// Migrate into a sibling temp directory so that an interrupted migration never
			// leaves the original directory in a partially-converted state.
			let mut v2_dir = storage_dir_path.clone();
			v2_dir.set_file_name("fs_store_v2_migrating");

			// Clear out any leftovers from a previously-interrupted migration attempt, as stale
			// entries could otherwise end up mixed into the freshly-migrated data below.
			if v2_dir.exists() {
				fs::remove_dir_all(&v2_dir).map_err(|_| BuildError::StoragePathAccessFailed)?;
			}

			fs::create_dir_all(v2_dir.clone()).map_err(|_| BuildError::StoragePathAccessFailed)?;
			let mut v2_store = FilesystemStoreV2::new(v2_dir.clone())
				.map_err(|_| BuildError::KVStoreSetupFailed)?;

			migrate_kv_store_data(&mut v1_store, &mut v2_store)
				.map_err(|_| BuildError::KVStoreSetupFailed)?;

			// Swap directories: rename v1 out of the way, move v2 into place.
			//
			// NOTE(review): if a non-empty `fs_store_v1_backup` from an earlier migration still
			// exists, the first rename fails and we error out rather than overwrite the backup —
			// confirm this is the intended behavior for repeated migrations.
			let mut backup_dir = storage_dir_path.clone();
			backup_dir.set_file_name("fs_store_v1_backup");
			fs::rename(&storage_dir_path, &backup_dir)
				.map_err(|_| BuildError::KVStoreSetupFailed)?;
			fs::rename(&v2_dir, &storage_dir_path).map_err(|_| BuildError::KVStoreSetupFailed)?;

			// Reopen the migrated store at its final location.
			FilesystemStoreV2::new(storage_dir_path).map_err(|_| BuildError::KVStoreSetupFailed)
		},
		Err(_) => Err(BuildError::KVStoreSetupFailed),
	}
}

fn optionally_install_rustls_cryptoprovider() {
// Acquire a global Mutex, ensuring that only one process at a time install the provider. This
// is mostly required for running tests concurrently.
Expand Down
56 changes: 54 additions & 2 deletions tests/integration_tests_rust.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2458,14 +2458,18 @@ async fn build_0_6_2_node(

async fn build_0_7_0_node(
bitcoind: &BitcoinD, electrsd: &ElectrsD, storage_path: String, esplora_url: String,
seed_bytes: [u8; 64],
seed_bytes: [u8; 64], use_fs_store: bool,
) -> (u64, bitcoin::secp256k1::PublicKey) {
let mut builder_old = ldk_node_070::Builder::new();
builder_old.set_network(bitcoin::Network::Regtest);
builder_old.set_storage_dir_path(storage_path);
builder_old.set_entropy_seed_bytes(seed_bytes);
builder_old.set_chain_source_esplora(esplora_url, None);
let node_old = builder_old.build().unwrap();
let node_old = if use_fs_store {
builder_old.build_with_fs_store().unwrap()
} else {
builder_old.build().unwrap()
};

node_old.start().unwrap();
let addr_old = node_old.onchain_payment().new_address().unwrap();
Expand Down Expand Up @@ -2512,6 +2516,7 @@ async fn do_persistence_backwards_compatibility(version: OldLdkVersion) {
storage_path.clone(),
esplora_url.clone(),
seed_bytes,
false,
)
.await
},
Expand Down Expand Up @@ -2550,6 +2555,53 @@ async fn persistence_backwards_compatibility() {
do_persistence_backwards_compatibility(OldLdkVersion::V0_7_0).await;
}

#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn fs_store_persistence_backwards_compatibility() {
	let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd();
	let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());

	let seed_bytes = [42u8; 64];
	let storage_path = common::random_storage_path().to_str().unwrap().to_owned();

	// Spin up a node via v0.7.0's `build_with_fs_store`, i.e., backed by the legacy
	// `FilesystemStore` (v1) on-disk layout.
	let (balance_before, node_id_before) = build_0_7_0_node(
		&bitcoind,
		&electrsd,
		storage_path.clone(),
		esplora_url.clone(),
		seed_bytes,
		true,
	)
	.await;

	// Reopen the same storage directory with the current `build_with_fs_store`, which is
	// expected to transparently migrate the persisted data to `FilesystemStoreV2`.
	#[cfg(feature = "uniffi")]
	let builder = Builder::new();
	#[cfg(not(feature = "uniffi"))]
	let mut builder = Builder::new();
	builder.set_network(bitcoin::Network::Regtest);
	builder.set_storage_dir_path(storage_path);
	builder.set_chain_source_esplora(esplora_url, None);

	#[cfg(feature = "uniffi")]
	let entropy = NodeEntropy::from_seed_bytes(seed_bytes.to_vec()).unwrap();
	#[cfg(not(feature = "uniffi"))]
	let entropy = NodeEntropy::from_seed_bytes(seed_bytes);
	let migrated_node = builder.build_with_fs_store(entropy.into()).unwrap();

	migrated_node.start().unwrap();
	migrated_node.sync_wallets().unwrap();

	// Node identity and funds must have survived the on-disk format migration.
	assert_eq!(node_id_before, migrated_node.node_id());
	assert_eq!(balance_before, migrated_node.list_balances().spendable_onchain_balance_sats);

	migrated_node.stop().unwrap();
}

#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn onchain_fee_bump_rbf() {
let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
Expand Down