From c08ea1de8136e96fe36cefd6c282ac23906ed221 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 22 Sep 2025 14:13:09 +0000 Subject: [PATCH 1/4] Rename namespace validity constants to be more concise The constants in `lightning::util::persist` are sufficiently long that its often difficult eyeball their correctness which nearly led to several bugs when adding async support. Here we take the first step towards condensing them by making them somewhat more concise, dropping words which are obvious from context. --- lightning-persister/src/test_utils.rs | 13 +++++---- lightning-persister/src/utils.rs | 5 ++-- lightning/src/util/persist.rs | 38 +++++++++++++-------------- 3 files changed, 26 insertions(+), 30 deletions(-) diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index 636967a6937..f01ec85ae90 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -5,7 +5,7 @@ use lightning::ln::functional_test_utils::{ }; use lightning::util::persist::{ migrate_kv_store_data, read_channel_monitors, KVStoreSync, MigratableKVStore, - KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, + NAMESPACE_ALPHABET, NAMESPACE_MAX_LEN, }; use lightning::util::test_utils; use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event}; @@ -46,8 +46,8 @@ pub(crate) fn do_read_write_remove_list_persist( assert_eq!(listed_keys.len(), 0); // Ensure we have no issue operating with primary_namespace/secondary_namespace/key being - // KVSTORE_NAMESPACE_KEY_MAX_LEN - let max_chars = "A".repeat(KVSTORE_NAMESPACE_KEY_MAX_LEN); + // NAMESPACE_MAX_LEN + let max_chars = "A".repeat(NAMESPACE_MAX_LEN); kv_store.write(&max_chars, &max_chars, &max_chars, data.clone()).unwrap(); let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap(); @@ -76,17 +76,16 @@ pub(crate) fn do_test_data_migration let primary_namespace = if i == 0 { String::new() } else { - format!("testspace{}", 
KVSTORE_NAMESPACE_KEY_ALPHABET.chars().nth(i).unwrap()) + format!("testspace{}", NAMESPACE_ALPHABET.chars().nth(i).unwrap()) }; for j in 0..num_secondary_namespaces { let secondary_namespace = if i == 0 || j == 0 { String::new() } else { - format!("testsubspace{}", KVSTORE_NAMESPACE_KEY_ALPHABET.chars().nth(j).unwrap()) + format!("testsubspace{}", NAMESPACE_ALPHABET.chars().nth(j).unwrap()) }; for k in 0..num_keys { - let key = - format!("testkey{}", KVSTORE_NAMESPACE_KEY_ALPHABET.chars().nth(k).unwrap()); + let key = format!("testkey{}", NAMESPACE_ALPHABET.chars().nth(k).unwrap()); source_store .write(&primary_namespace, &secondary_namespace, &key, dummy_data.clone()) .unwrap(); diff --git a/lightning-persister/src/utils.rs b/lightning-persister/src/utils.rs index e8e7be5ce5d..ca543ffab17 100644 --- a/lightning-persister/src/utils.rs +++ b/lightning-persister/src/utils.rs @@ -1,9 +1,8 @@ use lightning::types::string::PrintableString; -use lightning::util::persist::{KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN}; +use lightning::util::persist::{NAMESPACE_ALPHABET, NAMESPACE_MAX_LEN}; pub(crate) fn is_valid_kvstore_str(key: &str) -> bool { - key.len() <= KVSTORE_NAMESPACE_KEY_MAX_LEN - && key.chars().all(|c| KVSTORE_NAMESPACE_KEY_ALPHABET.contains(c)) + key.len() <= NAMESPACE_MAX_LEN && key.chars().all(|c| NAMESPACE_ALPHABET.contains(c)) } pub(crate) fn check_namespace_key_validity( diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index d00e29e686a..6238e2d96fa 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -41,11 +41,11 @@ use crate::util::ser::{Readable, ReadableArgs, Writeable}; use crate::util::wakers::Notifier; /// The alphabet of characters allowed for namespaces and keys. 
-pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = +pub const NAMESPACE_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-"; /// The maximum number of characters namespaces and keys may have. -pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120; +pub const NAMESPACE_MAX_LEN: usize = 120; /// The primary namespace under which the [`ChannelManager`] will be persisted. /// @@ -126,15 +126,14 @@ pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2]; /// ways, as long as per-namespace key uniqueness is asserted. /// /// Keys and namespaces are required to be valid ASCII strings in the range of -/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty -/// primary namespaces and secondary namespaces (`""`) are assumed to be a valid, however, if -/// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means -/// that concerns should always be separated by primary namespace first, before secondary -/// namespaces are used. While the number of primary namespaces will be relatively small and is -/// determined at compile time, there may be many secondary namespaces per primary namespace. Note -/// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given -/// namespace, i.e., conflicts between keys and equally named -/// primary namespaces/secondary namespaces must be avoided. +/// [`NAMESPACE_ALPHABET`] and no longer than [`NAMESPACE_MAX_LEN`]. Empty primary namespaces and +/// secondary namespaces (`""`) are assumed to be a valid, however, if `primary_namespace` is empty, +/// `secondary_namespace` is required to be empty, too. This means that concerns should always be +/// separated by primary namespace first, before secondary namespaces are used. While the number of +/// primary namespaces will be relatively small and is determined at compile time, there may be many +/// secondary namespaces per primary namespace. 
Note that per-namespace uniqueness needs to also +/// hold for keys *and* namespaces in any given namespace, i.e., conflicts between keys and equally +/// named primary namespaces/secondary namespaces must be avoided. /// /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to @@ -255,15 +254,14 @@ where /// ways, as long as per-namespace key uniqueness is asserted. /// /// Keys and namespaces are required to be valid ASCII strings in the range of -/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty -/// primary namespaces and secondary namespaces (`""`) are assumed to be a valid, however, if -/// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means -/// that concerns should always be separated by primary namespace first, before secondary -/// namespaces are used. While the number of primary namespaces will be relatively small and is -/// determined at compile time, there may be many secondary namespaces per primary namespace. Note -/// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given -/// namespace, i.e., conflicts between keys and equally named -/// primary namespaces/secondary namespaces must be avoided. +/// [`NAMESPACE_ALPHABET`] and no longer than [`NAMESPACE_MAX_LEN`]. Empty primary namespaces and +/// secondary namespaces (`""`) are assumed to be a valid, however, if `primary_namespace` is +/// empty, `secondary_namespace` is required to be empty, too. This means that concerns should +/// always be separated by primary namespace first, before secondary namespaces are used. While the +/// number of primary namespaces will be relatively small and is determined at compile time, there +/// may be many secondary namespaces per primary namespace. 
Note that per-namespace uniqueness +/// needs to also hold for keys *and* namespaces in any given namespace, i.e., conflicts between +/// keys and equally named primary namespaces/secondary namespaces must be avoided. /// /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to From 1660c72f786a8d805949b5ef6515d1c5026c2a8b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 22 Sep 2025 14:18:14 +0000 Subject: [PATCH 2/4] Rename persistence namespace/key constants to be more concise The constants in `lightning::util::persist` are sufficiently long that its often difficult eyeball their correctness which nearly led to several bugs when adding async support. Here we take a further step towards condensing them by making them somewhat more concise, dropping words which are obvious from context. Importantly, this changes the prefix for monitor *update* persistence namespaces from monitor persistence namespaces so that they are visually distinct. 
This runs the following replacements: * s/_PERSISTENCE_/_/g * s/CHANNEL_MONITOR_UPDATE/MONITOR_UPDATE/g * s/ARCHIVED_CHANNEL_MONITOR/ARCHIVED_MONITOR/g --- lightning-background-processor/src/lib.rs | 121 +++++++++-------- lightning/src/ln/chanmon_update_fail_tests.rs | 65 +++------ lightning/src/util/persist.rs | 123 +++++++++--------- lightning/src/util/sweep.rs | 10 +- 4 files changed, 141 insertions(+), 178 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 19333c5823a..a85499cdc94 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -53,11 +53,10 @@ use lightning::sign::{ }; use lightning::util::logger::Logger; use lightning::util::persist::{ - KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_PERSISTENCE_KEY, - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_KEY, + CHANNEL_MANAGER_PRIMARY_NAMESPACE, CHANNEL_MANAGER_SECONDARY_NAMESPACE, NETWORK_GRAPH_KEY, + NETWORK_GRAPH_PRIMARY_NAMESPACE, NETWORK_GRAPH_SECONDARY_NAMESPACE, SCORER_KEY, + SCORER_PRIMARY_NAMESPACE, SCORER_SECONDARY_NAMESPACE, }; use lightning::util::sweep::{OutputSweeper, OutputSweeperSync}; #[cfg(feature = "std")] @@ -944,9 +943,9 @@ where log_trace!(logger, "Persisting scorer after update"); if let Err(e) = kv_store .write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, + SCORER_PRIMARY_NAMESPACE, + SCORER_SECONDARY_NAMESPACE, + SCORER_KEY, scorer.encode(), ) .await @@ -1080,9 +1079,9 @@ where let fut = async { kv_store .write( - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, 
- CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, + CHANNEL_MANAGER_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), ) .await @@ -1143,9 +1142,9 @@ where let fut = async { if let Err(e) = kv_store .write( - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PRIMARY_NAMESPACE, + NETWORK_GRAPH_SECONDARY_NAMESPACE, + NETWORK_GRAPH_KEY, network_graph.encode(), ) .await @@ -1185,9 +1184,9 @@ where let fut = async { if let Err(e) = kv_store .write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, + SCORER_PRIMARY_NAMESPACE, + SCORER_SECONDARY_NAMESPACE, + SCORER_KEY, scorer.encode(), ) .await @@ -1301,18 +1300,18 @@ where // ChannelMonitor update(s) persisted without a corresponding ChannelManager update. kv_store .write( - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, + CHANNEL_MANAGER_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), ) .await?; if let Some(ref scorer) = scorer { kv_store .write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, + SCORER_PRIMARY_NAMESPACE, + SCORER_SECONDARY_NAMESPACE, + SCORER_KEY, scorer.encode(), ) .await?; @@ -1320,9 +1319,9 @@ where if let Some(network_graph) = gossip_sync.network_graph() { kv_store .write( - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PRIMARY_NAMESPACE, + NETWORK_GRAPH_SECONDARY_NAMESPACE, + NETWORK_GRAPH_KEY, network_graph.encode(), ) .await?; @@ -1527,9 +1526,9 @@ impl BackgroundProcessor { if update_scorer(scorer, &event, duration_since_epoch) { 
log_trace!(logger, "Persisting scorer after update"); if let Err(e) = kv_store.write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, + SCORER_PRIMARY_NAMESPACE, + SCORER_SECONDARY_NAMESPACE, + SCORER_KEY, scorer.encode(), ) { log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e) @@ -1627,9 +1626,9 @@ impl BackgroundProcessor { if channel_manager.get_cm().get_and_clear_needs_persistence() { log_trace!(logger, "Persisting ChannelManager..."); (kv_store.write( - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, + CHANNEL_MANAGER_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), ))?; log_trace!(logger, "Done persisting ChannelManager."); @@ -1666,9 +1665,9 @@ impl BackgroundProcessor { duration_since_epoch.as_secs(), ); if let Err(e) = kv_store.write( - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PRIMARY_NAMESPACE, + NETWORK_GRAPH_SECONDARY_NAMESPACE, + NETWORK_GRAPH_KEY, network_graph.encode(), ) { log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e); @@ -1695,9 +1694,9 @@ impl BackgroundProcessor { log_trace!(logger, "Calling time_passed and persisting scorer"); scorer.write_lock().time_passed(duration_since_epoch); if let Err(e) = kv_store.write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, + SCORER_PRIMARY_NAMESPACE, + SCORER_SECONDARY_NAMESPACE, + SCORER_KEY, scorer.encode(), ) { log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e); @@ -1735,24 +1734,24 @@ impl BackgroundProcessor { // some races where users quit while channel updates were in-flight, with // ChannelMonitor 
update(s) persisted without a corresponding ChannelManager update. kv_store.write( - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, + CHANNEL_MANAGER_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), )?; if let Some(ref scorer) = scorer { kv_store.write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, + SCORER_PRIMARY_NAMESPACE, + SCORER_SECONDARY_NAMESPACE, + SCORER_KEY, scorer.encode(), )?; } if let Some(network_graph) = gossip_sync.network_graph() { kv_store.write( - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PRIMARY_NAMESPACE, + NETWORK_GRAPH_SECONDARY_NAMESPACE, + NETWORK_GRAPH_KEY, network_graph.encode(), )?; } @@ -1844,12 +1843,10 @@ mod tests { use lightning::types::payment::PaymentHash; use lightning::util::config::UserConfig; use lightning::util::persist::{ - KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_PERSISTENCE_KEY, - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_KEY, CHANNEL_MANAGER_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_SECONDARY_NAMESPACE, NETWORK_GRAPH_KEY, NETWORK_GRAPH_PRIMARY_NAMESPACE, + NETWORK_GRAPH_SECONDARY_NAMESPACE, SCORER_KEY, SCORER_PRIMARY_NAMESPACE, + SCORER_SECONDARY_NAMESPACE, }; use lightning::util::ser::Writeable; use lightning::util::sweep::{ @@ -2104,18 +2101,18 @@ mod tests { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> 
lightning::io::Result<()> { - if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE - && key == CHANNEL_MANAGER_PERSISTENCE_KEY + if primary_namespace == CHANNEL_MANAGER_PRIMARY_NAMESPACE + && secondary_namespace == CHANNEL_MANAGER_SECONDARY_NAMESPACE + && key == CHANNEL_MANAGER_KEY { if let Some((error, message)) = self.manager_error { return Err(std::io::Error::new(error, message).into()); } } - if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE - && key == NETWORK_GRAPH_PERSISTENCE_KEY + if primary_namespace == NETWORK_GRAPH_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_KEY { if let Some(sender) = &self.graph_persistence_notifier { match sender.send(()) { @@ -2131,9 +2128,9 @@ mod tests { } } - if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE - && key == SCORER_PERSISTENCE_KEY + if primary_namespace == SCORER_PRIMARY_NAMESPACE + && secondary_namespace == SCORER_SECONDARY_NAMESPACE + && key == SCORER_KEY { if let Some((error, message)) = self.scorer_error { return Err(std::io::Error::new(error, message).into()); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 1a9af4f2071..eb8f6c09fe7 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -28,9 +28,8 @@ use crate::ln::types::ChannelId; use crate::sign::NodeSigner; use crate::util::native_async::FutureQueue; use crate::util::persist::{ - MonitorName, MonitorUpdatingPersisterAsync, CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MonitorName, MonitorUpdatingPersisterAsync, 
CHANNEL_MONITOR_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_SECONDARY_NAMESPACE, MONITOR_UPDATE_PRIMARY_NAMESPACE, }; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_channel_signer::TestChannelSigner; @@ -4940,8 +4939,8 @@ fn native_async_persist() { let funding_txo = OutPoint { txid: funding_tx.compute_txid(), index: 0 }; let key = MonitorName::V1Channel(funding_txo).to_string(); let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MONITOR_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_SECONDARY_NAMESPACE, &key, ); assert_eq!(pending_writes.len(), 1); @@ -4971,37 +4970,21 @@ fn native_async_persist() { persist_futures.poll_futures(); assert_eq!(async_chain_monitor.release_pending_monitor_events().len(), 0); - let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "1", - ); + let pending_writes = + kv_store.list_pending_async_writes(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "1"); assert_eq!(pending_writes.len(), 1); - let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "2", - ); + let pending_writes = + kv_store.list_pending_async_writes(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "2"); assert_eq!(pending_writes.len(), 1); - kv_store.complete_async_writes_through( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "1", - usize::MAX, - ); + kv_store.complete_async_writes_through(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "1", usize::MAX); persist_futures.poll_futures(); // While the `ChainMonitor` could return a `MonitorEvent::Completed` here, it currently // doesn't. If that ever changes we should validate that the `Completed` event has the correct // `monitor_update_id` (1). 
assert!(async_chain_monitor.release_pending_monitor_events().is_empty()); - kv_store.complete_async_writes_through( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "2", - usize::MAX, - ); + kv_store.complete_async_writes_through(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "2", usize::MAX); persist_futures.poll_futures(); let completed_persist = async_chain_monitor.release_pending_monitor_events(); assert_eq!(completed_persist.len(), 1); @@ -5020,34 +5003,18 @@ fn native_async_persist() { persist_futures.poll_futures(); assert_eq!(async_chain_monitor.release_pending_monitor_events().len(), 0); - let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "3", - ); + let pending_writes = + kv_store.list_pending_async_writes(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "3"); assert_eq!(pending_writes.len(), 1); - let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "4", - ); + let pending_writes = + kv_store.list_pending_async_writes(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "4"); assert_eq!(pending_writes.len(), 1); - kv_store.complete_async_writes_through( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "4", - usize::MAX, - ); + kv_store.complete_async_writes_through(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "4", usize::MAX); persist_futures.poll_futures(); assert_eq!(async_chain_monitor.release_pending_monitor_events().len(), 0); - kv_store.complete_async_writes_through( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "3", - usize::MAX, - ); + kv_store.complete_async_writes_through(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "3", usize::MAX); persist_futures.poll_futures(); let completed_persist = async_chain_monitor.release_pending_monitor_events(); assert_eq!(completed_persist.len(), 1); diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 6238e2d96fa..51c902f0da7 
100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -50,67 +50,67 @@ pub const NAMESPACE_MAX_LEN: usize = 120; /// The primary namespace under which the [`ChannelManager`] will be persisted. /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager -pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = ""; +pub const CHANNEL_MANAGER_PRIMARY_NAMESPACE: &str = ""; /// The secondary namespace under which the [`ChannelManager`] will be persisted. /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager -pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const CHANNEL_MANAGER_SECONDARY_NAMESPACE: &str = ""; /// The key under which the [`ChannelManager`] will be persisted. /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager -pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager"; +pub const CHANNEL_MANAGER_KEY: &str = "manager"; /// The primary namespace under which [`ChannelMonitor`]s will be persisted. -pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors"; +pub const CHANNEL_MONITOR_PRIMARY_NAMESPACE: &str = "monitors"; /// The secondary namespace under which [`ChannelMonitor`]s will be persisted. -pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const CHANNEL_MONITOR_SECONDARY_NAMESPACE: &str = ""; /// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted. -pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates"; +pub const MONITOR_UPDATE_PRIMARY_NAMESPACE: &str = "monitor_updates"; /// The primary namespace under which archived [`ChannelMonitor`]s will be persisted. -pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "archived_monitors"; +pub const ARCHIVED_MONITOR_PRIMARY_NAMESPACE: &str = "archived_monitors"; /// The secondary namespace under which archived [`ChannelMonitor`]s will be persisted. 
-pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const ARCHIVED_MONITOR_SECONDARY_NAMESPACE: &str = ""; /// The primary namespace under which the [`NetworkGraph`] will be persisted. /// /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph -pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = ""; +pub const NETWORK_GRAPH_PRIMARY_NAMESPACE: &str = ""; /// The secondary namespace under which the [`NetworkGraph`] will be persisted. /// /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph -pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const NETWORK_GRAPH_SECONDARY_NAMESPACE: &str = ""; /// The key under which the [`NetworkGraph`] will be persisted. /// /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph -pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph"; +pub const NETWORK_GRAPH_KEY: &str = "network_graph"; /// The primary namespace under which the [`WriteableScore`] will be persisted. /// /// [`WriteableScore`]: crate::routing::scoring::WriteableScore -pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = ""; +pub const SCORER_PRIMARY_NAMESPACE: &str = ""; /// The secondary namespace under which the [`WriteableScore`] will be persisted. /// /// [`WriteableScore`]: crate::routing::scoring::WriteableScore -pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const SCORER_SECONDARY_NAMESPACE: &str = ""; /// The key under which the [`WriteableScore`] will be persisted. /// /// [`WriteableScore`]: crate::routing::scoring::WriteableScore -pub const SCORER_PERSISTENCE_KEY: &str = "scorer"; +pub const SCORER_KEY: &str = "scorer"; /// The primary namespace under which [`OutputSweeper`] state will be persisted. 
/// /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper -pub const OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE: &str = ""; +pub const OUTPUT_SWEEPER_PRIMARY_NAMESPACE: &str = ""; /// The secondary namespace under which [`OutputSweeper`] state will be persisted. /// /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper -pub const OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const OUTPUT_SWEEPER_SECONDARY_NAMESPACE: &str = ""; /// The secondary namespace under which [`OutputSweeper`] state will be persisted. /// The key under which [`OutputSweeper`] state will be persisted. /// /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper -pub const OUTPUT_SWEEPER_PERSISTENCE_KEY: &str = "output_sweeper"; +pub const OUTPUT_SWEEPER_KEY: &str = "output_sweeper"; /// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`]. /// @@ -379,8 +379,8 @@ impl Persist, ) -> chain::ChannelMonitorUpdateStatus { match self.write( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MONITOR_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_SECONDARY_NAMESPACE, &monitor_name.to_string(), monitor.encode(), ) { @@ -394,8 +394,8 @@ impl Persist, ) -> chain::ChannelMonitorUpdateStatus { match self.write( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MONITOR_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_SECONDARY_NAMESPACE, &monitor_name.to_string(), monitor.encode(), ) { @@ -407,16 +407,16 @@ impl Persist monitor, Err(_) => return, }; match self.write( - ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + ARCHIVED_MONITOR_PRIMARY_NAMESPACE, + ARCHIVED_MONITOR_SECONDARY_NAMESPACE, monitor_key.as_str(), monitor, ) { @@ -424,8 +424,8 @@ impl Persist return, }; let _ = self.remove( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - 
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MONITOR_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_SECONDARY_NAMESPACE, monitor_key.as_str(), true, ); @@ -443,14 +443,13 @@ where { let mut res = Vec::new(); - for stored_key in kv_store.list( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - )? { + for stored_key in + kv_store.list(CHANNEL_MONITOR_PRIMARY_NAMESPACE, CHANNEL_MONITOR_SECONDARY_NAMESPACE)? + { match ::EcdsaSigner>)>>::read( &mut io::Cursor::new(kv_store.read( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MONITOR_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_SECONDARY_NAMESPACE, &stored_key, )?), (&*entropy_source, &*signer_provider), @@ -524,13 +523,13 @@ fn poll_sync_future(future: F) -> F::Output { /// - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s. /// - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`] /// -/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE`], +/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PRIMARY_NAMESPACE`], /// using the familiar encoding of an [`OutPoint`] (e.g., `[SOME-64-CHAR-HEX-STRING]_1`) for v1 /// channels or a [`ChannelId`] (e.g., `[SOME-64-CHAR-HEX-STRING]`) for v2 channels. 
/// /// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows: /// -/// - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE`] +/// - primary namespace: [`MONITOR_UPDATE_PRIMARY_NAMESPACE`] /// - secondary namespace: [the monitor's encoded outpoint or channel id name] /// /// Under that secondary namespace, each update is stored with a number string, like `21`, which @@ -543,14 +542,14 @@ fn poll_sync_future(future: F) -> F::Output { /// /// Full channel monitors would be stored at a single key: /// -/// `[CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1` +/// `[CHANNEL_MONITOR_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1` /// /// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key): /// /// ```text -/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1 -/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2 -/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3 +/// [MONITOR_UPDATE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1 +/// [MONITOR_UPDATE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2 +/// [MONITOR_UPDATE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3 /// ``` /// ... and so on. 
/// @@ -722,8 +721,8 @@ where log_error!( self.0 .0.logger, "Failed to write ChannelMonitor {}/{}/{} reason: {}", - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MONITOR_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_SECONDARY_NAMESPACE, monitor_name, e ); @@ -864,8 +863,8 @@ where Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { - let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let primary = CHANNEL_MONITOR_PRIMARY_NAMESPACE; + let secondary = CHANNEL_MONITOR_SECONDARY_NAMESPACE; let monitor_list = self.0.kv_store.list(primary, secondary).await?; let mut res = Vec::with_capacity(monitor_list.len()); for monitor_key in monitor_list { @@ -1084,8 +1083,8 @@ where Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { - let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let primary = CHANNEL_MONITOR_PRIMARY_NAMESPACE; + let secondary = CHANNEL_MONITOR_SECONDARY_NAMESPACE; let monitor_bytes = self.kv_store.read(primary, secondary, monitor_key).await?; let mut monitor_cursor = io::Cursor::new(monitor_bytes); // Discard the sentinel bytes if found. 
@@ -1128,13 +1127,13 @@ where async fn read_monitor_update( &self, monitor_key: &str, update_name: &UpdateName, ) -> Result { - let primary = CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE; + let primary = MONITOR_UPDATE_PRIMARY_NAMESPACE; let update_bytes = self.kv_store.read(primary, monitor_key, update_name.as_str()).await?; ChannelMonitorUpdate::read(&mut &update_bytes[..]).map_err(|e| { log_error!( self.logger, "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}", - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_PRIMARY_NAMESPACE, monitor_key, update_name.as_str(), e, @@ -1144,8 +1143,8 @@ where } async fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> { - let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let primary = CHANNEL_MONITOR_PRIMARY_NAMESPACE; + let secondary = CHANNEL_MONITOR_SECONDARY_NAMESPACE; let monitor_keys = self.kv_store.list(primary, secondary).await?; for monitor_key in monitor_keys { let monitor_name = MonitorName::from_str(&monitor_key)?; @@ -1165,7 +1164,7 @@ where async fn cleanup_stale_updates_for_monitor_to( &self, monitor_key: &str, latest_update_id: u64, lazy: bool, ) -> Result<(), io::Error> { - let primary = CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE; + let primary = MONITOR_UPDATE_PRIMARY_NAMESPACE; let updates = self.kv_store.list(primary, monitor_key).await?; for update in updates { let update_name = UpdateName::new(update)?; @@ -1196,8 +1195,8 @@ where // Note that this is NOT an async function, but rather calls the *sync* KVStore write // method, allowing it to do its queueing immediately, and then return a future for the // completion of the write. This ensures monitor persistence ordering is preserved. 
- let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let primary = CHANNEL_MONITOR_PRIMARY_NAMESPACE; + let secondary = CHANNEL_MONITOR_SECONDARY_NAMESPACE; self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes) } @@ -1219,7 +1218,7 @@ where if persist_update { let monitor_key = monitor_name.to_string(); let update_name = UpdateName::from(update.update_id); - let primary = CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE; + let primary = MONITOR_UPDATE_PRIMARY_NAMESPACE; // Note that this is NOT an async function, but rather calls the *sync* KVStore // write method, allowing it to do its queueing immediately, and then return a // future for the completion of the write. This ensures monitor persistence @@ -1290,14 +1289,14 @@ where Ok((_block_hash, monitor)) => monitor, Err(_) => return, }; - let primary = ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let primary = ARCHIVED_MONITOR_PRIMARY_NAMESPACE; + let secondary = ARCHIVED_MONITOR_SECONDARY_NAMESPACE; match self.kv_store.write(primary, secondary, &monitor_key, monitor.encode()).await { Ok(()) => {}, Err(_e) => return, }; - let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let primary = CHANNEL_MONITOR_PRIMARY_NAMESPACE; + let secondary = CHANNEL_MONITOR_SECONDARY_NAMESPACE; let _ = self.kv_store.remove(primary, secondary, &monitor_key, true).await; } @@ -1306,7 +1305,7 @@ where let monitor_key = monitor_name.to_string(); for update_id in start..=end { let update_name = UpdateName::from(update_id); - let primary = CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE; + let primary = MONITOR_UPDATE_PRIMARY_NAMESPACE; let res = self.kv_store.remove(primary, &monitor_key, update_name.as_str(), true).await; if let Err(e) = res { log_error!( @@ 
-1659,7 +1658,7 @@ mod tests { }; let update_list = KVStoreSync::list( &kv_store_0, - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_PRIMARY_NAMESPACE, &monitor_name.to_string(), ); assert_eq!(update_list.unwrap().len() as u64, expected_updates, "persister 0"); @@ -1677,7 +1676,7 @@ mod tests { }; let update_list = KVStoreSync::list( &kv_store_1, - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_PRIMARY_NAMESPACE, &monitor_name.to_string(), ); assert_eq!(update_list.unwrap().len() as u64, expected_updates, "persister 1"); @@ -1888,7 +1887,7 @@ mod tests { let monitor_name = monitor.persistence_key(); KVStoreSync::write( &kv_store_0, - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_PRIMARY_NAMESPACE, &monitor_name.to_string(), UpdateName::from(1).as_str(), vec![0u8; 1], @@ -1901,7 +1900,7 @@ mod tests { // Confirm the stale update is unreadable/gone assert!(KVStoreSync::read( &kv_store_0, - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_PRIMARY_NAMESPACE, &monitor_name.to_string(), UpdateName::from(1).as_str() ) diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 5a1ffad3e04..e0334fce5ab 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -22,8 +22,8 @@ use crate::sign::{ use crate::sync::Mutex; use crate::util::logger::Logger; use crate::util::persist::{ - KVStore, KVStoreSync, KVStoreSyncWrapper, OUTPUT_SWEEPER_PERSISTENCE_KEY, - OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + KVStore, KVStoreSync, KVStoreSyncWrapper, OUTPUT_SWEEPER_KEY, OUTPUT_SWEEPER_PRIMARY_NAMESPACE, + OUTPUT_SWEEPER_SECONDARY_NAMESPACE, }; use crate::util::ser::{Readable, ReadableArgs, Writeable}; use crate::{impl_writeable_tlv_based, log_debug, log_error}; @@ -613,9 +613,9 @@ where let encoded = sweeper_state.encode(); self.kv_store.write( - OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, - 
OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_KEY, + OUTPUT_SWEEPER_PRIMARY_NAMESPACE, + OUTPUT_SWEEPER_SECONDARY_NAMESPACE, + OUTPUT_SWEEPER_KEY, encoded, ) } From 98e6a8ed39106fedb50aa55fcd594ae235d718c7 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 22 Sep 2025 14:41:14 +0000 Subject: [PATCH 3/4] Drop verbose `*_SECONDARY_NAMESPACE` consts that are always "" The constants in `lightning::util::persist` are sufficiently long that it's often difficult to eyeball their correctness, which nearly led to several bugs when adding async support. As it turns out, all of the `*_SECONDARY_NAMESPACE` constants were simply "", so having a huge pile of them everywhere is quite verbose and makes scanning the `*_NAMESPACE` constants more difficult. Here, we simply drop the `*_SECONDARY_NAMESPACE` constants entirely. --- lightning-background-processor/src/lib.rs | 119 +++++--------- lightning/src/ln/chanmon_update_fail_tests.rs | 29 ++-- lightning/src/util/persist.rs | 150 ++++++------------ lightning/src/util/sweep.rs | 11 +- 4 files changed, 100 insertions(+), 209 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index a85499cdc94..7b4f29896c7 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -53,10 +53,8 @@ use lightning::sign::{ }; use lightning::util::logger::Logger; use lightning::util::persist::{ - KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_KEY, - CHANNEL_MANAGER_PRIMARY_NAMESPACE, CHANNEL_MANAGER_SECONDARY_NAMESPACE, NETWORK_GRAPH_KEY, - NETWORK_GRAPH_PRIMARY_NAMESPACE, NETWORK_GRAPH_SECONDARY_NAMESPACE, SCORER_KEY, - SCORER_PRIMARY_NAMESPACE, SCORER_SECONDARY_NAMESPACE, + KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_KEY, CHANNEL_MANAGER_NAMESPACE, + NETWORK_GRAPH_KEY, NETWORK_GRAPH_NAMESPACE, SCORER_KEY, SCORER_NAMESPACE, }; use lightning::util::sweep::{OutputSweeper, OutputSweeperSync};
#[cfg(feature = "std")] @@ -941,14 +939,8 @@ where if let Some(duration_since_epoch) = fetch_time() { if update_scorer(scorer, &event, duration_since_epoch) { log_trace!(logger, "Persisting scorer after update"); - if let Err(e) = kv_store - .write( - SCORER_PRIMARY_NAMESPACE, - SCORER_SECONDARY_NAMESPACE, - SCORER_KEY, - scorer.encode(), - ) - .await + if let Err(e) = + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode()).await { log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e); // We opt not to abort early on persistence failure here as persisting @@ -1079,8 +1071,8 @@ where let fut = async { kv_store .write( - CHANNEL_MANAGER_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_NAMESPACE, + "", CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), ) @@ -1142,8 +1134,8 @@ where let fut = async { if let Err(e) = kv_store .write( - NETWORK_GRAPH_PRIMARY_NAMESPACE, - NETWORK_GRAPH_SECONDARY_NAMESPACE, + NETWORK_GRAPH_NAMESPACE, + "", NETWORK_GRAPH_KEY, network_graph.encode(), ) @@ -1182,14 +1174,8 @@ where log_trace!(logger, "Persisting scorer"); } let fut = async { - if let Err(e) = kv_store - .write( - SCORER_PRIMARY_NAMESPACE, - SCORER_SECONDARY_NAMESPACE, - SCORER_KEY, - scorer.encode(), - ) - .await + if let Err(e) = + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode()).await { log_error!( logger, @@ -1300,30 +1286,18 @@ where // ChannelMonitor update(s) persisted without a corresponding ChannelManager update. 
kv_store .write( - CHANNEL_MANAGER_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_NAMESPACE, + "", CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), ) .await?; if let Some(ref scorer) = scorer { - kv_store - .write( - SCORER_PRIMARY_NAMESPACE, - SCORER_SECONDARY_NAMESPACE, - SCORER_KEY, - scorer.encode(), - ) - .await?; + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode()).await?; } if let Some(network_graph) = gossip_sync.network_graph() { kv_store - .write( - NETWORK_GRAPH_PRIMARY_NAMESPACE, - NETWORK_GRAPH_SECONDARY_NAMESPACE, - NETWORK_GRAPH_KEY, - network_graph.encode(), - ) + .write(NETWORK_GRAPH_NAMESPACE, "", NETWORK_GRAPH_KEY, network_graph.encode()) .await?; } Ok(()) @@ -1525,12 +1499,9 @@ impl BackgroundProcessor { .expect("Time should be sometime after 1970"); if update_scorer(scorer, &event, duration_since_epoch) { log_trace!(logger, "Persisting scorer after update"); - if let Err(e) = kv_store.write( - SCORER_PRIMARY_NAMESPACE, - SCORER_SECONDARY_NAMESPACE, - SCORER_KEY, - scorer.encode(), - ) { + if let Err(e) = + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode()) + { log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e) } } @@ -1626,8 +1597,8 @@ impl BackgroundProcessor { if channel_manager.get_cm().get_and_clear_needs_persistence() { log_trace!(logger, "Persisting ChannelManager..."); (kv_store.write( - CHANNEL_MANAGER_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_NAMESPACE, + "", CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), ))?; @@ -1665,8 +1636,8 @@ impl BackgroundProcessor { duration_since_epoch.as_secs(), ); if let Err(e) = kv_store.write( - NETWORK_GRAPH_PRIMARY_NAMESPACE, - NETWORK_GRAPH_SECONDARY_NAMESPACE, + NETWORK_GRAPH_NAMESPACE, + "", NETWORK_GRAPH_KEY, network_graph.encode(), ) { @@ -1693,12 +1664,9 @@ impl BackgroundProcessor { .expect("Time should be sometime after 1970"); log_trace!(logger, 
"Calling time_passed and persisting scorer"); scorer.write_lock().time_passed(duration_since_epoch); - if let Err(e) = kv_store.write( - SCORER_PRIMARY_NAMESPACE, - SCORER_SECONDARY_NAMESPACE, - SCORER_KEY, - scorer.encode(), - ) { + if let Err(e) = + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode()) + { log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e); } } @@ -1734,23 +1702,18 @@ impl BackgroundProcessor { // some races where users quit while channel updates were in-flight, with // ChannelMonitor update(s) persisted without a corresponding ChannelManager update. kv_store.write( - CHANNEL_MANAGER_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_NAMESPACE, + "", CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), )?; if let Some(ref scorer) = scorer { - kv_store.write( - SCORER_PRIMARY_NAMESPACE, - SCORER_SECONDARY_NAMESPACE, - SCORER_KEY, - scorer.encode(), - )?; + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode())?; } if let Some(network_graph) = gossip_sync.network_graph() { kv_store.write( - NETWORK_GRAPH_PRIMARY_NAMESPACE, - NETWORK_GRAPH_SECONDARY_NAMESPACE, + NETWORK_GRAPH_NAMESPACE, + "", NETWORK_GRAPH_KEY, network_graph.encode(), )?; @@ -1843,10 +1806,8 @@ mod tests { use lightning::types::payment::PaymentHash; use lightning::util::config::UserConfig; use lightning::util::persist::{ - KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_KEY, CHANNEL_MANAGER_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_SECONDARY_NAMESPACE, NETWORK_GRAPH_KEY, NETWORK_GRAPH_PRIMARY_NAMESPACE, - NETWORK_GRAPH_SECONDARY_NAMESPACE, SCORER_KEY, SCORER_PRIMARY_NAMESPACE, - SCORER_SECONDARY_NAMESPACE, + KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_KEY, CHANNEL_MANAGER_NAMESPACE, + NETWORK_GRAPH_KEY, NETWORK_GRAPH_NAMESPACE, SCORER_KEY, SCORER_NAMESPACE, }; use lightning::util::ser::Writeable; use lightning::util::sweep::{ @@ -2101,19 +2062,15 @@ mod tests { fn write( &self, 
primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> lightning::io::Result<()> { - if primary_namespace == CHANNEL_MANAGER_PRIMARY_NAMESPACE - && secondary_namespace == CHANNEL_MANAGER_SECONDARY_NAMESPACE - && key == CHANNEL_MANAGER_KEY - { + if primary_namespace == CHANNEL_MANAGER_NAMESPACE && key == CHANNEL_MANAGER_KEY { + assert_eq!(secondary_namespace, ""); if let Some((error, message)) = self.manager_error { return Err(std::io::Error::new(error, message).into()); } } - if primary_namespace == NETWORK_GRAPH_PRIMARY_NAMESPACE - && secondary_namespace == NETWORK_GRAPH_SECONDARY_NAMESPACE - && key == NETWORK_GRAPH_KEY - { + if primary_namespace == NETWORK_GRAPH_NAMESPACE && key == NETWORK_GRAPH_KEY { + assert_eq!(secondary_namespace, ""); if let Some(sender) = &self.graph_persistence_notifier { match sender.send(()) { Ok(()) => {}, @@ -2128,10 +2085,8 @@ mod tests { } } - if primary_namespace == SCORER_PRIMARY_NAMESPACE - && secondary_namespace == SCORER_SECONDARY_NAMESPACE - && key == SCORER_KEY - { + if primary_namespace == SCORER_NAMESPACE && key == SCORER_KEY { + assert_eq!(secondary_namespace, ""); if let Some((error, message)) = self.scorer_error { return Err(std::io::Error::new(error, message).into()); } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index eb8f6c09fe7..0bb772de5d7 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -28,8 +28,7 @@ use crate::ln::types::ChannelId; use crate::sign::NodeSigner; use crate::util::native_async::FutureQueue; use crate::util::persist::{ - MonitorName, MonitorUpdatingPersisterAsync, CHANNEL_MONITOR_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_SECONDARY_NAMESPACE, MONITOR_UPDATE_PRIMARY_NAMESPACE, + MonitorName, MonitorUpdatingPersisterAsync, CHANNEL_MONITOR_NAMESPACE, MONITOR_UPDATE_NAMESPACE, }; use crate::util::ser::{ReadableArgs, Writeable}; use 
crate::util::test_channel_signer::TestChannelSigner; @@ -4938,11 +4937,7 @@ fn native_async_persist() { let funding_txo = OutPoint { txid: funding_tx.compute_txid(), index: 0 }; let key = MonitorName::V1Channel(funding_txo).to_string(); - let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_SECONDARY_NAMESPACE, - &key, - ); + let pending_writes = kv_store.list_pending_async_writes(CHANNEL_MONITOR_NAMESPACE, "", &key); assert_eq!(pending_writes.len(), 1); // Once we complete the future, the write will still be pending until the future gets `poll`ed. @@ -4970,21 +4965,19 @@ fn native_async_persist() { persist_futures.poll_futures(); assert_eq!(async_chain_monitor.release_pending_monitor_events().len(), 0); - let pending_writes = - kv_store.list_pending_async_writes(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "1"); + let pending_writes = kv_store.list_pending_async_writes(MONITOR_UPDATE_NAMESPACE, &key, "1"); assert_eq!(pending_writes.len(), 1); - let pending_writes = - kv_store.list_pending_async_writes(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "2"); + let pending_writes = kv_store.list_pending_async_writes(MONITOR_UPDATE_NAMESPACE, &key, "2"); assert_eq!(pending_writes.len(), 1); - kv_store.complete_async_writes_through(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "1", usize::MAX); + kv_store.complete_async_writes_through(MONITOR_UPDATE_NAMESPACE, &key, "1", usize::MAX); persist_futures.poll_futures(); // While the `ChainMonitor` could return a `MonitorEvent::Completed` here, it currently // doesn't. If that ever changes we should validate that the `Completed` event has the correct // `monitor_update_id` (1). 
assert!(async_chain_monitor.release_pending_monitor_events().is_empty()); - kv_store.complete_async_writes_through(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "2", usize::MAX); + kv_store.complete_async_writes_through(MONITOR_UPDATE_NAMESPACE, &key, "2", usize::MAX); persist_futures.poll_futures(); let completed_persist = async_chain_monitor.release_pending_monitor_events(); assert_eq!(completed_persist.len(), 1); @@ -5003,18 +4996,16 @@ fn native_async_persist() { persist_futures.poll_futures(); assert_eq!(async_chain_monitor.release_pending_monitor_events().len(), 0); - let pending_writes = - kv_store.list_pending_async_writes(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "3"); + let pending_writes = kv_store.list_pending_async_writes(MONITOR_UPDATE_NAMESPACE, &key, "3"); assert_eq!(pending_writes.len(), 1); - let pending_writes = - kv_store.list_pending_async_writes(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "4"); + let pending_writes = kv_store.list_pending_async_writes(MONITOR_UPDATE_NAMESPACE, &key, "4"); assert_eq!(pending_writes.len(), 1); - kv_store.complete_async_writes_through(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "4", usize::MAX); + kv_store.complete_async_writes_through(MONITOR_UPDATE_NAMESPACE, &key, "4", usize::MAX); persist_futures.poll_futures(); assert_eq!(async_chain_monitor.release_pending_monitor_events().len(), 0); - kv_store.complete_async_writes_through(MONITOR_UPDATE_PRIMARY_NAMESPACE, &key, "3", usize::MAX); + kv_store.complete_async_writes_through(MONITOR_UPDATE_NAMESPACE, &key, "3", usize::MAX); persist_futures.poll_futures(); let completed_persist = async_chain_monitor.release_pending_monitor_events(); assert_eq!(completed_persist.len(), 1); diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 51c902f0da7..599e3a7fc45 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -50,36 +50,27 @@ pub const NAMESPACE_MAX_LEN: usize = 120; /// The primary namespace under which the [`ChannelManager`] 
will be persisted. /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager -pub const CHANNEL_MANAGER_PRIMARY_NAMESPACE: &str = ""; -/// The secondary namespace under which the [`ChannelManager`] will be persisted. -/// -/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager -pub const CHANNEL_MANAGER_SECONDARY_NAMESPACE: &str = ""; +pub const CHANNEL_MANAGER_NAMESPACE: &str = ""; /// The key under which the [`ChannelManager`] will be persisted. /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager pub const CHANNEL_MANAGER_KEY: &str = "manager"; /// The primary namespace under which [`ChannelMonitor`]s will be persisted. -pub const CHANNEL_MONITOR_PRIMARY_NAMESPACE: &str = "monitors"; -/// The secondary namespace under which [`ChannelMonitor`]s will be persisted. -pub const CHANNEL_MONITOR_SECONDARY_NAMESPACE: &str = ""; +pub const CHANNEL_MONITOR_NAMESPACE: &str = "monitors"; /// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted. -pub const MONITOR_UPDATE_PRIMARY_NAMESPACE: &str = "monitor_updates"; +/// +/// Note that unlike all other LDK-native persistence calls, monitor updates have a non-empty +/// secondary namespace. +pub const MONITOR_UPDATE_NAMESPACE: &str = "monitor_updates"; /// The primary namespace under which archived [`ChannelMonitor`]s will be persisted. -pub const ARCHIVED_MONITOR_PRIMARY_NAMESPACE: &str = "archived_monitors"; -/// The secondary namespace under which archived [`ChannelMonitor`]s will be persisted. -pub const ARCHIVED_MONITOR_SECONDARY_NAMESPACE: &str = ""; +pub const ARCHIVED_MONITOR_NAMESPACE: &str = "archived_monitors"; /// The primary namespace under which the [`NetworkGraph`] will be persisted. /// /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph -pub const NETWORK_GRAPH_PRIMARY_NAMESPACE: &str = ""; -/// The secondary namespace under which the [`NetworkGraph`] will be persisted. 
-/// -/// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph +pub const NETWORK_GRAPH_NAMESPACE: &str = ""; /// The key under which the [`NetworkGraph`] will be persisted. /// /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph @@ -88,11 +79,7 @@ pub const NETWORK_GRAPH_KEY: &str = "network_graph"; /// The primary namespace under which the [`WriteableScore`] will be persisted. /// /// [`WriteableScore`]: crate::routing::scoring::WriteableScore -pub const SCORER_PRIMARY_NAMESPACE: &str = ""; -/// The secondary namespace under which the [`WriteableScore`] will be persisted. -/// -/// [`WriteableScore`]: crate::routing::scoring::WriteableScore +pub const SCORER_NAMESPACE: &str = ""; /// The key under which the [`WriteableScore`] will be persisted. /// /// [`WriteableScore`]: crate::routing::scoring::WriteableScore @@ -101,11 +88,6 @@ pub const SCORER_KEY: &str = "scorer"; /// The primary namespace under which [`OutputSweeper`] state will be persisted. /// /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper -pub const OUTPUT_SWEEPER_PRIMARY_NAMESPACE: &str = ""; -/// The secondary namespace under which [`OutputSweeper`] state will be persisted. -/// -/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper +pub const OUTPUT_SWEEPER_NAMESPACE: &str = ""; -/// The secondary namespace under which [`OutputSweeper`] state will be persisted. /// The key under which [`OutputSweeper`] state will be persisted. /// @@ -284,6 +267,9 @@ pub trait KVStore { ) -> AsyncResult<'static, Vec, io::Error>; /// Persists the given data under the given `key`. /// + /// Note that LDK-originating persistence calls will always set `secondary_namespace` to "" + /// unless `primary_namespace` is [`MONITOR_UPDATE_NAMESPACE`].
+ /// /// The order of multiple writes to the same key needs to be retained while persisting /// asynchronously. In other words, if two writes to the same key occur, the state (as seen by /// [`Self::read`]) must either see the first write then the second, or only ever the second, @@ -378,12 +364,8 @@ impl Persist, ) -> chain::ChannelMonitorUpdateStatus { - match self.write( - CHANNEL_MONITOR_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_SECONDARY_NAMESPACE, - &monitor_name.to_string(), - monitor.encode(), - ) { + let key = monitor_name.to_string(); + match self.write(CHANNEL_MONITOR_NAMESPACE, "", &key, monitor.encode()) { Ok(()) => chain::ChannelMonitorUpdateStatus::Completed, Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError, } @@ -393,12 +375,8 @@ impl Persist, monitor: &ChannelMonitor, ) -> chain::ChannelMonitorUpdateStatus { - match self.write( - CHANNEL_MONITOR_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_SECONDARY_NAMESPACE, - &monitor_name.to_string(), - monitor.encode(), - ) { + let key = monitor_name.to_string(); + match self.write(CHANNEL_MONITOR_NAMESPACE, "", &key, monitor.encode()) { Ok(()) => chain::ChannelMonitorUpdateStatus::Completed, Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError, } @@ -406,29 +384,15 @@ impl Persist monitor, Err(_) => return, }; - match self.write( - ARCHIVED_MONITOR_PRIMARY_NAMESPACE, - ARCHIVED_MONITOR_SECONDARY_NAMESPACE, - monitor_key.as_str(), - monitor, - ) { + match self.write(ARCHIVED_MONITOR_NAMESPACE, "", monitor_key.as_str(), monitor) { Ok(()) => {}, Err(_e) => return, }; - let _ = self.remove( - CHANNEL_MONITOR_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_SECONDARY_NAMESPACE, - monitor_key.as_str(), - true, - ); + let _ = self.remove(CHANNEL_MONITOR_NAMESPACE, "", monitor_key.as_str(), true); } } @@ -443,15 +407,9 @@ where { let mut res = Vec::new(); - for stored_key in - kv_store.list(CHANNEL_MONITOR_PRIMARY_NAMESPACE, CHANNEL_MONITOR_SECONDARY_NAMESPACE)? 
- { + for stored_key in kv_store.list(CHANNEL_MONITOR_NAMESPACE, "")? { match ::EcdsaSigner>)>>::read( - &mut io::Cursor::new(kv_store.read( - CHANNEL_MONITOR_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_SECONDARY_NAMESPACE, - &stored_key, - )?), + &mut io::Cursor::new(kv_store.read(CHANNEL_MONITOR_NAMESPACE, "", &stored_key)?), (&*entropy_source, &*signer_provider), ) { Ok(Some((block_hash, channel_monitor))) => { @@ -523,13 +481,13 @@ fn poll_sync_future(future: F) -> F::Output { /// - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s. /// - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`] /// -/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PRIMARY_NAMESPACE`], +/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_NAMESPACE`], /// using the familiar encoding of an [`OutPoint`] (e.g., `[SOME-64-CHAR-HEX-STRING]_1`) for v1 /// channels or a [`ChannelId`] (e.g., `[SOME-64-CHAR-HEX-STRING]`) for v2 channels. 
/// /// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows: /// -/// - primary namespace: [`MONITOR_UPDATE_PRIMARY_NAMESPACE`] +/// - primary namespace: [`MONITOR_UPDATE_NAMESPACE`] /// - secondary namespace: [the monitor's encoded outpoint or channel id name] /// /// Under that secondary namespace, each update is stored with a number string, like `21`, which @@ -542,14 +500,14 @@ fn poll_sync_future(future: F) -> F::Output { /// /// Full channel monitors would be stored at a single key: /// -/// `[CHANNEL_MONITOR_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1` +/// `[CHANNEL_MONITOR_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1` /// /// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key): /// /// ```text -/// [MONITOR_UPDATE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1 -/// [MONITOR_UPDATE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2 -/// [MONITOR_UPDATE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3 +/// [MONITOR_UPDATE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1 +/// [MONITOR_UPDATE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2 +/// [MONITOR_UPDATE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3 /// ``` /// ... and so on. 
/// @@ -721,8 +679,8 @@ where log_error!( self.0 .0.logger, "Failed to write ChannelMonitor {}/{}/{} reason: {}", - CHANNEL_MONITOR_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_SECONDARY_NAMESPACE, + CHANNEL_MONITOR_NAMESPACE, + "", monitor_name, e ); @@ -863,9 +821,8 @@ where Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { - let primary = CHANNEL_MONITOR_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_SECONDARY_NAMESPACE; - let monitor_list = self.0.kv_store.list(primary, secondary).await?; + let primary = CHANNEL_MONITOR_NAMESPACE; + let monitor_list = self.0.kv_store.list(primary, "").await?; let mut res = Vec::with_capacity(monitor_list.len()); for monitor_key in monitor_list { let result = @@ -1083,9 +1040,8 @@ where Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { - let primary = CHANNEL_MONITOR_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_SECONDARY_NAMESPACE; - let monitor_bytes = self.kv_store.read(primary, secondary, monitor_key).await?; + let primary = CHANNEL_MONITOR_NAMESPACE; + let monitor_bytes = self.kv_store.read(primary, "", monitor_key).await?; let mut monitor_cursor = io::Cursor::new(monitor_bytes); // Discard the sentinel bytes if found. 
if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) { @@ -1127,13 +1083,13 @@ where async fn read_monitor_update( &self, monitor_key: &str, update_name: &UpdateName, ) -> Result { - let primary = MONITOR_UPDATE_PRIMARY_NAMESPACE; + let primary = MONITOR_UPDATE_NAMESPACE; let update_bytes = self.kv_store.read(primary, monitor_key, update_name.as_str()).await?; ChannelMonitorUpdate::read(&mut &update_bytes[..]).map_err(|e| { log_error!( self.logger, "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}", - MONITOR_UPDATE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_NAMESPACE, monitor_key, update_name.as_str(), e, @@ -1143,9 +1099,8 @@ where } async fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> { - let primary = CHANNEL_MONITOR_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_SECONDARY_NAMESPACE; - let monitor_keys = self.kv_store.list(primary, secondary).await?; + let primary = CHANNEL_MONITOR_NAMESPACE; + let monitor_keys = self.kv_store.list(primary, "").await?; for monitor_key in monitor_keys { let monitor_name = MonitorName::from_str(&monitor_key)?; let maybe_monitor = self.maybe_read_monitor(&monitor_name, &monitor_key).await?; @@ -1164,7 +1119,7 @@ where async fn cleanup_stale_updates_for_monitor_to( &self, monitor_key: &str, latest_update_id: u64, lazy: bool, ) -> Result<(), io::Error> { - let primary = MONITOR_UPDATE_PRIMARY_NAMESPACE; + let primary = MONITOR_UPDATE_NAMESPACE; let updates = self.kv_store.list(primary, monitor_key).await?; for update in updates { let update_name = UpdateName::new(update)?; @@ -1195,9 +1150,8 @@ where // Note that this is NOT an async function, but rather calls the *sync* KVStore write // method, allowing it to do its queueing immediately, and then return a future for the // completion of the write. This ensures monitor persistence ordering is preserved. 
- let primary = CHANNEL_MONITOR_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_SECONDARY_NAMESPACE; - self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes) + let primary = CHANNEL_MONITOR_NAMESPACE; + self.kv_store.write(primary, "", monitor_key.as_str(), monitor_bytes) } fn update_persisted_channel<'a, ChannelSigner: EcdsaChannelSigner + 'a>( @@ -1218,7 +1172,7 @@ where if persist_update { let monitor_key = monitor_name.to_string(); let update_name = UpdateName::from(update.update_id); - let primary = MONITOR_UPDATE_PRIMARY_NAMESPACE; + let primary = MONITOR_UPDATE_NAMESPACE; // Note that this is NOT an async function, but rather calls the *sync* KVStore // write method, allowing it to do its queueing immediately, and then return a // future for the completion of the write. This ensures monitor persistence @@ -1289,15 +1243,13 @@ where Ok((_block_hash, monitor)) => monitor, Err(_) => return, }; - let primary = ARCHIVED_MONITOR_PRIMARY_NAMESPACE; - let secondary = ARCHIVED_MONITOR_SECONDARY_NAMESPACE; - match self.kv_store.write(primary, secondary, &monitor_key, monitor.encode()).await { + let primary = ARCHIVED_MONITOR_NAMESPACE; + match self.kv_store.write(primary, "", &monitor_key, monitor.encode()).await { Ok(()) => {}, Err(_e) => return, }; - let primary = CHANNEL_MONITOR_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_SECONDARY_NAMESPACE; - let _ = self.kv_store.remove(primary, secondary, &monitor_key, true).await; + let primary = CHANNEL_MONITOR_NAMESPACE; + let _ = self.kv_store.remove(primary, "", &monitor_key, true).await; } // Cleans up monitor updates for given monitor in range `start..=end`. 
@@ -1305,7 +1257,7 @@ where let monitor_key = monitor_name.to_string(); for update_id in start..=end { let update_name = UpdateName::from(update_id); - let primary = MONITOR_UPDATE_PRIMARY_NAMESPACE; + let primary = MONITOR_UPDATE_NAMESPACE; let res = self.kv_store.remove(primary, &monitor_key, update_name.as_str(), true).await; if let Err(e) = res { log_error!( @@ -1658,7 +1610,7 @@ mod tests { }; let update_list = KVStoreSync::list( &kv_store_0, - MONITOR_UPDATE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_NAMESPACE, &monitor_name.to_string(), ); assert_eq!(update_list.unwrap().len() as u64, expected_updates, "persister 0"); @@ -1676,7 +1628,7 @@ mod tests { }; let update_list = KVStoreSync::list( &kv_store_1, - MONITOR_UPDATE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_NAMESPACE, &monitor_name.to_string(), ); assert_eq!(update_list.unwrap().len() as u64, expected_updates, "persister 1"); @@ -1887,7 +1839,7 @@ mod tests { let monitor_name = monitor.persistence_key(); KVStoreSync::write( &kv_store_0, - MONITOR_UPDATE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_NAMESPACE, &monitor_name.to_string(), UpdateName::from(1).as_str(), vec![0u8; 1], @@ -1900,7 +1852,7 @@ mod tests { // Confirm the stale update is unreadable/gone assert!(KVStoreSync::read( &kv_store_0, - MONITOR_UPDATE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_NAMESPACE, &monitor_name.to_string(), UpdateName::from(1).as_str() ) diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index e0334fce5ab..10260a803a1 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -22,8 +22,7 @@ use crate::sign::{ use crate::sync::Mutex; use crate::util::logger::Logger; use crate::util::persist::{ - KVStore, KVStoreSync, KVStoreSyncWrapper, OUTPUT_SWEEPER_KEY, OUTPUT_SWEEPER_PRIMARY_NAMESPACE, - OUTPUT_SWEEPER_SECONDARY_NAMESPACE, + KVStore, KVStoreSync, KVStoreSyncWrapper, OUTPUT_SWEEPER_KEY, OUTPUT_SWEEPER_NAMESPACE, }; use crate::util::ser::{Readable, ReadableArgs, Writeable}; use 
crate::{impl_writeable_tlv_based, log_debug, log_error}; @@ -611,13 +610,7 @@ where fn persist_state<'a>(&self, sweeper_state: &SweeperState) -> AsyncResult<'a, (), io::Error> { let encoded = sweeper_state.encode(); - - self.kv_store.write( - OUTPUT_SWEEPER_PRIMARY_NAMESPACE, - OUTPUT_SWEEPER_SECONDARY_NAMESPACE, - OUTPUT_SWEEPER_KEY, - encoded, - ) + self.kv_store.write(OUTPUT_SWEEPER_NAMESPACE, "", OUTPUT_SWEEPER_KEY, encoded) } /// Updates the sweeper state by executing the given callback. Persists the state afterwards if it is marked dirty, From 5ffee028d9ff8406deb74bd0f808e91fc9bd0e61 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 22 Sep 2025 14:55:04 +0000 Subject: [PATCH 4/4] Remove unnecessary variable indirection in `persist.rs` --- lightning/src/util/persist.rs | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 599e3a7fc45..6f1ad74fbe5 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -821,8 +821,7 @@ where Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { - let primary = CHANNEL_MONITOR_NAMESPACE; - let monitor_list = self.0.kv_store.list(primary, "").await?; + let monitor_list = self.0.kv_store.list(CHANNEL_MONITOR_NAMESPACE, "").await?; let mut res = Vec::with_capacity(monitor_list.len()); for monitor_key in monitor_list { let result = @@ -1040,8 +1039,7 @@ where Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { - let primary = CHANNEL_MONITOR_NAMESPACE; - let monitor_bytes = self.kv_store.read(primary, "", monitor_key).await?; + let monitor_bytes = self.kv_store.read(CHANNEL_MONITOR_NAMESPACE, "", monitor_key).await?; let mut monitor_cursor = io::Cursor::new(monitor_bytes); // Discard the sentinel bytes if found. 
if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) { @@ -1099,8 +1097,7 @@ where } async fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> { - let primary = CHANNEL_MONITOR_NAMESPACE; - let monitor_keys = self.kv_store.list(primary, "").await?; + let monitor_keys = self.kv_store.list(CHANNEL_MONITOR_NAMESPACE, "").await?; for monitor_key in monitor_keys { let monitor_name = MonitorName::from_str(&monitor_key)?; let maybe_monitor = self.maybe_read_monitor(&monitor_name, &monitor_key).await?; @@ -1119,13 +1116,14 @@ where async fn cleanup_stale_updates_for_monitor_to( &self, monitor_key: &str, latest_update_id: u64, lazy: bool, ) -> Result<(), io::Error> { - let primary = MONITOR_UPDATE_NAMESPACE; - let updates = self.kv_store.list(primary, monitor_key).await?; + let updates = self.kv_store.list(MONITOR_UPDATE_NAMESPACE, monitor_key).await?; for update in updates { let update_name = UpdateName::new(update)?; // if the update_id is lower than the stored monitor, delete if update_name.0 <= latest_update_id { - self.kv_store.remove(primary, monitor_key, update_name.as_str(), lazy).await?; + self.kv_store + .remove(MONITOR_UPDATE_NAMESPACE, monitor_key, update_name.as_str(), lazy) + .await?; } } Ok(()) @@ -1150,8 +1148,7 @@ where // Note that this is NOT an async function, but rather calls the *sync* KVStore write // method, allowing it to do its queueing immediately, and then return a future for the // completion of the write. This ensures monitor persistence ordering is preserved. 
- let primary = CHANNEL_MONITOR_NAMESPACE; - self.kv_store.write(primary, "", monitor_key.as_str(), monitor_bytes) + self.kv_store.write(CHANNEL_MONITOR_NAMESPACE, "", monitor_key.as_str(), monitor_bytes) } fn update_persisted_channel<'a, ChannelSigner: EcdsaChannelSigner + 'a>( @@ -1172,13 +1169,12 @@ where if persist_update { let monitor_key = monitor_name.to_string(); let update_name = UpdateName::from(update.update_id); - let primary = MONITOR_UPDATE_NAMESPACE; // Note that this is NOT an async function, but rather calls the *sync* KVStore // write method, allowing it to do its queueing immediately, and then return a // future for the completion of the write. This ensures monitor persistence // ordering is preserved. res_a = Some(self.kv_store.write( - primary, + MONITOR_UPDATE_NAMESPACE, &monitor_key, update_name.as_str(), update.encode(), @@ -1248,8 +1244,7 @@ where Ok(()) => {}, Err(_e) => return, }; - let primary = CHANNEL_MONITOR_NAMESPACE; - let _ = self.kv_store.remove(primary, "", &monitor_key, true).await; + let _ = self.kv_store.remove(CHANNEL_MONITOR_NAMESPACE, "", &monitor_key, true).await; } // Cleans up monitor updates for given monitor in range `start..=end`.