diff --git a/Cargo.toml b/Cargo.toml
index d14280bc2..5848fb275 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -95,9 +95,9 @@ futures-util = "0.3.30"
 testdir = "0.9.1"
 
 [features]
-default = ["fs-store", "rpc", "net_protocol"]
+default = ["fs-store", "net_protocol"]
 downloader = ["dep:parking_lot", "tokio-util/time", "dep:hashlink"]
-net_protocol = ["downloader"]
+net_protocol = ["downloader", "dep:futures-util"]
 fs-store = ["dep:reflink-copy", "redb", "dep:redb_v1", "dep:tempfile"]
 metrics = ["iroh-metrics/metrics"]
 redb = ["dep:redb"]
@@ -136,6 +136,7 @@ name = "fetch-stream"
 
 [[example]]
 name = "transfer"
+required-features = ["rpc"]
 
 [[example]]
 name = "hello-world-fetch"
diff --git a/src/net_protocol.rs b/src/net_protocol.rs
index 770e3e65e..710e2f29e 100644
--- a/src/net_protocol.rs
+++ b/src/net_protocol.rs
@@ -3,12 +3,7 @@
 // TODO: reduce API surface and add documentation
 #![allow(missing_docs)]
 
-use std::{
-    collections::{BTreeMap, BTreeSet},
-    fmt::Debug,
-    ops::DerefMut,
-    sync::{Arc, OnceLock},
-};
+use std::{collections::BTreeSet, fmt::Debug, ops::DerefMut, sync::Arc};
 
 use anyhow::{anyhow, bail, Result};
 use futures_lite::future::Boxed as BoxedFuture;
@@ -31,7 +26,7 @@ use crate::{
         progress::{AsyncChannelProgressSender, ProgressSender},
         SetTagOption,
     },
-    HashAndFormat, TempTag,
+    HashAndFormat,
 };
 
 /// A callback that blobs can ask about a set of hashes that should not be garbage collected.
@@ -58,32 +53,33 @@ pub struct Blobs<S> {
     pub(crate) store: S,
     events: EventSender,
     downloader: Downloader,
+    #[cfg(feature = "rpc")]
     batches: tokio::sync::Mutex<BlobBatches>,
     endpoint: Endpoint,
     gc_state: Arc<std::sync::Mutex<GcState>>,
     #[cfg(feature = "rpc")]
-    pub(crate) rpc_handler: Arc<OnceLock<crate::rpc::RpcHandler>>,
+    pub(crate) rpc_handler: Arc<std::sync::OnceLock<crate::rpc::RpcHandler>>,
 }
 
-/// Name used for logging when new node addresses are added from gossip.
-const BLOB_DOWNLOAD_SOURCE_NAME: &str = "blob_download";
-
 /// Keeps track of all the currently active batch operations of the blobs api.
+#[cfg(feature = "rpc")]
 #[derive(Debug, Default)]
 pub(crate) struct BlobBatches {
     /// Currently active batches
-    batches: BTreeMap<BatchId, BlobBatch>,
+    batches: std::collections::BTreeMap<BatchId, BlobBatch>,
     /// Used to generate new batch ids.
     max: u64,
 }
 
 /// A single batch of blob operations
+#[cfg(feature = "rpc")]
 #[derive(Debug, Default)]
 struct BlobBatch {
     /// The tags in this batch.
-    tags: BTreeMap<HashAndFormat, Vec<TempTag>>,
+    tags: std::collections::BTreeMap<HashAndFormat, Vec<crate::TempTag>>,
 }
 
+#[cfg(feature = "rpc")]
 impl BlobBatches {
     /// Create a new unique batch id.
     pub fn create(&mut self) -> BatchId {
@@ -93,7 +89,7 @@ impl BlobBatches {
     }
 
     /// Store a temp tag in a batch identified by a batch id.
-    pub fn store(&mut self, batch: BatchId, tt: TempTag) {
+    pub fn store(&mut self, batch: BatchId, tt: crate::TempTag) {
         let entry = self.batches.entry(batch).or_default();
         entry.tags.entry(tt.hash_and_format()).or_default().push(tt);
     }
@@ -187,10 +183,11 @@ impl<S: crate::store::Store> Blobs<S> {
             events,
             downloader,
             endpoint,
+            #[cfg(feature = "rpc")]
             batches: Default::default(),
             gc_state: Default::default(),
             #[cfg(feature = "rpc")]
-            rpc_handler: Arc::new(OnceLock::new()),
+            rpc_handler: Default::default(),
         }
     }
 
@@ -252,6 +249,7 @@ impl<S: crate::store::Store> Blobs<S> {
         Ok(())
     }
 
+    #[cfg(feature = "rpc")]
    pub(crate) async fn batches(&self) -> tokio::sync::MutexGuard<'_, BlobBatches> {
         self.batches.lock().await
     }
@@ -303,6 +301,9 @@ impl<S: crate::store::Store> Blobs<S> {
         nodes: Vec<NodeAddr>,
         progress: AsyncChannelProgressSender<DownloadProgress>,
     ) -> Result<Stats> {
+        /// Name used for logging when new node addresses are added from gossip.
+        const BLOB_DOWNLOAD_SOURCE_NAME: &str = "blob_download";
+
         let mut node_ids = Vec::with_capacity(nodes.len());
         let mut any_added = false;
         for node in nodes {
diff --git a/tests/blobs.rs b/tests/blobs.rs
index 4b91f39f0..ad1198f92 100644
--- a/tests/blobs.rs
+++ b/tests/blobs.rs
@@ -1,4 +1,4 @@
-#![cfg(feature = "net_protocol")]
+#![cfg(all(feature = "net_protocol", feature = "rpc"))]
 use std::{
     sync::{Arc, Mutex},
     time::Duration,
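
The recurring technique in this patch is gating state and methods behind the `rpc` Cargo feature so the crate still builds when RPC support is disabled. Below is a minimal, self-contained sketch of that pattern; it assumes a crate that declares an `rpc` feature, and the `Service`/`RpcState` names are illustrative stand-ins, not types from this crate.

// Sketch only: the field, its initializer, and its accessor all carry the
// same #[cfg], so the struct compiles with or without the "rpc" feature.
#[derive(Debug, Default)]
pub struct RpcState {
    // Lazily-initialized handler, mirroring the OnceLock-behind-Arc idea.
    handler: std::sync::OnceLock<String>,
}

#[derive(Debug)]
pub struct Service {
    endpoint: String,
    #[cfg(feature = "rpc")]
    rpc: std::sync::Arc<RpcState>,
}

impl Service {
    pub fn new(endpoint: String) -> Self {
        Self {
            endpoint,
            // Attributes are allowed on struct-expression fields, so the
            // initializer disappears together with the field.
            #[cfg(feature = "rpc")]
            rpc: Default::default(),
        }
    }

    #[cfg(feature = "rpc")]
    pub fn rpc(&self) -> &RpcState {
        &self.rpc
    }
}

With `rpc` dropped from the default feature set, the `transfer` example and the `tests/blobs.rs` suite opt in explicitly (via `required-features` and `cfg(all(...))` respectively), which is the same idea applied at the Cargo and test level.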