Merge pull request #518 from EspressoSystems/rm/update-deps
Update dependencies
rob-maron authored Apr 9, 2024
2 parents cd6d2ab + 04ee5b9 commit 4dca9ff
Showing 27 changed files with 779 additions and 846 deletions.
1,035 changes: 536 additions & 499 deletions Cargo.lock

Large diffs are not rendered by default.

25 changes: 13 additions & 12 deletions Cargo.toml
@@ -60,7 +60,7 @@ required-features = ["sql-data-source", "testing"]
[dependencies]
anyhow = "1.0"
ark-serialize = "0.4.2"
async-compatibility-layer = { git = "https://github.com/EspressoSystems/async-compatibility-layer.git", tag = "1.4.1", features = [
async-compatibility-layer = { version = "1.1", default-features = false, features = [
"logging-utils",
] }
async-std = { version = "1.9.0", features = ["unstable", "attributes"] }
@@ -70,29 +70,30 @@ bit-vec = { version = "0.6.3", features = ["serde_std"] }
chrono = "0.4"
clap = { version = "4.5", features = ["derive", "env"] }
cld = "0.5"
commit = { git = "https://github.com/EspressoSystems/commit.git" }
committable = "0.2"
custom_debug = "0.6"
derivative = "2.2"
derive_more = "0.99"
either = "1.10"
futures = "0.3"
hotshot = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "0.5.32" }
hotshot-types = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "0.5.32" }
hotshot = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "0.5.34" }
hotshot-types = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "0.5.34" }
hotshot-testing = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "0.5.34" }
itertools = "0.12.1"
jf-primitives = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.2" }
jf-primitives = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.3" }
prometheus = "0.13"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = "0.8"
surf-disco = { git = "https://github.com/EspressoSystems/surf-disco.git", tag = "v0.5.0" }
tagged-base64 = { git = "https://github.com/EspressoSystems/tagged-base64", tag = "0.3.4" }
tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.5.0" }
surf-disco = "0.6"
tagged-base64 = "0.4"
tide-disco = "0.6"
time = "0.3"
toml = "0.8"
tracing = "0.1"
trait-variant = "0.1"
typenum = "1"
versioned-binary-serialization = { git = "https://github.com/EspressoSystems/versioned-binary-serialization.git", tag = "0.1.2" }
vbs = "0.1"

# Dependencies enabled by feature "file-system-data-source".
atomic_store = { git = "https://github.com/EspressoSystems/atomicstore.git", tag = "0.1.4", optional = true }
@@ -111,7 +112,7 @@ tokio-postgres = { version = "0.7", optional = true, default-features = false, f

# Dependencies enabled by feature "testing".
espresso-macros = { git = "https://github.com/EspressoSystems/espresso-macros.git", tag = "0.1.0", optional = true }
hotshot-example-types = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "0.5.32", optional = true }
hotshot-example-types = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "0.5.34", optional = true }
portpicker = { version = "0.1", optional = true }
rand = { version = "0.8", optional = true }
spin_sleep = { version = "1.2", optional = true }
@@ -131,9 +132,9 @@ backtrace-on-stack-overflow = { version = "0.3", optional = true }
[dev-dependencies]
espresso-macros = { git = "https://github.com/EspressoSystems/espresso-macros.git", tag = "0.1.0" }
generic-array = "0.14"
hotshot-example-types = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "0.5.32" }
hotshot-example-types = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "0.5.34" }
portpicker = "0.1"
rand = "0.8"
spin_sleep = "1.2"
surf = "2.3"
tempfile = "3.10"
reqwest = "0.12.3"
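
The net effect of the Cargo.toml changes above: several git-pinned Espresso dependencies move to crates.io releases (surf-disco 0.6, tagged-base64 0.4, tide-disco 0.6), the HotShot crates are bumped from tag 0.5.32 to 0.5.34, and two crates are renamed (commit becomes committable, versioned-binary-serialization becomes vbs). Below is a minimal sketch of the import-level fallout, using only paths that appear in later hunks of this diff; the helper function itself is illustrative, not from the repo.

```rust
// Before (git-pinned crates):
// use commit::{Commitment, Committable};
// use versioned_binary_serialization::version::StaticVersionType;

// After (the crates.io releases under their new names):
use committable::{Commitment, Committable};
use vbs::version::StaticVersionType;

// A do-nothing generic function, only here to exercise the renamed paths.
#[allow(dead_code)]
fn uses_renamed_crates<T: Committable, Ver: StaticVersionType>(value: &T) -> Commitment<T> {
    value.commit()
}

fn main() {}
```
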
17 changes: 1 addition & 16 deletions api/status.toml
@@ -19,8 +19,7 @@ Node-specific state and uncommitted data.
Unlike the availability and node APIs, which deal only with committed data (albeit with different
consistency properties), the status API offers a glimpse into internal consensus state and
uncommitted data. Here you can find low-level information about a particular node, such as consensus
and networking metrics. You can also find information about pending blocks and transactions in the
mempool.
and networking metrics.
The status API is intended to be a lightweight way to inspect the activities and health of a
consensus node. It is the only API that can be run without any persistent storage, and its memory
@@ -35,20 +34,6 @@ DOC = """
Get the height of the latest committed block.
"""

[route.mempool_info]
PATH = ["/mempool-info"]
DOC = """
Get information about the mempool.
Returns
```
{
"transaction_count": "integer",
"memory_footprint": "integer",
}
```
"""

[route.success_rate]
PATH = ["/success-rate"]
DOC = """
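
With the /mempool-info route removed, the status API keeps only node-level metrics such as block height and success rate. Below is a minimal sketch (not part of this PR) of querying the remaining routes with surf, which is already in dev-dependencies; the port, the /status prefix, and the concrete response types are assumptions for illustration.

```rust
// Query the remaining status routes over plain HTTP.
// Base URL, port, and response types are assumed for this sketch.

#[async_std::main]
async fn main() -> Result<(), surf::Error> {
    let base = "http://localhost:8080/status";

    // /block-height: height of the latest committed block.
    let height: u64 = surf::get(format!("{base}/block-height")).recv_json().await?;

    // /success-rate: fraction of views that produced a committed block.
    let rate: f64 = surf::get(format!("{base}/success-rate")).recv_json().await?;

    println!("block height = {height}, success rate = {rate}");
    Ok(())
}
```
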
57 changes: 38 additions & 19 deletions examples/simple-server.rs
@@ -37,6 +37,7 @@ use hotshot_query_service::{
},
Error,
};
use hotshot_testing::block_builder::{SimpleBuilderImplementation, TestBuilderImplementation};
use hotshot_types::{
consensus::ConsensusMetricsValue, light_client::StateKeyPair, signature_key::BLSPubKey,
traits::election::Membership, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig,
@@ -151,9 +152,32 @@ async fn init_consensus(
})
.collect::<Vec<_>>();

// Get the number of nodes with stake
let num_nodes_with_stake = NonZeroUsize::new(pub_keys.len()).unwrap();

// Create memberships
let election_config =
MockMembership::default_election_config(num_nodes_with_stake.get() as u64, 0);
let membership =
MockMembership::create_election(known_nodes_with_stake.clone(), election_config, 0);
let memberships = Memberships {
quorum_membership: membership.clone(),
da_membership: membership.clone(),
vid_membership: membership.clone(),
view_sync_membership: membership.clone(),
};

// Start the builder server
let (builder_task, builder_url) = <SimpleBuilderImplementation as TestBuilderImplementation<
MockTypes,
>>::start(Arc::new(membership))
.await;

// Create the configuration
let config = HotShotConfig {
builder_url,
fixed_leader_for_gpuvid: 0,
num_nodes_with_stake: NonZeroUsize::new(pub_keys.len()).unwrap(),
num_nodes_with_stake,
num_nodes_without_stake: 0,
known_nodes_with_stake: known_nodes_with_stake.clone(),
known_nodes_without_stake: vec![],
@@ -174,14 +198,16 @@ data_request_delay = Duration::from_millis(200),
data_request_delay: Duration::from_millis(200),
view_sync_timeout: Duration::from_millis(250),
};
join_all(priv_keys.into_iter().zip(data_sources).enumerate().map(

let nodes = join_all(priv_keys.into_iter().zip(data_sources).enumerate().map(
|(node_id, (priv_key, data_source))| {
let pub_keys = pub_keys.clone();
let known_nodes_with_stake = known_nodes_with_stake.clone();
let state_key_pairs = state_key_pairs.clone();
let mut config = config.clone();
let master_map = master_map.clone();

let memberships = memberships.clone();
async move {
config.my_own_validator_config = ValidatorConfig {
public_key: pub_keys[node_id],
@@ -193,22 +219,6 @@ async fn init_consensus(
state_key_pair: state_key_pairs[node_id].clone(),
};

let election_config = MockMembership::default_election_config(
config.num_nodes_with_stake.get() as u64,
0,
);
let membership = MockMembership::create_election(
known_nodes_with_stake.clone(),
election_config,
0,
);
let memberships = Memberships {
quorum_membership: membership.clone(),
da_membership: membership.clone(),
vid_membership: membership.clone(),
view_sync_membership: membership,
};

let network = Arc::new(MemoryNetwork::new(
pub_keys[node_id],
NetworkingMetricsValue::new(&*data_source.populate_metrics()),
@@ -240,5 +250,14 @@ async fn init_consensus(
}
},
))
.await
.await;

// Hook the builder up to the event stream from the first node
if let Some(builder_task) = builder_task {
builder_task.start(Box::new(
nodes[0].hotshot.output_event_stream.1.activate_cloned(),
));
}

nodes
}
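
The reworked example now builds the election config and Memberships once, starts the test builder via SimpleBuilderImplementation before constructing HotShotConfig (which now carries a builder_url), and finally hooks the builder task to the first node's output_event_stream. That last step relies on an inactive broadcast receiver being activated on demand. Below is a self-contained sketch of that pattern, assuming the event stream is built on the async-broadcast crate; none of these names come from this repo.

```rust
use async_broadcast::broadcast;
use futures::StreamExt;

#[async_std::main]
async fn main() {
    let (tx, rx) = broadcast::<u32>(16);

    // Park the receiver: an InactiveReceiver keeps the channel open without
    // buffering messages until someone activates it.
    let inactive = rx.deactivate();

    // Later, a consumer (like the builder task in the example) clones an
    // active receiver off the inactive handle, mirroring `activate_cloned()`.
    let mut events = inactive.activate_cloned();

    tx.broadcast(42).await.unwrap();
    assert_eq!(events.next().await, Some(42));
}
```
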
4 changes: 2 additions & 2 deletions src/api.rs
@@ -14,9 +14,9 @@ use std::fs;
use std::path::Path;
use tide_disco::api::{Api, ApiError};
use toml::{map::Entry, Value};
use versioned_binary_serialization::version::StaticVersionType;
use vbs::version::StaticVersionType;

pub(crate) fn load_api<State, Error, Ver: StaticVersionType>(
pub(crate) fn load_api<State: 'static, Error: 'static, Ver: StaticVersionType + 'static>(
path: Option<impl AsRef<Path>>,
default: &str,
extensions: impl IntoIterator<Item = Value>,
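
load_api now requires 'static on the state, error, and version type parameters, which is what you would expect if tide-disco 0.6 stores them inside boxed, long-lived route handlers. Below is a repo-independent sketch of why such a bound appears; the handler type and register function are made up for illustration and are not tide-disco APIs.

```rust
use std::future::Future;
use std::pin::Pin;

// A boxed handler that may outlive the scope that created it.
type BoxedHandler = Box<dyn Fn() -> Pin<Box<dyn Future<Output = String> + Send>> + Send + Sync>;

fn register<State>(state: State) -> BoxedHandler
where
    // Without `'static`, the boxed closure could capture borrowed data and this
    // would not compile.
    State: Clone + Send + Sync + std::fmt::Debug + 'static,
{
    Box::new(move || -> Pin<Box<dyn Future<Output = String> + Send>> {
        let state = state.clone();
        Box::pin(async move { format!("{state:?}") })
    })
}

fn main() {
    let handler = register(vec![1u32, 2, 3]);
    println!("{}", futures::executor::block_on(handler()));
}
```
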
8 changes: 4 additions & 4 deletions src/availability.rs
@@ -36,7 +36,7 @@ use serde::{Deserialize, Serialize};
use snafu::{OptionExt, Snafu};
use std::{fmt::Display, path::PathBuf, str::FromStr, time::Duration};
use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode};
use versioned_binary_serialization::version::StaticVersionType;
use vbs::version::StaticVersionType;

pub(crate) mod data_source;
mod fetch;
@@ -496,7 +496,7 @@ mod test {
Error, Header,
};
use async_std::sync::RwLock;
use commit::Committable;
use committable::Committable;
use futures::future::FutureExt;
use hotshot_example_types::state_types::TestInstanceState;
use hotshot_types::{
@@ -790,7 +790,7 @@

// Start the web server.
let port = pick_unused_port().unwrap();
let mut app = App::<_, Error, Version01>::with_state(network.data_source());
let mut app = App::<_, Error>::with_state(network.data_source());
app.register_module(
"availability",
define_api(
@@ -942,7 +942,7 @@ })
})
.unwrap();

let mut app = App::<_, Error, Version01>::with_state(RwLock::new(data_source));
let mut app = App::<_, Error>::with_state(RwLock::new(data_source));
app.register_module("availability", api).unwrap();

let port = pick_unused_port().unwrap();
2 changes: 1 addition & 1 deletion src/availability/query_data.rs
@@ -11,7 +11,7 @@
// see <https://www.gnu.org/licenses/>.

use crate::{types::HeightIndexed, Header, Metadata, Payload, Transaction, VidCommon};
use commit::{Commitment, Committable};
use committable::{Commitment, Committable};
use hotshot_types::{
data::Leaf,
simple_certificate::QuorumCertificate,
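
The only change in this file is the crate rename, but it touches every type that implements the commitment trait. Below is a minimal sketch of implementing the renamed trait for a toy struct, assuming committable 0.2 keeps the RawCommitmentBuilder API of the old commit crate; the struct and field names are invented.

```rust
// Sketch only: assumes committable 0.2 re-exports the same builder API as
// the old `commit` crate.
use committable::{Commitment, Committable, RawCommitmentBuilder};

struct Example {
    height: u64,
    payload: Vec<u8>,
}

impl Committable for Example {
    fn commit(&self) -> Commitment<Self> {
        RawCommitmentBuilder::new("Example")
            .u64_field("height", self.height)
            .var_size_field("payload", &self.payload)
            .finalize()
    }

    fn tag() -> String {
        "EXAMPLE".into()
    }
}

fn main() {
    let ex = Example { height: 1, payload: vec![0, 1, 2] };
    // Commitments are deterministic: committing twice gives the same value.
    assert!(ex.commit() == ex.commit());
}
```
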
74 changes: 7 additions & 67 deletions src/data_source.rs
@@ -130,7 +130,7 @@ pub mod availability_tests {
types::HeightIndexed,
};
use async_std::sync::RwLock;
use commit::Committable;
use committable::Committable;
use futures::stream::StreamExt;
use std::collections::HashMap;
use std::fmt::Debug;
@@ -454,7 +454,7 @@ pub mod persistence_tests {
},
Leaf,
};
use commit::Committable;
use committable::Committable;
use hotshot_example_types::state_types::TestInstanceState;
use hotshot_types::simple_certificate::QuorumCertificate;

@@ -563,7 +563,7 @@ pub mod node_tests {
types::HeightIndexed,
Header, VidShare,
};
use commit::Committable;
use committable::Committable;
use futures::{future::join_all, stream::StreamExt};
use hotshot_example_types::{
block_types::{TestBlockHeader, TestBlockPayload},
@@ -1025,15 +1025,13 @@ pub mod node_tests {
#[espresso_macros::generic_tests]
pub mod status_tests {
use crate::{
status::{MempoolQueryData, StatusDataSource},
status::StatusDataSource,
testing::{
consensus::{DataSourceLifeCycle, MockNetwork},
mocks::mock_transaction,
setup_test, sleep,
},
};
use bincode::Options;
use hotshot_types::utils::bincode_opts;
use std::time::Duration;

#[async_std::test]
@@ -1061,62 +1059,15 @@
);
}

// Submit a transaction, and check that it is reflected in the mempool.
// Submit a transaction
let txn = mock_transaction(vec![1, 2, 3]);
network.submit_transaction(txn.clone()).await;
loop {
let mempool = { ds.read().await.mempool_info().await.unwrap() };
let expected = MempoolQueryData {
transaction_count: 1,
memory_footprint: bincode_opts().serialized_size(&txn).unwrap(),
};
if mempool == expected {
break;
}
tracing::info!(?mempool, "waiting for mempool to reflect transaction");
sleep(Duration::from_secs(1)).await;
}
{
assert_eq!(
ds.read().await.mempool_info().await.unwrap(),
MempoolQueryData {
transaction_count: 1,
memory_footprint: bincode_opts().serialized_size(&txn).unwrap(),
}
);
}

// Submitting the same transaction should not affect the mempool.
network.submit_transaction(txn.clone()).await;
sleep(Duration::from_secs(3)).await;
{
assert_eq!(
ds.read().await.mempool_info().await.unwrap(),
MempoolQueryData {
transaction_count: 1,
memory_footprint: bincode_opts().serialized_size(&txn).unwrap(),
}
);
}

// Start consensus and wait for the transaction to be finalized.
network.start().await;
// First wait for the transaction to be taken out of the mempool.
let block_height = loop {
{
let ds = ds.read().await;
let mempool = ds.mempool_info().await.unwrap();
if mempool.transaction_count == 0 {
break ds.block_height().await.unwrap();
}
tracing::info!(
?mempool,
"waiting for transaction to be taken out of mempool"
);
}
sleep(Duration::from_secs(1)).await;
};

// Now wait for at least one block to be finalized.
let block_height = ds.read().await.block_height().await.unwrap();
loop {
let current_height = ds.read().await.block_height().await.unwrap();
if current_height > block_height {
@@ -1139,17 +1090,6 @@
assert!(success_rate > 0.0, "{success_rate}");
}

{
// Check that the transaction is no longer reflected in the mempool.
assert_eq!(
ds.read().await.mempool_info().await.unwrap(),
MempoolQueryData {
transaction_count: 0,
memory_footprint: 0,
}
);
}

{
// Shutting down the consensus to halt block production
// Introducing a delay of 3 seconds to ensure that the elapsed time since the last block is at least 3 seconds
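
The status tests drop all mempool assertions: the rewritten flow submits a transaction, starts consensus, and simply polls until the block height advances past its starting value. Below is a generic polling helper in that spirit; it is an illustrative utility, not something this PR adds.

```rust
use std::future::Future;
use std::time::{Duration, Instant};

// Poll an async probe until it reports progress or the deadline passes.
async fn wait_until<F, Fut>(mut probe: F, timeout: Duration) -> bool
where
    F: FnMut() -> Fut,
    Fut: Future<Output = bool>,
{
    let deadline = Instant::now() + timeout;
    while Instant::now() < deadline {
        if probe().await {
            return true;
        }
        async_std::task::sleep(Duration::from_secs(1)).await;
    }
    false
}

#[async_std::main]
async fn main() {
    let start = Instant::now();
    // Toy usage, standing in for "wait until block height has advanced".
    let done = wait_until(
        move || async move { start.elapsed() > Duration::from_secs(2) },
        Duration::from_secs(10),
    )
    .await;
    assert!(done);
}
```
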
