From 13cfc394062c5a6223446f0fbf5220709f94a8bf Mon Sep 17 00:00:00 2001 From: snorochevskiy Date: Wed, 20 Nov 2024 00:08:00 +0200 Subject: [PATCH] MTG-703 Adding peer to peer consistency checks --- Cargo.toml | 1 + entities/src/models.rs | 29 + grpc/Cargo.toml | 1 + grpc/build.rs | 1 + grpc/proto/consistency_api.proto | 146 ++ grpc/src/client.rs | 7 +- grpc/src/consistencyapi.rs | 1240 +++++++++++++++++ grpc/src/consistencyapi_impl.rs | 633 +++++++++ grpc/src/lib.rs | 2 + interface/Cargo.toml | 1 + interface/src/aura_peers_provides.rs | 9 + interface/src/checksums_storage.rs | 108 ++ interface/src/lib.rs | 2 + nft_ingester/Cargo.toml | 1 + nft_ingester/benches/ingester_benchmark.rs | 1 + nft_ingester/src/bin/ingester/main.rs | 27 +- nft_ingester/src/bin/raw_backfiller/main.rs | 13 + nft_ingester/src/config.rs | 1 + nft_ingester/src/consistency_bg_job.rs | 635 +++++++++ nft_ingester/src/consistency_calculator.rs | 785 +++++++++++ nft_ingester/src/fork_cleaner.rs | 33 +- nft_ingester/src/lib.rs | 2 + nft_ingester/src/message_parser.rs | 25 + .../src/processors/accounts_processor.rs | 21 +- .../bubblegum_updates_processor.rs | 26 + nft_ingester/tests/api_tests.rs | 24 + nft_ingester/tests/batch_mint_test.rs | 1 + nft_ingester/tests/bubblegum_tests.rs | 2 + nft_ingester/tests/clean_forks_test.rs | 4 + nft_ingester/tests/consistency_bg_job_test.rs | 96 ++ .../tests/consistency_calculator_test.rs | 253 ++++ nft_ingester/tests/decompress.rs | 4 + nft_ingester/tests/dump_tests.rs | 1 + nft_ingester/tests/process_accounts.rs | 9 + rocks-db/Cargo.toml | 2 + rocks-db/src/batch_savers.rs | 27 + rocks-db/src/column.rs | 29 + rocks-db/src/lib.rs | 46 + rocks-db/src/migrations/spl2022.rs | 2 + rocks-db/src/storage_consistency.rs | 866 ++++++++++++ rocks-db/src/transaction.rs | 2 + rocks-db/src/transaction_client.rs | 1 + rocks-db/tests/storage_consistency_test.rs | 170 +++ 43 files changed, 5283 insertions(+), 6 deletions(-) create mode 100644 grpc/proto/consistency_api.proto create mode 100644 grpc/src/consistencyapi.rs create mode 100644 grpc/src/consistencyapi_impl.rs create mode 100644 interface/src/aura_peers_provides.rs create mode 100644 interface/src/checksums_storage.rs create mode 100644 nft_ingester/src/consistency_bg_job.rs create mode 100644 nft_ingester/src/consistency_calculator.rs create mode 100644 nft_ingester/tests/consistency_bg_job_test.rs create mode 100644 nft_ingester/tests/consistency_calculator_test.rs create mode 100644 rocks-db/src/storage_consistency.rs create mode 100644 rocks-db/tests/storage_consistency_test.rs diff --git a/Cargo.toml b/Cargo.toml index 023e513e5..e02ca5b1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,6 +81,7 @@ indicatif = "0.17" # Errors, futures, helpers, tools, time, etc... 
# Errors +anyhow = "1" thiserror = { version = "1"} # Clients arweave-rs = { version = "0.2.0", git = "https://github.com/RequescoS/arweave-rs.git", rev = "d8f5ef76f06c96afdf013fe5b62301790631b33f" } diff --git a/entities/src/models.rs b/entities/src/models.rs index bfe07b8be..00db5063a 100644 --- a/entities/src/models.rs +++ b/entities/src/models.rs @@ -519,6 +519,7 @@ pub struct MetadataInfo { pub rent_epoch: u64, pub executable: bool, pub metadata_owner: Option, + pub data_hash: u64, } #[derive(Clone)] @@ -526,12 +527,14 @@ pub struct EditionMetadata { pub edition: TokenMetadataEdition, pub write_version: u64, pub slot_updated: u64, + pub data_hash: u64, } #[derive(Clone, Debug)] pub struct BurntMetadataSlot { pub slot_updated: u64, pub write_version: u64, + pub data_hash: u64, } #[derive(Clone)] @@ -542,6 +545,7 @@ pub struct IndexableAssetWithAccountInfo { pub slot_updated: u64, pub write_version: u64, pub rent_epoch: u64, + pub data_hash: u64, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -556,6 +560,7 @@ pub struct TokenAccount { pub slot_updated: i64, pub amount: i64, pub write_version: u64, + pub data_hash: u64, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -569,12 +574,14 @@ pub struct Mint { pub token_program: Pubkey, pub extensions: Option, pub write_version: u64, + pub data_hash: u64, } pub struct InscriptionInfo { pub inscription: Inscription, pub write_version: u64, pub slot_updated: u64, + pub data_hash: u64, } #[derive(Clone)] @@ -582,6 +589,7 @@ pub struct InscriptionDataInfo { pub inscription_data: Vec, pub write_version: u64, pub slot_updated: u64, + pub data_hash: u64, } #[derive(Clone)] @@ -592,6 +600,7 @@ pub struct CoreAssetFee { pub rent_epoch: u64, pub slot_updated: u64, pub write_version: u64, + pub data_hash: u64, } pub struct UnprocessedAccountMessage { @@ -600,6 +609,26 @@ pub struct UnprocessedAccountMessage { pub id: String, } +impl UnprocessedAccountMessage { + pub fn solana_change_info(&self) -> (Pubkey, u64, u64, u64) { + let (slot, write_version, data_hash) = match &self.account { + UnprocessedAccount::MetadataInfo(v) => (v.slot_updated, v.write_version, v.data_hash), + UnprocessedAccount::Token(v) => (v.slot_updated as u64, v.write_version, v.data_hash), + UnprocessedAccount::Mint(v) => (v.slot_updated as u64, v.write_version, v.data_hash), + UnprocessedAccount::Edition(v) => (v.slot_updated, v.write_version, v.data_hash), + UnprocessedAccount::BurnMetadata(v) => (v.slot_updated, v.write_version, v.data_hash), + UnprocessedAccount::BurnMplCore(v) => (v.slot_updated, v.write_version, v.data_hash), + UnprocessedAccount::MplCore(v) => (v.slot_updated, v.write_version, v.data_hash), + UnprocessedAccount::Inscription(v) => (v.slot_updated, v.write_version, v.data_hash), + UnprocessedAccount::InscriptionData(v) => { + (v.slot_updated, v.write_version, v.data_hash) + } + UnprocessedAccount::MplCoreFee(v) => (v.slot_updated, v.write_version, v.data_hash), + }; + (self.key, slot, write_version, data_hash) + } +} + pub struct BufferedTxWithID { pub tx: BufferedTransaction, pub id: String, diff --git a/grpc/Cargo.toml b/grpc/Cargo.toml index 939896cf1..facfa3850 100644 --- a/grpc/Cargo.toml +++ b/grpc/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +anyhow = { workspace = true } tokio = { workspace = true } tonic = { workspace = true } prost = { workspace = true } diff --git a/grpc/build.rs b/grpc/build.rs index 3b04ae2f7..e583467f8 100644 --- 
a/grpc/build.rs +++ b/grpc/build.rs @@ -6,6 +6,7 @@ fn main() -> Result<(), Box> { // Paths to the .proto files "proto/gap_filler.proto", "proto/asset_urls.proto", + "proto/consistency_api.proto", ], &["proto"], // Include paths for proto file dependencies )?; diff --git a/grpc/proto/consistency_api.proto b/grpc/proto/consistency_api.proto new file mode 100644 index 000000000..42127dbd8 --- /dev/null +++ b/grpc/proto/consistency_api.proto @@ -0,0 +1,146 @@ +syntax = "proto3"; + +import "google/protobuf/empty.proto"; + +package consistencyapi; + +message BbgmEarlistGrandEpoch { + optional uint32 grand_epoch = 1; +} + +message BbgmGrandEpochList { + repeated BbgmGrandEpoch list = 1; +} + +message BbgmGrandEpoch { + uint32 grand_epoch = 1; + bytes tree_pubkey = 2; + optional bytes checksum = 3; +} + +message BbgmEpochList { + repeated BbgmEpoch list = 1; +} + +message BbgmEpoch { + uint32 epoch = 1; + bytes tree_pubkey = 2; + optional bytes checksum = 3; +} + +message BbgmChangeList { + repeated BbgmChange list = 1; +} + +message BbgmChange { + bytes tree_pubkey = 1; + uint64 slot = 2; + uint64 seq = 3; + string signature = 4; +} + +// Request object for getting grand epoch tree checksums +message GetBbgmGrandEpochsReq { + // Grand epoch number + uint32 grand_epoch = 1; + // Maximum number of tree checksums to return + optional uint64 limit = 2; + // Return tree checksums that come after the given one + optional bytes after = 3; +} + +// Request object for getting epoch tree checksums in the given grand epoch +message GetBbgmEpochsReq { + // Public key of the bubblegum tree the checksum should be returned for + bytes tree_pubkey = 1; + // Number of the grand epoch whose nested epochs should be returned + uint32 grand_epoch = 2; +} + +message BbgmChangePosition { + uint64 slot = 1; + uint64 seq = 2; +} + +// Request object for getting the list of individual bubblegum tree changes +// that happened in the given epoch +message GetBbgmChangesReq { + // Pubkey of the bubblegum tree + bytes tree_pubkey = 1; + // Number of the epoch the changes are listed from + uint32 epoch = 2; + // Maximum number of bubblegum changes to return + optional uint64 limit = 3; + // Return changes after the given position + optional BbgmChangePosition after = 4; +} + +// Represents an account NFT grand bucket checksum. +message AccGrandBucketChecksum { + uint32 grand_bucket = 1; + optional bytes checksum = 2; +} + +// List of account NFT grand bucket checksums. +message AccGrandBucketChecksumsList { + repeated AccGrandBucketChecksum list = 1; +} + +message AccBucketChecksum { + uint32 bucket = 1; + optional bytes checksum = 2; +} + +message AccBucketChecksumsList { + repeated AccBucketChecksum list = 1; +} + +// Represents the last tracked account NFT change +message Acc { + bytes account_pubkey = 1; + uint64 slot = 2; + uint64 write_version = 3; +} + +// Represents a list of last tracked account NFT changes +message AccList { + repeated Acc list = 1; +} + +message GetAccBucketsReq { + uint32 grand_bucket = 1; +} + +message GetAccReq { + // Number of the bucket + uint32 bucket = 1; + // Maximum number of latest account states to return + optional uint64 limit = 2; + // Return accounts that come after the given one + optional bytes after = 3; +} + +service BbgmConsistencyService { + // Returns the earliest grand epoch available on the peer.
+ rpc GetBbgmEarliestGrandEpoch(google.protobuf.Empty) returns (BbgmEarlistGrandEpoch); + + // Request list of tree checksums in the given grand epoch + // No need to use stream since in the worst case the response size + // is still significantly less than 1 MB + rpc GetBbgmGrandEpochChecksums(GetBbgmGrandEpochsReq) returns (BbgmGrandEpochList); + rpc GetBbgmEpochChecksumsInGrandEpoch(GetBbgmEpochsReq) returns (BbgmEpochList); + rpc GetBbgmChangesInEpoch(GetBbgmChangesReq) returns (BbgmChangeList); + + // Propose bubblegum changes to a peer that is missing them. + // Can be called after the "get changes" API has been called and a portion + // of missing bubblegum changes has been detected on the peer. + rpc ProposeMissingBbgmChanges(BbgmChangeList) returns (google.protobuf.Empty); +} + +service AccConsistencyService { + rpc GetAccGrandBucketChecksums(google.protobuf.Empty) returns (AccGrandBucketChecksumsList); + rpc GetAccBucketChecksumsInGrandBucket(GetAccBucketsReq) returns (AccBucketChecksumsList); + rpc GetAccsInBucket(GetAccReq) returns (AccList); + + rpc ProposeMissingAccChanges(AccList) returns (google.protobuf.Empty); +} \ No newline at end of file diff --git a/grpc/src/client.rs b/grpc/src/client.rs index adc5dcdf0..27d10fd5d 100644 --- a/grpc/src/client.rs +++ b/grpc/src/client.rs @@ -21,8 +21,11 @@ pub struct Client { impl Client { pub async fn connect(peer_discovery: impl PeerDiscovery) -> Result { - let url = Uri::from_str(peer_discovery.get_gapfiller_peer_addr().as_str()) - .map_err(|e| GrpcError::UriCreate(e.to_string()))?; + Client::connect_to_url(peer_discovery.get_gapfiller_peer_addr().as_str()).await + } + + pub async fn connect_to_url(url_str: &str) -> Result { + let url = Uri::from_str(url_str).map_err(|e| GrpcError::UriCreate(e.to_string()))?; + let channel = Channel::builder(url).connect().await?; Ok(Self { diff --git a/grpc/src/consistencyapi.rs b/grpc/src/consistencyapi.rs new file mode 100644 index 000000000..6ffc0087e --- /dev/null +++ b/grpc/src/consistencyapi.rs @@ -0,0 +1,1240 @@ +// This file is @generated by prost-build.
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BbgmEarlistGrandEpoch { + #[prost(uint32, optional, tag = "1")] + pub grand_epoch: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BbgmGrandEpochList { + #[prost(message, repeated, tag = "1")] + pub list: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BbgmGrandEpoch { + #[prost(uint32, tag = "1")] + pub grand_epoch: u32, + #[prost(bytes = "vec", tag = "2")] + pub tree_pubkey: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", optional, tag = "3")] + pub checksum: ::core::option::Option<::prost::alloc::vec::Vec>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BbgmEpochList { + #[prost(message, repeated, tag = "1")] + pub list: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BbgmEpoch { + #[prost(uint32, tag = "1")] + pub epoch: u32, + #[prost(bytes = "vec", tag = "2")] + pub tree_pubkey: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", optional, tag = "3")] + pub checksum: ::core::option::Option<::prost::alloc::vec::Vec>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BbgmChangeList { + #[prost(message, repeated, tag = "1")] + pub list: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BbgmChange { + #[prost(bytes = "vec", tag = "1")] + pub tree_pubkey: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "2")] + pub slot: u64, + #[prost(uint64, tag = "3")] + pub seq: u64, + #[prost(string, tag = "4")] + pub signature: ::prost::alloc::string::String, +} +/// Request object for getting grand epoch trees checksums +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBbgmGrandEpochsReq { + /// Grand epoch number + #[prost(uint32, tag = "1")] + pub grand_epoch: u32, + /// Maximum amount of tree checksums to return + #[prost(uint64, optional, tag = "2")] + pub limit: ::core::option::Option, + /// Return trees checksums that are after given + #[prost(bytes = "vec", optional, tag = "3")] + pub after: ::core::option::Option<::prost::alloc::vec::Vec>, +} +/// Request object for getting epoch tree checksums in the geven grand epoch +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBbgmEpochsReq { + /// Public key of the bubblegum tree, checksum should be returned for + #[prost(bytes = "vec", tag = "1")] + pub tree_pubkey: ::prost::alloc::vec::Vec, + /// Number of grand epoch which nested epochs should be returned + #[prost(uint32, tag = "2")] + pub grand_epoch: u32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BbgmChangePosition { + #[prost(uint64, tag = "1")] + pub slot: u64, + #[prost(uint64, tag = "2")] + pub seq: u64, +} +/// Request object for getting list of individual bubblegum tree changes +/// that happened in the given epoch +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBbgmChangesReq { + /// Pubkey of bubblegum tree + #[prost(bytes = "vec", tag = "1")] + pub tree_pubkey: 
::prost::alloc::vec::Vec, + /// Number of epoch changes are listed from + #[prost(uint32, tag = "2")] + pub epoch: u32, + /// Maximum amount of bubblegum changes to return + #[prost(uint64, optional, tag = "3")] + pub limit: ::core::option::Option, + /// Return changes after given position + #[prost(message, optional, tag = "4")] + pub after: ::core::option::Option, +} +/// Represents account NFT grand bucket checksum. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccGrandBucketChecksum { + #[prost(uint32, tag = "1")] + pub grand_bucket: u32, + #[prost(bytes = "vec", optional, tag = "2")] + pub checksum: ::core::option::Option<::prost::alloc::vec::Vec>, +} +/// List of account NFT grand bucket checksums. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccGrandBucketChecksumsList { + #[prost(message, repeated, tag = "1")] + pub list: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccBucketChecksum { + #[prost(uint32, tag = "1")] + pub bucket: u32, + #[prost(bytes = "vec", optional, tag = "2")] + pub checksum: ::core::option::Option<::prost::alloc::vec::Vec>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccBucketChecksumsList { + #[prost(message, repeated, tag = "1")] + pub list: ::prost::alloc::vec::Vec, +} +/// Represents last tracked account NFT change +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Acc { + #[prost(bytes = "vec", tag = "1")] + pub account_pubkey: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "2")] + pub slot: u64, + #[prost(uint64, tag = "3")] + pub write_version: u64, +} +/// Represents list of last tracked account NFT changes +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccList { + #[prost(message, repeated, tag = "1")] + pub list: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetAccBucketsReq { + #[prost(uint32, tag = "1")] + pub grand_bucket: u32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetAccReq { + /// number of bucket + #[prost(uint32, tag = "1")] + pub bucket: u32, + /// maximum amount of account latest states to return + #[prost(uint64, optional, tag = "2")] + pub limit: ::core::option::Option, + /// return account that are after the given + #[prost(bytes = "vec", optional, tag = "3")] + pub after: ::core::option::Option<::prost::alloc::vec::Vec>, +} +/// Generated client implementations. +pub mod bbgm_consistency_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::http::Uri; + use tonic::codegen::*; + #[derive(Debug, Clone)] + pub struct BbgmConsistencyServiceClient { + inner: tonic::client::Grpc, + } + impl BbgmConsistencyServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl BbgmConsistencyServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> BbgmConsistencyServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + >>::Error: + Into + Send + Sync, + { + BbgmConsistencyServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Returns earliest grand epoch avaible on the peer. 
+ pub async fn get_bbgm_earliest_grand_epoch( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/consistencyapi.BbgmConsistencyService/GetBbgmEarliestGrandEpoch", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "consistencyapi.BbgmConsistencyService", + "GetBbgmEarliestGrandEpoch", + )); + self.inner.unary(req, path, codec).await + } + /// Request list of tree checksums in the given grand epoch + /// No need to use stream since in the worst case the response size + /// is still significanly less than 1 MB + pub async fn get_bbgm_grand_epoch_checksums( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/consistencyapi.BbgmConsistencyService/GetBbgmGrandEpochChecksums", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "consistencyapi.BbgmConsistencyService", + "GetBbgmGrandEpochChecksums", + )); + self.inner.unary(req, path, codec).await + } + pub async fn get_bbgm_epoch_checksums_in_grand_epoch( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/consistencyapi.BbgmConsistencyService/GetBbgmEpochChecksumsInGrandEpoch", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "consistencyapi.BbgmConsistencyService", + "GetBbgmEpochChecksumsInGrandEpoch", + )); + self.inner.unary(req, path, codec).await + } + pub async fn get_bbgm_changes_in_epoch( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/consistencyapi.BbgmConsistencyService/GetBbgmChangesInEpoch", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "consistencyapi.BbgmConsistencyService", + "GetBbgmChangesInEpoch", + )); + self.inner.unary(req, path, codec).await + } + /// Propose bubblegum changes to a peer, that has these changes missing. + /// Can be called after after the "get changes" API is called, and a portion + /// of missing bubblegum changes detected on the peer. 
+ pub async fn propose_missing_bbgm_changes( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/consistencyapi.BbgmConsistencyService/ProposeMissingBbgmChanges", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "consistencyapi.BbgmConsistencyService", + "ProposeMissingBbgmChanges", + )); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated client implementations. +pub mod acc_consistency_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::http::Uri; + use tonic::codegen::*; + #[derive(Debug, Clone)] + pub struct AccConsistencyServiceClient { + inner: tonic::client::Grpc, + } + impl AccConsistencyServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AccConsistencyServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AccConsistencyServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + >>::Error: + Into + Send + Sync, + { + AccConsistencyServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn get_acc_grand_bucket_checksums( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/consistencyapi.AccConsistencyService/GetAccGrandBucketChecksums", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "consistencyapi.AccConsistencyService", + "GetAccGrandBucketChecksums", + )); + self.inner.unary(req, path, codec).await + } + pub async fn get_acc_bucket_checksums_in_grand_bucket( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/consistencyapi.AccConsistencyService/GetAccBucketChecksumsInGrandBucket", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "consistencyapi.AccConsistencyService", + "GetAccBucketChecksumsInGrandBucket", + )); + self.inner.unary(req, path, codec).await + } + pub async fn get_accs_in_bucket( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/consistencyapi.AccConsistencyService/GetAccsInBucket", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "consistencyapi.AccConsistencyService", + "GetAccsInBucket", + )); + self.inner.unary(req, path, codec).await + } + pub async fn propose_missing_acc_changes( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/consistencyapi.AccConsistencyService/ProposeMissingAccChanges", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "consistencyapi.AccConsistencyService", + "ProposeMissingAccChanges", + )); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod bbgm_consistency_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with BbgmConsistencyServiceServer. + #[async_trait] + pub trait BbgmConsistencyService: Send + Sync + 'static { + /// Returns earliest grand epoch avaible on the peer. 
+ async fn get_bbgm_earliest_grand_epoch( + &self, + request: tonic::Request<()>, + ) -> std::result::Result, tonic::Status>; + /// Request list of tree checksums in the given grand epoch + /// No need to use stream since in the worst case the response size + /// is still significanly less than 1 MB + async fn get_bbgm_grand_epoch_checksums( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn get_bbgm_epoch_checksums_in_grand_epoch( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn get_bbgm_changes_in_epoch( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Propose bubblegum changes to a peer, that has these changes missing. + /// Can be called after after the "get changes" API is called, and a portion + /// of missing bubblegum changes detected on the peer. + async fn propose_missing_bbgm_changes( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + } + #[derive(Debug)] + pub struct BbgmConsistencyServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl BbgmConsistencyServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for BbgmConsistencyServiceServer + where + T: BbgmConsistencyService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/consistencyapi.BbgmConsistencyService/GetBbgmEarliestGrandEpoch" => { + #[allow(non_camel_case_types)] + struct GetBbgmEarliestGrandEpochSvc(pub Arc); + impl tonic::server::UnaryService<()> + for GetBbgmEarliestGrandEpochSvc + { + type Response = super::BbgmEarlistGrandEpoch; + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request<()>) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_bbgm_earliest_grand_epoch( + &inner, request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetBbgmEarliestGrandEpochSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/consistencyapi.BbgmConsistencyService/GetBbgmGrandEpochChecksums" => { + #[allow(non_camel_case_types)] + struct GetBbgmGrandEpochChecksumsSvc(pub Arc); + impl + tonic::server::UnaryService + for GetBbgmGrandEpochChecksumsSvc + { + type Response = super::BbgmGrandEpochList; + type Future = BoxFuture, tonic::Status>; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_bbgm_grand_epoch_checksums( + &inner, request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetBbgmGrandEpochChecksumsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/consistencyapi.BbgmConsistencyService/GetBbgmEpochChecksumsInGrandEpoch" => { + #[allow(non_camel_case_types)] + struct GetBbgmEpochChecksumsInGrandEpochSvc( + pub Arc, + ); + impl + tonic::server::UnaryService + for 
GetBbgmEpochChecksumsInGrandEpochSvc + { + type Response = super::BbgmEpochList; + type Future = BoxFuture, tonic::Status>; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_bbgm_epoch_checksums_in_grand_epoch( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetBbgmEpochChecksumsInGrandEpochSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/consistencyapi.BbgmConsistencyService/GetBbgmChangesInEpoch" => { + #[allow(non_camel_case_types)] + struct GetBbgmChangesInEpochSvc(pub Arc); + impl + tonic::server::UnaryService + for GetBbgmChangesInEpochSvc + { + type Response = super::BbgmChangeList; + type Future = BoxFuture, tonic::Status>; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_bbgm_changes_in_epoch( + &inner, request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetBbgmChangesInEpochSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/consistencyapi.BbgmConsistencyService/ProposeMissingBbgmChanges" => { + #[allow(non_camel_case_types)] + struct ProposeMissingBbgmChangesSvc(pub Arc); + impl + tonic::server::UnaryService + for ProposeMissingBbgmChangesSvc + { + type Response = (); + type Future = BoxFuture, tonic::Status>; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::propose_missing_bbgm_changes( + &inner, request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ProposeMissingBbgmChangesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + 
send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => Box::pin(async move { + Ok(http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap()) + }), + } + } + } + impl Clone for BbgmConsistencyServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for BbgmConsistencyServiceServer { + const NAME: &'static str = "consistencyapi.BbgmConsistencyService"; + } +} +/// Generated server implementations. +pub mod acc_consistency_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with AccConsistencyServiceServer. + #[async_trait] + pub trait AccConsistencyService: Send + Sync + 'static { + async fn get_acc_grand_bucket_checksums( + &self, + request: tonic::Request<()>, + ) -> std::result::Result, tonic::Status>; + async fn get_acc_bucket_checksums_in_grand_bucket( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn get_accs_in_bucket( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn propose_missing_acc_changes( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + } + #[derive(Debug)] + pub struct AccConsistencyServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl AccConsistencyServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for AccConsistencyServiceServer + where + T: AccConsistencyService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/consistencyapi.AccConsistencyService/GetAccGrandBucketChecksums" => { + #[allow(non_camel_case_types)] + struct GetAccGrandBucketChecksumsSvc(pub Arc); + impl tonic::server::UnaryService<()> + for GetAccGrandBucketChecksumsSvc + { + type Response = super::AccGrandBucketChecksumsList; + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request<()>) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_acc_grand_bucket_checksums( + &inner, request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetAccGrandBucketChecksumsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/consistencyapi.AccConsistencyService/GetAccBucketChecksumsInGrandBucket" => { + #[allow(non_camel_case_types)] + struct GetAccBucketChecksumsInGrandBucketSvc( + pub Arc, + ); + impl + tonic::server::UnaryService + for GetAccBucketChecksumsInGrandBucketSvc + { + type Response = super::AccBucketChecksumsList; + type Future = BoxFuture, tonic::Status>; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_acc_bucket_checksums_in_grand_bucket( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetAccBucketChecksumsInGrandBucketSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = 
grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/consistencyapi.AccConsistencyService/GetAccsInBucket" => { + #[allow(non_camel_case_types)] + struct GetAccsInBucketSvc(pub Arc); + impl tonic::server::UnaryService + for GetAccsInBucketSvc + { + type Response = super::AccList; + type Future = BoxFuture, tonic::Status>; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_accs_in_bucket(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetAccsInBucketSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/consistencyapi.AccConsistencyService/ProposeMissingAccChanges" => { + #[allow(non_camel_case_types)] + struct ProposeMissingAccChangesSvc(pub Arc); + impl tonic::server::UnaryService + for ProposeMissingAccChangesSvc + { + type Response = (); + type Future = BoxFuture, tonic::Status>; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::propose_missing_acc_changes( + &inner, request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ProposeMissingAccChangesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => Box::pin(async move { + Ok(http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap()) + }), + } + } + } + impl Clone for AccConsistencyServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for AccConsistencyServiceServer { + const NAME: &'static str = 
"consistencyapi.AccConsistencyService"; + } +} diff --git a/grpc/src/consistencyapi_impl.rs b/grpc/src/consistencyapi_impl.rs new file mode 100644 index 000000000..4dce63b83 --- /dev/null +++ b/grpc/src/consistencyapi_impl.rs @@ -0,0 +1,633 @@ +use std::str::FromStr; +use std::sync::Arc; + +use interface::checksums_storage::{ + AccBucketCksm, AccChecksumServiceApi, AccGrandBucketCksm, AccLastChange, BbgmChangePos, + BbgmChangeRecord, BbgmChecksumServiceApi, BbgmEpochCksm, BbgmGrandEpochCksm, +}; +use solana_sdk::pubkey::Pubkey; +use tonic::async_trait; +use tonic::transport::{Channel, Uri}; + +use crate::consistencyapi::acc_consistency_service_client::AccConsistencyServiceClient; +use crate::consistencyapi::acc_consistency_service_server::AccConsistencyService; +use crate::consistencyapi::bbgm_consistency_service_client::BbgmConsistencyServiceClient; +use crate::consistencyapi::bbgm_consistency_service_server::BbgmConsistencyService; +use crate::consistencyapi::{ + Acc, AccBucketChecksum, AccBucketChecksumsList, AccGrandBucketChecksum, + AccGrandBucketChecksumsList, AccList, BbgmChange, BbgmChangeList, BbgmChangePosition, + BbgmEarlistGrandEpoch, BbgmEpoch, BbgmEpochList, BbgmGrandEpoch, BbgmGrandEpochList, + GetAccBucketsReq, GetAccReq, GetBbgmChangesReq, GetBbgmEpochsReq, GetBbgmGrandEpochsReq, +}; +use crate::error::GrpcError; + +pub struct ConsistencyApiServerImpl { + bbgm_service: Arc, + acc_service: Arc, +} + +#[async_trait] +impl BbgmConsistencyService for ConsistencyApiServerImpl { + async fn get_bbgm_earliest_grand_epoch( + &self, + _request: tonic::Request<()>, + ) -> std::result::Result, tonic::Status> { + let earliest_grand_epoch = match self.bbgm_service.get_earliest_grand_epoch().await { + Ok(v) => v, + Err(e) => return Err(tonic::Status::internal(e.to_string())), + }; + let response = BbgmEarlistGrandEpoch { + grand_epoch: earliest_grand_epoch.map(|v| v as u32), + }; + Ok(tonic::Response::new(response)) + } + + async fn get_bbgm_grand_epoch_checksums( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let GetBbgmGrandEpochsReq { + grand_epoch, + limit, + after, + } = request.into_inner(); + let grand_epoch = grand_epoch as u16; + + let after_pk = if let Some(bytes) = after { + let Ok(pk) = Pubkey::try_from(bytes) else { + return Err(tonic::Status::invalid_argument( + "Invalid continuation value 'after'", + )); + }; + Some(pk) + } else { + None + }; + let grand_epoch_checksums = match self + .bbgm_service + .list_grand_epoch_checksums(grand_epoch, limit, after_pk) + .await + { + Ok(v) => v, + Err(e) => return Err(tonic::Status::internal(e.to_string())), + }; + let response_records = grand_epoch_checksums + .into_iter() + .map(|ge| convert_granch_epoch_checksum(ge, grand_epoch)) + .collect::>(); + let response = BbgmGrandEpochList { + list: response_records, + }; + Ok(tonic::Response::new(response)) + } + + async fn get_bbgm_epoch_checksums_in_grand_epoch( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let GetBbgmEpochsReq { + tree_pubkey, + grand_epoch, + } = request.into_inner(); + let Ok(tree) = Pubkey::try_from(tree_pubkey) else { + return Err(tonic::Status::invalid_argument("Invalid tree pubkey")); + }; + + let db_epochs = match self + .bbgm_service + .list_epoch_checksums(grand_epoch as u16, tree) + .await + { + Ok(v) => v, + Err(e) => return Err(tonic::Status::internal(e.to_string())), + }; + let epochs = db_epochs.into_iter().map(|e| e.into()).collect::>(); + let response = BbgmEpochList { list: epochs 
}; + Ok(tonic::Response::new(response)) + } + + async fn get_bbgm_changes_in_epoch( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let GetBbgmChangesReq { + tree_pubkey, + epoch, + limit, + after, + } = request.into_inner(); + let Ok(tree) = Pubkey::try_from(tree_pubkey) else { + return Err(tonic::Status::invalid_argument("Invalid tree pubkey")); + }; + + let db_changes = match self + .bbgm_service + .list_epoch_changes(epoch, tree, limit, after.map(|p| p.into())) + .await + { + Ok(v) => v, + Err(e) => return Err(tonic::Status::internal(e.to_string())), + }; + let changes = db_changes + .into_iter() + .map(|change| change.into()) + .collect::>(); + + let response = BbgmChangeList { list: changes }; + Ok(tonic::Response::new(response)) + } + + async fn propose_missing_bbgm_changes( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let change_list = request.into_inner().list; + let mut changes = Vec::with_capacity(change_list.len()); + + for BbgmChange { + tree_pubkey, + slot, + seq, + signature, + } in change_list + { + let Ok(tree) = Pubkey::try_from(tree_pubkey) else { + return Err(tonic::Status::invalid_argument("Invalid tree pubkey")); + }; + let record = BbgmChangeRecord { + tree_pubkey: tree, + slot, + seq, + signature, + }; + changes.push(record); + } + self.bbgm_service.propose_missing_changes(&changes).await; + Ok(tonic::Response::new(())) + } +} + +#[async_trait] +impl AccConsistencyService for ConsistencyApiServerImpl { + async fn get_acc_grand_bucket_checksums( + &self, + _request: tonic::Request<()>, + ) -> std::result::Result, tonic::Status> { + let db_grand_buckets = match self.acc_service.list_grand_buckets().await { + Ok(v) => v, + Err(e) => return Err(tonic::Status::internal(e.to_string())), + }; + let grand_buckets = db_grand_buckets + .into_iter() + .map(|gb| gb.into()) + .collect::>(); + let response = AccGrandBucketChecksumsList { + list: grand_buckets, + }; + Ok(tonic::Response::new(response)) + } + + async fn get_acc_bucket_checksums_in_grand_bucket( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let GetAccBucketsReq { grand_bucket } = request.into_inner(); + let db_buckets = match self + .acc_service + .list_bucket_checksums(grand_bucket as u16) + .await + { + Ok(v) => v, + Err(e) => return Err(tonic::Status::internal(e.to_string())), + }; + let buckets = db_buckets.into_iter().map(|b| b.into()).collect::>(); + let response = AccBucketChecksumsList { list: buckets }; + Ok(tonic::Response::new(response)) + } + + async fn get_accs_in_bucket( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let GetAccReq { + bucket, + limit, + after, + } = request.into_inner(); + + let after_pk = if let Some(bytes) = after { + let Ok(pk) = Pubkey::try_from(bytes) else { + return Err(tonic::Status::invalid_argument( + "Invalid continuation value 'after'", + )); + }; + Some(pk) + } else { + None + }; + + let db_accs = match self + .acc_service + .list_accounts(bucket as u16, limit, after_pk) + .await + { + Ok(v) => v, + Err(e) => return Err(tonic::Status::internal(e.to_string())), + }; + let accs = db_accs + .into_iter() + .map(|acc| acc.into()) + .collect::>(); + let response = AccList { list: accs }; + Ok(tonic::Response::new(response)) + } + + async fn propose_missing_acc_changes( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let AccList { list } = request.into_inner(); + let mut accs = 
Vec::with_capacity(list.len()); + for Acc { + account_pubkey, + slot, + write_version, + } in list + { + let Ok(acc_pk) = Pubkey::try_from(account_pubkey) else { + return Err(tonic::Status::invalid_argument("Invalid account pubkey")); + }; + let acc = AccLastChange { + account_pubkey: acc_pk, + slot, + write_version, + }; + accs.push(acc); + } + self.acc_service.propose_missing_changes(accs).await; + Ok(tonic::Response::new(())) + } +} + +impl From for AccGrandBucketChecksum { + fn from(value: AccGrandBucketCksm) -> Self { + let AccGrandBucketCksm { + grand_bucket, + checksum, + } = value; + AccGrandBucketChecksum { + grand_bucket: grand_bucket as u32, + checksum: checksum.map(|c| c.to_vec()), + } + } +} + +impl From for AccBucketChecksum { + fn from(value: AccBucketCksm) -> Self { + let AccBucketCksm { bucket, checksum } = value; + AccBucketChecksum { + bucket: bucket as u32, + checksum: checksum.map(|c| c.to_vec()), + } + } +} + +impl From for Acc { + fn from(value: AccLastChange) -> Self { + let AccLastChange { + account_pubkey, + slot, + write_version, + } = value; + Acc { + account_pubkey: account_pubkey.to_bytes().to_vec(), + slot, + write_version, + } + } +} + +fn convert_granch_epoch_checksum(value: BbgmGrandEpochCksm, grand_epoch: u16) -> BbgmGrandEpoch { + let BbgmGrandEpochCksm { + tree_pubkey, + checksum, + } = value; + BbgmGrandEpoch { + grand_epoch: grand_epoch as u32, + tree_pubkey: tree_pubkey.to_bytes().to_vec(), + checksum: checksum.map(|arr| arr.to_vec()), + } +} + +impl From for BbgmEpoch { + fn from(value: BbgmEpochCksm) -> Self { + let BbgmEpochCksm { + epoch, + tree_pubkey, + checksum, + } = value; + BbgmEpoch { + epoch, + tree_pubkey: tree_pubkey.to_bytes().to_vec(), + checksum: checksum.map(|arr| arr.to_vec()), + } + } +} + +impl From for BbgmChange { + fn from(value: BbgmChangeRecord) -> Self { + let BbgmChangeRecord { + tree_pubkey, + slot, + seq, + signature, + } = value; + BbgmChange { + tree_pubkey: tree_pubkey.to_bytes().to_vec(), + slot, + seq, + signature, + } + } +} + +#[allow(clippy::from_over_into)] +impl Into for BbgmChangePosition { + fn into(self) -> BbgmChangePos { + BbgmChangePos { + slot: self.slot, + seq: self.seq, + } + } +} + +pub struct BbgmConsistencyApiClientImpl { + client: tokio::sync::Mutex>, +} + +impl BbgmConsistencyApiClientImpl { + pub async fn new(peer: &str) -> Result { + let url = Uri::from_str(peer).map_err(|e| GrpcError::UriCreate(e.to_string()))?; + let channel = Channel::builder(url).connect().await?; + + Ok(BbgmConsistencyApiClientImpl { + client: tokio::sync::Mutex::new(BbgmConsistencyServiceClient::new(channel)), + }) + } +} + +#[async_trait] +impl BbgmChecksumServiceApi for BbgmConsistencyApiClientImpl { + async fn get_earliest_grand_epoch(&self) -> anyhow::Result> { + let grpc_request = tonic::Request::new(()); + let mut client = self.client.lock().await; + let grpc_response = client.get_bbgm_earliest_grand_epoch(grpc_request).await?; + let result = grpc_response.into_inner().grand_epoch.map(|v| v as u16); + Ok(result) + } + + async fn list_grand_epoch_checksums( + &self, + grand_epoch: u16, + limit: Option, + after: Option, + ) -> anyhow::Result> { + let grpc_request = tonic::Request::new(GetBbgmGrandEpochsReq { + grand_epoch: grand_epoch as u32, + limit, + after: after.map(|pk| pk.to_bytes().to_vec()), + }); + let mut client = self.client.lock().await; + let grpc_response = client.get_bbgm_grand_epoch_checksums(grpc_request).await?; + let list = grpc_response.into_inner().list; + let mut result = 
Vec::with_capacity(list.len()); + for BbgmGrandEpoch { + grand_epoch: _, + tree_pubkey, + checksum, + } in list + { + let pk = Pubkey::try_from(tree_pubkey) + .map_err(|v| anyhow::anyhow!("Invalid grand epoch tree pubkey bytes: {v:?}"))?; + let chksm: Option<[u8; 32]> = checksum + .map(|c| c.try_into()) + .transpose() + .map_err(|v| anyhow::anyhow!("Invalid checksum for epoch tree {pk}: {v:?}"))?; + result.push(BbgmGrandEpochCksm { + tree_pubkey: pk, + checksum: chksm, + }); + } + Ok(result) + } + + async fn list_epoch_checksums( + &self, + grand_epoch: u16, + tree_pubkey: Pubkey, + ) -> anyhow::Result> { + let grpc_request = tonic::Request::new(GetBbgmEpochsReq { + tree_pubkey: tree_pubkey.to_bytes().to_vec(), + grand_epoch: grand_epoch as u32, + }); + let mut client = self.client.lock().await; + let grpc_response = client + .get_bbgm_epoch_checksums_in_grand_epoch(grpc_request) + .await?; + let list = grpc_response.into_inner().list; + let mut result = Vec::with_capacity(list.len()); + for BbgmEpoch { + epoch, + tree_pubkey, + checksum, + } in list + { + let pk = Pubkey::try_from(tree_pubkey) + .map_err(|v| anyhow::anyhow!("Invalid epoch tree pubkey bytes: {v:?}"))?; + let chksm: Option<[u8; 32]> = checksum + .map(|c| c.try_into()) + .transpose() + .map_err(|v| anyhow::anyhow!("Invalid checksum for epoch tree {pk}: {v:?}"))?; + result.push(BbgmEpochCksm { + epoch, + tree_pubkey: pk, + checksum: chksm, + }); + } + Ok(result) + } + + async fn list_epoch_changes( + &self, + epoch: u32, + tree_pubkey: Pubkey, + limit: Option, + after: Option, + ) -> anyhow::Result> { + let grpc_request = tonic::Request::new(GetBbgmChangesReq { + tree_pubkey: tree_pubkey.to_bytes().to_vec(), + epoch, + limit, + after: after.map(|BbgmChangePos { slot, seq }| BbgmChangePosition { slot, seq }), + }); + let mut client = self.client.lock().await; + let grpc_response = client.get_bbgm_changes_in_epoch(grpc_request).await?; + let list = grpc_response.into_inner().list; + let mut result = Vec::with_capacity(list.len()); + for BbgmChange { + tree_pubkey, + slot, + seq, + signature, + } in list + { + let pk = Pubkey::try_from(tree_pubkey) + .map_err(|v| anyhow::anyhow!("Invalid epoch tree pubkey bytes: {v:?}"))?; + result.push(BbgmChangeRecord { + tree_pubkey: pk, + slot, + seq, + signature, + }); + } + Ok(result) + } + + async fn propose_missing_changes(&self, changes: &[BbgmChangeRecord]) { + let req_changes = changes + .iter() + .map( + |BbgmChangeRecord { + tree_pubkey, + slot, + seq, + signature, + }| BbgmChange { + tree_pubkey: tree_pubkey.to_bytes().to_vec(), + slot: *slot, + seq: *seq, + signature: signature.to_owned(), + }, + ) + .collect::>(); + let grpc_request = tonic::Request::new(BbgmChangeList { list: req_changes }); + let mut client = self.client.lock().await; + let _ = client.propose_missing_bbgm_changes(grpc_request).await; + } +} + +pub struct AccConsistencyApiClientImpl { + client: tokio::sync::Mutex>, +} + +impl AccConsistencyApiClientImpl { + pub async fn new(peer: &str) -> Result { + let url = Uri::from_str(peer).map_err(|e| GrpcError::UriCreate(e.to_string()))?; + let channel = Channel::builder(url).connect().await?; + + Ok(AccConsistencyApiClientImpl { + client: tokio::sync::Mutex::new(AccConsistencyServiceClient::new(channel)), + }) + } +} + +#[async_trait] +impl AccChecksumServiceApi for AccConsistencyApiClientImpl { + async fn list_grand_buckets(&self) -> anyhow::Result> { + let grpc_request = tonic::Request::new(()); + let mut client = self.client.lock().await; + let grpc_response = 
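// A minimal usage sketch for the bubblegum client above: connect to a trusted
// peer and page through the changes it holds for one tree in one epoch.
// `peer_url`, `tree` and `epoch` are placeholders; the page size of 1000 is
// arbitrary.
#[allow(dead_code)]
async fn fetch_peer_changes_example(
    peer_url: &str,
    tree: Pubkey,
    epoch: u32,
) -> anyhow::Result<Vec<BbgmChangeRecord>> {
    let Ok(client) = BbgmConsistencyApiClientImpl::new(peer_url).await else {
        anyhow::bail!("cannot connect to peer {peer_url}");
    };
    let mut all_changes = Vec::new();
    let mut after: Option<BbgmChangePos> = None;
    loop {
        let page = client
            .list_epoch_changes(epoch, tree, Some(1000), after.clone())
            .await?;
        let Some(last) = page.last() else { break };
        after = Some(BbgmChangePos {
            slot: last.slot,
            seq: last.seq,
        });
        all_changes.extend(page);
    }
    Ok(all_changes)
}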
client.get_acc_grand_bucket_checksums(grpc_request).await?; + let list = grpc_response.into_inner().list; + let mut result = Vec::with_capacity(list.len()); + for AccGrandBucketChecksum { + grand_bucket, + checksum, + } in list + { + let chksm: Option<[u8; 32]> = + checksum.map(|c| c.try_into()).transpose().map_err(|v| { + anyhow::anyhow!("Invalid checksum for grand bucket {grand_bucket}: {v:?}") + })?; + result.push(AccGrandBucketCksm { + grand_bucket: grand_bucket as u16, + checksum: chksm, + }); + } + Ok(result) + } + + async fn list_bucket_checksums(&self, grand_bucket: u16) -> anyhow::Result> { + let grpc_request = tonic::Request::new(GetAccBucketsReq { + grand_bucket: grand_bucket as u32, + }); + let mut client = self.client.lock().await; + let grpc_response = client + .get_acc_bucket_checksums_in_grand_bucket(grpc_request) + .await?; + let list = grpc_response.into_inner().list; + let mut result = Vec::with_capacity(list.len()); + for AccBucketChecksum { bucket, checksum } in list { + let chksm: Option<[u8; 32]> = checksum + .map(|c| c.try_into()) + .transpose() + .map_err(|v| anyhow::anyhow!("Invalid checksum for bucket {bucket}: {v:?}"))?; + result.push(AccBucketCksm { + bucket: bucket as u16, + checksum: chksm, + }); + } + Ok(result) + } + + async fn list_accounts( + &self, + bucket: u16, + limit: Option, + after: Option, + ) -> anyhow::Result> { + let grpc_request = tonic::Request::new(GetAccReq { + bucket: bucket as u32, + limit, + after: after.map(|v| v.to_bytes().to_vec()), + }); + let mut client = self.client.lock().await; + let grpc_response = client.get_accs_in_bucket(grpc_request).await?; + let list = grpc_response.into_inner().list; + let mut result = Vec::with_capacity(list.len()); + for Acc { + account_pubkey, + slot, + write_version, + } in list + { + let pk = Pubkey::try_from(account_pubkey) + .map_err(|v| anyhow::anyhow!("Invalid account pubkey bytes: {v:?}"))?; + result.push(AccLastChange { + account_pubkey: pk, + slot, + write_version, + }); + } + Ok(result) + } + + async fn propose_missing_changes(&self, changes: Vec) { + let req_changes = changes + .iter() + .map( + |AccLastChange { + account_pubkey, + slot, + write_version, + }| Acc { + account_pubkey: account_pubkey.to_bytes().to_vec(), + slot: *slot, + write_version: *write_version, + }, + ) + .collect::>(); + let grpc_request = tonic::Request::new(AccList { list: req_changes }); + let mut client = self.client.lock().await; + let _ = client.propose_missing_acc_changes(grpc_request).await; + } +} diff --git a/grpc/src/lib.rs b/grpc/src/lib.rs index 7880450ff..c943e99cf 100644 --- a/grpc/src/lib.rs +++ b/grpc/src/lib.rs @@ -1,6 +1,8 @@ pub mod asseturls; pub mod asseturls_impl; pub mod client; +pub mod consistencyapi; +pub mod consistencyapi_impl; pub mod error; pub mod gapfiller; mod mapper; diff --git a/interface/Cargo.toml b/interface/Cargo.toml index ef755c516..a4ea510ba 100644 --- a/interface/Cargo.toml +++ b/interface/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +anyhow = { workspace = true } async-trait = { workspace = true } futures = { workspace = true } mockall = { workspace = true } diff --git a/interface/src/aura_peers_provides.rs b/interface/src/aura_peers_provides.rs new file mode 100644 index 000000000..69f9e29e5 --- /dev/null +++ b/interface/src/aura_peers_provides.rs @@ -0,0 +1,9 @@ +use async_trait::async_trait; + +/// Aura can connect other Aura peers to exchange various information. 
+#[async_trait] +pub trait AuraPeersProvides { + /// Provide list of URLs of other Aura nodes that are trusted, + /// meaning we don't expect that a data from them might be incorrect intentionally. + async fn list_trusted_peers(&self) -> Vec; +} diff --git a/interface/src/checksums_storage.rs b/interface/src/checksums_storage.rs new file mode 100644 index 000000000..47dd44b6e --- /dev/null +++ b/interface/src/checksums_storage.rs @@ -0,0 +1,108 @@ +use async_trait::async_trait; +use solana_sdk::pubkey::Pubkey; + +/// Type of checksum for bubblegum epochs and account NFT buckets. +/// It is technically a SHA3 hash. +pub type Chksm = [u8; 32]; + +/// Data transfer object for bubblegum grand epoch. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct BbgmGrandEpochCksm { + pub tree_pubkey: Pubkey, + pub checksum: Option<[u8; 32]>, +} + +/// Data transfer object for bubblegum epoch. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct BbgmEpochCksm { + pub epoch: u32, + pub tree_pubkey: Pubkey, + pub checksum: Option<[u8; 32]>, +} + +/// Data transfer object for bubblegum tree change. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct BbgmChangeRecord { + pub tree_pubkey: Pubkey, + pub slot: u64, + pub seq: u64, + pub signature: String, +} + +/// Used to specify offset when fetching a portion of +/// bubblegum tree changes. +#[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord)] +pub struct BbgmChangePos { + pub slot: u64, + pub seq: u64, +} + +/// Data transfer object for account NFT grand bucket. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct AccGrandBucketCksm { + pub grand_bucket: u16, + pub checksum: Option<[u8; 32]>, +} + +/// Data transfer object for account NFT bucket. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct AccBucketCksm { + pub bucket: u16, + pub checksum: Option<[u8; 32]>, +} + +/// Data transfer object for account NFT last received state. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct AccLastChange { + pub account_pubkey: Pubkey, + pub slot: u64, + pub write_version: u64, +} + +/// Interface for querying bubblegum checksums from peer +/// or local storage. +#[async_trait] +pub trait BbgmChecksumServiceApi { + async fn get_earliest_grand_epoch(&self) -> anyhow::Result>; + + async fn list_grand_epoch_checksums( + &self, + grand_epoch: u16, + limit: Option, + after: Option, + ) -> anyhow::Result>; + + async fn list_epoch_checksums( + &self, + grand_epoch: u16, + tree_pubkey: Pubkey, + ) -> anyhow::Result>; + + async fn list_epoch_changes( + &self, + epoch: u32, + tree_pubkey: Pubkey, + limit: Option, + after: Option, + ) -> anyhow::Result>; + + async fn propose_missing_changes(&self, changes: &[BbgmChangeRecord]); +} + +/// Interface for querying Account NFT checksums from peer +/// or local storage. 
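// A minimal in-memory implementation sketch of the `AuraPeersProvides` trait,
// useful in tests or when the peer list is fixed at startup. `StaticAuraPeers`
// is a hypothetical name; the imports assume the snippet lives in a crate that
// depends on `interface`.
use async_trait::async_trait;
use interface::aura_peers_provides::AuraPeersProvides;

pub struct StaticAuraPeers {
    peers: Vec<String>,
}

#[async_trait]
impl AuraPeersProvides for StaticAuraPeers {
    async fn list_trusted_peers(&self) -> Vec<String> {
        self.peers.clone()
    }
}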
+#[async_trait] +pub trait AccChecksumServiceApi { + async fn list_grand_buckets(&self) -> anyhow::Result>; + + async fn list_bucket_checksums(&self, grand_bucket: u16) -> anyhow::Result>; + + async fn list_accounts( + &self, + bucket: u16, + limit: Option, + after: Option, + ) -> anyhow::Result>; + + async fn propose_missing_changes(&self, changes: Vec); +} diff --git a/interface/src/lib.rs b/interface/src/lib.rs index b07c0b184..9dbaf53b4 100644 --- a/interface/src/lib.rs +++ b/interface/src/lib.rs @@ -2,7 +2,9 @@ pub mod account_balance; pub mod assert_urls; pub mod asset_sigratures; pub mod asset_streaming_and_discovery; +pub mod aura_peers_provides; pub mod batch_mint; +pub mod checksums_storage; pub mod consistency_check; pub mod error; pub mod fork_cleaner; diff --git a/nft_ingester/Cargo.toml b/nft_ingester/Cargo.toml index 9897c7a4f..ab9a5839f 100644 --- a/nft_ingester/Cargo.toml +++ b/nft_ingester/Cargo.toml @@ -5,6 +5,7 @@ edition = "2021" publish = false [dependencies] +anyhow = { workspace = true } serde_derive = { workspace = true } env_logger = { workspace = true } thiserror = { workspace = true } diff --git a/nft_ingester/benches/ingester_benchmark.rs b/nft_ingester/benches/ingester_benchmark.rs index 57bd3b2b9..d80d115b0 100644 --- a/nft_ingester/benches/ingester_benchmark.rs +++ b/nft_ingester/benches/ingester_benchmark.rs @@ -35,6 +35,7 @@ async fn bench_ingest( rocks_dest.clone(), Arc::new(IngesterMetricsConfig::new()), buffer.json_tasks.clone(), + None, )); let tx_ingester = Arc::new(transaction_ingester::BackfillTransactionIngester::new( diff --git a/nft_ingester/src/bin/ingester/main.rs b/nft_ingester/src/bin/ingester/main.rs index f5fddffce..ccf62a1a0 100644 --- a/nft_ingester/src/bin/ingester/main.rs +++ b/nft_ingester/src/bin/ingester/main.rs @@ -1,6 +1,7 @@ use arweave_rs::consts::ARWEAVE_BASE_URL; use arweave_rs::Arweave; use nft_ingester::batch_mint::batch_mint_persister::{BatchMintDownloaderForPersister, BatchMintPersister}; +use nft_ingester::consistency_bg_job::FileSrcAuraPeersProvides; use nft_ingester::scheduler::Scheduler; use postgre_client::PG_MIGRATIONS_PATH; use std::path::PathBuf; @@ -18,7 +19,7 @@ use plerkle_messenger::ConsumptionType; use pprof::ProfilerGuardBuilder; use rocks_db::bubblegum_slots::{BubblegumSlotGetter, IngestableSlotGetter}; use solana_client::nonblocking::rpc_client::RpcClient; -use tokio::sync::{broadcast, Mutex}; +use tokio::sync::{broadcast, mpsc, Mutex}; use tokio::task::JoinSet; use tokio::time::sleep as tokio_sleep; use tracing::{error, info, warn}; @@ -41,6 +42,9 @@ use nft_ingester::buffer::{debug_buffer, Buffer}; use nft_ingester::config::{ setup_config, ApiConfig, BackfillerConfig, BackfillerMode, IngesterConfig, MessageSource, INGESTER_CONFIG_PREFIX, }; +use nft_ingester::consistency_calculator; +use nft_ingester::consistency_calculator::NftChangesTracker; +use nft_ingester::consistency_calculator::NTF_CHANGES_NOTIFICATION_QUEUE_SIZE; use nft_ingester::fork_cleaner::{run_fork_cleaner, ForkCleaner}; use nft_ingester::gapfiller::{process_asset_details_stream_wrapper, run_sequence_consistent_gapfiller}; use nft_ingester::index_syncronizer::Synchronizer; @@ -188,6 +192,20 @@ pub async fn main() -> Result<(), IngesterError> { } let rpc_client = Arc::new(RpcClient::new(config.rpc_host.clone())); + + let (nft_change_snd, nft_change_rcv) = mpsc::channel(NTF_CHANGES_NOTIFICATION_QUEUE_SIZE); + let changes_tracker = Arc::new(NftChangesTracker::new(nft_change_snd.clone())); + consistency_calculator::run_bg_consistency_calculator( 
+ nft_change_rcv, + primary_rocks_storage.clone(), + shutdown_rx.resubscribe(), + ); + + if let Some(peer_urls_file) = config.peer_urls_file.as_ref() { + let peers_provider = Arc::new(FileSrcAuraPeersProvides::new(peer_urls_file.clone())); + nft_ingester::consistency_bg_job::run_consistenct_bg_job(primary_rocks_storage.clone(), peers_provider); + } + for _ in 0..config.accounts_parsing_workers { match config.message_source { MessageSource::Redis => { @@ -210,6 +228,7 @@ pub async fn main() -> Result<(), IngesterError> { Some(metrics_state.message_process_metrics.clone()), index_pg_storage.clone(), rpc_client.clone(), + changes_tracker.clone(), mutexed_tasks.clone(), ) .await; @@ -227,6 +246,7 @@ pub async fn main() -> Result<(), IngesterError> { None, index_pg_storage.clone(), rpc_client.clone(), + changes_tracker.clone(), mutexed_tasks.clone(), ) .await; @@ -248,6 +268,7 @@ pub async fn main() -> Result<(), IngesterError> { None, index_pg_storage.clone(), rpc_client.clone(), + changes_tracker.clone(), mutexed_tasks.clone(), ) .await; @@ -382,6 +403,7 @@ pub async fn main() -> Result<(), IngesterError> { primary_rocks_storage.clone(), metrics_state.ingester_metrics.clone(), buffer.json_tasks.clone(), + Some(changes_tracker.clone()), )); for _ in 0..config.transactions_parsing_workers { @@ -429,6 +451,7 @@ pub async fn main() -> Result<(), IngesterError> { primary_rocks_storage.clone(), metrics_state.ingester_metrics.clone(), buffer.json_tasks.clone(), + Some(changes_tracker.clone()), )); let tx_ingester = Arc::new(BackfillTransactionIngester::new(backfill_bubblegum_updates_processor.clone())); let backfiller_config = setup_config::(INGESTER_CONFIG_PREFIX); @@ -711,6 +734,8 @@ pub async fn main() -> Result<(), IngesterError> { let fork_cleaner = ForkCleaner::new( primary_rocks_storage.clone(), primary_rocks_storage.clone(), + primary_rocks_storage.clone(), + Some(changes_tracker.clone()), metrics_state.fork_cleaner_metrics.clone(), ); let rx = shutdown_rx.resubscribe(); diff --git a/nft_ingester/src/bin/raw_backfiller/main.rs b/nft_ingester/src/bin/raw_backfiller/main.rs index 1c6624410..64d7531b6 100644 --- a/nft_ingester/src/bin/raw_backfiller/main.rs +++ b/nft_ingester/src/bin/raw_backfiller/main.rs @@ -5,6 +5,9 @@ use nft_ingester::buffer::Buffer; use nft_ingester::config::{ self, init_logger, setup_config, BackfillerConfig, RawBackfillConfig, INGESTER_CONFIG_PREFIX, }; +use nft_ingester::consistency_calculator; +use nft_ingester::consistency_calculator::NftChangesTracker; +use nft_ingester::consistency_calculator::NTF_CHANGES_NOTIFICATION_QUEUE_SIZE; use nft_ingester::error::IngesterError; use nft_ingester::init::graceful_stop; use nft_ingester::processors::transaction_based::bubblegum_updates_processor::BubblegumTxProcessor; @@ -19,6 +22,7 @@ use metrics_utils::{BackfillerMetricsConfig, IngesterMetricsConfig}; use rocks_db::bubblegum_slots::BubblegumSlotGetter; use rocks_db::migrator::MigrationState; use rocks_db::Storage; +use tokio::sync::mpsc; use tokio::sync::{broadcast, Mutex}; use tokio::task::JoinSet; @@ -139,6 +143,14 @@ pub async fn main() -> Result<(), IngesterError> { ); let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); + let (nft_change_snd, nft_change_rcv) = mpsc::channel(NTF_CHANGES_NOTIFICATION_QUEUE_SIZE); + let changes_tracker = Arc::new(NftChangesTracker::new(nft_change_snd.clone())); + consistency_calculator::run_bg_consistency_calculator( + nft_change_rcv, + rocks_storage.clone(), + shutdown_rx.resubscribe(), + ); + match 
backfiller_config.backfiller_mode { config::BackfillerMode::IngestDirectly => { todo!(); @@ -174,6 +186,7 @@ pub async fn main() -> Result<(), IngesterError> { rocks_storage.clone(), ingester_metrics.clone(), buffer.json_tasks.clone(), + Some(changes_tracker.clone()), )); let tx_ingester = Arc::new(transaction_ingester::BackfillTransactionIngester::new( diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index 0300d81ae..df441dfd8 100644 --- a/nft_ingester/src/config.rs +++ b/nft_ingester/src/config.rs @@ -190,6 +190,7 @@ pub struct IngesterConfig { pub backfiller_source_mode: BackfillerSourceMode, #[serde(default = "default_synchronizer_parallel_tasks")] pub synchronizer_parallel_tasks: usize, + pub peer_urls_file: Option, #[serde(default)] pub run_temp_sync_during_dump: bool, #[serde(default = "default_parallel_json_downloaders")] diff --git a/nft_ingester/src/consistency_bg_job.rs b/nft_ingester/src/consistency_bg_job.rs new file mode 100644 index 000000000..1f7b4f873 --- /dev/null +++ b/nft_ingester/src/consistency_bg_job.rs @@ -0,0 +1,635 @@ +//! This module contains background job that after each epoch (10 000 slots) is finished, +//! calls Aura peers to get their epoch ckecksums, and searches for missing data. +//! When a missing data is found, corresponding blocks are requsted from +//! peers. + +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::Duration, +}; + +use grpc::consistencyapi_impl::{AccConsistencyApiClientImpl, BbgmConsistencyApiClientImpl}; +use interface::signature_persistence::BlockConsumer; +use interface::signature_persistence::BlockProducer; +use interface::{ + aura_peers_provides::AuraPeersProvides, + checksums_storage::{ + AccBucketCksm, AccChecksumServiceApi, AccGrandBucketCksm, AccLastChange, BbgmChangeRecord, + BbgmChecksumServiceApi, BbgmEpochCksm, BbgmGrandEpochCksm, Chksm, + }, +}; +use rocks_db::{ + storage_consistency::{ + calc_exchange_slot_for_epoch, epoch_of_slot, grand_epoch_of_epoch, last_tracked_slot, + slots_to_time, + }, + Storage, +}; +use solana_sdk::pubkey::Pubkey; +use url::Url; + +use crate::consistency_calculator::{get_calculating_acc_epoch, get_calculating_bbgm_epoch}; + +/// Read peer URLs from given file. +/// Each URL is expected to be on a separate line. +pub struct FileSrcAuraPeersProvides { + pub file_path: String, +} + +impl FileSrcAuraPeersProvides { + pub fn new(file_path: String) -> FileSrcAuraPeersProvides { + FileSrcAuraPeersProvides { file_path } + } +} + +#[async_trait::async_trait] +impl AuraPeersProvides for FileSrcAuraPeersProvides { + async fn list_trusted_peers(&self) -> Vec { + match tokio::fs::read_to_string(&self.file_path).await { + Ok(s) => { + let mut result = Vec::new(); + for line in s.lines().filter(|s| !s.is_empty()).map(|s| s.trim()) { + if Url::parse(line).is_err() { + tracing::warn!("Invalid peer URL: {line}"); + } else { + result.push(line.to_string()); + } + } + result + } + Err(e) => { + tracing::error!("Unable to read peers file: {e}"); + Vec::new() + } + } + } +} + +/// Spawns in background functionality for checksums and missing blocks fetching. 
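// Usage sketch for FileSrcAuraPeersProvides: the file referenced by the new
// `peer_urls_file` ingester config option is plain text with one peer URL per
// line, e.g.
//
//   http://aura-peer-1.example.com:8080
//   http://aura-peer-2.example.com:8080
//
// Blank lines are skipped, and lines that do not parse as URLs are logged and
// ignored. The path below is only an example.
#[allow(dead_code)]
async fn load_peers_example() -> Vec<String> {
    let provider = FileSrcAuraPeersProvides::new("/etc/aura/peers.txt".to_string());
    provider.list_trusted_peers().await
}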
+pub fn run_consistenct_bg_job( + storage: Arc, + peers_provider: Arc, +) { + tokio::spawn(async move { + let _ = run_peers_checking_loop(storage, peers_provider).await; + }); +} + +/// Background task that waits for the current epoch to end, then waits a little +/// for late data to come, and the epoch checksums to be calculated, +/// and after that runs an exchange of bubblegum and account checksums +/// to identify changes that had been missing on our side. +/// After missing changes are found, it fetches corresponding blocks from the peer +/// and used gap filling mechanism to process them +/// +/// ## Args: +/// * storage - rocksdb storage +/// * peers_provider - provider of trusted peers +async fn run_peers_checking_loop( + storage: Arc, + peers_provider: Arc, +) { + let mut last_processed_epoch = 0u32; + + loop { + let next_processing_slot = calc_exchange_slot_for_epoch(last_processed_epoch + 1); + let current_slot = last_tracked_slot(); + if current_slot < next_processing_slot { + let duration = slots_to_time(next_processing_slot - current_slot); + tokio::time::sleep(duration).await; + continue; + } + + let epoch_to_process = epoch_of_slot(next_processing_slot); + + let bbgm_task = { + let storage = storage.clone(); + let peers_provider = peers_provider.clone(); + tokio::spawn(async move { + exchange_bbgms_with_peers(epoch_to_process, storage, peers_provider).await + }) + }; + let acc_task = { + let storage = storage.clone(); + let peers_provider = peers_provider.clone(); + tokio::spawn(async move { exchange_account_with_peers(storage, peers_provider).await }) + }; + let _ = bbgm_task.await; + let _ = acc_task.await; + + last_processed_epoch = epoch_to_process; + } +} + +/// Exchanges bubblegum checksum with peers to identify missing bubblegum change, +/// and requests these missing changes from peers. +/// +/// ## Args: +/// * epoch - epoch we want the checksums to be exchanged +/// * storage - local database +/// * peers_provider - source of trusted peers +async fn exchange_bbgms_with_peers( + epoch: u32, + storage: Arc, + peers_provider: Arc, +) { + tracing::info!("Starting bubblegum changes peer-to-peer exchange for epoch={epoch}"); + while get_calculating_bbgm_epoch() + .map(|e| e == epoch) + .unwrap_or(false) + { + tokio::time::sleep(Duration::from_secs(10)).await; + } + + let grand_epoch = grand_epoch_of_epoch(epoch); + + let mut missing_bbgm_changes: HashMap> = HashMap::new(); + let trusted_peers = peers_provider.list_trusted_peers().await; + + for (peer_ind, trusted_peer) in trusted_peers.iter().enumerate() { + tracing::info!("Exchanging bubblegum changes for epoch={epoch} with peer={trusted_peer}"); + let Ok(client) = BbgmConsistencyApiClientImpl::new(trusted_peer) + .await + .map(Arc::new) + else { + tracing::warn!("Cannot connect to peer={trusted_peer}"); + continue; + }; + + let changes_we_miss = + compare_bbgm_with_peer(grand_epoch, storage.as_ref(), client.as_ref()).await; + + for change in changes_we_miss { + match missing_bbgm_changes.get_mut(&change) { + Some(peers_have_change) => { + peers_have_change.insert(peer_ind); + } + None => { + missing_bbgm_changes.insert(change, HashSet::from([peer_ind])); + } + }; + } + } + handle_missing_bbgm_changes(missing_bbgm_changes, trusted_peers, storage).await +} + +/// For given grand epoch, compares our bubblegum changes +/// (by comparing, first, grand epoch checksums and then epoch checksums) +/// with corresponding bubblegum changes of peer, +/// and returns changes that are missing on our side. 
+/// +/// ## Args: +/// * grand_epoch - a grand epoch the exchange is performed for +/// * we - local storage of bubblegum changes (rocksdb storage) +/// * peer - GRPC client for peer +pub async fn compare_bbgm_with_peer( + grand_epoch: u16, + we: &impl BbgmChecksumServiceApi, + peer: &impl BbgmChecksumServiceApi, +) -> Vec { + let mut result = Vec::new(); + let Ok(peer_ge_chksms) = peer + .list_grand_epoch_checksums(grand_epoch, None, None) + .await + else { + return Vec::new(); + }; + let my_ge_chksms = match we.list_grand_epoch_checksums(grand_epoch, None, None).await { + Ok(v) => v, + Err(e) => { + tracing::error!("Error reading grand epochs from DB: {}", e.to_string()); + return Vec::new(); + } + }; + let ge_cmp_res = cmp(&my_ge_chksms, &peer_ge_chksms); + let ge_trees_to_check = ge_cmp_res + .we_miss + .iter() + .chain(ge_cmp_res.different.iter()) + .map(|&a| a.tree_pubkey) + .collect::>(); + for tree_pk in ge_trees_to_check { + let Ok(peer_e_chksms) = peer.list_epoch_checksums(grand_epoch, tree_pk).await else { + continue; + }; + let my_e_chksms = match we.list_epoch_checksums(grand_epoch, tree_pk).await { + Ok(v) => v, + Err(e) => { + tracing::error!("Error reading epochs from DB: {}", e.to_string()); + return result; + } + }; + let e_cmp_res = cmp(&my_e_chksms, &peer_e_chksms); + let epochs_to_check = e_cmp_res + .we_miss + .iter() + .chain(e_cmp_res.different.iter()) + .map(|&a| (a.epoch, a.tree_pubkey)) + .collect::>(); + for (epoch, tree_pubkey) in epochs_to_check { + let Ok(peer_changes) = peer + .list_epoch_changes(epoch, tree_pubkey, None, None) + .await + else { + continue; + }; + let my_changes = match we.list_epoch_changes(epoch, tree_pubkey, None, None).await { + Ok(v) => v, + Err(e) => { + tracing::error!("Error reading bubblegum changes from DB: {}", e.to_string()); + return result; + } + }; + let changes_cmp_res = cmp(&my_changes, &peer_changes); + result.extend(changes_cmp_res.we_miss.into_iter().map(|a| a.to_owned())); + } + } + + result +} + +#[allow(clippy::while_let_on_iterator)] +async fn handle_missing_bbgm_changes( + missing_accounts: HashMap>, + trusted_peers: Vec, + storage: Arc, +) { + let mut clients: HashMap = HashMap::new(); + + for (change, peers) in missing_accounts { + let mut it = peers.iter(); + while let Some(peer_ind) = it.next() { + let client = if let Some(client) = clients.get_mut(peer_ind) { + client + } else { + let Ok(peer_client) = + grpc::client::Client::connect_to_url(&trusted_peers[*peer_ind]).await + else { + continue; + }; + clients.insert(*peer_ind, peer_client); + clients.get_mut(peer_ind).unwrap() + }; + if let Ok(block) = client + .get_block(change.slot, Option::>::None) + .await + { + let _ = storage.consume_block(change.slot, block).await; + } + break; + } + } +} + +/// Exchanges account NFTs checksum with peers to identify missing account NFT changes, +/// and requests these missing changes from peers. 
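// Usage sketch for compare_bbgm_with_peer above: both the local RocksDB
// `Storage` and the gRPC peer client implement `BbgmChecksumServiceApi`, so the
// same drill-down (grand-epoch checksums -> epoch checksums -> individual
// changes) can be run against any pair of them. `peer_url` is a placeholder.
#[allow(dead_code)]
async fn find_missing_bbgm_changes_example(
    storage: &Storage,
    peer_url: &str,
    grand_epoch: u16,
) -> Vec<BbgmChangeRecord> {
    let Ok(peer) = BbgmConsistencyApiClientImpl::new(peer_url).await else {
        return Vec::new();
    };
    compare_bbgm_with_peer(grand_epoch, storage, &peer).await
}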
+/// +/// ## Args: +/// * storage - local database +/// * peers_provider - source of trusted peers +async fn exchange_account_with_peers( + storage: Arc, + peers_provider: Arc, +) { + tracing::info!("Starting account NFT peer-to-peer exchange"); + while get_calculating_acc_epoch().is_some() { + tokio::time::sleep(Duration::from_secs(10)).await; + } + + let mut missing_accounts: HashMap> = HashMap::new(); + let trusted_peers = peers_provider.list_trusted_peers().await; + + for (peer_ind, trusted_peer) in trusted_peers.iter().enumerate() { + tracing::info!("Exchanging account NFT with peer={trusted_peer}"); + let Ok(client) = AccConsistencyApiClientImpl::new(trusted_peer) + .await + .map(Arc::new) + else { + tracing::warn!("Cannot connect to peer={trusted_peer}"); + continue; + }; + + let accs_we_miss: Vec = + compare_acc_with_peer(storage.as_ref(), client.as_ref()).await; + for change in accs_we_miss { + match missing_accounts.get_mut(&change) { + Some(peers_have_change) => { + peers_have_change.insert(peer_ind); + } + None => { + missing_accounts.insert(change, HashSet::from([peer_ind])); + } + }; + } + } + handle_missing_accs(missing_accounts, trusted_peers, storage).await +} + +/// Compares our account NFT info with corresponding account records on peer, +/// and returns records that are missing on our side. +/// +/// ## Args: +/// * we - local storage of account NFTs (rocksdb storage) +/// * peer - GRPC client for peer +pub async fn compare_acc_with_peer( + storage: &impl AccChecksumServiceApi, + client: &impl AccChecksumServiceApi, +) -> Vec { + let mut result = Vec::new(); + + let Ok(peer_grand_buckets) = client.list_grand_buckets().await else { + return result; + }; + let my_grand_buckets = match storage.list_grand_buckets().await { + Ok(v) => v, + Err(e) => { + tracing::error!("Error reading grand buckets from DB: {}", e.to_string()); + return result; + } + }; + let gb_cmp_res = cmp(&peer_grand_buckets, &my_grand_buckets); + + let grand_buckets_to_check = gb_cmp_res + .we_miss + .iter() + .chain(gb_cmp_res.different.iter()) + .map(|&a| a.grand_bucket) + .collect::>(); + + for grand_bucket in grand_buckets_to_check { + let Ok(peer_buckets) = client.list_bucket_checksums(grand_bucket).await else { + continue; + }; + let my_buckets = match storage.list_bucket_checksums(grand_bucket).await { + Ok(v) => v, + Err(e) => { + tracing::error!("Error reading buckets from DB: {}", e.to_string()); + return result; + } + }; + let b_cmp_res = cmp(&peer_buckets, &my_buckets); + + let buckets_to_check = b_cmp_res + .we_miss + .iter() + .chain(b_cmp_res.different.iter()) + .map(|&a| a.bucket) + .collect::>(); + + for bucket in buckets_to_check { + let Ok(peer_accounts) = client.list_accounts(bucket, None, None).await else { + continue; + }; + let my_accounts = match storage.list_accounts(bucket, None, None).await { + Ok(v) => v, + Err(e) => { + tracing::error!("Error reading accounts from DB: {}", e.to_string()); + return result; + } + }; + let acc_cmp_res = cmp(&peer_accounts, &my_accounts); + result.extend(acc_cmp_res.we_miss.into_iter().map(|a| a.to_owned())); + } + } + result +} + +#[allow(clippy::while_let_on_iterator)] +async fn handle_missing_accs( + missing_accounts: HashMap>, + trusted_peers: Vec, + storage: Arc, +) { + let mut clients: HashMap = HashMap::new(); + + for (change, peers) in missing_accounts { + let mut it = peers.iter(); + while let Some(peer_ind) = it.next() { + let client = if let Some(client) = clients.get_mut(peer_ind) { + client + } else { + let Ok(peer_client) = + 
grpc::client::Client::connect_to_url(&trusted_peers[*peer_ind]).await + else { + continue; + }; + clients.insert(*peer_ind, peer_client); + clients.get_mut(peer_ind).unwrap() + }; + if let Ok(block) = client + .get_block(change.slot, Option::>::None) + .await + { + let _ = storage.consume_block(change.slot, block).await; + } + break; + } + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct CmpRes<'a, T> { + pub we_miss: Vec<&'a T>, + pub they_miss: Vec<&'a T>, + pub different: Vec<&'a T>, +} + +#[allow( + clippy::collapsible_else_if, + clippy::comparison_chain, + clippy::needless_range_loop +)] +fn cmp<'a, T>(we: &'a [T], they: &'a [T]) -> CmpRes<'a, T> +where + T: AsKeyVal, + T::Key: PartialEq + Ord, + T::Val: PartialEq, +{ + let mut we_ind = 0usize; + let mut they_ind = 0usize; + + let mut we_miss = Vec::new(); + let mut they_miss = Vec::new(); + let mut different = Vec::new(); + + while we_ind < we.len() && they_ind < they.len() { + let we_key = we[we_ind].key(); + let they_key = they[they_ind].key(); + + if we_key < they_key { + if we[we_ind].val().is_some() { + they_miss.push(&we[we_ind]); + } + we_ind += 1; + } else if we_key > they_key { + if they[they_ind].val().is_some() { + we_miss.push(&they[they_ind]); + } + they_ind += 1; + } else { + if we[we_ind].val() == they[they_ind].val() + || we[we_ind].val().is_none() + || they[they_ind].val().is_none() + { + we_ind += 1; + they_ind += 1; + } else { + different.push(&we[we_ind]); + we_ind += 1; + they_ind += 1; + } + } + } + for i in we_ind..we.len() { + if we[i].val().is_some() { + they_miss.push(&we[i]); + } + } + for i in they_ind..they.len() { + if they[i].val().is_some() { + we_miss.push(&they[i]); + } + } + CmpRes { + we_miss, + they_miss, + different, + } +} + +pub trait AsKeyVal { + type Key; + type Val; + fn key(&self) -> Self::Key; + fn val(&self) -> Option; +} + +impl AsKeyVal for BbgmGrandEpochCksm { + type Key = Pubkey; + type Val = [u8; 32]; + fn key(&self) -> Self::Key { + self.tree_pubkey + } + fn val<'a>(&self) -> Option { + self.checksum + } +} + +impl AsKeyVal for BbgmEpochCksm { + type Key = (u32, Pubkey); + type Val = [u8; 32]; + fn key(&self) -> Self::Key { + (self.epoch, self.tree_pubkey) + } + fn val(&self) -> Option { + self.checksum + } +} + +impl AsKeyVal for BbgmChangeRecord { + type Key = Self; + type Val = (); + fn key(&self) -> Self::Key { + self.to_owned() + } + fn val(&self) -> Option { + Some(()) + } +} + +impl AsKeyVal for AccGrandBucketCksm { + type Key = u16; + type Val = Chksm; + fn key(&self) -> Self::Key { + self.grand_bucket + } + fn val(&self) -> Option { + self.checksum + } +} + +impl AsKeyVal for AccBucketCksm { + type Key = u16; + type Val = Chksm; + fn key(&self) -> Self::Key { + self.bucket + } + fn val(&self) -> Option { + self.checksum + } +} + +impl AsKeyVal for AccLastChange { + type Key = Self; + type Val = (); + fn key(&self) -> Self::Key { + self.to_owned() + } + fn val(&self) -> Option { + Some(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_cmp() { + { + let list_1 = [ + (1, Some('a')), + (2, Some('b')), + (3, Some('c')), + (5, Some('e')), + ]; + let list_2 = [(1, Some('a')), (2, Some('z')), (4, Some('d')), (5, None)]; + + let r = cmp(&list_1, &list_2); + assert_eq!( + r, + CmpRes { + we_miss: vec![&(4, Some('d'))], + they_miss: vec![&(3, Some('c'))], + different: vec![&(2, Some('b'))] + } + ); + } + + { + let list_1 = [ + (0, Some('y')), + (1, Some('a')), + (2, Some('b')), + (3, Some('c')), + (5, Some('e')), + ]; + let list_2 = [(1, Some('a')), (2, 
Some('z')), (4, Some('d')), (5, None)]; + + let result = cmp(&list_1, &list_2); + assert_eq!( + result, + CmpRes { + we_miss: vec![&(4, Some('d'))], + they_miss: vec![&(0, Some('y')), &(3, Some('c'))], + different: vec![&(2, Some('b'))] + } + ); + } + } + + impl AsKeyVal for (i32, Option) { + type Key = i32; + + type Val = char; + + fn key(&self) -> Self::Key { + self.0 + } + + fn val(&self) -> Option { + self.1 + } + } +} diff --git a/nft_ingester/src/consistency_calculator.rs b/nft_ingester/src/consistency_calculator.rs new file mode 100644 index 000000000..285eacc5a --- /dev/null +++ b/nft_ingester/src/consistency_calculator.rs @@ -0,0 +1,785 @@ +//! The module contains functionality for calculating transaction-based +//! and account-based NFT checsums that are used in peer-to-peer +//! Aura nodes communication to identify missing data on a node. + +use rocks_db::batch_savers::BatchSaveStorage; +use rocks_db::{ + column::TypedColumn, + storage_consistency::{ + self, bucket_for_acc, epoch_of_slot, grand_bucket_for_bucket, grand_epoch_of_epoch, + AccountNft, AccountNftBucket, AccountNftBucketKey, AccountNftChange, AccountNftChangeKey, + AccountNftGrandBucket, AccountNftGrandBucketKey, AccountNftKey, BubblegumChange, + BubblegumChangeKey, BubblegumEpoch, BubblegumEpochKey, BubblegumGrandEpoch, + BubblegumGrandEpochKey, ACC_BUCKET_INVALIDATE, ACC_GRAND_BUCKET_INVALIDATE, + BUBBLEGUM_GRAND_EPOCH_INVALIDATED, + }, + Storage, +}; +use solana_sdk::{hash::Hasher, pubkey::Pubkey}; +use std::sync::atomic::AtomicI32; +use std::{ + collections::{BTreeSet, HashSet}, + sync::Arc, + time::Duration, +}; +use storage_consistency::{BUBBLEGUM_EPOCH_CALCULATING, BUBBLEGUM_GRAND_EPOCH_CALCULATING}; +use tokio::sync::{ + mpsc::{Receiver, Sender}, + Mutex, +}; + +/// This flag is set to true before bubblegum epoch calculation is started, +/// and set to false after the calculation is finished. 
+static IS_CALCULATING_BBGM_EPOCH: AtomicI32 = AtomicI32::new(-1); +static IS_CALCULATING_ACC_EPOCH: AtomicI32 = AtomicI32::new(-1); + +fn set_currently_calculated_bbgm_epoch(epoch: u32) { + IS_CALCULATING_BBGM_EPOCH.store(epoch as i32, std::sync::atomic::Ordering::Relaxed); +} + +fn finish_currently_calculated_bbgm_epoch() { + IS_CALCULATING_BBGM_EPOCH.store(-1, std::sync::atomic::Ordering::Relaxed); +} + +fn set_currently_calculated_acc_epoch(epoch: u32) { + IS_CALCULATING_ACC_EPOCH.store(epoch as i32, std::sync::atomic::Ordering::Relaxed); +} + +fn finish_currently_calculated_acc_epoch() { + IS_CALCULATING_ACC_EPOCH.store(-1, std::sync::atomic::Ordering::Relaxed); +} + +pub fn get_calculating_bbgm_epoch() -> Option { + let epoch = IS_CALCULATING_BBGM_EPOCH.load(std::sync::atomic::Ordering::Relaxed); + if epoch > -1 { + Some(epoch as u32) + } else { + None + } +} + +pub fn get_calculating_acc_epoch() -> Option { + let epoch = IS_CALCULATING_ACC_EPOCH.load(std::sync::atomic::Ordering::Relaxed); + if epoch > -1 { + Some(epoch as u32) + } else { + None + } +} + +pub const NTF_CHANGES_NOTIFICATION_QUEUE_SIZE: usize = 1000; + +/// Wait this amount of seconds for late data before starting to calculate the epoch +const EPOCH_CALC_LAG_SEC: u64 = 300; + +/// This message is used to send notifications abount changes from: +/// - bubblegum processor +/// - account processor +/// - fork cleaner +#[derive(Debug, PartialEq, Eq)] +pub enum ConsistencyCalcMsg { + StartingBackfilling, + FinishedBackfilling, + EpochChanged { new_epoch: u32 }, + BubblegumUpdated { tree: Pubkey, slot: u64 }, + AccUpdated { account: Pubkey, slot: u64 }, +} + +/// Component for convenient storing of account NFT changes, +/// and notifying checksum calculator when a whole epoch, +/// or an individual bubblegum three/account checksum should be calculated. +pub struct NftChangesTracker { + sender: Sender, +} + +impl NftChangesTracker { + pub fn new(sender: Sender) -> NftChangesTracker { + NftChangesTracker { sender } + } + + /// Persists given account NFT change into the sotrage, and, if the change is from the epoch + /// that is previous to the current epoch, then also notifies checksums calculator + /// about late data. 
+ /// + /// ## Args: + /// * `batch_storage` - same batch storage that is used to save account data + /// * `account_pubkey` - Pubkey of the NFT account + /// * `slot` - the slot number that change is made in + /// * `write_version` - write version of the change + pub async fn track_account_change( + &self, + batch_storage: &mut BatchSaveStorage, + account_pubkey: Pubkey, + slot: u64, + write_version: u64, + data_hash: u64, + ) { + let epoch = epoch_of_slot(slot); + let key = AccountNftChangeKey { + epoch, + account_pubkey, + slot, + write_version, + data_hash, + }; + let value = AccountNftChange {}; + + let last_slot = storage_consistency::track_slot_counter(slot); + let last_slot_epoch = epoch_of_slot(last_slot); + + if epoch < last_slot_epoch { + let bucket = bucket_for_acc(account_pubkey); + let grand_bucket = grand_bucket_for_bucket(bucket); + let _ = batch_storage.put_acc_grand_bucket( + AccountNftGrandBucketKey::new(grand_bucket), + ACC_GRAND_BUCKET_INVALIDATE, + ); + let _ = batch_storage + .put_acc_bucket(AccountNftBucketKey::new(bucket), ACC_BUCKET_INVALIDATE); + } + + let _ = batch_storage.put_account_change(key, value); + + if epoch < last_slot_epoch { + let _ = self + .sender + .send(ConsistencyCalcMsg::AccUpdated { + account: account_pubkey, + slot, + }) + .await; + } else if epoch > last_slot_epoch && last_slot != 0 { + let _ = self + .sender + .send(ConsistencyCalcMsg::EpochChanged { new_epoch: epoch }) + .await; + } + } + + /// Checks bubble tree slot, and if the slot number is from an epoch previous to the current, + /// emits notification to the checksums calculator. + /// + /// In contrast to account notification tracking method, for bubblegum we don't + /// store tree change here, since it is stored inside of [rocks_db::transaction_client] + /// in scope of the same batch that persists Bubblegum tree change details. + pub async fn watch_bubblegum_change(&self, tree: Pubkey, slot: u64) { + let epoch = epoch_of_slot(slot); + let last_slot = storage_consistency::track_slot_counter(slot); + let last_slot_epoch = epoch_of_slot(last_slot); + if epoch < last_slot_epoch { + let _ = self + .sender + .send(ConsistencyCalcMsg::BubblegumUpdated { tree, slot }) + .await; + } else if epoch > last_slot_epoch && last_slot != 0 { + let _ = self + .sender + .send(ConsistencyCalcMsg::EpochChanged { new_epoch: epoch }) + .await; + } + } + + /// Iterates over bubblegum changes, and for each of them check if the change from an epoch + /// previous to the current. If the change is from the previous epoch, + /// it sends a notification for the checksums calculator. + /// This method is called from the fork cleaner. + pub async fn watch_remove_forked_bubblegum_changes(&self, keys: &[BubblegumChangeKey]) { + let last_slot = storage_consistency::last_tracked_slot(); + let last_slot_epoch = epoch_of_slot(last_slot); + for key in keys { + if key.epoch < last_slot_epoch { + let _ = self + .sender + .send(ConsistencyCalcMsg::BubblegumUpdated { + tree: key.tree_pubkey, + slot: key.slot, + }) + .await; + } + } + } +} + +/// An entry point for checksums calculation component. +/// Should be called from "main". +/// Accepts notifications about epoch change, or changes in specific bubblegum tree or account, +/// and schedules checksum calculation. 
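// Wiring sketch, mirroring what the ingester binaries in this patch do: create
// the notification channel, share one NftChangesTracker with the account and
// bubblegum processors, and hand the receiving side to the background checksum
// calculator. `storage` and `shutdown_rx` are assumed to come from the caller.
#[allow(dead_code)]
fn spawn_consistency_calculator_example(
    storage: Arc<Storage>,
    shutdown_rx: tokio::sync::broadcast::Receiver<()>,
) -> Arc<NftChangesTracker> {
    let (snd, rcv) = tokio::sync::mpsc::channel(NTF_CHANGES_NOTIFICATION_QUEUE_SIZE);
    let changes_tracker = Arc::new(NftChangesTracker::new(snd));
    run_bg_consistency_calculator(rcv, storage, shutdown_rx);
    changes_tracker
}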
+pub fn run_bg_consistency_calculator( + mut rcv: Receiver, + storage: Arc, + mut shutdown_signal: tokio::sync::broadcast::Receiver<()>, +) { + tokio::spawn(async move { + let bbgm_tasks: Arc>> = Arc::new(Mutex::new(BTreeSet::new())); + let acc_tasks: Arc>> = Arc::new(Mutex::new(BTreeSet::new())); + + // Taks that calculates bubblegum checksums + let _bbgm_bg = { + let storage = storage.clone(); + let bbgm_tasks = bbgm_tasks.clone(); + tokio::spawn(async move { + process_bbgm_tasks(storage, bbgm_tasks).await; + }) + }; + // Taks that calculates account NFT checksums + let _acc_bg = { + let storage = storage.clone(); + let acc_tasks = acc_tasks.clone(); + tokio::spawn(async move { + process_acc_tasks(storage, acc_tasks).await; + }) + }; + + loop { + let calc_msg = tokio::select! { + msg = rcv.recv() => msg, + _ = shutdown_signal.recv() => { + tracing::info!("Received stop signal, stopping consistency calculator"); + break; + } + }; + + match calc_msg { + Some(msg) => match msg { + ConsistencyCalcMsg::EpochChanged { new_epoch } => { + let prev_epoch = new_epoch.saturating_sub(1); + { + // We don't wait for gaps filles (sequnce_consistent.rs) to process + // slots up to the last in the epoch, just will recalculate then if needed. + let mut guard = bbgm_tasks.lock().await; + guard.insert(BbgmTask::CalcEpoch(prev_epoch)); + } + { + let mut guard = acc_tasks.lock().await; + guard.insert(AccTask::CalcEpoch(prev_epoch)); + } + } + ConsistencyCalcMsg::BubblegumUpdated { tree, slot } => { + let mut guard = bbgm_tasks.lock().await; + guard.insert(BbgmTask::CalcTree(epoch_of_slot(slot), tree)); + } + ConsistencyCalcMsg::AccUpdated { account: _, slot } => { + // It's actually more reasonable to just process all late changes + let mut guard = acc_tasks.lock().await; + guard.insert(AccTask::CalcEpoch(epoch_of_slot(slot))); + } + ConsistencyCalcMsg::StartingBackfilling => { + { + let mut guard = bbgm_tasks.lock().await; + guard.insert(BbgmTask::Suspend); + } + { + let mut guard = acc_tasks.lock().await; + guard.insert(AccTask::Suspend); + } + } + ConsistencyCalcMsg::FinishedBackfilling => { + { + let mut guard = bbgm_tasks.lock().await; + guard.insert(BbgmTask::Resume); + } + { + let mut guard = acc_tasks.lock().await; + guard.insert(AccTask::Resume); + } + } + }, + None => break, + } + } + }); +} + +/// Type for messages that are used to send commands to bubblegum epochs checksums calculator. +/// +/// Fields order matters, because we use sorted set to pass commands to the calculator. +/// We want whole epochs to be calculates before individual tree epochs from late changes. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +enum BbgmTask { + /// Suspend checksum calculation, e.g. before backfilling + Suspend, + /// Resume checksum calculation (backfilling is finished) + Resume, + /// Calculate checksums for all bubblegum trees in the given epoch + CalcEpoch(u32), + /// Calculate checksums only for the given bubblegum tree in the given epoch + CalcTree(u32, Pubkey), +} + +/// Type for messages that are used to send commands to account buckets checksums calculator. 
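// The task queues above rely on the derived `Ord`: enum variants compare in
// declaration order, so control commands (Suspend/Resume) are seen first,
// whole-epoch work next, and single-tree recalculations last. A small
// illustration of what `pop_first` yields for BbgmTask:
#[allow(dead_code)]
fn bbgm_task_ordering_example() {
    use std::collections::BTreeSet;
    let mut tasks: BTreeSet<BbgmTask> = BTreeSet::new();
    tasks.insert(BbgmTask::CalcTree(5, Pubkey::new_unique()));
    tasks.insert(BbgmTask::CalcEpoch(5));
    tasks.insert(BbgmTask::Suspend);
    assert_eq!(tasks.pop_first(), Some(BbgmTask::Suspend));
    assert_eq!(tasks.pop_first(), Some(BbgmTask::CalcEpoch(5)));
    assert!(matches!(tasks.pop_first(), Some(BbgmTask::CalcTree(5, _))));
}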
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +enum AccTask { + Suspend, + Resume, + CalcEpoch(u32), +} + +async fn process_bbgm_tasks(storage: Arc, tasks: Arc>>) { + let mut is_suspended = false; + loop { + if is_suspended { + let mut guard = tasks.lock().await; + match guard.first() { + Some(t) if *t != BbgmTask::Resume => (), + Some(t) if *t != BbgmTask::Suspend => { + guard.pop_first(); + continue; + } + _ => { + tokio::time::sleep(Duration::from_secs(10)).await; + continue; + } + } + } + let maybe_task = { + let mut guard = tasks.lock().await; + guard.pop_first() + }; + match maybe_task { + Some(BbgmTask::CalcEpoch(epoch)) => { + tokio::time::sleep(Duration::from_secs(EPOCH_CALC_LAG_SEC)).await; + tracing::info!("Calculating Bubblegum ckecksum epoch: {epoch}"); + set_currently_calculated_bbgm_epoch(epoch); + calc_bubblegum_checksums(&storage, epoch, None).await; + finish_currently_calculated_bbgm_epoch(); + tracing::info!("Finished calculating Bubblegum ckecksum epoch: {epoch}"); + } + Some(BbgmTask::CalcTree(epoch, tree)) => { + calc_bubblegum_checksums(&storage, epoch, Some(tree)).await + } + Some(BbgmTask::Suspend) => is_suspended = true, + Some(BbgmTask::Resume) => is_suspended = false, + None => tokio::time::sleep(Duration::from_secs(10)).await, + }; + } +} + +async fn process_acc_tasks(storage: Arc, tasks: Arc>>) { + let mut is_suspended = false; + loop { + if is_suspended { + let mut guard = tasks.lock().await; + match guard.first() { + Some(t) if *t != AccTask::Resume => (), + Some(t) if *t != AccTask::Suspend => { + guard.pop_first(); + continue; + } + _ => { + tokio::time::sleep(Duration::from_secs(10)).await; + continue; + } + } + } + let maybe_task = { + let mut guard = tasks.lock().await; + guard.pop_first() + }; + match maybe_task { + Some(AccTask::CalcEpoch(epoch)) => { + set_currently_calculated_acc_epoch(epoch); + calc_acc_nft_checksums(&storage, epoch).await; + finish_currently_calculated_acc_epoch(); + } + Some(AccTask::Suspend) => is_suspended = true, + Some(AccTask::Resume) => is_suspended = false, + None => tokio::time::sleep(Duration::from_secs(10)).await, + }; + } +} + +/// Bubblegum checksums calculation start point. +/// Iterates over all the bubblegum trees changes in the given epoch, and calculates epochs +/// and grand epochs checksums. +pub async fn calc_bubblegum_checksums(storage: &Storage, epoch: u32, only_tree: Option) { + // For now let's just ignore trees that are update in the process of calculation, + // anywhay we'll have a separate notification for each of tree late update. + let trees_updated_in_the_process = calc_bubblegum_epoch(storage, epoch, only_tree).await; + let invalidated_grand_epoch_trees = + calc_bubblegum_grand_epoch(storage, grand_epoch_of_epoch(epoch), only_tree).await; + if only_tree.is_none() { + tracing::info!( + "Calculated bubblegum epoch {epoch}. {} epoch and {} grand epoch trees were updated in the process", + trees_updated_in_the_process.len(), invalidated_grand_epoch_trees.len(), + ); + } +} + +/// Calculates and stores bubblegum epoch checksums for bubblegum updates +/// received during the given epoch. 
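// The per-tree epoch checksum computed below folds a solana_sdk::hash::Hasher
// over the raw RocksDB key/value bytes of every BubblegumChange in the epoch,
// in key order. Conceptually (with `changes` already restricted to one tree and
// one epoch; illustration only):
#[allow(dead_code)]
fn epoch_checksum_example(changes: &[(Vec<u8>, Vec<u8>)]) -> [u8; 32] {
    let mut hasher = Hasher::default();
    for (key_bytes, value_bytes) in changes {
        hasher.hash(key_bytes);
        hasher.hash(value_bytes);
    }
    hasher.result().to_bytes()
}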
+/// +/// ## Args: +/// * `storage` - database +/// * `target_epoch` - the number of an epoch the checksum should be calculated for +async fn calc_bubblegum_epoch( + storage: &Storage, + target_epoch: u32, + only_tree: Option, +) -> Vec { + let mut to_recalc = Vec::new(); + let mut current_tree: Option = None; + + let start_key = if let Some(tree) = only_tree { + BubblegumChangeKey::tree_epoch_start_key(tree, target_epoch) + } else { + BubblegumChangeKey::epoch_start_key(target_epoch) + }; + let mut it = storage.bubblegum_changes.iter(start_key); + let mut hasher = Hasher::default(); + + while let Some(Ok((k, v))) = it.next() { + let Ok(change_key) = BubblegumChange::decode_key(k.to_vec()) else { + continue; + }; + if change_key.epoch > target_epoch { + break; + } + if only_tree + .map(|t| t != change_key.tree_pubkey) + .unwrap_or(false) + { + break; + } + if current_tree != Some(change_key.tree_pubkey) { + if current_tree.is_some() { + // write checksum for previous tree + let epoch_key = BubblegumEpochKey::new(current_tree.unwrap(), target_epoch); + + let epoch_val = BubblegumEpoch::from(hasher.result().to_bytes()); + let _ = storage + .bubblegum_epochs + .merge(epoch_key.clone(), epoch_val) + .await; + + if let Ok(Some(storage_consistency::BUBBLEGUM_EPOCH_INVALIDATED)) = + storage.bubblegum_epochs.get_async(epoch_key).await + { + to_recalc.push(current_tree.unwrap()); + } + } + current_tree = Some(change_key.tree_pubkey); + + let new_epoch_key = BubblegumEpochKey::new(current_tree.unwrap(), target_epoch); + let _ = storage + .bubblegum_epochs + .put_async(new_epoch_key, BUBBLEGUM_EPOCH_CALCULATING) + .await; + + hasher = Hasher::default(); + } + hasher.hash(&k); + hasher.hash(&v); + } + + if let Some(current_tree) = current_tree { + let epoch_key = BubblegumEpochKey { + tree_pubkey: current_tree, + epoch_num: target_epoch, + }; + let epoch_val = BubblegumEpoch::from(hasher.result().to_bytes()); + let _ = storage + .bubblegum_epochs + .merge(epoch_key.clone(), epoch_val) + .await; + if let Ok(Some(storage_consistency::BUBBLEGUM_EPOCH_INVALIDATED)) = + storage.bubblegum_epochs.get_async(epoch_key).await + { + to_recalc.push(current_tree); + } + } + + to_recalc +} + +async fn calc_bubblegum_grand_epoch( + storage: &Storage, + target_grand_epoch: u16, + only_tree: Option, +) -> Vec { + let mut to_recalc = Vec::new(); + let mut current_tree: Option = None; + let mut contains_invalidated_epoch = false; + + let start_key = if let Some(tree) = only_tree { + BubblegumEpochKey::tree_grand_epoch_start_key(tree, target_grand_epoch) + } else { + BubblegumEpochKey::grand_epoch_start_key(target_grand_epoch) + }; + let mut it = storage.bubblegum_epochs.iter(start_key); + let mut hasher = Hasher::default(); + + while let Some(Ok((k, v))) = it.next() { + let Ok(epoch_key) = BubblegumEpoch::decode_key(k.to_vec()) else { + continue; + }; + let element_grand_epoch = grand_epoch_of_epoch(epoch_key.epoch_num); + if element_grand_epoch > target_grand_epoch { + break; + } + if only_tree + .map(|t| t != epoch_key.tree_pubkey) + .unwrap_or(false) + { + break; + } + if v.as_ref() == storage_consistency::BUBBLEGUM_EPOCH_INVALIDATED_BYTES.as_slice() { + contains_invalidated_epoch = true; + let new_grand_epoch_key = + BubblegumGrandEpochKey::new(current_tree.unwrap(), target_grand_epoch); + let _ = storage + .bubblegum_grand_epochs + .put_async(new_grand_epoch_key, BUBBLEGUM_GRAND_EPOCH_INVALIDATED) + .await; + } + if current_tree != Some(epoch_key.tree_pubkey) { + if current_tree.is_some() { + if 
!contains_invalidated_epoch { + // write checksum for previous tree + let grand_epoch_key = + BubblegumGrandEpochKey::new(current_tree.unwrap(), target_grand_epoch); + let grand_epoch_val = BubblegumGrandEpoch::from(hasher.result().to_bytes()); + let _ = storage + .bubblegum_grand_epochs + .merge(grand_epoch_key.clone(), grand_epoch_val) + .await; + + if let Ok(Some(storage_consistency::BUBBLEGUM_GRAND_EPOCH_INVALIDATED)) = + storage + .bubblegum_grand_epochs + .get_async(grand_epoch_key) + .await + { + to_recalc.push(current_tree.unwrap()); + } + } else { + to_recalc.push(current_tree.unwrap()); + } + } + current_tree = Some(epoch_key.tree_pubkey); + contains_invalidated_epoch = false; + + let new_grand_epoch_key = + BubblegumGrandEpochKey::new(current_tree.unwrap(), target_grand_epoch); + let _ = storage + .bubblegum_grand_epochs + .put_async(new_grand_epoch_key, BUBBLEGUM_GRAND_EPOCH_CALCULATING) + .await; + + hasher = Hasher::default(); + } else if contains_invalidated_epoch { + continue; + } + hasher.hash(&k); + hasher.hash(&v); + } + + if let Some(current_tree) = current_tree { + let grand_epoch_key = BubblegumGrandEpochKey { + tree_pubkey: current_tree, + grand_epoch_num: target_grand_epoch, + }; + let grand_epoch_val = BubblegumGrandEpoch::from(hasher.result().to_bytes()); + let _ = storage + .bubblegum_grand_epochs + .merge(grand_epoch_key.clone(), grand_epoch_val) + .await; + if let Ok(Some(storage_consistency::BUBBLEGUM_GRAND_EPOCH_INVALIDATED)) = storage + .bubblegum_grand_epochs + .get_async(grand_epoch_key) + .await + { + to_recalc.push(current_tree); + } + } + + to_recalc +} + +pub async fn calc_acc_nft_checksums(storage: &Storage, epoch: u32) { + match calc_acc_latest_state(storage, epoch).await { + Ok((invalidated_buckets, invalidated_grand_buckets)) => { + calc_acc_buckets(storage, invalidated_buckets.iter()).await; + calc_acc_grand_buckets(storage, invalidated_grand_buckets.iter()).await; + } + Err(e) => tracing::warn!("Error calculating accounts checksum: {e}"), + }; +} + +async fn calc_acc_latest_state( + storage: &Storage, + target_epoch: u32, +) -> anyhow::Result<(HashSet, HashSet)> { + let mut it = storage.acc_nft_changes.iter_start(); + let mut invalidated_buckets: HashSet = HashSet::new(); + let mut invalidated_grand_buckets: HashSet = HashSet::new(); + + let Some(first_record) = it.next() else { + return Ok((HashSet::new(), HashSet::new())); + }; + let mut prev_change = AccountNftChange::decode_key(first_record?.0.to_vec())?; + let mut changes_to_delete = Vec::new(); + + while let Some(Ok((k, _v))) = it.next() { + changes_to_delete.push(prev_change.clone()); + let next_change = AccountNftChange::decode_key(k.to_vec())?; + if next_change.epoch > target_epoch { + break; + } + + if next_change.account_pubkey == prev_change.account_pubkey + && next_change.epoch <= target_epoch + { + if next_change.slot > prev_change.slot + || next_change.slot == prev_change.slot + && next_change.write_version > prev_change.write_version + { + prev_change = next_change.clone(); + } + } else { + update_acc_if_needed( + storage, + &prev_change, + &mut invalidated_buckets, + &mut invalidated_grand_buckets, + ) + .await; + + let _ = storage + .acc_nft_changes + .delete_batch(changes_to_delete) + .await; + changes_to_delete = Vec::new(); + prev_change = next_change.clone(); + } + } + update_acc_if_needed( + storage, + &prev_change, + &mut invalidated_buckets, + &mut invalidated_grand_buckets, + ) + .await; + let _ = storage + .acc_nft_changes + .delete_batch(changes_to_delete) + .await; + 
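// The "does this change win" rule applied by update_acc_if_needed below,
// extracted for illustration: the stored account state is replaced only when
// the payload hash differs and the change is strictly newer by
// (slot, write_version).
#[allow(dead_code)]
fn change_wins(new: (u64, u64, u64), in_db: (u64, u64, u64)) -> bool {
    let (new_slot, new_write_version, new_data_hash) = new;
    let (db_slot, db_write_version, db_data_hash) = in_db;
    new_data_hash != db_data_hash
        && (new_slot, new_write_version) > (db_slot, db_write_version)
}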
+    Ok((invalidated_buckets, invalidated_grand_buckets))
+}
+
+async fn update_acc_if_needed(
+    storage: &Storage,
+    change: &AccountNftChangeKey,
+    invalidated_buckets: &mut HashSet,
+    invalidated_grand_buckets: &mut HashSet,
+) {
+    let acc_key = AccountNftKey::new(change.account_pubkey);
+
+    let need_to_update = storage
+        .acc_nft_last
+        .get_async(acc_key.clone())
+        .await
+        .ok()
+        .flatten()
+        .map(|in_db| {
+            change.data_hash != in_db.last_data_hash
+                && (change.slot > in_db.last_slot
+                    || change.slot == in_db.last_slot
+                        && change.write_version > in_db.last_write_version)
+        })
+        .unwrap_or(true);
+
+    if need_to_update {
+        let _ = storage
+            .acc_nft_last
+            .put_async(
+                acc_key,
+                AccountNft::new(change.slot, change.write_version, change.data_hash),
+            )
+            .await;
+
+        let bucket = bucket_for_acc(change.account_pubkey);
+        let grand_bucket = grand_bucket_for_bucket(bucket);
+        if !invalidated_grand_buckets.contains(&grand_bucket) {
+            let _ = storage
+                .acc_nft_grand_buckets
+                .put_async(
+                    AccountNftGrandBucketKey::new(grand_bucket),
+                    ACC_GRAND_BUCKET_INVALIDATE,
+                )
+                .await;
+            invalidated_grand_buckets.insert(grand_bucket);
+        }
+
+        if !invalidated_buckets.contains(&bucket) {
+            let _ = storage
+                .acc_nft_buckets
+                .put_async(AccountNftBucketKey::new(bucket), ACC_BUCKET_INVALIDATE)
+                .await;
+            invalidated_buckets.insert(bucket);
+        }
+    }
+}
+
+async fn calc_acc_buckets<'a>(storage: &Storage, buckets: impl Iterator) {
+    for bucket in buckets {
+        let mut it = storage
+            .acc_nft_last
+            .iter(AccountNftKey::bucket_start_key(*bucket));
+        let mut hasher = Hasher::default();
+        while let Some(Ok((k, v))) = it.next() {
+            if AccountNftKey::extract_bucket(&k) > *bucket {
+                break;
+            }
+            hasher.hash(&k);
+            hasher.hash(&v);
+        }
+        // There is no need for a merge operation that checks whether the previous state was
+        // Calculating, since we'll immediately detect a late update by finding a new change record.
+        let _ = storage
+            .acc_nft_buckets
+            .put_async(
+                AccountNftBucketKey::new(*bucket),
+                AccountNftBucket::new(hasher.result().to_bytes()),
+            )
+            .await;
+    }
+}
+
+async fn calc_acc_grand_buckets<'a>(
+    storage: &Storage,
+    grand_buckets: impl Iterator,
+) {
+    for grand_bucket in grand_buckets {
+        let mut it = storage
+            .acc_nft_buckets
+            .iter(AccountNftBucketKey::grand_bucket_start_key(*grand_bucket));
+
+        let mut hasher = Hasher::default();
+        while let Some(Ok((k, v))) = it.next() {
+            let is_for_next_grand_bucket = AccountNftBucket::decode_key(k.to_vec())
+                .map(|bucket_key| grand_bucket_for_bucket(bucket_key.bucket) > *grand_bucket)
+                .unwrap_or(false);
+            if is_for_next_grand_bucket {
+                break;
+            }
+            hasher.hash(&k);
+            hasher.hash(&v);
+        }
+        let _ = storage
+            .acc_nft_grand_buckets
+            .put_async(
+                AccountNftGrandBucketKey::new(*grand_bucket),
+                AccountNftGrandBucket::new(hasher.result().to_bytes()),
+            )
+            .await;
+    }
+}
+
+/// Calculates the hash of Solana account data.
+/// This is used for account NFTs to solve the duplicates problem
+/// caused by Solana forks and by fetching the same data from multiple
+/// different sources.
+pub fn calc_solana_account_data_hash(data: &[u8]) -> u64 { + xxhash_rust::xxh3::xxh3_64(data) +} diff --git a/nft_ingester/src/fork_cleaner.rs b/nft_ingester/src/fork_cleaner.rs index f8d39c0ff..87f874fa8 100644 --- a/nft_ingester/src/fork_cleaner.rs +++ b/nft_ingester/src/fork_cleaner.rs @@ -1,6 +1,9 @@ +use crate::consistency_calculator::NftChangesTracker; use entities::models::ForkedItem; use interface::fork_cleaner::{CompressedTreeChangesManager, ForkChecker}; use metrics_utils::ForkCleanerMetricsConfig; +use rocks_db::storage_consistency::BubblegumChangeKey; +use rocks_db::storage_consistency::DataConsistencyStorage; use rocks_db::Storage; use solana_sdk::pubkey::Pubkey; use solana_sdk::signature::Signature; @@ -46,6 +49,8 @@ where { cl_items_manager: Arc, fork_checker: Arc, + data_consistency_storage: Arc, + nft_changes_tracker: Option>, metrics: Arc, } @@ -57,11 +62,15 @@ where pub fn new( cl_items_manager: Arc, fork_checker: Arc, + data_consistency_storage: Arc, + nft_changes_tracker: Option>, metrics: Arc, ) -> Self { Self { cl_items_manager, fork_checker, + data_consistency_storage, + nft_changes_tracker, metrics, } } @@ -75,6 +84,7 @@ where let mut forked_slots = 0; let mut delete_items = Vec::new(); + let mut changes_to_delete = Vec::new(); // from this column data will be dropped by slot // if we have any update from forked slot we have to delete it @@ -162,7 +172,7 @@ where // dropping only sequence 5 would result in an incorrect update during backfill. // therefore, we need to drop sequence 4 as well. Sequence 5 must be dropped because // it contains a different tree update in the main branch - for sequences in signature.slot_sequences.values() { + for (slot, sequences) in signature.slot_sequences.iter() { for seq in sequences { delete_items.push(ForkedItem { tree: signature.tree, @@ -171,6 +181,11 @@ where // because deletion will happen by tree and seq values node_idx: 0, }); + changes_to_delete.push(BubblegumChangeKey::new( + signature.tree, + *slot, + *seq, + )); } } } @@ -179,11 +194,27 @@ where } if delete_items.len() >= CI_ITEMS_DELETE_BATCH_SIZE { + self.data_consistency_storage + .drop_forked_bubblegum_changes(&changes_to_delete) + .await; + if let Some(changes_tracker) = self.nft_changes_tracker.as_ref() { + changes_tracker + .watch_remove_forked_bubblegum_changes(&changes_to_delete) + .await; + } self.delete_tree_seq_idx(&mut delete_items).await; } } if !delete_items.is_empty() { + self.data_consistency_storage + .drop_forked_bubblegum_changes(&changes_to_delete) + .await; + if let Some(changes_tracker) = self.nft_changes_tracker.as_ref() { + changes_tracker + .watch_remove_forked_bubblegum_changes(&changes_to_delete) + .await; + } self.delete_tree_seq_idx(&mut delete_items).await; } diff --git a/nft_ingester/src/lib.rs b/nft_ingester/src/lib.rs index fe5517fc4..958679f43 100644 --- a/nft_ingester/src/lib.rs +++ b/nft_ingester/src/lib.rs @@ -4,6 +4,8 @@ pub mod backfiller; pub mod batch_mint; pub mod buffer; pub mod config; +pub mod consistency_bg_job; +pub mod consistency_calculator; pub mod error; pub mod flatbuffer_mapper; pub mod fork_cleaner; diff --git a/nft_ingester/src/message_parser.rs b/nft_ingester/src/message_parser.rs index 8659ed757..4c96efff4 100644 --- a/nft_ingester/src/message_parser.rs +++ b/nft_ingester/src/message_parser.rs @@ -1,3 +1,4 @@ +use crate::consistency_calculator::calc_solana_account_data_hash; use crate::error::IngesterError; use crate::error::IngesterError::MissingFlatbuffersFieldError; use 
crate::inscription_raw_parsing::ParsedInscription; @@ -193,6 +194,7 @@ impl MessageParser { slot_updated: account_update.slot as i64, amount: ta.amount as i64, write_version: account_update.write_version, + data_hash: calc_solana_account_data_hash(&account_update.data), }) } TokenProgramAccount::Mint(m) => { @@ -206,6 +208,7 @@ impl MessageParser { token_program: account_update.owner, extensions: None, write_version: account_update.write_version, + data_hash: calc_solana_account_data_hash(&account_update.data), })) } } @@ -244,6 +247,7 @@ impl MessageParser { slot_updated: account_update.slot as i64, amount: ta.account.amount as i64, write_version: account_update.write_version, + data_hash: calc_solana_account_data_hash(&account_update.data), })) } TokenExtensionsProgramAccount::MintAccount(m) => { @@ -257,6 +261,7 @@ impl MessageParser { token_program: account_update.owner, extensions: Some(m.extensions.clone()), write_version: account_update.write_version, + data_hash: calc_solana_account_data_hash(&account_update.data), }))) } _ => None, @@ -293,6 +298,9 @@ impl MessageParser { entities::models::BurntMetadataSlot { slot_updated: account_info.slot, write_version: account_info.write_version, + data_hash: calc_solana_account_data_hash( + &account_info.data, + ), }, )) } @@ -306,6 +314,9 @@ impl MessageParser { executable: account_info.executable, rent_epoch: account_info.rent_epoch, metadata_owner: Some(account_info.owner.to_string()), + data_hash: calc_solana_account_data_hash( + &account_info.data, + ), }, )) } @@ -323,6 +334,9 @@ impl MessageParser { ), write_version: account_info.write_version, slot_updated: account_info.slot, + data_hash: calc_solana_account_data_hash( + &account_info.data, + ), }, )) } @@ -340,6 +354,9 @@ impl MessageParser { ), write_version: account_info.write_version, slot_updated: account_info.slot, + data_hash: calc_solana_account_data_hash( + &account_info.data, + ), }, )) } @@ -356,6 +373,9 @@ impl MessageParser { ), write_version: account_info.write_version, slot_updated: account_info.slot, + data_hash: calc_solana_account_data_hash( + &account_info.data, + ), }, )) } @@ -415,6 +435,7 @@ impl MessageParser { inscription, write_version: account_info.write_version, slot_updated: account_info.slot, + data_hash: calc_solana_account_data_hash(&account_info.data), }, )) } @@ -424,6 +445,7 @@ impl MessageParser { inscription_data, write_version: account_info.write_version, slot_updated: account_info.slot, + data_hash: calc_solana_account_data_hash(&account_info.data), }, )) } @@ -448,6 +470,7 @@ impl MessageParser { entities::models::BurntMetadataSlot { slot_updated: account_update.slot, write_version: account_update.write_version, + data_hash: calc_solana_account_data_hash(&account_update.data), }, )), MplCoreAccountData::Asset(_) | MplCoreAccountData::Collection(_) => response.push( @@ -458,6 +481,7 @@ impl MessageParser { lamports: account_update.lamports, executable: account_update.executable, rent_epoch: account_update.rent_epoch, + data_hash: calc_solana_account_data_hash(&account_update.data), }), ), _ => debug!("Not implemented"), @@ -473,6 +497,7 @@ impl MessageParser { write_version: account_update.write_version, lamports: account_update.lamports, rent_epoch: account_update.rent_epoch, + data_hash: calc_solana_account_data_hash(&account_update.data), }, )), _ => {} diff --git a/nft_ingester/src/processors/accounts_processor.rs b/nft_ingester/src/processors/accounts_processor.rs index e41279e4f..5efaee618 100644 --- 
a/nft_ingester/src/processors/accounts_processor.rs +++ b/nft_ingester/src/processors/accounts_processor.rs @@ -1,3 +1,4 @@ +use crate::consistency_calculator::NftChangesTracker; use crate::{error::IngesterError, redis_receiver::get_timestamp_from_id}; use chrono::Utc; @@ -42,6 +43,7 @@ pub async fn run_accounts_processor>, postgre_client: Arc, rpc_client: Arc, + nft_changes_tracker: Arc, join_set: Arc>>>, ) { mutexed_tasks.lock().await.spawn(async move { @@ -59,7 +61,7 @@ pub async fn run_accounts_processor AccountsProcessor { rx: Receiver<()>, storage: Arc, accounts_batch_size: usize, + nft_changes_tracker: Arc, ) { let mut batch_storage = BatchSaveStorage::new(storage, accounts_batch_size, self.metrics.clone()); @@ -141,7 +144,7 @@ impl AccountsProcessor { continue; } }; - self.process_account(&mut batch_storage, unprocessed_accounts, &mut core_fees, &mut ack_ids, &mut interval, &mut batch_fill_instant).await; + self.process_account(&mut batch_storage, unprocessed_accounts, &mut core_fees, &mut ack_ids, &mut interval, &mut batch_fill_instant, &nft_changes_tracker).await; }, _ = interval.tick() => { self.flush(&mut batch_storage, &mut ack_ids, &mut interval, &mut batch_fill_instant); @@ -168,6 +171,7 @@ impl AccountsProcessor { ack_ids: &mut Vec, interval: &mut tokio::time::Interval, batch_fill_instant: &mut Instant, + nft_changes_tracker: &NftChangesTracker, ) { for unprocessed_account in unprocessed_accounts { let processing_result = match &unprocessed_account.account { @@ -235,6 +239,19 @@ impl AccountsProcessor { error!("Processing account {}: {}", unprocessed_account.key, err); continue; } + { + let (account_pubkey, slot, write_version, data_hash) = + unprocessed_account.solana_change_info(); + nft_changes_tracker + .track_account_change( + batch_storage, + account_pubkey, + slot, + write_version, + data_hash, + ) + .await; + } self.metrics .inc_accounts(unprocessed_account.account.into()); diff --git a/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs b/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs index 33aa631f5..380d5c2c0 100644 --- a/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs +++ b/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs @@ -1,3 +1,4 @@ +use crate::consistency_calculator::NftChangesTracker; use crate::error::IngesterError; use crate::flatbuffer_mapper::FlatbufferMapper; use crate::plerkle; @@ -60,6 +61,7 @@ pub struct BubblegumTxProcessor { pub transaction_parser: Arc, pub instruction_parser: Arc, pub rocks_client: Arc, + pub nft_change_tracker: Option>, pub json_tasks: Arc>>, pub metrics: Arc, @@ -70,11 +72,13 @@ impl BubblegumTxProcessor { rocks_client: Arc, metrics: Arc, json_tasks: Arc>>, + nft_change_tracker: Option>, ) -> Self { BubblegumTxProcessor { transaction_parser: Arc::new(FlatbufferMapper {}), instruction_parser: Arc::new(BubblegumParser {}), rocks_client, + nft_change_tracker, json_tasks, metrics, } @@ -112,6 +116,9 @@ impl BubblegumTxProcessor { .await .map_err(|e| IngesterError::DatabaseError(e.to_string())); + self.calc_checksums_if_needed(&result.instruction_results) + .await; + result_to_metrics(self.metrics.clone(), &res, "process_transaction"); self.metrics.set_latency( "process_transaction", @@ -1169,6 +1176,25 @@ impl BubblegumTxProcessor { Ok(()) } + + /// Checks if the given instruction is a late instruction, belongs to a previous epoch, + /// and emits notification a notification, that will force the checksum calculator 
component + /// to recalculate epoch checksum. + /// + /// Note: this function only sends the notification, the saving of bubblgum changes + /// happens in [Storage::store_instruction_result_with_batch] by calling + /// [Storage::track_tree_change_with_batch] + async fn calc_checksums_if_needed(&self, instructions: &[InstructionResult]) { + if let Some(nft_change_tracker) = self.nft_change_tracker.as_ref() { + for ix in instructions { + if let Some(tree_update) = ix.tree_update.as_ref() { + nft_change_tracker + .watch_bubblegum_change(tree_update.tree, tree_update.slot) + .await; + } + } + } + } } fn use_method_from_mpl_bubblegum_state( diff --git a/nft_ingester/tests/api_tests.rs b/nft_ingester/tests/api_tests.rs index 9b324d625..b32d557b7 100644 --- a/nft_ingester/tests/api_tests.rs +++ b/nft_ingester/tests/api_tests.rs @@ -821,6 +821,7 @@ mod tests { slot_updated: 1, amount: 1, write_version: 1, + data_hash: 0, }; let mint_acc = Mint { @@ -833,6 +834,7 @@ mod tests { token_program: Default::default(), extensions: None, write_version: 1, + data_hash: 0, }; let metadata = MetadataInfo { @@ -860,6 +862,7 @@ mod tests { executable: false, metadata_owner: None, rent_epoch: 0, + data_hash: 0, }; let offchain_data = OffChainData { url: "https://ping-pong".to_string(), @@ -912,6 +915,7 @@ mod tests { token_program: Default::default(), extensions: None, write_version: 2, + data_hash: 0, }; let mut batch_storage = BatchSaveStorage::new( @@ -1002,6 +1006,7 @@ mod tests { amount: 1, write_version: 1, extensions: None, + data_hash: 0, }; let mint_acc = Mint { @@ -1014,6 +1019,7 @@ mod tests { write_version: 1, extensions: None, token_program: Default::default(), + data_hash: 0, }; let metadata = MetadataInfo { @@ -1041,6 +1047,7 @@ mod tests { executable: false, metadata_owner: None, rent_epoch: 0, + data_hash: 0, }; metadata_info.insert(mint_key, metadata); @@ -1164,6 +1171,7 @@ mod tests { amount: 1, write_version: 1, extensions: None, + data_hash: 0, }; let mint_acc = Mint { @@ -1176,6 +1184,7 @@ mod tests { write_version: 1, extensions: None, token_program: Default::default(), + data_hash: 0, }; let metadata = MetadataInfo { @@ -1203,6 +1212,7 @@ mod tests { executable: false, metadata_owner: None, rent_epoch: 0, + data_hash: 0, }; let metadata_ofch = OffChainData { @@ -1236,6 +1246,7 @@ mod tests { &BurntMetadataSlot { slot_updated: 2, write_version: 100, + data_hash: 0, }, ) .unwrap(); @@ -1540,6 +1551,7 @@ mod tests { amount: 1050, write_version: 10, extensions: None, + data_hash: 0, }, ); let pk = Pubkey::new_unique(); @@ -1556,6 +1568,7 @@ mod tests { amount: 1050, write_version: 10, extensions: None, + data_hash: 0, }, ); } @@ -1575,6 +1588,7 @@ mod tests { amount: 0, write_version: 10, extensions: None, + data_hash: 0, }, ); let pk = Pubkey::new_unique(); @@ -1591,6 +1605,7 @@ mod tests { amount: 0, write_version: 10, extensions: None, + data_hash: 0, }, ); } @@ -1610,6 +1625,7 @@ mod tests { amount: 140, write_version: 10, extensions: None, + data_hash: 0, }, ); } @@ -1764,6 +1780,7 @@ mod tests { amount: 1050, write_version: 10, extensions: None, + data_hash: 0, }, ); let pk = Pubkey::new_unique(); @@ -1780,6 +1797,7 @@ mod tests { amount: 1050, write_version: 10, extensions: None, + data_hash: 0, }, ); let pk = Pubkey::new_unique(); @@ -1796,6 +1814,7 @@ mod tests { amount: 1050, write_version: 10, extensions: None, + data_hash: 0, }, ); } @@ -3042,6 +3061,7 @@ mod tests { slot_updated: 10, write_version: 10, extensions: None, + data_hash: 0, }; let mint2 = Mint { pubkey: 
fungible_token_mint2, @@ -3053,6 +3073,7 @@ mod tests { slot_updated: 7, write_version: 10, extensions: None, + data_hash: 0, }; let owner = generated_assets.owners[50].owner.value.unwrap(); @@ -3069,6 +3090,7 @@ mod tests { slot_updated: 10, amount: 0, write_version: 10, + data_hash: 0, }; let token_account2 = TokenAccount { pubkey: fungible_token_account2, @@ -3081,6 +3103,7 @@ mod tests { slot_updated: 10, amount: 30000, write_version: 10, + data_hash: 0, }; let mut batch_storage = BatchSaveStorage::new( env.rocks_env.storage.clone(), @@ -3424,6 +3447,7 @@ mod tests { group_member_pointer: None, token_group_member: None, }), + data_hash: 0, }; let mut batch_storage = BatchSaveStorage::new( diff --git a/nft_ingester/tests/batch_mint_test.rs b/nft_ingester/tests/batch_mint_test.rs index 6408e207b..ede3eab6a 100644 --- a/nft_ingester/tests/batch_mint_test.rs +++ b/nft_ingester/tests/batch_mint_test.rs @@ -195,6 +195,7 @@ async fn save_batch_mint_to_queue_test() { env.rocks_env.storage.clone(), Arc::new(IngesterMetricsConfig::new()), tasks.clone(), + None, ); let metadata_url = "url".to_string(); diff --git a/nft_ingester/tests/bubblegum_tests.rs b/nft_ingester/tests/bubblegum_tests.rs index a515ea296..1d0cc5805 100644 --- a/nft_ingester/tests/bubblegum_tests.rs +++ b/nft_ingester/tests/bubblegum_tests.rs @@ -91,6 +91,7 @@ mod tests { env.rocks_env.storage.clone(), Arc::new(IngesterMetricsConfig::new()), buffer.json_tasks.clone(), + None, )); let tx_ingester = Arc::new(transaction_ingester::BackfillTransactionIngester::new( @@ -219,6 +220,7 @@ mod tests { env.rocks_env.storage.clone(), Arc::new(IngesterMetricsConfig::new()), buffer.json_tasks.clone(), + None, )); let tx_ingester = Arc::new(transaction_ingester::BackfillTransactionIngester::new( diff --git a/nft_ingester/tests/clean_forks_test.rs b/nft_ingester/tests/clean_forks_test.rs index 56141d48f..0c8f2cb9f 100644 --- a/nft_ingester/tests/clean_forks_test.rs +++ b/nft_ingester/tests/clean_forks_test.rs @@ -643,6 +643,8 @@ async fn test_clean_forks() { let fork_cleaner = ForkCleaner::new( storage.clone(), storage.clone(), + storage.clone(), + None, metrics_state.fork_cleaner_metrics.clone(), ); fork_cleaner.clean_forks(rx.resubscribe()).await; @@ -972,6 +974,8 @@ async fn test_process_forked_transaction() { let fork_cleaner = ForkCleaner::new( storage.clone(), storage.clone(), + storage.clone(), + None, metrics_state.fork_cleaner_metrics.clone(), ); fork_cleaner.clean_forks(shutdown_rx.resubscribe()).await; diff --git a/nft_ingester/tests/consistency_bg_job_test.rs b/nft_ingester/tests/consistency_bg_job_test.rs new file mode 100644 index 000000000..3c84f0445 --- /dev/null +++ b/nft_ingester/tests/consistency_bg_job_test.rs @@ -0,0 +1,96 @@ +#[cfg(test)] +mod tests { + use interface::checksums_storage::*; + use nft_ingester::consistency_bg_job::compare_bbgm_with_peer; + use nft_ingester::consistency_calculator::calc_bubblegum_checksums; + use rocks_db::storage_consistency::*; + use setup::rocks::RocksTestEnvironment; + use solana_sdk::pubkey::Pubkey; + + #[tokio::test] + pub async fn test_checksum_exchange() { + let my_storage = RocksTestEnvironment::new(&[]).storage; + + let peer_storage = RocksTestEnvironment::new(&[]).storage; + + // prepare + let tree1 = Pubkey::new_unique(); + + // This change is for epoch we won't calculate in the test, + // adding it just to verify it is ignored + let k0_1 = BubblegumChangeKey::new(tree1, 111, 1); + let v0_1 = BubblegumChange { + signature: "1".to_string(), + }; + peer_storage + 
.bubblegum_changes + .put(k0_1.clone(), v0_1.clone()) + .unwrap(); + + // Adding bubblegum changes checksum is calculated of + let k1_1 = BubblegumChangeKey::new(tree1, 10111, 2); + let v1_1 = BubblegumChange { + signature: "2".to_string(), + }; + peer_storage + .bubblegum_changes + .put(k1_1.clone(), v1_1.clone()) + .unwrap(); + + let k1_2 = BubblegumChangeKey::new(tree1, 10112, 3); + let v1_2 = BubblegumChange { + signature: "3".to_string(), + }; + peer_storage + .bubblegum_changes + .put(k1_2.clone(), v1_2.clone()) + .unwrap(); + + // This will be also ignored + let k2_1 = BubblegumChangeKey::new(tree1, 20000, 4); + let v2_1 = BubblegumChange { + signature: "4".to_string(), + }; + peer_storage + .bubblegum_changes + .put(k2_1.clone(), v2_1.clone()) + .unwrap(); + + // Calculate epoch and grand epoch checksum + calc_bubblegum_checksums(&peer_storage, 0, None).await; + calc_bubblegum_checksums(&peer_storage, 1, None).await; + calc_bubblegum_checksums(&peer_storage, 2, None).await; + + let result = compare_bbgm_with_peer(0, my_storage.as_ref(), peer_storage.as_ref()).await; + + assert_eq!( + result, + vec![ + BbgmChangeRecord { + tree_pubkey: tree1, + slot: 111, + seq: 1, + signature: "1".to_string() + }, + BbgmChangeRecord { + tree_pubkey: tree1, + slot: 10111, + seq: 2, + signature: "2".to_string() + }, + BbgmChangeRecord { + tree_pubkey: tree1, + slot: 10112, + seq: 3, + signature: "3".to_string() + }, + BbgmChangeRecord { + tree_pubkey: tree1, + slot: 20000, + seq: 4, + signature: "4".to_string() + } + ] + ); + } +} diff --git a/nft_ingester/tests/consistency_calculator_test.rs b/nft_ingester/tests/consistency_calculator_test.rs new file mode 100644 index 000000000..7636195bd --- /dev/null +++ b/nft_ingester/tests/consistency_calculator_test.rs @@ -0,0 +1,253 @@ +#[cfg(test)] +mod tests { + use nft_ingester::consistency_calculator::ConsistencyCalcMsg; + use nft_ingester::consistency_calculator::{ + calc_acc_nft_checksums, calc_bubblegum_checksums, NftChangesTracker, + }; + use rocks_db::{ + column::TypedColumn, + storage_consistency::{ + AccountNft, AccountNftBucketKey, AccountNftChange, AccountNftChangeKey, AccountNftKey, + BubblegumChange, BubblegumChangeKey, BubblegumEpoch, BubblegumEpochKey, + BubblegumGrandEpochKey, Checksum, + }, + }; + use setup::rocks::RocksTestEnvironment; + use solana_sdk::{hash::Hasher, pubkey::Pubkey}; + + /// This test checks that checksum calculation for bubblegum contract + /// correctly calculates checksum for the given epoch, + /// and only for the given epoch. 
+ #[tokio::test] + async fn test_calc_epoch() { + // prepare + let tree1 = Pubkey::new_unique(); + + let storage = RocksTestEnvironment::new(&[]).storage; + + // This change is for epoch we won't calculate in the test, + // adding it just to verify it is ignored + let k0_1 = BubblegumChangeKey::new(tree1, 111, 1); + let v0_1 = BubblegumChange { + signature: "1".to_string(), + }; + storage + .bubblegum_changes + .put(k0_1.clone(), v0_1.clone()) + .unwrap(); + + // Adding bubblegum changes checksum is calculated of + let k1_1 = BubblegumChangeKey::new(tree1, 10111, 2); + let v1_1 = BubblegumChange { + signature: "2".to_string(), + }; + storage + .bubblegum_changes + .put(k1_1.clone(), v1_1.clone()) + .unwrap(); + + let k1_2 = BubblegumChangeKey::new(tree1, 10112, 3); + let v1_2 = BubblegumChange { + signature: "3".to_string(), + }; + storage + .bubblegum_changes + .put(k1_2.clone(), v1_2.clone()) + .unwrap(); + + // This will be also ignored + let k2_1 = BubblegumChangeKey::new(tree1, 20000, 4); + let v2_1 = BubblegumChange { + signature: "4".to_string(), + }; + storage + .bubblegum_changes + .put(k2_1.clone(), v2_1.clone()) + .unwrap(); + + // Calculate epoch and grand epoch checksum + calc_bubblegum_checksums(&storage, 1, None).await; + + let expected_epoch_checksum = { + let mut hasher = Hasher::default(); + hasher.hash(&BubblegumChange::encode_key(k1_1)); + hasher.hash(&bincode::serialize(&v1_1).unwrap()); + hasher.hash(&BubblegumChange::encode_key(k1_2)); + hasher.hash(&bincode::serialize(&v1_2).unwrap()); + hasher.result().to_bytes() + }; + + let epoch_key = BubblegumEpochKey::new(tree1, 1); + let epoch_val = storage + .bubblegum_epochs + .get(epoch_key.clone()) + .unwrap() + .unwrap(); + + assert_eq!(Checksum::Value(expected_epoch_checksum), epoch_val.checksum); + + let expected_grand_epoch_checksum = { + let mut hasher = Hasher::default(); + hasher.hash(&BubblegumEpoch::encode_key(epoch_key)); + hasher.hash(&bincode::serialize(&epoch_val).unwrap()); + hasher.result().to_bytes() + }; + + let grand_epoch_key = BubblegumGrandEpochKey::new(tree1, 0); + let grand_epoch_val = storage + .bubblegum_grand_epochs + .get(grand_epoch_key) + .unwrap() + .unwrap(); + + assert_eq!( + Checksum::Value(expected_grand_epoch_checksum), + grand_epoch_val.checksum + ); + } + + #[tokio::test] + async fn test_calc_acc_nft_checksums() { + let storage = RocksTestEnvironment::new(&[]).storage; + + let acc_change_val = AccountNftChange {}; + + let acc1_pubkey = make_pubkey_in_bucket(0); + let acc1_change_key1 = AccountNftChangeKey { + epoch: 0, + account_pubkey: acc1_pubkey, + slot: 11, + write_version: 1, + data_hash: 1, + }; + let acc1_change_key2 = AccountNftChangeKey { + epoch: 0, + account_pubkey: acc1_pubkey, + slot: 12, + write_version: 2, + data_hash: 2, + }; + // won't take part in calculation + let acc1_change_key3 = AccountNftChangeKey { + epoch: 1, + account_pubkey: acc1_pubkey, + slot: 10_0001, + write_version: 10, + data_hash: 3, + }; + storage + .acc_nft_changes + .put(acc1_change_key1.clone(), acc_change_val.clone()) + .unwrap(); + storage + .acc_nft_changes + .put(acc1_change_key2.clone(), acc_change_val.clone()) + .unwrap(); + storage + .acc_nft_changes + .put(acc1_change_key3.clone(), acc_change_val.clone()) + .unwrap(); + + let acc2_pubkey = make_pubkey_in_bucket(1); + let acc2_change_key1 = AccountNftChangeKey { + epoch: 0, + account_pubkey: acc2_pubkey, + slot: 21, + write_version: 21, + data_hash: 4, + }; + storage + .acc_nft_changes + .put(acc2_change_key1.clone(), 
acc_change_val.clone()) + .unwrap(); + + // SUT + calc_acc_nft_checksums(&storage, 0).await; + + // Verify account last state updated + let latest_acc1_key = AccountNftKey::new(acc1_pubkey); + let latest_acc1_val = storage + .acc_nft_last + .get(latest_acc1_key.clone()) + .unwrap() + .unwrap(); + assert_eq!(latest_acc1_val.last_slot, acc1_change_key2.slot); + assert_eq!( + latest_acc1_val.last_write_version, + acc1_change_key2.write_version + ); + + let latest_acc2_key = AccountNftKey::new(acc2_pubkey); + let latest_acc2_val = storage + .acc_nft_last + .get(latest_acc2_key.clone()) + .unwrap() + .unwrap(); + assert_eq!(latest_acc2_val.last_slot, acc2_change_key1.slot); + assert_eq!( + latest_acc2_val.last_write_version, + acc2_change_key1.write_version + ); + + // Verify buckets are updated + let bucket0_val = storage + .acc_nft_buckets + .get(AccountNftBucketKey::new(0)) + .unwrap() + .unwrap(); + let bucket1_val = storage + .acc_nft_buckets + .get(AccountNftBucketKey::new(1)) + .unwrap() + .unwrap(); + + let expected_bucket0_checksum = { + let mut hasher = Hasher::default(); + hasher.hash(&AccountNft::encode_key(latest_acc1_key.clone())); + hasher.hash(&bincode::serialize(&latest_acc1_val).unwrap()); + hasher.result().to_bytes() + }; + let expected_bucket1_checksum = { + let mut hasher = Hasher::default(); + hasher.hash(&AccountNft::encode_key(latest_acc2_key.clone())); + hasher.hash(&bincode::serialize(&latest_acc2_val).unwrap()); + hasher.result().to_bytes() + }; + assert_eq!( + bucket0_val.checksum, + Checksum::Value(expected_bucket0_checksum) + ); + assert_eq!( + bucket1_val.checksum, + Checksum::Value(expected_bucket1_checksum) + ); + } + + #[tokio::test] + async fn test_notification_on_epoch_change() { + let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); + let sut = NftChangesTracker::new(sender); + + let tree = Pubkey::new_unique(); + + sut.watch_bubblegum_change(tree, 90_001).await; + sut.watch_bubblegum_change(tree, 100_001).await; + + assert_eq!( + Ok(ConsistencyCalcMsg::EpochChanged { new_epoch: 10 }), + receiver.try_recv() + ); + assert_eq!( + Err(tokio::sync::mpsc::error::TryRecvError::Empty), + receiver.try_recv() + ); + } + + fn make_pubkey_in_bucket(bucket: u16) -> Pubkey { + let mut arr = Pubkey::new_unique().to_bytes(); + let bucket_arr = bucket.to_be_bytes(); + arr[0] = bucket_arr[0]; + arr[1] = bucket_arr[1]; + Pubkey::new_from_array(arr) + } +} diff --git a/nft_ingester/tests/decompress.rs b/nft_ingester/tests/decompress.rs index 29113b3ee..99dad910b 100644 --- a/nft_ingester/tests/decompress.rs +++ b/nft_ingester/tests/decompress.rs @@ -72,6 +72,7 @@ mod tests { env_rocks, Arc::new(IngesterMetricsConfig::new()), buffer.json_tasks.clone(), + None, )); let tx_ingester = Arc::new(transaction_ingester::BackfillTransactionIngester::new( @@ -128,6 +129,7 @@ mod tests { slot_updated: nft_created_slot, amount: 1, write_version: 1, + data_hash: 0, }; let mint_acc = Mint { @@ -142,6 +144,7 @@ mod tests { token_program: Default::default(), extensions: None, write_version: 1, + data_hash: 0, }; spl_token_accs_parser @@ -196,6 +199,7 @@ mod tests { metadata_owner: None, write_version: 1, rent_epoch: 0, + data_hash: 0, }; mplx_accs_parser diff --git a/nft_ingester/tests/dump_tests.rs b/nft_ingester/tests/dump_tests.rs index 8f0278026..11262ce2f 100644 --- a/nft_ingester/tests/dump_tests.rs +++ b/nft_ingester/tests/dump_tests.rs @@ -43,6 +43,7 @@ mod tests { slot_updated: 10, amount: 1000, write_version: 10, + data_hash: 0, }; token_accounts_processor 
.transform_and_save_token_account(&mut batch_storage, key, &token_account) diff --git a/nft_ingester/tests/process_accounts.rs b/nft_ingester/tests/process_accounts.rs index 9604be526..996ddb590 100644 --- a/nft_ingester/tests/process_accounts.rs +++ b/nft_ingester/tests/process_accounts.rs @@ -61,6 +61,7 @@ mod tests { rent_epoch: 0, executable: false, metadata_owner: None, + data_hash: 0, } } @@ -83,6 +84,7 @@ mod tests { token_program: Default::default(), extensions: None, write_version: 1, + data_hash: 0, }; let second_mint_to_save = Mint { pubkey: second_mint, @@ -94,6 +96,7 @@ mod tests { token_program: Default::default(), extensions: None, write_version: 1, + data_hash: 0, }; let first_token_account_to_save = TokenAccount { pubkey: first_token_account, @@ -106,6 +109,7 @@ mod tests { slot_updated: 1, amount: 1, write_version: 1, + data_hash: 0, }; let second_token_account_to_save = TokenAccount { pubkey: second_token_account, @@ -118,6 +122,7 @@ mod tests { slot_updated: 1, amount: 1, write_version: 1, + data_hash: 0, }; let cnt = 20; @@ -210,6 +215,7 @@ mod tests { }, write_version: 1, slot_updated: 1, + data_hash: 0, }; let second_edition_to_save = EditionMetadata { edition: TokenMetadataEdition::MasterEdition { @@ -222,6 +228,7 @@ mod tests { }, write_version: 1, slot_updated: 1, + data_hash: 0, }; let cnt = 20; @@ -361,6 +368,7 @@ mod tests { slot_updated: 1, write_version: 1, rent_epoch: 0, + data_hash: 0, }; let second_mpl_core_to_save = IndexableAssetWithAccountInfo { indexable_asset: MplCoreAccountData::Collection(IndexableAsset { @@ -403,6 +411,7 @@ mod tests { slot_updated: 1, write_version: 1, rent_epoch: 0, + data_hash: 0, }; let cnt = 20; diff --git a/rocks-db/Cargo.toml b/rocks-db/Cargo.toml index 7a9a0a32f..cecdb0b9c 100644 --- a/rocks-db/Cargo.toml +++ b/rocks-db/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +anyhow = { workspace = true } rocksdb = { workspace = true } solana-sdk = { workspace = true } bincode = { workspace = true } @@ -42,6 +43,7 @@ tempfile = { workspace = true } bubblegum-batch-sdk = { workspace = true } num-traits = { workspace = true } indicatif = { workspace = true } +lazy_static = { workspace = true } [dev-dependencies] rand = { workspace = true } diff --git a/rocks-db/src/batch_savers.rs b/rocks-db/src/batch_savers.rs index b4bf31780..1a07d0c37 100644 --- a/rocks-db/src/batch_savers.rs +++ b/rocks-db/src/batch_savers.rs @@ -1,4 +1,8 @@ use crate::asset::{AssetCollection, MetadataMintMap}; +use crate::storage_consistency::{ + AccountNftBucket, AccountNftBucketKey, AccountNftChange, AccountNftChangeKey, + AccountNftGrandBucket, AccountNftGrandBucketKey, +}; use crate::token_accounts::{TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; use crate::Result; use crate::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, Storage}; @@ -168,6 +172,29 @@ impl BatchSaveStorage { )?; Ok(()) } + pub fn put_account_change( + &mut self, + key: AccountNftChangeKey, + v: AccountNftChange, + ) -> Result<()> { + self.storage + .acc_nft_changes + .put_with_batch(&mut self.batch, key, &v) + } + pub fn put_acc_bucket(&mut self, key: AccountNftBucketKey, v: AccountNftBucket) -> Result<()> { + self.storage + .acc_nft_buckets + .put_with_batch(&mut self.batch, key, &v) + } + pub fn put_acc_grand_bucket( + &mut self, + key: AccountNftGrandBucketKey, + v: AccountNftGrandBucket, + ) -> Result<()> { + self.storage + .acc_nft_grand_buckets + 
.put_with_batch(&mut self.batch, key, &v) + } pub fn asset_updated_with_batch(&mut self, slot: u64, pubkey: Pubkey) -> Result<()> { self.storage .asset_updated_with_batch(&mut self.batch, slot, pubkey)?; diff --git a/rocks-db/src/column.rs b/rocks-db/src/column.rs index e4b154aa0..230ca1f21 100644 --- a/rocks-db/src/column.rs +++ b/rocks-db/src/column.rs @@ -279,6 +279,19 @@ where Ok(r) } + pub async fn get_async(&self, key: C::KeyType) -> Result> { + let backend = self.backend.clone(); + + let raw_val = tokio::task::spawn_blocking(move || Self::get_raw(backend, key)) + .await + .map_err(|e| StorageError::Common(e.to_string()))?; + + raw_val.and_then(|op| { + op.map(|bytes: Vec| deserialize::(&bytes).map_err(StorageError::from)) + .transpose() + }) + } + pub fn get(&self, key: C::KeyType) -> Result> { let mut result = Ok(None); @@ -434,6 +447,22 @@ where self.backend.cf_handle(C::NAME).unwrap() } + pub async fn delete_async(&self, key: C::KeyType, await_operation: bool) -> Result<()> { + let backend = self.backend.clone(); + let encoded_key = C::encode_key(key); + + let task = tokio::task::spawn_blocking(move || { + backend.delete_cf(&backend.cf_handle(C::NAME).unwrap(), encoded_key) + }); + if await_operation { + let _ = task + .await + .map_err(|e| StorageError::Common(e.to_string()))?; + } + + Ok(()) + } + pub fn delete(&self, key: C::KeyType) -> Result<()> { self.backend.delete_cf(&self.handle(), C::encode_key(key))?; Ok(()) diff --git a/rocks-db/src/lib.rs b/rocks-db/src/lib.rs index aeda79246..50bbf2147 100644 --- a/rocks-db/src/lib.rs +++ b/rocks-db/src/lib.rs @@ -43,6 +43,11 @@ use crate::token_accounts::{TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; use crate::token_prices::TokenPrice; use crate::tree_seq::{TreeSeqIdx, TreesGaps}; +use storage_consistency::{ + AccountNft, AccountNftBucket, AccountNftChange, AccountNftGrandBucket, BubblegumChange, + BubblegumEpoch, BubblegumGrandEpoch, +}; + pub mod asset; mod asset_client; pub mod asset_previews; @@ -73,6 +78,7 @@ pub mod schedule; pub mod sequence_consistent; pub mod signature_client; pub mod slots_dumper; +pub mod storage_consistency; pub mod storage_traits; pub mod token_accounts; pub mod token_prices; @@ -132,6 +138,13 @@ pub struct Storage { pub inscription_data: Column, pub leaf_signature: Column, pub spl_mints: Column, + pub bubblegum_changes: Column, + pub bubblegum_epochs: Column, + pub bubblegum_grand_epochs: Column, + pub acc_nft_changes: Column, + pub acc_nft_last: Column, + pub acc_nft_buckets: Column, + pub acc_nft_grand_buckets: Column, assets_update_last_seq: AtomicU64, join_set: Arc>>>, red_metrics: Arc, @@ -185,6 +198,13 @@ impl Storage { let inscription_data = Self::column(db.clone(), red_metrics.clone()); let leaf_signature = Self::column(db.clone(), red_metrics.clone()); let spl_mints = Self::column(db.clone(), red_metrics.clone()); + let bubblegum_changes = Self::column(db.clone(), red_metrics.clone()); + let bubblegum_epochs = Self::column(db.clone(), red_metrics.clone()); + let bubblegum_grand_epochs = Self::column(db.clone(), red_metrics.clone()); + let acc_nft_changes = Self::column(db.clone(), red_metrics.clone()); + let acc_nft_last = Self::column(db.clone(), red_metrics.clone()); + let acc_nft_buckets = Self::column(db.clone(), red_metrics.clone()); + let acc_nft_grand_buckets = Self::column(db.clone(), red_metrics.clone()); Self { asset_static_data, @@ -231,6 +251,13 @@ impl Storage { inscription_data, leaf_signature, spl_mints, + bubblegum_changes, + bubblegum_epochs, + bubblegum_grand_epochs, + 
acc_nft_changes,
+            acc_nft_last,
+            acc_nft_buckets,
+            acc_nft_grand_buckets,
         }
     }
@@ -310,6 +337,13 @@ impl Storage {
             Self::new_cf_descriptor::(migration_state),
             Self::new_cf_descriptor::(migration_state),
             Self::new_cf_descriptor::(migration_state),
+            Self::new_cf_descriptor::(migration_state),
+            Self::new_cf_descriptor::(migration_state),
+            Self::new_cf_descriptor::(migration_state),
+            Self::new_cf_descriptor::(migration_state),
+            Self::new_cf_descriptor::(migration_state),
+            Self::new_cf_descriptor::(migration_state),
+            Self::new_cf_descriptor::(migration_state),
         ]
     }
@@ -646,6 +680,18 @@ impl Storage {
                     token_accounts::merge_mints,
                 );
             }
+            BubblegumEpoch::NAME => {
+                cf_options.set_merge_operator_associative(
+                    "merge_fn_bubblegum_epoch",
+                    storage_consistency::merge_bubblgum_epoch_checksum,
+                );
+            }
+            BubblegumGrandEpoch::NAME => {
+                cf_options.set_merge_operator_associative(
+                    "merge_fn_bubblegum_grand_epoch",
+                    storage_consistency::merge_bubblgum_grand_epoch_checksum,
+                );
+            }
             _ => {}
         }
         cf_options
diff --git a/rocks-db/src/migrations/spl2022.rs b/rocks-db/src/migrations/spl2022.rs
index e99dadcaf..d45354135 100644
--- a/rocks-db/src/migrations/spl2022.rs
+++ b/rocks-db/src/migrations/spl2022.rs
@@ -22,6 +22,7 @@ pub struct TokenAccountWithoutExtentions {
     pub slot_updated: i64,
     pub amount: i64,
     pub write_version: u64,
+    pub data_hash: u64,
 }
 impl_merge_values!(TokenAccountWithoutExtentions);
@@ -39,6 +40,7 @@ impl From for TokenAccount {
             slot_updated: value.slot_updated,
             amount: value.amount,
             write_version: value.write_version,
+            data_hash: value.data_hash,
         }
     }
 }
diff --git a/rocks-db/src/storage_consistency.rs b/rocks-db/src/storage_consistency.rs
new file mode 100644
index 000000000..fa8cebbd8
--- /dev/null
+++ b/rocks-db/src/storage_consistency.rs
@@ -0,0 +1,866 @@
+//! This module contains core functionality for storing and manipulating
+//! so-called p2p consistency checking data - checksums for bubblegum
+//! and account NFT updates.
+//!
+//! The main idea is that we split the slots "timeline" into so-called epochs
+//! (each epoch is 10 000 slots) and calculate a checksum for each epoch.
+//! 10 epochs shape a grand epoch.
+//!
+//! Later, Aura instances can exchange these checksums with each other
+//! to identify whether an instance has missed a portion of changes.
+//!
+//! Bubblegum update:      (tree, slot, seq) => (signature)
+//!                             V
+//! Bubblegum epoch:       (tree, epoch) => (checksum)
+//!                             V
+//! Bubblegum grand epoch: (tree, grand epoch) => (checksum)
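+//!
+//! As a rough illustration of the slot → epoch → grand epoch arithmetic described
+//! above (the concrete numbers are examples only, expressed with the helper
+//! functions defined below in this module):
+//!
+//! ```rust,ignore
+//! // Each epoch covers 10 000 slots, each grand epoch covers 10 epochs.
+//! assert_eq!(epoch_of_slot(10_111), 1);
+//! assert_eq!(grand_epoch_of_slot(10_111), 0);
+//! assert_eq!(grand_epoch_of_epoch(25), 2);
+//! assert_eq!(first_slot_in_epoch(1), 10_000);
+//! ```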
+use async_trait::async_trait;
+use interface::checksums_storage::Chksm;
+use interface::checksums_storage::{
+    AccBucketCksm, AccChecksumServiceApi, AccGrandBucketCksm, AccLastChange, BbgmChangePos,
+    BbgmChangeRecord, BbgmChecksumServiceApi, BbgmEpochCksm, BbgmGrandEpochCksm,
+};
+use rocksdb::MergeOperands;
+use serde::{Deserialize, Serialize};
+use solana_sdk::pubkey::Pubkey;
+
+use crate::{column::TypedColumn, transaction::TreeUpdate, Storage};
+
+use std::{collections::HashSet, sync::atomic::AtomicU64, u64};
+
+static LAST_SLOT: AtomicU64 = AtomicU64::new(0);
+
+pub fn current_estimated_epoch() -> u32 {
+    epoch_of_slot(LAST_SLOT.load(std::sync::atomic::Ordering::Relaxed))
+}
+
+pub fn last_tracked_slot() -> u64 {
+    LAST_SLOT.load(std::sync::atomic::Ordering::Relaxed)
+}
+
+pub fn track_slot_counter(slot: u64) -> u64 {
+    let prev = LAST_SLOT.load(std::sync::atomic::Ordering::Relaxed);
+    if slot > prev {
+        LAST_SLOT.store(slot, std::sync::atomic::Ordering::Relaxed);
+    }
+    prev
+}
+
+pub fn epoch_of_slot(slot: u64) -> u32 {
+    (slot / 10_000) as u32
+}
+
+pub fn grand_epoch_of_slot(slot: u64) -> u16 {
+    (slot / 100_000) as u16
+}
+
+pub fn grand_epoch_of_epoch(epoch: u32) -> u16 {
+    (epoch / 10) as u16
+}
+
+pub fn first_slot_in_epoch(epoch: u32) -> u64 {
+    epoch as u64 * 10_000
+}
+
+pub fn first_epoch_in_grand_epoch(grand_epoch: u16) -> u32 {
+    grand_epoch as u32 * 10
+}
+
+pub fn slots_to_next_epoch(slot: u64) -> u64 {
+    slot % 100_000
+}
+
+pub fn calc_exchange_slot_for_epoch(epoch: u32) -> u64 {
+    (epoch + 1) as u64 * 10_000 * 1500
+}
+
+pub fn slots_to_time(slots: u64) -> std::time::Duration {
+    std::time::Duration::from_millis(slots * 400)
+}
+
+/// We use the 2 leading bytes of an account pubkey as a bucket number,
+/// which means we have 65536 buckets.
+/// This allows records in the "account NFT changes" column family
+/// to be "grouped" by the bucket number.
+pub fn bucket_for_acc(account_pubkey: Pubkey) -> u16 {
+    let bytes = account_pubkey.to_bytes();
+    let mut b = <[u8; 2]>::default();
+    b.clone_from_slice(&bytes[0..2]);
+
+    u16::from_be_bytes(b)
+}
+
+/// We use the first 10 bits of an account pubkey as a grand bucket number,
+/// i.e. we have 1024 grand buckets.
+pub fn grand_bucket_for_bucket(bucket: u16) -> u16 {
+    bucket >> 6
+}
+
+pub fn grand_bucket_for_acc(account_pubkey: Pubkey) -> u16 {
+    grand_bucket_for_bucket(bucket_for_acc(account_pubkey))
+}
+
+pub const BUBBLEGUM_EPOCH_INVALIDATED: BubblegumEpoch = BubblegumEpoch {
+    checksum: Checksum::Invalidated,
+};
+
+pub const BUBBLEGUM_EPOCH_CALCULATING: BubblegumEpoch = BubblegumEpoch {
+    checksum: Checksum::Calculating,
+};
+
+pub const BUBBLEGUM_GRAND_EPOCH_INVALIDATED: BubblegumGrandEpoch = BubblegumGrandEpoch {
+    checksum: Checksum::Invalidated,
+};
+
+pub const BUBBLEGUM_GRAND_EPOCH_CALCULATING: BubblegumGrandEpoch = BubblegumGrandEpoch {
+    checksum: Checksum::Calculating,
+};
+
+pub const ACC_BUCKET_INVALIDATE: AccountNftBucket = AccountNftBucket {
+    checksum: Checksum::Invalidated,
+};
+
+pub const ACC_GRAND_BUCKET_INVALIDATE: AccountNftGrandBucket = AccountNftGrandBucket {
+    checksum: Checksum::Invalidated,
+};
+
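+// An illustrative sanity check of the bucket math above (example values only):
+// an account pubkey whose first two bytes are [0x01, 0x80] falls into bucket
+// u16::from_be_bytes([0x01, 0x80]) = 0x0180 = 384, and grand_bucket_for_bucket(384)
+// returns 384 >> 6 = 6, i.e. buckets 384..=447 all share grand bucket 6.
+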
+/// Checksum value for a bubblegum epoch / account bucket.
+/// Since the arrival of Solana data is asynchronous and has no strict order guarantees,
+/// we can easily fall into a situation when we are in the process of calculating
+/// a checksum for an epoch, and a new update arrives before the checksum has been written.
+/// ```img
+///   epoch end                    a change for previous epoch arrived
+///      |                                        |
+///      V                                        V
+/// ---------------------------------------------------> timeline
+///      ^      \____________   ____________/    ^
+///      |                   \ /                 |
+///    read               calculating          write
+///    all                checksum              epoch
+///    changes                                  checksum
+/// ```
+/// To prevent such checksum inconsistency, right before the calculation
+/// we mark the epoch checksum to be calculated as "Calculating",
+/// and after the checksum is calculated, we write the value only if
+/// the previous value is still in the "Calculating" state.
+///
+/// At the same time, when the Bubblegum updates processor receives
+/// a new update whose slot belongs to a previous epoch period,
+/// it not only writes the bubblegum change, but also updates the
+/// corresponding epoch state to "Invalidated", which prevents
+/// a checksum that might be in the process of calculation
+/// from being written.
+#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)]
+pub enum Checksum {
+    Invalidated,
+    Calculating,
+    Value(Chksm),
+}
+
+impl Checksum {
+    pub fn ok(&self) -> Option {
+        match self {
+            Checksum::Value(chksm) => Some(chksm.to_owned()),
+            _ => None,
+        }
+    }
+}
+
+/// Key for storing a change detected for the bubblegum contract.
+/// The value is supposed to be `solana_sdk::signature::Signature`
+#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)]
+pub struct BubblegumChangeKey {
+    pub epoch: u32,
+    pub tree_pubkey: Pubkey,
+    pub slot: u64,
+    pub seq: u64,
+}
+
+impl BubblegumChangeKey {
+    pub fn new(tree_pubkey: Pubkey, slot: u64, seq: u64) -> BubblegumChangeKey {
+        BubblegumChangeKey {
+            epoch: epoch_of_slot(slot),
+            tree_pubkey,
+            slot,
+            seq,
+        }
+    }
+    pub fn epoch_start_key(epoch: u32) -> BubblegumChangeKey {
+        BubblegumChangeKey {
+            epoch,
+            tree_pubkey: Pubkey::from([0u8; 32]),
+            slot: first_slot_in_epoch(epoch),
+            seq: 0,
+        }
+    }
+    pub fn tree_epoch_start_key(tree_pubkey: Pubkey, epoch: u32) -> BubblegumChangeKey {
+        BubblegumChangeKey {
+            epoch,
+            tree_pubkey,
+            slot: first_slot_in_epoch(epoch),
+            seq: 0,
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)]
+pub struct BubblegumChange {
+    /// Original signature can be restored as
+    /// `solana_sdk::signature::Signature::from_str(...)`
+    pub signature: String,
+}
+
+impl TypedColumn for BubblegumChange {
+    type KeyType = BubblegumChangeKey;
+    type ValueType = Self;
+    const NAME: &'static str = "BUBBLEGUM_CHANGES";
+
+    fn encode_key(key: Self::KeyType) -> Vec {
+        // fields are encoded in the order they are defined
+        bincode::serialize(&key).unwrap()
+    }
+
+    fn decode_key(bytes: Vec) -> crate::Result {
+        let key = bincode::deserialize(&bytes)?;
+        Ok(key)
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)]
+pub struct BubblegumEpochKey {
+    pub tree_pubkey: Pubkey,
+    pub epoch_num: u32,
+}
+
+impl BubblegumEpochKey {
+    pub fn new(tree_pubkey: Pubkey, epoch_num: u32) -> BubblegumEpochKey {
+        BubblegumEpochKey {
+            tree_pubkey,
+            epoch_num,
+        }
+    }
+    pub fn grand_epoch_start_key(grand_epoch: u16) -> BubblegumEpochKey {
+        BubblegumEpochKey {
+            tree_pubkey: Pubkey::from([0u8; 32]),
+            epoch_num: first_epoch_in_grand_epoch(grand_epoch),
+        }
+    }
+    pub fn tree_grand_epoch_start_key(tree_pubkey: Pubkey, grand_epoch: u16) -> BubblegumEpochKey {
+        BubblegumEpochKey {
+            tree_pubkey,
+            epoch_num: first_epoch_in_grand_epoch(grand_epoch),
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)]
+pub struct BubblegumEpoch {
+    pub
checksum: Checksum, +} + +impl From for BubblegumEpoch { + fn from(value: Chksm) -> Self { + BubblegumEpoch { + checksum: Checksum::Value(value), + } + } +} + +impl TypedColumn for BubblegumEpoch { + type KeyType = BubblegumEpochKey; + type ValueType = Self; + const NAME: &'static str = "BUBBLEGUM_EPOCHS"; + + fn encode_key(key: Self::KeyType) -> Vec { + bincode::serialize(&key).unwrap() + } + + fn decode_key(bytes: Vec) -> crate::Result { + let key = bincode::deserialize(&bytes)?; + Ok(key) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct BubblegumGrandEpochKey { + pub grand_epoch_num: u16, + pub tree_pubkey: Pubkey, +} + +impl BubblegumGrandEpochKey { + pub fn new(tree_pubkey: Pubkey, grand_epoch_num: u16) -> BubblegumGrandEpochKey { + BubblegumGrandEpochKey { + tree_pubkey, + grand_epoch_num, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct BubblegumGrandEpoch { + pub checksum: Checksum, +} + +impl From for BubblegumGrandEpoch { + fn from(value: Chksm) -> Self { + BubblegumGrandEpoch { + checksum: Checksum::Value(value), + } + } +} + +impl TypedColumn for BubblegumGrandEpoch { + type KeyType = BubblegumGrandEpochKey; + type ValueType = Self; + const NAME: &'static str = "BUBBLEGUM_GRAND_EPOCHS"; + + fn encode_key(key: Self::KeyType) -> Vec { + bincode::serialize(&key).unwrap() + } + + fn decode_key(bytes: Vec) -> crate::Result { + let key = bincode::deserialize(&bytes)?; + Ok(key) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct AccountNftChangeKey { + pub epoch: u32, + pub account_pubkey: Pubkey, + pub slot: u64, + pub write_version: u64, + pub data_hash: u64, +} + +impl AccountNftChangeKey { + pub fn new( + account_pubkey: Pubkey, + slot: u64, + write_version: u64, + data_hash: u64, + ) -> AccountNftChangeKey { + let epoch = epoch_of_slot(slot); + AccountNftChangeKey { + epoch, + account_pubkey, + slot, + write_version, + data_hash, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct AccountNftChange {} + +impl TypedColumn for AccountNftChange { + type KeyType = AccountNftChangeKey; + type ValueType = Self; + const NAME: &'static str = "ACC_NFT_CHANGES"; + + fn encode_key(key: Self::KeyType) -> Vec { + bincode::serialize(&key).unwrap() + } + + fn decode_key(bytes: Vec) -> crate::Result { + let key = bincode::deserialize(&bytes)?; + Ok(key) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct AccountNftKey { + pub account_pubkey: Pubkey, +} + +impl AccountNftKey { + pub fn new(account_pubkey: Pubkey) -> AccountNftKey { + AccountNftKey { account_pubkey } + } + + /// bincode which is used to encode the AccountNftKey, + /// preserves same bytes as in unencoded version of account Pubkey. 
+ pub fn extract_bucket(key_raw_bytes: &[u8]) -> u16 { + let mut arr = [0u8; 2]; + arr[0] = key_raw_bytes[0]; + arr[1] = key_raw_bytes[1]; + u16::from_be_bytes(arr) + } + + pub fn bucket_start_key(bucket: u16) -> AccountNftKey { + let leading_bytes = bucket.to_be_bytes(); + let mut pk = [0u8; 32]; + pk[0] = leading_bytes[0]; + pk[1] = leading_bytes[1]; + AccountNftKey { + account_pubkey: Pubkey::new_from_array(pk), + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct AccountNft { + pub last_slot: u64, + pub last_write_version: u64, + pub last_data_hash: u64, +} + +impl AccountNft { + pub fn new(last_slot: u64, last_write_version: u64, last_data_hash: u64) -> AccountNft { + AccountNft { + last_slot, + last_write_version, + last_data_hash, + } + } +} + +impl TypedColumn for AccountNft { + type KeyType = AccountNftKey; + + type ValueType = AccountNft; + + const NAME: &'static str = "ACC_NFT_LAST"; + + fn encode_key(key: Self::KeyType) -> Vec { + bincode::serialize(&key).unwrap() + } + + fn decode_key(bytes: Vec) -> crate::Result { + let key = bincode::deserialize(&bytes)?; + Ok(key) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct AccountNftBucketKey { + pub bucket: u16, +} + +impl AccountNftBucketKey { + pub fn new(bucket: u16) -> AccountNftBucketKey { + AccountNftBucketKey { bucket } + } + pub fn grand_bucket_start_key(grand_bucket: u16) -> AccountNftBucketKey { + AccountNftBucketKey { + bucket: grand_bucket << 6, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct AccountNftBucket { + pub checksum: Checksum, +} + +impl AccountNftBucket { + pub fn new(checksum: Chksm) -> AccountNftBucket { + AccountNftBucket { + checksum: Checksum::Value(checksum), + } + } +} + +impl TypedColumn for AccountNftBucket { + type KeyType = AccountNftBucketKey; + type ValueType = Self; + const NAME: &'static str = "ACC_NFT_BUCKETS"; + + fn encode_key(key: Self::KeyType) -> Vec { + bincode::serialize(&key).unwrap() + } + + fn decode_key(bytes: Vec) -> crate::Result { + let key = bincode::deserialize(&bytes)?; + Ok(key) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct AccountNftGrandBucketKey { + pub grand_bucket: u16, +} + +impl AccountNftGrandBucketKey { + pub fn new(grand_bucket: u16) -> AccountNftGrandBucketKey { + AccountNftGrandBucketKey { grand_bucket } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct AccountNftGrandBucket { + pub checksum: Checksum, +} + +impl AccountNftGrandBucket { + pub fn new(checksum: Chksm) -> AccountNftGrandBucket { + AccountNftGrandBucket { + checksum: Checksum::Value(checksum), + } + } +} + +impl TypedColumn for AccountNftGrandBucket { + type KeyType = AccountNftGrandBucketKey; + type ValueType = Self; + const NAME: &'static str = "ACC_NFT_GRAND_BUCKET"; + + fn encode_key(key: Self::KeyType) -> Vec { + bincode::serialize(&key).unwrap() + } + + fn decode_key(bytes: Vec) -> crate::Result { + let key = bincode::deserialize(&bytes)?; + Ok(key) + } +} + +impl Storage { + /// Adds bubblegum change record to the bubblegum changes column family. + /// Functionality for triggering checksum calculation/re-calculation is triggered separately, + /// in ingester module. 
+    pub fn track_tree_change_with_batch(
+        &self,
+        batch: &mut rocksdb::WriteBatch,
+        tree_update: &TreeUpdate,
+    ) -> crate::Result<()> {
+        let key = BubblegumChangeKey::new(tree_update.tree, tree_update.slot, tree_update.seq);
+        let value = BubblegumChange {
+            signature: tree_update.tx.clone(),
+        };
+        let _ = self.bubblegum_changes.put_with_batch(batch, key, &value);
+
+        if epoch_of_slot(tree_update.slot) < current_estimated_epoch() {
+            // We invalidate the epoch checksum here, but trigger the checksum recalculation in another place.
+            // Possibly something might happen after the checksum is invalidated, and the re-calculation
+            // won't start.
+            // That is acceptable, since it is better to have a clearly invalidated checksum
+            // than a checksum that doesn't reflect the current state.
+            self.invalidate_bubblegum_epoch_with_batch(
+                batch,
+                tree_update.tree,
+                epoch_of_slot(tree_update.slot),
+            );
+        }
+
+        Ok(())
+    }
+
+    fn invalidate_bubblegum_epoch_with_batch(
+        &self,
+        batch: &mut rocksdb::WriteBatch,
+        tree: Pubkey,
+        epoch: u32,
+    ) {
+        let epoch_key = BubblegumEpochKey {
+            tree_pubkey: tree,
+            epoch_num: epoch,
+        };
+        let _ =
+            self.bubblegum_epochs
+                .put_with_batch(batch, epoch_key, &BUBBLEGUM_EPOCH_INVALIDATED);
+
+        if grand_epoch_of_epoch(epoch) < grand_epoch_of_epoch(current_estimated_epoch()) {
+            let grand_epoch_key = BubblegumGrandEpochKey {
+                tree_pubkey: tree,
+                grand_epoch_num: grand_epoch_of_epoch(epoch),
+            };
+            let _ = self.bubblegum_grand_epochs.put_with_batch(
+                batch,
+                grand_epoch_key,
+                &BUBBLEGUM_GRAND_EPOCH_INVALIDATED,
+            );
+        }
+    }
+}
+
+#[async_trait::async_trait]
+pub trait DataConsistencyStorage {
+    async fn drop_forked_bubblegum_changes(&self, changes: &[BubblegumChangeKey]);
+}
+
+#[async_trait::async_trait]
+impl DataConsistencyStorage for Storage {
+    async fn drop_forked_bubblegum_changes(&self, changes: &[BubblegumChangeKey]) {
+        let _ = self.bubblegum_changes.delete_batch(changes.to_vec()).await;
+
+        let mut epochs_to_invalidate = HashSet::new();
+        for change in changes {
+            epochs_to_invalidate.insert(BubblegumEpochKey {
+                tree_pubkey: change.tree_pubkey,
+                epoch_num: epoch_of_slot(change.slot),
+            });
+        }
+
+        let mut grand_epochs_to_invalidate = HashSet::new();
+        for epoch_to_invalidate in epochs_to_invalidate {
+            let _ = self.bubblegum_epochs.put(
+                epoch_to_invalidate.clone(),
+                BUBBLEGUM_EPOCH_INVALIDATED.clone(),
+            );
+            grand_epochs_to_invalidate.insert(BubblegumGrandEpochKey {
+                tree_pubkey: epoch_to_invalidate.tree_pubkey,
+                grand_epoch_num: grand_epoch_of_epoch(epoch_to_invalidate.epoch_num),
+            });
+        }
+
+        for grand_epoch_to_invalidate in grand_epochs_to_invalidate {
+            let _ = self.bubblegum_grand_epochs.put(
+                grand_epoch_to_invalidate,
+                BUBBLEGUM_GRAND_EPOCH_INVALIDATED.clone(),
+            );
+        }
+    }
+}
+
+// TODO: Replace with LazyLock after rustc update.
+lazy_static::lazy_static! {
+    pub static ref BUBBLEGUM_EPOCH_INVALIDATED_BYTES: Vec = bincode::serialize(&BUBBLEGUM_EPOCH_INVALIDATED).unwrap();
+    pub static ref BUBBLEGUM_EPOCH_CALCULATING_BYTES: Vec = bincode::serialize(&BUBBLEGUM_EPOCH_CALCULATING).unwrap();
+    pub static ref BUBBLEGUM_GRAND_EPOCH_INVALIDATED_BYTES: Vec = bincode::serialize(&BUBBLEGUM_GRAND_EPOCH_INVALIDATED).unwrap();
+    pub static ref BUBBLEGUM_GRAND_EPOCH_CALCULATING_BYTES: Vec = bincode::serialize(&BUBBLEGUM_GRAND_EPOCH_CALCULATING).unwrap();
+}
+
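+// Sketch of the intended write protocol for the merge operators defined below
+// (illustrative pseudo-calls, not code from this patch): the checksum calculator
+// first stores the `Calculating` marker, e.g.
+//     bubblegum_epochs.put_async(epoch_key, BUBBLEGUM_EPOCH_CALCULATING).await;
+// then hashes all changes of the epoch and submits the result through a merge:
+//     bubblegum_epochs.merge(epoch_key, BubblegumEpoch::from(checksum)).await;
+// The merge operator applies the operand only while the stored value still equals
+// BUBBLEGUM_EPOCH_CALCULATING_BYTES, so an `Invalidated` marker written by a late
+// update in the meantime takes precedence over the computed checksum.
+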
+/// This merge should be used only for setting a calculated checksum.
+/// The thing is that while we calculate the checksum for tree signatures in a given epoch,
+/// it is possible that in parallel we receive another update for this tree in this epoch.
+/// To not miss this fact, we calculate the tree epoch checksum in the following way:
+/// 1) Set the tree epoch to Calculating
+/// 2) Calculate the checksum
+/// 3) Update the tree epoch with the calculated checksum, only if the previous value is Calculating
+/// This works in conjunction with bubblegum_updates_processor, which sets the tree epoch value
+/// to Invalidated after each tree update.
+/// That's why the checksum calculator is able to set the checksum only if no updates have been
+/// received during the calculation (because otherwise the status will be Invalidated, not Calculating).
+pub(crate) fn merge_bubblgum_epoch_checksum(
+    _new_key: &[u8],
+    existing_val: Option<&[u8]>,
+    operands: &MergeOperands,
+) -> Option> {
+    if let Some(v) = existing_val {
+        if v == BUBBLEGUM_EPOCH_CALCULATING_BYTES.as_slice() {
+            if let Some(op) = operands.into_iter().next() {
+                return Some(op.to_vec());
+            }
+        }
+        Some(v.to_vec())
+    } else {
+        None
+    }
+}
+
+pub(crate) fn merge_bubblgum_grand_epoch_checksum(
+    _new_key: &[u8],
+    existing_val: Option<&[u8]>,
+    operands: &MergeOperands,
+) -> Option> {
+    if let Some(v) = existing_val {
+        if v == BUBBLEGUM_GRAND_EPOCH_CALCULATING_BYTES.as_slice() {
+            if let Some(op) = operands.into_iter().next() {
+                return Some(op.to_vec());
+            }
+        }
+        Some(v.to_vec())
+    } else {
+        None
+    }
+}
+
+#[allow(clippy::while_let_on_iterator)]
+#[async_trait]
+impl BbgmChecksumServiceApi for Storage {
+    async fn get_earliest_grand_epoch(&self) -> anyhow::Result> {
+        let Some(first_record) = self.bubblegum_grand_epochs.iter_start().next() else {
+            return Ok(None);
+        };
+        let (k, _v) = first_record?;
+        let first_key = BubblegumGrandEpoch::decode_key(k.to_vec())?;
+        Ok(Some(first_key.grand_epoch_num))
+    }
+
+    async fn list_grand_epoch_checksums(
+        &self,
+        grand_epoch: u16,
+        limit: Option,
+        after: Option,
+    ) -> anyhow::Result> {
+        let max_result = limit.unwrap_or(u64::MAX) as usize;
+        let mut it = if let Some(after) = after {
+            let mut it = self
+                .bubblegum_grand_epochs
+                .iter(BubblegumGrandEpochKey::new(after, grand_epoch));
+            let _ = it.next();
+            it
+        } else {
+            self.bubblegum_grand_epochs.iter_start()
+        };
+        let mut result = Vec::new();
+        while let Some(next) = it.next() {
+            let pair = next?;
+            let k = BubblegumGrandEpoch::decode_key(pair.0.to_vec())?;
+            let v = bincode::deserialize::(&pair.1)?;
+            if k.grand_epoch_num != grand_epoch {
+                break;
+            }
+            result.push(BbgmGrandEpochCksm {
+                tree_pubkey: k.tree_pubkey,
+                checksum: v.checksum.ok(),
+            });
+            if result.len() >= max_result {
+                break;
+            }
+        }
+        Ok(result)
+    }
+
+    async fn list_epoch_checksums(
+        &self,
+        grand_epoch: u16,
+        tree_pubkey: Pubkey,
+    ) -> anyhow::Result> {
+        let first_epoch = first_epoch_in_grand_epoch(grand_epoch);
+        let mut it = self
+            .bubblegum_epochs
+            .iter(BubblegumEpochKey::new(tree_pubkey, first_epoch));
+        let mut result = Vec::new();
+        while let Some(next) = it.next() {
+            let pair = next?;
+            let k = BubblegumEpoch::decode_key(pair.0.to_vec())?;
+            let v = bincode::deserialize::(&pair.1)?;
+            if grand_epoch_of_epoch(k.epoch_num) != grand_epoch || k.tree_pubkey != tree_pubkey {
+                break;
+            }
+            result.push(BbgmEpochCksm {
+                tree_pubkey,
+                epoch: k.epoch_num,
+                checksum: v.checksum.ok(),
+            });
+        }
+        Ok(result)
+    }
+
+    async fn list_epoch_changes(
+        &self,
+        epoch: u32,
+        tree_pubkey: Pubkey,
+        limit: Option,
+        after: Option,
+    ) -> anyhow::Result> {
+        let
+        let max_result = limit.unwrap_or(u64::MAX) as usize;
+        let BbgmChangePos { slot, seq } = after.unwrap_or_default();
+
+        let mut it = self.bubblegum_changes.iter(BubblegumChangeKey {
+            epoch,
+            tree_pubkey,
+            slot,
+            seq,
+        });
+
+        let mut result = Vec::new();
+        while let Some(next) = it.next() {
+            let pair = next?;
+            let k = BubblegumChange::decode_key(pair.0.to_vec())?;
+            let v = bincode::deserialize::<BubblegumChange>(&pair.1)?;
+            if k.tree_pubkey != tree_pubkey || epoch_of_slot(k.slot) != epoch {
+                break;
+            }
+            result.push(BbgmChangeRecord {
+                tree_pubkey,
+                slot: k.slot,
+                seq: k.seq,
+                signature: v.signature,
+            });
+            if result.len() >= max_result {
+                break;
+            }
+        }
+
+        Ok(result)
+    }
+
+    async fn propose_missing_changes(&self, _changes: &[BbgmChangeRecord]) {
+        // TODO: how to handle this?
+    }
+}
+
+#[allow(clippy::while_let_on_iterator)]
+#[async_trait]
+impl AccChecksumServiceApi for Storage {
+    async fn list_grand_buckets(&self) -> anyhow::Result<Vec<AccGrandBucketCksm>> {
+        let mut it = self.acc_nft_grand_buckets.iter_start();
+        let mut result = Vec::new();
+        while let Some(rec) = it.next() {
+            if let Ok((Ok(k), Ok(v))) = rec.map(|(k, v)| {
+                (
+                    AccountNftGrandBucket::decode_key(k.to_vec()),
+                    bincode::deserialize::<AccountNftGrandBucket>(&v),
+                )
+            }) {
+                result.push(AccGrandBucketCksm {
+                    grand_bucket: k.grand_bucket,
+                    checksum: v.checksum.ok(),
+                });
+            }
+        }
+        Ok(result)
+    }
+
+    async fn list_bucket_checksums(&self, grand_bucket: u16) -> anyhow::Result<Vec<AccBucketCksm>> {
+        let mut it = self
+            .acc_nft_buckets
+            .iter(AccountNftBucketKey::grand_bucket_start_key(grand_bucket));
+        let mut result = Vec::new();
+        while let Some(next) = it.next() {
+            let pair = next?;
+            let k = AccountNftBucket::decode_key(pair.0.to_vec())?;
+            let v = bincode::deserialize::<AccountNftBucket>(&pair.1)?;
+            if grand_bucket_for_bucket(k.bucket) != grand_bucket {
+                break;
+            }
+            result.push(AccBucketCksm {
+                bucket: k.bucket,
+                checksum: v.checksum.ok(),
+            });
+        }
+        Ok(result)
+    }
+
+    async fn list_accounts(
+        &self,
+        bucket: u16,
+        limit: Option<u64>,
+        after: Option<Pubkey>,
+    ) -> anyhow::Result<Vec<AccLastChange>> {
+        let max_result = limit.unwrap_or(u64::MAX) as usize;
+        let start_key = after
+            .map(|account_pubkey| AccountNftKey { account_pubkey })
+            .unwrap_or(AccountNftKey::bucket_start_key(bucket));
+        let mut it = self.acc_nft_last.iter(start_key);
+        let mut result = Vec::new();
+        while let Some(next) = it.next() {
+            let pair = next?;
+            let k = AccountNft::decode_key(pair.0.to_vec())?;
+            let v = bincode::deserialize::<AccountNft>(&pair.1)?;
+            if bucket_for_acc(k.account_pubkey) != bucket {
+                break;
+            }
+            result.push(AccLastChange {
+                account_pubkey: k.account_pubkey,
+                slot: v.last_slot,
+                write_version: v.last_write_version,
+            });
+            if result.len() >= max_result {
+                break;
+            }
+        }
+        Ok(result)
+    }
+
+    async fn propose_missing_changes(
+        &self,
+        _changes: Vec<AccLastChange>,
+    ) {
+        // Do nothing
+    }
+}
diff --git a/rocks-db/src/transaction.rs b/rocks-db/src/transaction.rs
index 6f98ac427..e587c3108 100644
--- a/rocks-db/src/transaction.rs
+++ b/rocks-db/src/transaction.rs
@@ -84,6 +84,8 @@ pub struct AssetUpdate {
 pub pk: Pubkey,
 pub details: T,
 }
+
+/// For Bubblegum instructions only
 #[derive(Clone, Default)]
 pub struct InstructionResult {
 pub update: Option,
diff --git a/rocks-db/src/transaction_client.rs b/rocks-db/src/transaction_client.rs
index 70fbafeab..0817421fc 100644
--- a/rocks-db/src/transaction_client.rs
+++ b/rocks-db/src/transaction_client.rs
@@ -161,6 +161,7 @@ impl Storage {
 self.save_tree_with_batch(batch, tree_update);
 self.save_asset_signature_with_batch(batch, tree_update);
 self.save_leaf_signature_with_batch(batch, tree_update)?;
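+        // If the slot belongs to an already passed epoch, this call also invalidates
+        // the corresponding epoch (and, when applicable, grand epoch) checksum.
+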
self.track_tree_change_with_batch(batch, tree_update)?; // for p2p consistency } Ok(()) diff --git a/rocks-db/tests/storage_consistency_test.rs b/rocks-db/tests/storage_consistency_test.rs new file mode 100644 index 000000000..4bec037db --- /dev/null +++ b/rocks-db/tests/storage_consistency_test.rs @@ -0,0 +1,170 @@ +#[cfg(test)] +mod test { + + use interface::checksums_storage::{ + BbgmChangePos, BbgmChangeRecord, BbgmChecksumServiceApi, BbgmEpochCksm, BbgmGrandEpochCksm, + }; + use rocks_db::storage_consistency::Checksum; + use rocks_db::storage_consistency::{ + BubblegumChange, BubblegumChangeKey, BubblegumEpoch, BubblegumEpochKey, + BubblegumGrandEpoch, BubblegumGrandEpochKey, + }; + use setup::rocks::RocksTestEnvironment; + use solana_sdk::pubkey::Pubkey; + + #[tokio::test] + async fn test_list_bbgm_grand_epochs() { + let storage = RocksTestEnvironment::new(&[]).storage; + + let tree_1 = Pubkey::new_unique(); + let ge_1_key = BubblegumGrandEpochKey { + grand_epoch_num: 0, + tree_pubkey: tree_1, + }; + let ge_1_val = BubblegumGrandEpoch { + checksum: Checksum::Value([0u8; 32]), + }; + storage + .bubblegum_grand_epochs + .put(ge_1_key, ge_1_val) + .unwrap(); + + let result = storage + .list_grand_epoch_checksums(0, None, None) + .await + .unwrap(); + assert_eq!( + result, + vec![BbgmGrandEpochCksm { + tree_pubkey: tree_1, + checksum: Some([0u8; 32]) + }] + ); + } + + #[tokio::test] + async fn test_list_bbgm_epoch_checksums() { + let storage = RocksTestEnvironment::new(&[]).storage; + + let tree_1 = Pubkey::new_unique(); + + let epoch_0_key = BubblegumEpochKey { + tree_pubkey: tree_1, + epoch_num: 0, + }; + let epoch_0_val = BubblegumEpoch { + checksum: Checksum::Value([1u8; 32]), + }; + + let epoch_1_key = BubblegumEpochKey { + tree_pubkey: tree_1, + epoch_num: 1, + }; + let epoch_1_val = BubblegumEpoch { + checksum: Checksum::Value([2u8; 32]), + }; + + storage + .bubblegum_epochs + .put(epoch_0_key, epoch_0_val) + .unwrap(); + storage + .bubblegum_epochs + .put(epoch_1_key, epoch_1_val) + .unwrap(); + + let result = storage.list_epoch_checksums(0, tree_1).await.unwrap(); + assert_eq!( + result, + vec![ + BbgmEpochCksm { + epoch: 0, + tree_pubkey: tree_1, + checksum: Some([1u8; 32]) + }, + BbgmEpochCksm { + epoch: 1, + tree_pubkey: tree_1, + checksum: Some([2u8; 32]) + } + ] + ); + } + + #[tokio::test] + async fn test_list_bbgm_epoch_changes() { + let storage = RocksTestEnvironment::new(&[]).storage; + let tree_1 = Pubkey::new_unique(); + + let bbgm_change_1_key = BubblegumChangeKey { + epoch: 0, + tree_pubkey: tree_1, + slot: 5, + seq: 1, + }; + let bbgm_change_1_val = BubblegumChange { + signature: "1".to_string(), + }; + + let bbgm_change_2_key = BubblegumChangeKey { + epoch: 0, + tree_pubkey: tree_1, + slot: 6, + seq: 2, + }; + let bbgm_change_2_val = BubblegumChange { + signature: "2".to_string(), + }; + + let bbgm_change_3_key = BubblegumChangeKey { + epoch: 0, + tree_pubkey: tree_1, + slot: 7, + seq: 3, + }; + let bbgm_change_3_val = BubblegumChange { + signature: "3".to_string(), + }; + + storage + .bubblegum_changes + .put(bbgm_change_1_key, bbgm_change_1_val) + .unwrap(); + storage + .bubblegum_changes + .put(bbgm_change_2_key, bbgm_change_2_val) + .unwrap(); + storage + .bubblegum_changes + .put(bbgm_change_3_key, bbgm_change_3_val) + .unwrap(); + + let result_1 = storage + .list_epoch_changes(0, tree_1, Some(1), None) + .await + .unwrap(); + assert_eq!( + result_1, + vec![BbgmChangeRecord { + tree_pubkey: tree_1, + slot: 5, + seq: 1, + signature: "1".to_string() + }] + ); + 
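+        // `after` sets the starting position of the scan: the query below starts at
+        // (slot 6, seq 7), so the change at slot 6 / seq 2 is skipped and the next
+        // returned change is the one at slot 7 / seq 3.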
+ let result_2 = storage + .list_epoch_changes(0, tree_1, Some(1), Some(BbgmChangePos { slot: 6, seq: 7 })) + .await + .unwrap(); + assert_eq!( + result_2, + vec![BbgmChangeRecord { + tree_pubkey: tree_1, + slot: 7, + seq: 3, + signature: "3".to_string() + }] + ); + } +}
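
A minimal, non-authoritative sketch (not part of the patch) of how a peer might page through a single tree epoch's changes using the `BbgmChecksumServiceApi` added above; the helper name `collect_epoch_changes` is illustrative only. The cursor is advanced to `(last.slot, last.seq + 1)` because `list_epoch_changes` starts its scan at the given position inclusively:

use interface::checksums_storage::{BbgmChangePos, BbgmChangeRecord, BbgmChecksumServiceApi};
use solana_sdk::pubkey::Pubkey;

/// Collects all changes of one tree epoch, `page_size` records at a time.
async fn collect_epoch_changes(
    api: &impl BbgmChecksumServiceApi,
    epoch: u32,
    tree: Pubkey,
    page_size: u64,
) -> anyhow::Result<Vec<BbgmChangeRecord>> {
    let mut all = Vec::new();
    let mut after: Option<BbgmChangePos> = None;
    loop {
        let page = api
            .list_epoch_changes(epoch, tree, Some(page_size), after)
            .await?;
        let Some(last) = page.last() else { break };
        // Move the cursor just past the last seen (slot, seq) so it is not returned again.
        after = Some(BbgmChangePos {
            slot: last.slot,
            seq: last.seq + 1,
        });
        let fetched = page.len() as u64;
        all.extend(page);
        if fetched < page_size {
            break;
        }
    }
    Ok(all)
}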