From 5a5154735838ef549b1993c3abfbc026f1981fdf Mon Sep 17 00:00:00 2001 From: Ali Behjati Date: Mon, 13 Nov 2023 09:28:41 +0100 Subject: [PATCH] feat: add dynamic pricing based on staleness This commit adds dynamic (exponential) pricing based on the staleness of the feeds that the publishers publish. This feature, combined with the recently added fees for price updates, causes a fee spike to last for some time before going away. --- Cargo.lock | 2 +- Cargo.toml | 2 +- config/config.toml | 10 ++ src/agent.rs | 21 +++-- src/agent/dashboard.rs | 1 + src/agent/pythd/adapter.rs | 5 +- src/agent/solana.rs | 14 ++- src/agent/solana/exporter.rs | 171 ++++++++++++++++++++++++++--------- src/agent/store/global.rs | 110 ++++++++++++++++------ src/bin/agent.rs | 1 - 10 files changed, 251 insertions(+), 86 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f230be5..a78d739 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2761,7 +2761,7 @@ dependencies = [ [[package]] name = "pyth-agent" -version = "2.2.0" +version = "2.3.0" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 1afd90f..cfead40 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pyth-agent" -version = "2.2.0" +version = "2.3.0" edition = "2021" [[bin]] diff --git a/config/config.toml b/config/config.toml index 9cb1ca8..b7b5dbd 100644 --- a/config/config.toml +++ b/config/config.toml @@ -83,6 +83,16 @@ key_store.mapping_key = "RelevantOracleMappingAddress" # calculated based on the network's previous prioritization fees. # exporter.dynamic_compute_unit_pricing_enabled = false +# Maximum total compute unit fee paid for a single transaction. This is a safety measure +# while using dynamic compute price to prevent the exporter from paying too much for a +# single transaction. The default is 10**12 micro lamports (0.001 SOL). +# exporter.maximum_total_compute_fee_micro_lamports = 1000000000000 + +# Maximum slot gap between the current slot and the oldest slot amongst all the accounts in +# the batch. This is used to calculate the dynamic price per compute unit. When the slot gap +# reaches this number, we will use the maximum total compute fee for the transaction. +# exporter.maximum_slot_gap_for_dynamic_compute_unit_price = 25 + # Duration of the interval with which to poll the status of transactions.
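Aside: how the two settings above interact. A minimal sketch of the fee-cap arithmetic, with an illustrative helper name (not from the patch); the patch derives the same cap inline in the exporter, and the ~250k compute-unit figure comes from the exporter comment further down.

```rust
// The per-compute-unit price is bounded so that price * compute limit can
// never exceed exporter.maximum_total_compute_fee_micro_lamports.
fn max_unit_price_micro_lamports(max_total_fee_micro_lamports: u64, total_compute_limit: u64) -> u64 {
    max_total_fee_micro_lamports / total_compute_limit
}

fn main() {
    // With the default cap of 10**12 micro lamports and a ~250k compute-unit
    // transaction, the dynamic price tops out at 4_000_000 micro lamports per unit.
    assert_eq!(max_unit_price_micro_lamports(1_000_000_000_000, 250_000), 4_000_000);
}
```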
# It is recommended to set this to a value close to exporter.publish_interval_duration # exporter.transaction_monitor.poll_interval_duration = "4s" diff --git a/src/agent.rs b/src/agent.rs index def7e4a..e24c82a 100644 --- a/src/agent.rs +++ b/src/agent.rs @@ -10,13 +10,13 @@ | +--------+ +----------+ | | +----------+ +--------+ | | | Oracle | | Exporter | | | | Exporter | | Oracle | | | +--------+ +----------+ | | +----------+ +--------+ | -| | ^ ^ | | ^ ^ | | -+------|--------------|--|-------+ +-----|--|-------------|---------+ - | | | | | | +------------------------+ - | +--------|---------------|---------+ | | Pythd Websocket API | - | | Local Store | |<---------------+-------+ +------+ | - | +--------|---------------|---------+ | | | |<--|JRPC | | - v | | | v | |Adapter| | WS | | +| | ^ ^ ^ | | ^ ^ ^ | | ++------|-----------|--|--|-------+ +-----|--|--|----------|---------+ + | | | | | | | | +------------------------+ + | +--|-----|---------------|-----|---+ | | Pythd Websocket API | + | | | Local Store | | |<---------------+-------+ +------+ | + | +--|-----|---------------|-----|---+ | | | |<--|JRPC | | + v | | | | | v | |Adapter| | WS | | +--------------------|---------------|--------|-----------+ | | |-->|Server| | | | Global Store | | |---->+-------+ +------+ | +--------------------|---------------|--------|-----------+ | ^ | | @@ -39,7 +39,8 @@ Publisher data write path: - The Adapter then transforms this into the Pyth SDK data structures and sends it to the Local Store. - The Local Store holds the latest price data the user has submitted for each price feed. - The Exporters periodically query the Local Store for the latest user-submitted data, -and send it to the RPC node. +and send it to the RPC node. They query the Global Store to get the on-chain status to dynamically +adjust the compute unit price (if enabled). Publisher data read path: - The Oracles continually fetch data from the RPC node, and pass this to the Global Store. @@ -128,7 +129,9 @@ impl Agent { // Spawn the primary network jhs.extend(network::spawn_network( self.config.primary_network.clone(), + network::Network::Primary, local_store_tx.clone(), + global_store_lookup_tx.clone(), primary_oracle_updates_tx, primary_keypair_loader_tx, logger.new(o!("primary" => true)), @@ -138,7 +141,9 @@ impl Agent { if let Some(config) = &self.config.secondary_network { jhs.extend(network::spawn_network( config.clone(), + network::Network::Secondary, local_store_tx.clone(), + global_store_lookup_tx.clone(), secondary_oracle_updates_tx, secondary_keypair_loader_tx, logger.new(o!("primary" => false)), diff --git a/src/agent/dashboard.rs b/src/agent/dashboard.rs index 4a693e7..bc167db 100644 --- a/src/agent/dashboard.rs +++ b/src/agent/dashboard.rs @@ -56,6 +56,7 @@ impl MetricsServer { self.global_store_lookup_tx .send(Lookup::LookupAllAccountsData { + network: super::solana::network::Network::Primary, result_tx: global_data_tx, }) .await?; diff --git a/src/agent/pythd/adapter.rs b/src/agent/pythd/adapter.rs index a64f741..1ba9ae3 100644 --- a/src/agent/pythd/adapter.rs +++ b/src/agent/pythd/adapter.rs @@ -359,7 +359,10 @@ impl Adapter { async fn lookup_all_accounts_data(&self) -> Result { let (result_tx, result_rx) = oneshot::channel(); self.global_store_lookup_tx - .send(global::Lookup::LookupAllAccountsData { result_tx }) + .send(global::Lookup::LookupAllAccountsData { + network: solana::network::Network::Primary, + result_tx, + }) .await?; result_rx.await? 
} diff --git a/src/agent/solana.rs b/src/agent/solana.rs index d170251..39bf407 100644 --- a/src/agent/solana.rs +++ b/src/agent/solana.rs @@ -36,6 +36,12 @@ pub mod network { }, }; + #[derive(Clone, Copy, Serialize, Deserialize, Debug)] + pub enum Network { + Primary, + Secondary, + } + pub fn default_rpc_url() -> String { "http://localhost:8899".to_string() } @@ -72,9 +78,11 @@ pub mod network { pub fn spawn_network( config: Config, + network: Network, local_store_tx: Sender, - global_store_update_tx: mpsc::Sender, - keypair_request_tx: mpsc::Sender, + global_store_lookup_tx: Sender, + global_store_update_tx: Sender, + keypair_request_tx: Sender, logger: Logger, ) -> Result>> { // Publisher permissions updates between oracle and exporter @@ -96,11 +104,13 @@ pub mod network { // Spawn the Exporter let exporter_jhs = exporter::spawn_exporter( config.exporter, + network, &config.rpc_url, config.rpc_timeout, publisher_permissions_rx, KeyStore::new(config.key_store.clone(), &logger)?, local_store_tx, + global_store_lookup_tx, keypair_request_tx, logger, )?; diff --git a/src/agent/solana/exporter.rs b/src/agent/solana/exporter.rs index e89e201..40ac274 100644 --- a/src/agent/solana/exporter.rs +++ b/src/agent/solana/exporter.rs @@ -7,6 +7,7 @@ use { PriceIdentifier, }, key_store, + network::Network, }, crate::agent::remote_keypair_loader::{ KeypairRequest, @@ -84,11 +85,6 @@ const PYTH_ORACLE_VERSION: u32 = 2; const UPDATE_PRICE_NO_FAIL_ON_ERROR: i32 = 13; // const UPDATE_PRICE: i32 = 7; // Useful for making tx errors more visible in place of UPDATE_PRICE_NO_FAIL_ON_ERROR -// Maximum total compute unit fee paid for a single transaction (0.001 SOL). This is a safety -// measure while using dynamic compute price to prevent the exporter from paying too much for a -// single transaction -const MAXIMUM_TOTAL_COMPUTE_UNIT_FEE_MICRO_LAMPORTS: u64 = 1_000_000_000_000; - #[repr(C)] #[derive(Serialize, PartialEq, Debug, Clone)] struct UpdPriceCmd { @@ -108,61 +104,73 @@ pub struct Config { /// It is recommended to set this to slightly less than the network's block time, /// as the slot fetched will be used as the time of the price update. #[serde(with = "humantime_serde")] - pub refresh_network_state_interval_duration: Duration, + pub refresh_network_state_interval_duration: Duration, /// Duration of the interval at which to publish updates #[serde(with = "humantime_serde")] - pub publish_interval_duration: Duration, + pub publish_interval_duration: Duration, /// Age after which a price update is considered stale and not published #[serde(with = "humantime_serde")] - pub staleness_threshold: Duration, + pub staleness_threshold: Duration, /// Wait at least this long before publishing an unchanged price /// state; unchanged price state means only timestamp has changed /// with other state identical to last published state. 
- pub unchanged_publish_threshold: Duration, + pub unchanged_publish_threshold: Duration, /// Maximum size of a batch - pub max_batch_size: usize, + pub max_batch_size: usize, /// Capacity of the channel between the Exporter and the Transaction Monitor - pub inflight_transactions_channel_capacity: usize, + pub inflight_transactions_channel_capacity: usize, /// Configuration for the Transaction Monitor - pub transaction_monitor: transaction_monitor::Config, + pub transaction_monitor: transaction_monitor::Config, /// Number of compute units requested per update_price instruction within the transaction /// (i.e., requested units equals `n * compute_unit_limit`, where `n` is the number of update_price /// instructions) - pub compute_unit_limit: u32, + pub compute_unit_limit: u32, /// Price per compute unit offered for update_price transactions. If dynamic compute unit is /// enabled and this value is set, the actual price per compute unit will be the maximum of the /// network dynamic price and this value. - pub compute_unit_price_micro_lamports: Option, + pub compute_unit_price_micro_lamports: Option, /// Enable using dynamic price per compute unit based on the network's previous prioritization /// fees. - pub dynamic_compute_unit_pricing_enabled: bool, + pub dynamic_compute_unit_pricing_enabled: bool, + /// Maximum total compute unit fee paid for a single transaction. Defaults to 0.001 SOL. This + /// is a safety measure while using dynamic compute price to prevent the exporter from paying + /// too much for a single transaction. + pub maximum_total_compute_fee_micro_lamports: u64, + /// Maximum slot gap between the current slot and the oldest slot amongst all the accounts in + /// the batch. This is used to calculate the dynamic price per compute unit. When the slot gap + /// reaches this number, we will use the maximum total compute fee for the transaction. + pub maximum_slot_gap_for_dynamic_compute_unit_price: u64, } impl Default for Config { fn default() -> Self { Self { - refresh_network_state_interval_duration: Duration::from_millis(200), - publish_interval_duration: Duration::from_secs(1), - staleness_threshold: Duration::from_secs(5), - unchanged_publish_threshold: Duration::from_secs(5), - max_batch_size: 12, - inflight_transactions_channel_capacity: 10000, - transaction_monitor: Default::default(), + refresh_network_state_interval_duration: Duration::from_millis(200), + publish_interval_duration: Duration::from_secs(1), + staleness_threshold: Duration::from_secs(5), + unchanged_publish_threshold: Duration::from_secs(5), + max_batch_size: 12, + inflight_transactions_channel_capacity: 10000, + transaction_monitor: Default::default(), // The largest transactions appear to be about ~12000 CUs. We leave ourselves some breathing room.
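For context, a sketch of how a compute budget like this is attached to a transaction, assuming solana-sdk's ComputeBudgetInstruction API; this is a simplified stand-in, not the exporter's exact assembly code.

```rust
use solana_sdk::{
    compute_budget::ComputeBudgetInstruction,
    instruction::Instruction,
};

// Requested units scale with the batch: `n * compute_unit_limit` for `n`
// update_price instructions, plus an optional per-unit price instruction.
fn compute_budget_instructions(
    compute_unit_limit: u32,                // per update_price instruction, e.g. 40000
    num_updates: u32,                       // batch size, at most max_batch_size
    unit_price_micro_lamports: Option<u64>, // None keeps the default priority fee
) -> Vec<Instruction> {
    let mut instructions = vec![ComputeBudgetInstruction::set_compute_unit_limit(
        compute_unit_limit * num_updates,
    )];
    if let Some(price) = unit_price_micro_lamports {
        instructions.push(ComputeBudgetInstruction::set_compute_unit_price(price));
    }
    instructions
}
```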
- compute_unit_limit: 40000, - compute_unit_price_micro_lamports: None, - dynamic_compute_unit_pricing_enabled: false, + compute_unit_limit: 40000, + compute_unit_price_micro_lamports: None, + dynamic_compute_unit_pricing_enabled: false, + maximum_total_compute_fee_micro_lamports: 1_000_000_000_000, + maximum_slot_gap_for_dynamic_compute_unit_price: 25, } } } pub fn spawn_exporter( config: Config, + network: Network, rpc_url: &str, rpc_timeout: Duration, publisher_permissions_rx: mpsc::Receiver>>, key_store: KeyStore, local_store_tx: Sender, + global_store_tx: Sender, keypair_request_tx: mpsc::Sender, logger: Logger, ) -> Result>> { @@ -192,10 +200,12 @@ pub fn spawn_exporter( // Create and spawn the exporter let mut exporter = Exporter::new( config, + network, rpc_url, rpc_timeout, key_store, local_store_tx, + global_store_tx, network_state_rx, transactions_tx, publisher_permissions_rx, @@ -218,6 +228,9 @@ pub struct Exporter { config: Config, + /// The exporter network + network: Network, + /// Interval at which to publish updates publish_interval: Interval, @@ -227,6 +240,9 @@ pub struct Exporter { /// Channel on which to communicate with the local store local_store_tx: Sender, + /// Channel on which to communicate with the global store + global_store_tx: Sender, + /// The last state published for each price identifier. Used to /// rule out stale data and prevent repetitive publishing of /// unchanged prices. @@ -258,10 +274,12 @@ pub struct Exporter { impl Exporter { pub fn new( config: Config, + network: Network, rpc_url: &str, rpc_timeout: Duration, key_store: KeyStore, local_store_tx: Sender, + global_store_tx: Sender, network_state_rx: watch::Receiver, inflight_transactions_tx: Sender, publisher_permissions_rx: mpsc::Receiver>>, @@ -272,9 +290,11 @@ impl Exporter { Exporter { rpc_client: RpcClient::new_with_timeout(rpc_url.to_string(), rpc_timeout), config, + network, publish_interval, key_store, local_store_tx, + global_store_tx, last_published_state: HashMap::new(), network_state_rx, inflight_transactions_tx, @@ -542,14 +562,14 @@ impl Exporter { match self.publisher_permissions_rx.try_recv() { Ok(publisher_permissions) => { self.our_prices = publisher_permissions.get(publish_pubkey) .cloned() - .unwrap_or_else( || { - warn!( - self.logger, - "Exporter: No permissioned prices were found for the publishing keypair on-chain. This is expected only on startup."; - "publish_pubkey" => publish_pubkey.to_string(), - ); - HashSet::new() - }); + .unwrap_or_else( || { + warn!( + self.logger, + "Exporter: No permissioned prices were found for the publishing keypair on-chain. 
This is expected only on startup."; + "publish_pubkey" => publish_pubkey.to_string(), + ); + HashSet::new() + }); trace!( self.logger, "Exporter: read permissioned price accounts from channel"; @@ -657,17 +677,84 @@ impl Exporter { compute_unit_price_micro_lamports = Some(price); } - // If the dynamic unit price is enabled, use the estimated price if it is higher - // than the curdynamic_compute_unit_pricing_enabled - if let Some(estimated_recent_price) = self.recent_compute_unit_price_micro_lamports { - // Get the estimated compute unit price and Wrap it so it stays below the maximum total - // compute unit fee - let estimated_recent_price = estimated_recent_price - .min(MAXIMUM_TOTAL_COMPUTE_UNIT_FEE_MICRO_LAMPORTS / total_compute_limit as u64); + // If dynamic compute unit pricing is enabled, we use the following two methods to calculate an + // estimate of the price: + // - We exponentially increase the price based on the price staleness (slot gap between the + // current slot and the oldest slot amongst all the accounts in this batch). + // - We use the network's recent prioritization fees to get the minimum unit price + // that landed a transaction using Pyth price accounts (permissioned to this publisher) + // as writable. We take the median over the last 20 slots and divide it by two to make + // sure that it decays over time. The API doesn't return the priority fees for the Pyth + // price reads and so this reflects the unit price that publishers have paid in the + // previous slots. + // + // Combined, the two methods above act like a large price increase that decays over time + // whenever we cannot land transactions on-chain. The decaying behaviour is important to + // keep the uptime high during congestion, whereas without it we would publish a price only + // after a large gap and then again only after the next large gap. + if self.config.dynamic_compute_unit_pricing_enabled { + let maximum_unit_price = + self.config.maximum_total_compute_fee_micro_lamports / total_compute_limit as u64; + + // Use the estimated previous price if it is higher + // than the current price. + if let Some(estimated_recent_price) = self.recent_compute_unit_price_micro_lamports { + // Get the estimated compute unit price and cap it so it stays below the maximum + // total compute unit fee. We additionally divide the price by 2 to create an + // exponential decay. This will make sure that a spike doesn't get propagated + // forever. + let estimated_price = (estimated_recent_price >> 1).min(maximum_unit_price); + + compute_unit_price_micro_lamports = compute_unit_price_micro_lamports + .map(|price| price.max(estimated_price)) + .or(Some(estimated_price)); + } + + // Use an exponentially higher price if this publisher hasn't published the accounts + // in this batch in a while. This will use the maximum total compute unit fee if the publisher + // hasn't updated for >= maximum_slot_gap_for_dynamic_compute_unit_price slots. + let (result_tx, result_rx) = oneshot::channel(); + self.global_store_tx + .send(store::global::Lookup::LookupPriceAccounts { + network: self.network, + price_ids: price_accounts.clone().into_iter().collect(), + result_tx, + }) + .await?; + + let result = result_rx.await??; + + // Calculate the maximum slot difference between the aggregate slot and the + // current slot amongst all the accounts. Here, the aggregate slot is + // used instead of the publisher's latest update to avoid overpaying.
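The halving above is what produces the decay. A standalone toy model (not part of the patch) of how a fee spike that feeds back into the recent-fee estimate dies out geometrically rather than persisting:

```rust
// Each publish cycle re-submits at the halved recent price (capped), so a
// spike re-enters the estimate at half strength per round at most.
fn decayed_recent_price(estimated_recent_price: u64, maximum_unit_price: u64) -> u64 {
    (estimated_recent_price >> 1).min(maximum_unit_price)
}

fn main() {
    let maximum_unit_price = 4_000_000;
    let mut price = maximum_unit_price; // spike at the cap
    for round in 1..=6 {
        price = decayed_recent_price(price, maximum_unit_price);
        println!("round {round}: {price}"); // 2000000, 1000000, ..., 62500
    }
}
```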
+ let oldest_slot = result + .values() + .map(|account| account.last_slot) + .min() + .ok_or(anyhow!("No price accounts"))?; + + let slot_gap = network_state.current_slot.saturating_sub(oldest_slot); + + // Set the dynamic price exponentially based on the slot gap. If the max slot gap is + // 25, the maximum unit price is paid at a gap of 25 (or more); at a gap of 24 it is + // half of that, and it halves again with each smaller gap. Given that we have a max + // total compute fee of 10**12 micro lamports and 250k compute units in one tx (12 updates), + // these are the estimated prices based on slot gaps: + // 25 (or more): 4_000_000 + // 20 : 125_000 + // 18 : 31_250 + // 15 : 3_906 + // 13 : 976 + // 10 : 122 + let exponential_price = maximum_unit_price + >> self + .config + .maximum_slot_gap_for_dynamic_compute_unit_price + .saturating_sub(slot_gap); compute_unit_price_micro_lamports = compute_unit_price_micro_lamports - .map(|price| price.max(estimated_recent_price)) - .or(Some(estimated_recent_price)); + .map(|price| price.max(exponential_price)) + .or(Some(exponential_price)); } if let Some(compute_unit_price_micro_lamports) = compute_unit_price_micro_lamports { diff --git a/src/agent/store/global.rs b/src/agent/store/global.rs index f8f715e..154b212 100644 --- a/src/agent/store/global.rs +++ b/src/agent/store/global.rs @@ -1,3 +1,4 @@ +use std::collections::HashSet; // The Global Store stores a copy of all the product and price information held in the Pyth // on-chain aggregation contracts, across both the primary and secondary networks. // This enables this data to be easily queried by other components. @@ -14,6 +15,7 @@ use { PROMETHEUS_REGISTRY, }, pythd::adapter, + solana::network::Network, }, anyhow::{ anyhow, @@ -107,13 +109,27 @@ pub enum Lookup { result_tx: oneshot::Sender>, }, LookupAllAccountsData { + network: Network, result_tx: oneshot::Sender>, }, + LookupPriceAccounts { + network: Network, + price_ids: HashSet, + result_tx: oneshot::Sender>>, + }, } pub struct Store { - /// The actual data - account_data: AllAccountsData, + /// The actual data on the primary network + account_data_primary: AllAccountsData, + + /// The actual data on the secondary network. + /// This data is not necessarily consistent across both networks, so we need to store it + /// separately. + account_data_secondary: AllAccountsData, + + /// The account metadata for both networks. + /// The metadata is consistent across both networks, so we only need to store it once. account_metadata: AllAccountsMetadata, /// Prometheus metrics for products @@ -169,7 +185,8 @@ impl Store { let prom_registry_ref = &mut &mut PROMETHEUS_REGISTRY.lock().await; Store { - account_data: Default::default(), + account_data_primary: Default::default(), + account_data_secondary: Default::default(), account_metadata: Default::default(), product_metrics: ProductGlobalMetrics::new(prom_registry_ref), price_metrics: PriceGlobalMetrics::new(prom_registry_ref), @@ -193,15 +210,11 @@ impl Store { async fn handle_next(&mut self) -> Result<()> { tokio::select! { Some(update) = self.primary_updates_rx.recv() => { - self.update_data(&update).await?; + self.update_data(Network::Primary, &update).await?; self.update_metadata(&update)?; } Some(update) = self.secondary_updates_rx.recv() => { - // We only use the secondary store to update the metadata, which is - // the same between both networks. This is so that if one network is offline - // we still have the metadata available to us.
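The fee schedule quoted in the exporter comment above can be reproduced with the same right-shift formula; here is a standalone check (not part of the patch), using the 4_000_000 micro-lamport cap derived earlier:

```rust
fn exponential_price(maximum_unit_price: u64, max_slot_gap: u64, slot_gap: u64) -> u64 {
    // The shift shrinks as the gap approaches max_slot_gap; at (or beyond) the
    // gap cap, the full maximum unit price is paid.
    maximum_unit_price >> max_slot_gap.saturating_sub(slot_gap)
}

fn main() {
    for slot_gap in [25u64, 20, 18, 15, 13, 10] {
        println!("{slot_gap}: {}", exponential_price(4_000_000, 25, slot_gap));
        // Prints 4000000, 125000, 31250, 3906, 976, 122 -- matching the table.
    }
}
```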
We don't update the data - // itself, because the aggregate prices may diverge slightly between - // the two networks. + self.update_data(Network::Secondary, &update).await?; self.update_metadata(&update)?; } Some(lookup) = self.lookup_rx.recv() => { @@ -212,7 +225,13 @@ impl Store { Ok(()) } - async fn update_data(&mut self, update: &Update) -> Result<()> { + async fn update_data(&mut self, network: Network, update: &Update) -> Result<()> { + // Choose the right account data to update + let account_data = match network { + Network::Primary => &mut self.account_data_primary, + Network::Secondary => &mut self.account_data_secondary, + }; + match update { Update::ProductAccountUpdate { account_key, @@ -225,7 +244,7 @@ impl Store { self.product_metrics.update(account_key, maybe_symbol); // Update the stored data - self.account_data + account_data .product_accounts .insert(*account_key, account.clone()); } @@ -234,7 +253,7 @@ impl Store { account, } => { // Sanity-check that we are updating with more recent data - if let Some(existing_price) = self.account_data.price_accounts.get(account_key) { + if let Some(existing_price) = account_data.price_accounts.get(account_key) { if existing_price.timestamp > account.timestamp { // This message is not an error. It is common // for primary and secondary network to have @@ -252,22 +271,24 @@ impl Store { self.price_metrics.update(account_key, account); // Update the stored data - self.account_data - .price_accounts - .insert(*account_key, *account); - - // Notify the Pythd API adapter that this account has changed - self.pythd_adapter_tx - .send(adapter::Message::GlobalStoreUpdate { - price_identifier: Identifier::new(account_key.to_bytes()), - price: account.agg.price, - conf: account.agg.conf, - status: account.agg.status, - valid_slot: account.valid_slot, - pub_slot: account.agg.pub_slot, - }) - .await - .map_err(|_| anyhow!("failed to notify pythd adapter of account update"))?; + account_data.price_accounts.insert(*account_key, *account); + + // Notify the Pythd API adapter that this account has changed. + // As the account data might differ between the two networks + // we only notify the adapter of the primary network updates. 
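A minimal runnable model of the oneshot request/response pattern behind Lookup::LookupPriceAccounts, whose handler follows below. Types are simplified stand-ins (u64 in place of Pubkey and the price account struct); assumes tokio and anyhow as dependencies.

```rust
use std::collections::{HashMap, HashSet};
use tokio::sync::{mpsc, oneshot};

struct LookupPriceAccounts {
    price_ids: HashSet<u64>, // stand-in for solana Pubkeys
    result_tx: oneshot::Sender<anyhow::Result<HashMap<u64, u64>>>, // id -> last_slot
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let (lookup_tx, mut lookup_rx) = mpsc::channel::<LookupPriceAccounts>(8);

    // Store side: answer each lookup from its price map, erroring on unknown ids.
    let price_accounts: HashMap<u64, u64> = HashMap::from([(1, 100), (2, 95)]);
    tokio::spawn(async move {
        while let Some(request) = lookup_rx.recv().await {
            let result = request
                .price_ids
                .into_iter()
                .map(|id| {
                    price_accounts
                        .get(&id)
                        .map(|slot| (id, *slot))
                        .ok_or_else(|| anyhow::anyhow!("price id not found"))
                })
                .collect();
            let _ = request.result_tx.send(result);
        }
    });

    // Exporter side: request the batch, then take the oldest aggregate slot.
    let (result_tx, result_rx) = oneshot::channel();
    lookup_tx
        .send(LookupPriceAccounts {
            price_ids: HashSet::from([1, 2]),
            result_tx,
        })
        .await
        .map_err(|_| anyhow::anyhow!("lookup channel closed"))?;
    let accounts = result_rx.await??;
    let oldest_slot = *accounts.values().min().expect("non-empty batch");
    assert_eq!(oldest_slot, 95);
    Ok(())
}
```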
+ if let Network::Primary = network { + self.pythd_adapter_tx + .send(adapter::Message::GlobalStoreUpdate { + price_identifier: Identifier::new(account_key.to_bytes()), + price: account.agg.price, + conf: account.agg.conf, + status: account.agg.status, + valid_slot: account.valid_slot, + pub_slot: account.agg.pub_slot, + }) + .await + .map_err(|_| anyhow!("failed to notify pythd adapter of account update"))?; + } } } @@ -304,9 +325,38 @@ impl Store { Lookup::LookupAllAccountsMetadata { result_tx } => result_tx .send(Ok(self.account_metadata.clone())) .map_err(|_| anyhow!("failed to send metadata to pythd adapter")), - Lookup::LookupAllAccountsData { result_tx } => result_tx - .send(Ok(self.account_data.clone())) + Lookup::LookupAllAccountsData { network, result_tx } => result_tx + .send(Ok(match network { + Network::Primary => self.account_data_primary.clone(), + Network::Secondary => self.account_data_secondary.clone(), + })) .map_err(|_| anyhow!("failed to send data to pythd adapter")), + Lookup::LookupPriceAccounts { + network, + price_ids, + result_tx, + } => { + let account_data = match network { + Network::Primary => &self.account_data_primary, + Network::Secondary => &self.account_data_secondary, + }; + + result_tx + .send( + price_ids + .into_iter() + .map(|id| { + account_data + .price_accounts + .get(&id) + .cloned() + .map(|v| (id, v)) + .ok_or(anyhow!("price id not found")) + }) + .collect::>>(), + ) + .map_err(|_| anyhow!("failed to send price accounts data")) + } } } } diff --git a/src/bin/agent.rs b/src/bin/agent.rs index beb9a4a..98e5194 100644 --- a/src/bin/agent.rs +++ b/src/bin/agent.rs @@ -116,7 +116,6 @@ async fn main() -> Result<()> { )); } - if let Err(err) = start(config, logger.clone()).await { error!(logger, "{}", err); debug!(logger, "error context"; "context" => format!("{:?}", err));
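Putting the pricing rule together: a simplified standalone summary of what the exporter computes when dynamic pricing is enabled. Parameter names mirror the config options; this is a sketch of the combined formula, not the patch's exact control flow.

```rust
// Final per-compute-unit price: the static configured price acts as a floor,
// raised to the decayed recent network price and to the exponential staleness
// price, with the dynamic terms capped by max total fee / compute limit.
fn dynamic_compute_unit_price(
    static_price: Option<u64>,         // compute_unit_price_micro_lamports
    recent_network_price: Option<u64>, // median of recent prioritization fees
    slot_gap: u64,                     // current slot - oldest aggregate slot
    max_slot_gap: u64,                 // maximum_slot_gap_for_dynamic_compute_unit_price
    max_total_fee_micro_lamports: u64, // maximum_total_compute_fee_micro_lamports
    total_compute_limit: u64,          // compute_unit_limit * batch size
) -> u64 {
    let maximum_unit_price = max_total_fee_micro_lamports / total_compute_limit;
    let decayed_recent = recent_network_price
        .map(|price| (price >> 1).min(maximum_unit_price))
        .unwrap_or(0);
    let exponential = maximum_unit_price >> max_slot_gap.saturating_sub(slot_gap);
    static_price.unwrap_or(0).max(decayed_recent).max(exponential)
}

fn main() {
    // Fresh feed, quiet network: the static floor wins.
    assert_eq!(
        dynamic_compute_unit_price(Some(50), None, 1, 25, 1_000_000_000_000, 250_000),
        50
    );
    // Feed stale by 20 slots: the exponential staleness term dominates.
    assert_eq!(
        dynamic_compute_unit_price(Some(50), Some(100), 20, 25, 1_000_000_000_000, 250_000),
        125_000
    );
}
```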