diff --git a/Cargo.lock b/Cargo.lock
index de38d8c3..4f5de11d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3400,7 +3400,7 @@ dependencies = [
 
 [[package]]
 name = "pyth-agent"
-version = "2.11.1"
+version = "2.12.0"
 dependencies = [
  "anyhow",
  "async-trait",
diff --git a/Cargo.toml b/Cargo.toml
index e6144cff..6ef86430 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,16 +1,12 @@
 [package]
 name = "pyth-agent"
-version = "2.11.1"
+version = "2.12.0"
 edition = "2021"
 
 [[bin]]
 name = "agent"
 path = "src/bin/agent.rs"
 
-[[bin]]
-name = "agent-migrate-config"
-path = "src/bin/agent_migrate_config.rs"
-
 [dependencies]
 anyhow = "1.0.81"
 serde = { version = "1.0.197", features = ["derive"] }
diff --git a/config/config.sample.pythnet.toml b/config/config.sample.pythnet.toml
index 909779e4..f3f0822f 100644
--- a/config/config.sample.pythnet.toml
+++ b/config/config.sample.pythnet.toml
@@ -17,9 +17,6 @@ key_store.publish_keypair_path = "/path/to/keypair.json"
 
 # Oracle program pubkey
 key_store.program_key = "FsJ3A3u2vn5cTVofAjvy6y5kwABJAqYWpe4975bi2epH"
 
-# Oracle mapping pubkey
-key_store.mapping_key = "AHtgzX45WTKfkPG53L6WYhGEXwQkN1BVknET3sVsLL8J"
-
 # Compute unit per price update.
 exporter.compute_unit_limit = 5000
diff --git a/config/config.sample.pythtest.toml b/config/config.sample.pythtest.toml
index e19d72ae..c18568b5 100644
--- a/config/config.sample.pythtest.toml
+++ b/config/config.sample.pythtest.toml
@@ -18,10 +18,6 @@ key_store.publish_keypair_path = "/path/to/keypair.json"
 key_store.program_key = "8tfDNiaEyrV6Q1U4DEXrEigs9DoDtkugzFbybENEbCDz" # conformance
 # key_store.program_key = "gSbePebfvPy7tRqimPoVecS2UsBvYv46ynrzWocc92s" # cross-chain
 
-# Oracle mapping pubkey
-key_store.mapping_key = "AFmdnt9ng1uVxqCmqwQJDAYC5cKTkw8gJKSM5PnzuF6z" # conformance
-# key_store.mapping_key = "BmA9Z6FjioHJPpjT39QazZyhDRUdZy2ezwx4GiDdE2u2" # cross-chain
-
 # Pythtest accumulator key (only for the cross-chain oracle)
 # key_store.accumulator_key = "7Vbmv1jt4vyuqBZcpYPpnVhrqVe5e6ZPb6JxDcffRHUM"
 
diff --git a/config/config.toml b/config/config.toml
index ae92b87f..0beadfc1 100644
--- a/config/config.toml
+++ b/config/config.toml
@@ -47,9 +47,6 @@ key_store.publish_keypair_path = "/path/to/keypair.json"
 # Public key of the oracle program
 key_store.program_key = "RelevantOracleProgramAddress"
 
-# Public key of the root mapping account
-key_store.mapping_key = "RelevantOracleMappingAddress"
-
 ### Optional fields of primary/secondary network config ###
 
 # Pubkey of accumulator message buffer program ID. Setting this
diff --git a/integration-tests/tests/test_integration.py b/integration-tests/tests/test_integration.py
index d155a903..b0c584a6 100644
--- a/integration-tests/tests/test_integration.py
+++ b/integration-tests/tests/test_integration.py
@@ -20,12 +20,10 @@ from contextlib import contextmanager
 import shutil
 
 from solana.keypair import Keypair
-from solders.system_program import ID as SYSTEM_PROGRAM_ID
 from solana.rpc.async_api import AsyncClient
 from solana.rpc import commitment
-from solana.transaction import AccountMeta, Transaction, TransactionInstruction
+from solana.transaction import AccountMeta, Transaction
 from anchorpy import Provider, Wallet
-from construct import Bytes, Int32sl, Int32ul, Struct
 from solana.publickey import PublicKey
 from message_buffer_client_codegen.instructions import initialize, set_allowed_programs, create_buffer
 from message_buffer_client_codegen.accounts.message_buffer import MessageBuffer
@@ -359,21 +357,6 @@ def agent_publish_keypair(self, agent_keystore_path, sync_accounts):
         LOGGER.debug(f"Publisher {address.stdout.strip()} balance: {balance.stdout.strip()}")
         time.sleep(8)
 
-    @pytest.fixture
-    def agent_keystore(self, agent_keystore_path, agent_publish_keypair):
-        self.run(
-            f"../scripts/init_key_store.sh localnet {agent_keystore_path}")
-
-        if USE_ACCUMULATOR:
-            path = os.path.join(agent_keystore_path, "accumulator_program_key.json")
-
-            with open(path, 'w') as f:
-                f.write(MESSAGE_BUFFER_PROGRAM)
-
-        if os.path.exists("keystore"):
-            os.remove("keystore")
-        os.symlink(agent_keystore_path, "keystore")
-
     @pytest_asyncio.fixture
     async def initialize_message_buffer_program(self, funding_keypair, sync_key_path, sync_accounts):
 
@@ -429,18 +412,15 @@ async def initialize_message_buffer_program(self, funding_keypair, sync_key_path
         await provider.send(tx, [parsed_funding_keypair])
 
     @pytest.fixture
-    def agent_config(self, agent_keystore, agent_keystore_path, tmp_path):
+    def agent_config(self, agent_keystore_path, agent_publish_keypair, tmp_path):
         with open("agent_conf.toml") as config_file:
             agent_config = config_file.read()
 
             publish_keypair_path = os.path.join(agent_keystore_path, "publish_key_pair.json")
 
-            mapping_keypair = Keypair.from_secret_key(MAPPING_KEYPAIR)
-
             agent_config += f"""
 key_store.publish_keypair_path = "{publish_keypair_path}"
 key_store.program_key = "{ORACLE_PROGRAM}"
-key_store.mapping_key = "{mapping_keypair.public_key}"
 """
 
             # Add accumulator setting if option is enabled
@@ -457,32 +437,7 @@ def agent_config(self, agent_keystore, agent_keystore_path, tmp_path):
 
         return path
 
     @pytest.fixture
-    def agent_legacy_config(self, agent_keystore, agent_keystore_path, tmp_path):
-        """
-        Prepares a legacy v1.x.x config for testing agent-migrate-config
-        """
-        with open("agent_conf.toml") as config_file:
-            agent_config = config_file.read()
-
-        agent_config += f'\nkey_store.root_path = "{agent_keystore_path}"'
-
-        if USE_ACCUMULATOR:
-            # Add accumulator setting to verify that it is inlined as well
-            agent_config += f'\nkey_store.accumulator_key_path = "accumulator_program_key.json"'
-
-        LOGGER.debug(f"Built legacy agent config:\n{agent_config}")
-
-        path = os.path.join(tmp_path, "agent_conf_legacy.toml")
-
-        with open(path, 'w') as f:
-            f.write(agent_config)
-
-        return path
-
-
-
-    @pytest.fixture
-    def agent(self, sync_accounts, agent_keystore, tmp_path, initialize_message_buffer_program, agent_config):
+    def agent(self, sync_accounts, agent_keystore_path, agent_publish_keypair, tmp_path, initialize_message_buffer_program, agent_config):
LOGGER.debug("Building agent binary") self.run("cargo build --release --bin agent") @@ -496,7 +451,7 @@ def agent(self, sync_accounts, agent_keystore, tmp_path, initialize_message_buff yield @pytest.fixture - def agent_hotload(self, sync_accounts, agent_keystore, agent_keystore_path, tmp_path, initialize_message_buffer_program, agent_config): + def agent_hotload(self, sync_accounts, agent_keystore_path, tmp_path, initialize_message_buffer_program, agent_config): """ Spawns an agent without a publish keypair, used for keypair hotloading testing """ @@ -560,11 +515,11 @@ async def test_update_price_simple(self, client: PythAgentClient): # Send an "update_price" request await client.update_price(price_account, 42, 2, "trading") - time.sleep(2) + time.sleep(5) # Send another "update_price" request to trigger aggregation await client.update_price(price_account, 81, 1, "trading") - time.sleep(2) + time.sleep(5) # Confirm that the price account has been updated with the values from the first "update_price" request final_product_state = await client.get_product(product_account) @@ -726,44 +681,6 @@ async def test_publish_forever(self, client: PythAgentClient, tmp_path): await client.update_price(price_account, 47, 2, "trading") time.sleep(1) - @pytest.mark.asyncio - async def test_agent_migrate_config(self, - agent_keystore, - agent_legacy_config, - agent_migrate_config_binary, - client_no_spawn: PythAgentClient, - initialize_message_buffer_program, - sync_accounts, - tmp_path, - ): - os.environ["RUST_BACKTRACE"] = "full" - os.environ["RUST_LOG"] = "debug" - - # Migrator must run successfully (run() raises on error) - new_config = self.run(f"{agent_migrate_config_binary} -c {agent_legacy_config}").stdout.strip() - - LOGGER.debug(f"Successfully migrated legacy config to:\n{new_config}") - - # Overwrite legacy config with the migrated version. 
-        #
-        # NOTE: assumes 'w' erases the file before access)
-        with open(agent_legacy_config, 'w') as f:
-            f.write(new_config)
-            f.flush()
-
-        self.run("cargo build --release --bin agent")
-
-        log_dir = os.path.join(tmp_path, "agent_logs")
-
-        # We start the agent manually to pass it the updated legacy config
-        with self.spawn(f"../target/release/agent --config {agent_legacy_config}", log_dir=log_dir):
-            time.sleep(3)
-            await client_no_spawn.connect()
-
-            # Continue with the simple test case, which must succeed
-            await self.test_update_price_simple(client_no_spawn)
-
-        await client_no_spawn.close()
-
     @pytest.mark.asyncio
     async def test_agent_respects_market_hours(self, client: PythAgentClient):
         '''
@@ -784,13 +701,13 @@ async def test_agent_respects_market_hours(self, client: PythAgentClient):
 
         # Send an "update_price" request
         await client.update_price(price_account, 42, 2, "trading")
-        time.sleep(2)
+        time.sleep(5)
 
         # Send another update_price request to "trigger" aggregation
         # (aggregation would happen if market hours were to fail, but
         # we want to catch that happening if there's a problem)
         await client.update_price(price_account, 81, 1, "trading")
-        time.sleep(2)
+        time.sleep(5)
 
         # Confirm that the price account has not been updated
         final_product_state = await client.get_product(product_account)
@@ -819,13 +736,13 @@ async def test_agent_respects_holiday_hours(self, client: PythAgentClient):
 
         # Send an "update_price" request
         await client.update_price(price_account, 42, 2, "trading")
-        time.sleep(2)
+        time.sleep(5)
 
         # Send another update_price request to "trigger" aggregation
         # (aggregation would happen if market hours were to fail, but
         # we want to catch that happening if there's a problem)
         await client.update_price(price_account, 81, 1, "trading")
-        time.sleep(2)
+        time.sleep(5)
 
         # Confirm that the price account has not been updated
         final_product_state = await client.get_product(product_account)
@@ -861,7 +778,7 @@ async def test_agent_respects_publish_interval(self, client: PythAgentClient):
         # (aggregation would happen if publish interval were to fail, but
         # we want to catch that happening if there's a problem)
         await client.update_price(price_account, 81, 1, "trading")
-        time.sleep(2)
+        time.sleep(5)
 
         # Confirm that the price account has not been updated
         final_product_state = await client.get_product(product_account)
@@ -875,7 +792,7 @@ async def test_agent_respects_publish_interval(self, client: PythAgentClient):
         # Send another update_price request to "trigger" aggregation
         # Now it is after the publish interval, so the price should be updated
         await client.update_price(price_account, 81, 1, "trading")
-        time.sleep(2)
+        time.sleep(5)
 
         # Confirm that the price account has been updated
         final_product_state = await client.get_product(product_account)
diff --git a/scripts/init_key_store.sh b/scripts/init_key_store.sh
deleted file mode 100755
index c804f014..00000000
--- a/scripts/init_key_store.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-set -e
-
-# This script initializes the keystore directory used by the agent binary.
-
-KENV=$1 # The environment targeted
-KDIR=$2 # The directory of the key store
-
-case $KENV in
-  mainnet)
-    MAP_KEY=AHtgzX45WTKfkPG53L6WYhGEXwQkN1BVknET3sVsLL8J
-    PGM_KEY=FsJ3A3u2vn5cTVofAjvy6y5kwABJAqYWpe4975bi2epH
-    ;;
-  devnet)
-    MAP_KEY=BmA9Z6FjioHJPpjT39QazZyhDRUdZy2ezwx4GiDdE2u2
-    PGM_KEY=gSbePebfvPy7tRqimPoVecS2UsBvYv46ynrzWocc92s
-    ;;
-  testnet)
-    MAP_KEY=AFmdnt9ng1uVxqCmqwQJDAYC5cKTkw8gJKSM5PnzuF6z
-    PGM_KEY=8tfDNiaEyrV6Q1U4DEXrEigs9DoDtkugzFbybENEbCDz
-    ;;
-  localnet)
-    MAP_KEY=BTJKZngp3vzeJiRmmT9PitQH4H29dhQZ1GNhxFfDi4kw
-    PGM_KEY=BujGr9ChcuaCJhxeFEvGvaCFTxSV1CUCSVHL1SVFpU4i
-    ;;
-  *)
-    echo "Unknown environment. Please use: mainnet, devnet, testnet, localnet"
-    exit 1;
-esac
-
-if [ -z "$KDIR" ] ; then
-  KDIR=$HOME/.pythd
-fi
-
-PKEY_FILE=$KDIR/publish_key_pair.json
-if [ ! -f $PKEY_FILE ] ; then
-  echo "cannot find $PKEY_FILE"
-  exit 1
-fi
-
-echo $PGM_KEY > $KDIR/program_key.json
-chmod 0400 $KDIR/program_key.json
-echo $MAP_KEY > $KDIR/mapping_key.json
-chmod 0400 $KDIR/mapping_key.json
-chmod 0700 $KDIR
diff --git a/src/agent/services/oracle.rs b/src/agent/services/oracle.rs
index c82b98fb..b64dbe4c 100644
--- a/src/agent/services/oracle.rs
+++ b/src/agent/services/oracle.rs
@@ -60,7 +60,7 @@ where
             config.clone(),
             network,
             state.clone(),
-            key_store.mapping_key,
+            key_store.pyth_oracle_program_key,
             key_store.publish_keypair,
             key_store.pyth_price_store_program_key,
             config.oracle.max_lookup_batch_size,
@@ -152,13 +152,13 @@ where
     Ok(())
 }
 
-/// On poll lookup all Pyth Mapping/Product/Price accounts and sync.
+/// On poll, look up all Pyth Product/Price accounts and sync.
 #[instrument(skip(config, publish_keypair, state))]
 async fn poller<S>(
     config: Config,
     network: Network,
     state: Arc<S>,
-    mapping_key: Pubkey,
+    oracle_program_key: Pubkey,
     publish_keypair: Option<Keypair>,
     pyth_price_store_program_key: Option<Pubkey>,
     max_lookup_batch_size: usize,
@@ -183,7 +183,7 @@ async fn poller<S>(
             Oracle::poll_updates(
                 &*state,
                 network,
-                mapping_key,
+                oracle_program_key,
                 publish_keypair.as_ref(),
                 pyth_price_store_program_key,
                 &client,
diff --git a/src/agent/solana.rs b/src/agent/solana.rs
index 5ca207c6..8336d8b1 100644
--- a/src/agent/solana.rs
+++ b/src/agent/solana.rs
@@ -103,12 +103,6 @@ pub mod key_store {
             default
         )]
         pub pyth_price_store_program_key: Option<Pubkey>,
-        /// The public key of the root mapping account
-        #[serde(
-            serialize_with = "pubkey_string_ser",
-            deserialize_with = "pubkey_string_de"
-        )]
-        pub mapping_key: Pubkey,
         /// The public key of the accumulator program.
         #[serde(
             serialize_with = "opt_pubkey_string_ser",
             deserialize_with = "opt_pubkey_string_de",
             default
         )]
@@ -127,8 +121,6 @@ pub mod key_store {
         pub pyth_oracle_program_key: Pubkey,
         /// Public key of the pyth-price-store program
         pub pyth_price_store_program_key: Option<Pubkey>,
-        /// Public key of the root mapping account
-        pub mapping_key: Pubkey,
         /// Public key of the accumulator program (if provided)
         pub accumulator_key: Option<Pubkey>,
     }
@@ -151,7 +143,6 @@ pub mod key_store {
                 publish_keypair,
                 pyth_oracle_program_key: config.pyth_oracle_program_key,
                 pyth_price_store_program_key: config.pyth_price_store_program_key,
-                mapping_key: config.mapping_key,
                 accumulator_key: config.accumulator_key,
             })
         }
diff --git a/src/agent/state/oracle.rs b/src/agent/state/oracle.rs
index 526545fd..5d6b02a5 100644
--- a/src/agent/state/oracle.rs
+++ b/src/agent/state/oracle.rs
@@ -20,10 +20,8 @@ use {
     },
     pyth_price_store::instruction::PUBLISHER_CONFIG_SEED,
     pyth_sdk_solana::state::{
-        load_mapping_account,
         load_product_account,
         GenericPriceAccount,
-        MappingAccount,
         PriceComp,
         PythnetPriceAccount,
         SolanaPriceAccount,
@@ -132,7 +130,6 @@ impl std::ops::Deref for PriceEntry {
 
 #[derive(Default, Debug, Clone)]
 pub struct Data {
-    pub mapping_accounts: HashMap<Pubkey, MappingAccount>,
     pub product_accounts: HashMap<Pubkey, ProductEntry>,
     pub price_accounts: HashMap<Pubkey, PriceEntry>,
     /// publisher => {their permissioned price accounts => price publishing metadata}
@@ -194,7 +191,7 @@ pub trait Oracle {
     async fn poll_updates(
         &self,
         network: Network,
-        mapping_key: Pubkey,
+        oracle_program_key: Pubkey,
         publish_keypair: Option<&Keypair>,
         pyth_price_store_program_key: Option<Pubkey>,
         rpc_client: &RpcClient,
@@ -269,20 +266,16 @@ where
     async fn poll_updates(
         &self,
         network: Network,
-        mapping_key: Pubkey,
+        oracle_program_key: Pubkey,
         publish_keypair: Option<&Keypair>,
         pyth_price_store_program_key: Option<Pubkey>,
         rpc_client: &RpcClient,
         max_lookup_batch_size: usize,
     ) -> Result<()> {
         let mut publisher_permissions = HashMap::new();
-        let mapping_accounts = fetch_mapping_accounts(rpc_client, mapping_key).await?;
-        let (product_accounts, price_accounts) = fetch_product_and_price_accounts(
-            rpc_client,
-            max_lookup_batch_size,
-            mapping_accounts.values(),
-        )
-        .await?;
+        let (product_accounts, price_accounts) =
+            fetch_product_and_price_accounts(rpc_client, oracle_program_key, max_lookup_batch_size)
+                .await?;
 
         for (price_key, price_entry) in price_accounts.iter() {
             for component in price_entry.comp {
@@ -337,7 +330,6 @@ where
         }
 
         let new_data = Data {
-            mapping_accounts,
             product_accounts,
             price_accounts,
             publisher_permissions,
@@ -412,57 +404,109 @@ async fn fetch_publisher_buffer_key(
 }
 
 #[instrument(skip(rpc_client))]
-async fn fetch_mapping_accounts(
-    rpc_client: &RpcClient,
-    mapping_account_key: Pubkey,
-) -> Result<HashMap<Pubkey, MappingAccount>> {
-    let mut accounts = HashMap::new();
-    let mut account_key = mapping_account_key;
-    while account_key != Pubkey::default() {
-        let account = *load_mapping_account(
-            &rpc_client
-                .get_account_data(&account_key)
-                .await
-                .with_context(|| format!("load mapping account {}", account_key))?,
-        )?;
-        accounts.insert(account_key, account);
-        account_key = account.next;
-    }
-    Ok(accounts)
-}
-
-#[instrument(skip(rpc_client, mapping_accounts))]
-async fn fetch_product_and_price_accounts<'a, A>(
+async fn fetch_product_and_price_accounts(
     rpc_client: &RpcClient,
+    oracle_program_key: Pubkey,
     max_lookup_batch_size: usize,
-    mapping_accounts: A,
-) -> Result<(HashMap<Pubkey, ProductEntry>, HashMap<Pubkey, PriceEntry>)>
-where
-    A: IntoIterator<Item = &'a MappingAccount>,
-{
-    let mut product_keys = vec![];
-
-    // Get all product keys
-    for mapping_account in mapping_accounts {
-        for account_key in mapping_account
-            .products
-            .iter()
-            .filter(|pubkey| **pubkey != Pubkey::default())
-        {
-            product_keys.push(*account_key);
-        }
-    }
-
+) -> Result<(HashMap<Pubkey, ProductEntry>, HashMap<Pubkey, PriceEntry>)> {
     let mut product_entries = HashMap::new();
     let mut price_entries = HashMap::new();
 
-    // Lookup products and their prices using the configured batch size
-    for product_key_batch in product_keys.as_slice().chunks(max_lookup_batch_size) {
-        let (mut batch_products, mut batch_prices) =
-            fetch_batch_of_product_and_price_accounts(rpc_client, product_key_batch).await?;
+    let oracle_accounts = rpc_client.get_program_accounts(&oracle_program_key).await?;
+
+    // Go over all the product accounts and partially fill the product entries. The product
+    // entries need to have prices inside them, which get filled in by going over all the
+    // price accounts below.
+    for (product_key, product) in oracle_accounts.iter().filter_map(|(pubkey, account)| {
+        load_product_account(&account.data)
+            .ok()
+            .map(|product| (pubkey, product))
+    }) {
+        #[allow(deprecated)]
+        let legacy_schedule: LegacySchedule = if let Some((_wsched_key, wsched_val)) =
+            product.iter().find(|(k, _v)| *k == "weekly_schedule")
+        {
+            wsched_val.parse().unwrap_or_else(|err| {
+                tracing::warn!(
+                    product_key = product_key.to_string(),
+                    weekly_schedule = wsched_val,
+                    "Oracle: Product has weekly_schedule defined but it could not be parsed. Falling back to 24/7 publishing.",
+                );
+                tracing::debug!(err = ?err, "Parsing error context.");
+                Default::default()
+            })
+        } else {
+            Default::default() // No market hours specified, meaning 24/7 publishing
+        };
+
+        let market_schedule: Option<MarketSchedule> = if let Some((_msched_key, msched_val)) =
+            product.iter().find(|(k, _v)| *k == "schedule")
+        {
+            match msched_val.parse::<MarketSchedule>() {
+                Ok(schedule) => Some(schedule),
+                Err(err) => {
+                    tracing::warn!(
+                        product_key = product_key.to_string(),
+                        schedule = msched_val,
+                        "Oracle: Product has schedule defined but it could not be parsed. Falling back to legacy schedule.",
+                    );
+                    tracing::debug!(err = ?err, "Parsing error context.");
+                    None
+                }
+            }
+        } else {
+            None
+        };
+
+        let publish_interval: Option<Duration> = if let Some((
+            _publish_interval_key,
+            publish_interval_val,
+        )) =
+            product.iter().find(|(k, _v)| *k == "publish_interval")
+        {
+            match publish_interval_val.parse::<f64>() {
+                Ok(interval) => Some(Duration::from_secs_f64(interval)),
+                Err(err) => {
+                    tracing::warn!(
+                        product_key = product_key.to_string(),
+                        publish_interval = publish_interval_val,
+                        "Oracle: Product has publish_interval defined but it could not be parsed. Falling back to None.",
+                    );
+                    tracing::debug!(err = ?err, "Parsing error context.");
+                    None
+                }
+            }
+        } else {
+            None
+        };
+
+        product_entries.insert(
+            *product_key,
+            ProductEntry {
+                account_data: *product,
+                schedule: market_schedule.unwrap_or_else(|| legacy_schedule.into()),
+                price_accounts: vec![],
+                publish_interval,
+            },
+        );
+    }
+
+    // Load the price accounts into price entries and fill in each product's price list
+    for (price_key, price) in oracle_accounts.iter().filter_map(|(pubkey, account)| {
+        PriceEntry::load_from_account(&account.data).map(|price| (pubkey, price))
+    }) {
+        if let Some(prod) = product_entries.get_mut(&price.prod) {
+            prod.price_accounts.push(*price_key);
+            price_entries.insert(*price_key, price);
+        } else {
+            tracing::warn!(
+                missing_product = price.prod.to_string(),
+                price_key = price_key.to_string(),
+                "Could not find the product entry named in this price's prod field, skipping",
+            );
+
+            continue;
+        }
     }
 
     Ok((product_entries, price_entries))
@@ -625,20 +669,6 @@ async fn fetch_batch_of_product_and_price_accounts(
 
 #[instrument(skip(data, new_data))]
 fn log_data_diff(data: &Data, new_data: &Data) {
     // Log new accounts which have been found
-    let previous_mapping_accounts = data
-        .mapping_accounts
-        .keys()
-        .cloned()
-        .collect::<HashSet<_>>();
-    tracing::info!(
-        new = ?new_data
-            .mapping_accounts
-            .keys()
-            .cloned()
-            .collect::<HashSet<_>>().difference(&previous_mapping_accounts),
-        total = data.mapping_accounts.len(),
-        "Fetched mapping accounts."
-    );
     let previous_product_accounts = data
         .product_accounts
         .keys()
diff --git a/src/bin/agent_migrate_config.rs b/src/bin/agent_migrate_config.rs
deleted file mode 100644
index 365a6245..00000000
--- a/src/bin/agent_migrate_config.rs
+++ /dev/null
@@ -1,213 +0,0 @@
-use {
-    anyhow::{
-        anyhow,
-        Context,
-        Result,
-    },
-    clap::Parser,
-    solana_sdk::pubkey::Pubkey,
-    std::{
-        fs::File,
-        io::{
-            Read,
-            Write,
-        },
-        path::PathBuf,
-        str::FromStr,
-    },
-    toml_edit::{
-        value,
-        DocumentMut,
-        Item,
-    },
-};
-
-#[derive(Parser, Debug)]
-#[command(author, version, about = "1.x.x -> 2.0.0 pyth-agent config migrator")]
-struct Args {
-    /// Config path to be migrated
-    #[arg(short, long)]
-    config: PathBuf,
-}
-
-pub fn main() -> Result<()> {
-    let args = Args::parse();
-
-    eprintln!("Loading old config from {}", args.config.display());
-
-    let mut f = File::open(args.config).context("Could not open the config file")?;
-
-    let mut old_cfg_contents = String::new();
-
-    f.read_to_string(&mut old_cfg_contents)?;
-
-    let mut doc: DocumentMut = old_cfg_contents
-        .parse()
-        .context("Could not parse config file contents as TOML")?;
-
-    let primary_network = doc
-        .get_mut("primary_network")
-        .ok_or_else(|| anyhow::anyhow!("Could not read mandatory primary_network section"))?;
-
-    eprint!("Migrating primary_network...");
-    std::io::stderr().flush()?;
-    migrate_network(primary_network)?;
-    eprintln!("OK");
-
-    if let Some(secondary_network) = doc.get_mut("secondary_network") {
-        eprint!("Migrating secondary_network...");
-        std::io::stdout().flush()?;
-        migrate_network(secondary_network)?;
-        eprintln!("OK");
-    } else {
-        eprintln!("secondary_network not defined, moving on");
-    }
-
-    eprintln!("Migration OK. Result:");
-    std::io::stderr().flush()?;
-
-    println!("{}", doc);
-
-    Ok(())
-}
-
-/// Generalized migration routine for primary/secondary_network TOML
-/// sections. v1.x.x defaults are supplied if unspecified in order to
-/// reach the file-based pubkeys on disk.
-pub fn migrate_network(network_config: &mut Item) -> Result<()> {
-    // Retrieve all key store (sub)paths or supply defaults
-    let key_store_root_path: PathBuf = {
-        let root_item = network_config
-            .get("key_store")
-            .and_then(|ks| ks.get("root_path"))
-            .cloned()
-            // v1.4.0 used PathBuf::default(), meaning current working directory, if unspecified.
-            .unwrap_or(value("."));
-
-        let root_str = root_item
-            .as_str()
-            .ok_or(anyhow!("Could not parse key_store.root_path"))?;
-
-        PathBuf::from(root_str.to_owned())
-    };
-
-    let publish_keypair_relpath: PathBuf = {
-        let publish_item = network_config
-            .get("key_store")
-            .and_then(|ks| ks.get("publish_keypair_path"))
-            .cloned()
-            .unwrap_or(value("publish_key_pair.json"));
-
-        let publish_str = publish_item
-            .as_str()
-            .ok_or(anyhow!("Could not parse key_store.publish_keypair"))?;
-
-        PathBuf::from(publish_str)
-    };
-
-    let program_key_relpath: PathBuf = {
-        let program_item = network_config
-            .get("key_store")
-            .and_then(|ks| ks.get("program_key_path"))
-            .cloned()
-            .unwrap_or(value("program_key.json"));
-
-        let program_str = program_item
-            .as_str()
-            .ok_or(anyhow!("Could not parse key_store.program_key"))?;
-
-        PathBuf::from(program_str)
-    };
-
-    let mapping_key_relpath: PathBuf = {
-        let mapping_item = network_config
-            .get("key_store")
-            .and_then(|ks| ks.get("mapping_key_path"))
-            .cloned()
-            .unwrap_or(value("mapping_key.json"));
-
-        let mapping_str = mapping_item
-            .as_str()
-            .ok_or(anyhow!("Could not parse key_store.mapping_key"))?;
-
-        PathBuf::from(mapping_str)
-    };
-
-    let accumulator_key_relpath: Option<PathBuf> = {
-        let maybe_item = network_config
-            .get("key_store")
-            .and_then(|ks| ks.get("accumulator_key_path"));
-
-        match maybe_item {
-            Some(item) => {
-                let item_str = item.as_str().ok_or(anyhow!(
-                    "Could not parse existing key_store.accumulator_key_path"
-                ))?;
-                Some(PathBuf::from(item_str))
-            }
-            None => None,
-        }
-    };
-
-    // We're done reading legacy key store values, remove the
-    // subsection from network config if present.
-    if let Some(ks_table_like) = network_config
-        .get_mut("key_store")
-        .and_then(|ks| ks.as_table_like_mut())
-    {
-        ks_table_like.clear();
-    }
-
-    // Attach publish keypair path to legacy key store root path
-    let mut publish_keypair_path = key_store_root_path.clone();
-    publish_keypair_path.push(publish_keypair_relpath);
-
-    // Extract pubkeys from legacy file paths for other key store values
-    let mut program_key_path = key_store_root_path.clone();
-    program_key_path.push(program_key_relpath);
-    let mut program_key_str = String::new();
-    File::open(&program_key_path)
-        .context(format!(
-            "Could not open program key file at {}",
-            program_key_path.display()
-        ))?
-        .read_to_string(&mut program_key_str)?;
-    let program_key =
-        Pubkey::from_str(program_key_str.trim()).context("Could not parse program key")?;
-
-    let mut mapping_key_path = key_store_root_path.clone();
-    mapping_key_path.push(mapping_key_relpath);
-    let mut mapping_key_str = String::new();
-    File::open(mapping_key_path)
-        .context("Could not open mapping key file")?
-        .read_to_string(&mut mapping_key_str)?;
-    let mapping_key =
-        Pubkey::from_str(mapping_key_str.trim()).context("Could not parse mapping key")?;
-
-    let accumulator_key = if let Some(relpath) = accumulator_key_relpath {
-        let mut accumulator_key_path = key_store_root_path.clone();
-        accumulator_key_path.push(relpath);
-        let mut accumulator_key_str = String::new();
-        File::open(accumulator_key_path)
-            .context("Could not open accumulator key file")?
-            .read_to_string(&mut accumulator_key_str)?;
-        let accumulator_key = Pubkey::from_str(accumulator_key_str.trim())
-            .context("Could not parse accumulator key")?;
-
-        Some(accumulator_key)
-    } else {
-        None
-    };
-
-    // Inline new key store pubkeys in the section
-    network_config["key_store"]["publish_keypair_path"] =
-        value(publish_keypair_path.display().to_string());
-    network_config["key_store"]["program_key"] = value(program_key.to_string());
-    network_config["key_store"]["mapping_key"] = value(mapping_key.to_string());
-
-    if let Some(k) = accumulator_key {
-        network_config["key_store"]["accumulator_key"] = value(k.to_string());
-    }
-
-    Ok(())
-}
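In short: instead of walking the linked list of mapping accounts, the poller now issues a single `get_program_accounts` call scoped to the oracle program and classifies the returned accounts into products and prices, which is why `key_store.mapping_key` disappears from every sample config, the keystore init script, and the long-deprecated 1.x config migrator. A minimal sketch of that discovery flow for anyone experimenting outside the agent — the helper name `scan_oracle_accounts` is illustrative and not part of this change, and it assumes the `solana-client` and `pyth-sdk-solana` crates the repo already depends on:

```rust
use anyhow::Result;
use pyth_sdk_solana::state::load_product_account;
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

/// Sketch of the new discovery model: one owner-scoped account scan
/// replaces the old walk over the chain of mapping accounts.
async fn scan_oracle_accounts(rpc: &RpcClient, oracle_program_key: Pubkey) -> Result<()> {
    // Every product and price account is owned by the oracle program,
    // so a single get_program_accounts call returns all of them.
    let accounts = rpc.get_program_accounts(&oracle_program_key).await?;

    // Product accounts identify themselves via their on-chain header;
    // load_product_account simply fails on everything else.
    let products = accounts
        .iter()
        .filter(|(_pubkey, account)| load_product_account(&account.data).is_ok())
        .count();

    println!("{products} product accounts among {} total", accounts.len());
    Ok(())
}
```

The trade-off is one heavier RPC request in exchange for dropping the per-mapping-account round trips and the `mapping_key` configuration knob; operators upgrading to 2.12.0 only need to delete `key_store.mapping_key` from their configs, as the sample files above show.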