From db3f86de719c6dc87419ed4adbb4a522579722e3 Mon Sep 17 00:00:00 2001 From: Thomas Braun <38082993+tbraun96@users.noreply.github.com> Date: Mon, 5 Jun 2023 15:33:40 -0400 Subject: [PATCH] [WIP] Signing Manager V2 (#577) Co-authored-by: Thomas Braun * add better logging: split offline and voting, include unsigned proposal hash * Update dkg-gadget/src/signing_manager/mod.rs * add pipeline tests for harness * point to temp dir, do not clean * remove backoff logic * Remove unused code * add message enqueueing mechanism * allow message enqueuing for initialized protocols * add filtering of messages for async protocols based on block number * [WIP] Signing Manager V2: Fix message loss bug (#620) * move debug logger to dkg-logging, add checkpoint-based logging (based on env var) * get 0 message loss for j=10,k=11 * get 1000 running in parallel successfully * eradicate message loss entirely * adjust parallelism in job manager to j=4 * adjust parameters, remove bottlenecks, improve stalling logic * update multi-party ecdsa --------- Co-authored-by: drewstone Co-authored-by: 1xstj <106580853+1xstj@users.noreply.github.com> Co-authored-by: shekohex --- .github/workflows/checks.yml | 3 - .github/workflows/coverage.yml | 6 +- .github/workflows/e2e.yml | 6 - .github/workflows/harness_stress_tests.yml | 85 ++ .github/workflows/publish-rust-docs.yml | 7 +- .github/workflows/tests.yml | 5 - Cargo.lock | 961 +++++++++--------- Cargo.toml | 7 +- README.md | 2 +- dkg-gadget/Cargo.toml | 5 +- .../async_protocols/blockchain_interface.rs | 40 +- dkg-gadget/src/async_protocols/incoming.rs | 142 ++- .../src/async_protocols/keygen/handler.rs | 4 +- .../async_protocols/keygen/state_machine.rs | 1 - dkg-gadget/src/async_protocols/mod.rs | 112 +- dkg-gadget/src/async_protocols/remote.rs | 102 +- .../src/async_protocols/sign/handler.rs | 130 ++- .../src/async_protocols/sign/state_machine.rs | 24 +- .../src/async_protocols/state_machine.rs | 3 +- .../async_protocols/state_machine_wrapper.rs | 106 +- dkg-gadget/src/async_protocols/test_utils.rs | 5 +- dkg-gadget/src/db/offchain_storage.rs | 2 +- dkg-gadget/src/debug_logger.rs | 111 -- dkg-gadget/src/gossip_engine/mod.rs | 32 +- dkg-gadget/src/gossip_engine/network.rs | 261 +---- .../gossip_messages/misbehaviour_report.rs | 125 ++- .../src/gossip_messages/public_key_gossip.rs | 21 +- dkg-gadget/src/keystore.rs | 2 +- dkg-gadget/src/lib.rs | 72 +- dkg-gadget/src/signing_manager/mod.rs | 301 ++++++ .../src/signing_manager/work_manager.rs | 302 ++++++ dkg-gadget/src/storage/proposals.rs | 20 +- dkg-gadget/src/utils.rs | 38 + dkg-gadget/src/worker.rs | 863 ++++++---------- dkg-logging/Cargo.toml | 9 +- dkg-logging/src/debug_logger.rs | 438 ++++++++ dkg-logging/src/lib.rs | 2 + dkg-mock-blockchain/Cargo.toml | 2 + dkg-mock-blockchain/src/data_types.rs | 6 +- .../src/mock_blockchain_config.rs | 2 + dkg-mock-blockchain/src/server.rs | 395 ++++++- dkg-primitives/src/types.rs | 34 +- dkg-runtime-primitives/src/lib.rs | 20 +- dkg-runtime-primitives/src/utils.rs | 7 + dkg-test-orchestrator/README.md | 7 +- dkg-test-orchestrator/config/test_n3t2.toml | 5 +- dkg-test-orchestrator/src/client.rs | 41 +- dkg-test-orchestrator/src/dummy_api.rs | 27 +- .../src/in_memory_gossip_engine.rs | 122 +-- dkg-test-orchestrator/src/main.rs | 158 ++- dkg-test-suite/scripts/submitProposals.ts | 4 +- .../tests/e2e/keygenChanges.test.ts | 5 +- .../tests/e2e/misbehaviourReporting.test.ts | 5 +- .../tests/updateAnchorProposal.test.ts | 2 +- dkg-test-suite/tests/utils/setup.ts | 12 +- 
dkg-test-suite/tests/utils/util.ts | 6 +- pallets/dkg-proposal-handler/src/lib.rs | 30 +- scripts/harness_stress_test.sh | 5 + scripts/run-standalone.sh | 25 +- standalone/node/src/cli.rs | 3 +- standalone/node/src/command.rs | 10 +- standalone/node/src/service.rs | 12 +- standalone/runtime/src/lib.rs | 2 +- 63 files changed, 3373 insertions(+), 1929 deletions(-) create mode 100644 .github/workflows/harness_stress_tests.yml delete mode 100644 dkg-gadget/src/debug_logger.rs create mode 100644 dkg-gadget/src/signing_manager/mod.rs create mode 100644 dkg-gadget/src/signing_manager/work_manager.rs create mode 100644 dkg-logging/src/debug_logger.rs create mode 100755 scripts/harness_stress_test.sh diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index a424ba201..92f603321 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -4,9 +4,6 @@ on: push: branches: [master] pull_request: - branches: - - "**" - workflow_dispatch: env: diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index e8a502585..3237bddc7 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -4,15 +4,13 @@ on: push: branches: [ master ] pull_request: - branches: [ master ] - workflow_dispatch: env: CARGO_REGISTRIES_CRATES_IO_PROTOCOL: git jobs: - # code coverage job; moved to own workflow file due to running out of disk space. + # code coverage job; moved to own workflow file due to running out of disk space. # The runner will stop working when the machine runs out of disk space. Free space left: 72 MB coverage: name: coverage @@ -46,7 +44,7 @@ jobs: - name: Run Tarpaulin run : cargo tarpaulin --out Xml -p pallet-dkg-metadata -p pallet-dkg-proposal-handler -p pallet-dkg-proposals -p dkg-primitives -p dkg-runtime-primitives --locked --jobs 16 --timeout 3600 --skip-clean -- --test-threads 16 - + - name: Upload CodeCov uses: codecov/codecov-action@v2 diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 3622103ac..1bcc14abb 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -4,8 +4,6 @@ on: push: branches: [master] pull_request: - branches: [master] - workflow_dispatch: env: @@ -74,8 +72,6 @@ jobs: run: dvc pull - name: Build Standalone Node for E2E Tests (Release) - env: - CARGO_INCREMENTAL: 1 run: cargo build --release -p dkg-standalone-node --features integration-tests - name: Run E2E Tests @@ -152,8 +148,6 @@ jobs: run: dvc pull - name: Build Standalone Node for Integration Tests (Release) - env: - CARGO_INCREMENTAL: 1 run: cargo build --release -p dkg-standalone-node - name: Run Proposals E2E Tests diff --git a/.github/workflows/harness_stress_tests.yml b/.github/workflows/harness_stress_tests.yml new file mode 100644 index 000000000..b680ad0bc --- /dev/null +++ b/.github/workflows/harness_stress_tests.yml @@ -0,0 +1,85 @@ +name: harness stress tests + +on: + push: + branches: [master] + pull_request: + workflow_dispatch: + +env: + CARGO_REGISTRIES_CRATES_IO_PROTOCOL: git + RUST_LOG: "dkg=trace" + +jobs: + # dkg-substrate integration tests + harness: + name: harness stress tests + runs-on: ubuntu-latest + steps: + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.9.1 + with: + access_token: ${{ github.token }} + + - name: Checkout Code + uses: actions/checkout@v3 + + - name: Configure sccache + run: | + echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV + echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV + + - name: Run sccache-cache + uses: 
mozilla-actions/sccache-action@v0.0.3 + + - name: Restore Cache + if: always() + uses: actions/cache/restore@v3 + with: + path: | + ~/.cargo/registry + target/release + target/debug + key: ${{ runner.os }}-cargo-index-${{ github.ref_name }}-harness-stress-tests + + - name: Install toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: nightly + + - name: Setup DVC + uses: iterative/setup-dvc@v1 + + - name: Populate fixtures + run: dvc pull + + - name: Install Protobuf + run: sudo apt-get install protobuf-compiler + + - name: Setup DVC + uses: iterative/setup-dvc@v1 + + - name: Populate Fixtures + run: dvc pull + + - name: t2n3 && 1 proposal per session + run: cargo run --package dkg-test-orchestrator --features=debug-tracing -- --tmp /tmp --threshold 2 --n 3 --bind 127.0.0.1:7777 --n-tests 10 -p 1 + + - name: t2n3 && 2 proposals per session + run: cargo run --package dkg-test-orchestrator --features=debug-tracing -- --tmp /tmp --threshold 2 --n 3 --bind 127.0.0.1:7777 --n-tests 10 -p 2 + + - name: t3n5 && 1 proposal per session + run: cargo run --package dkg-test-orchestrator --features=debug-tracing -- --tmp /tmp --threshold 3 --n 5 --bind 127.0.0.1:7777 --n-tests 10 -p 1 + + - name: t3n5 && 2 proposals per session + run: cargo run --package dkg-test-orchestrator --features=debug-tracing -- --tmp /tmp --threshold 3 --n 5 --bind 127.0.0.1:7777 --n-tests 10 -p 2 + + - name: Save Cache + if: ${{ !cancelled() }} + uses: actions/cache/save@v3 + with: + path: | + ~/.cargo/registry + target/release + target/debug + key: ${{ runner.os }}-cargo-index-${{ github.ref_name }}-harness-stress-tests diff --git a/.github/workflows/publish-rust-docs.yml b/.github/workflows/publish-rust-docs.yml index 9b52850b0..c44aac466 100644 --- a/.github/workflows/publish-rust-docs.yml +++ b/.github/workflows/publish-rust-docs.yml @@ -5,8 +5,7 @@ on: branches: - master pull_request: - branches: - - master + env: CARGO_REGISTRIES_CRATES_IO_PROTOCOL: git @@ -17,7 +16,7 @@ jobs: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 - + - name: Install apt dependencies run: | sudo apt-get update && \ @@ -30,7 +29,7 @@ jobs: toolchain: nightly - name: Build documentation - run: cargo doc --no-deps + run: cargo doc --no-deps - name: Publish documentation run: | cd target/doc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 2bafcb39b..ca58b9741 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -4,9 +4,6 @@ on: push: branches: [master] pull_request: - branches: - - "**" # matches every branch - workflow_dispatch: env: @@ -61,8 +58,6 @@ jobs: run: sudo apt-get install protobuf-compiler - name: Run tests - env: - CARGO_INCREMENTAL: 1 run: cargo nextest run - name: Save Cache diff --git a/Cargo.lock b/Cargo.lock index 735cde78f..f38e66034 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -57,9 +57,9 @@ dependencies = [ [[package]] name = "aead" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c192eb8f11fc081b0fe4259ba5af04217d4e0faddd02417310a927911abd7c8" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", "generic-array 0.14.7", @@ -119,7 +119,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" dependencies = [ - "aead 0.5.1", + "aead 0.5.2", "aes 0.8.2", "cipher 0.4.4", "ctr 0.9.2", @@ -153,7 +153,7 @@ version = "0.7.6" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "once_cell", "version_check", ] @@ -165,7 +165,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if", - "getrandom 0.2.8", + "getrandom 0.2.9", "once_cell", "version_check", ] @@ -179,6 +179,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "aho-corasick" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" +dependencies = [ + "memchr", +] + [[package]] name = "android_system_properties" version = "0.1.5" @@ -199,49 +208,58 @@ dependencies = [ [[package]] name = "anstream" -version = "0.2.6" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "342258dd14006105c2b75ab1bd7543a03bdf0cfc94383303ac212a04939dff6f" +checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" dependencies = [ "anstyle", "anstyle-parse", + "anstyle-query", "anstyle-wincon", - "concolor-override", - "concolor-query", + "colorchoice", "is-terminal", "utf8parse", ] [[package]] name = "anstyle" -version = "0.3.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23ea9e81bd02e310c216d080f6223c179012256e5151c41db88d12c88a1684d2" +checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" [[package]] name = "anstyle-parse" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7d1bb534e9efed14f3e5f44e7dd1a4f709384023a4165199a4241e18dff0116" +checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" dependencies = [ "utf8parse", ] +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", +] + [[package]] name = "anstyle-wincon" -version = "0.2.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3127af6145b149f3287bb9a0d10ad9c5692dba8c53ad48285e5bec4063834fa" +checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" dependencies = [ "anstyle", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "anyhow" -version = "1.0.70" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "approx" @@ -351,9 +369,9 @@ dependencies = [ [[package]] name = "asn1_der" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" +checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" [[package]] name = "async-io" @@ -369,7 +387,7 @@ dependencies = [ "log", "parking", "polling", - "rustix 0.37.5", + "rustix 0.37.18", "slab", "socket2", "waker-fn", @@ -386,9 +404,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.4" +version = "0.3.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -397,13 +415,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.15", ] [[package]] @@ -414,7 +432,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -441,9 +459,9 @@ dependencies = [ [[package]] name = "atomic-waker" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" +checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] name = "atty" @@ -458,9 +476,9 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a8c1df849285fbacd587de7818cc7d13be6cd2cbcd47a04fb1801b0e2706e33" +checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", "proc-macro2", @@ -493,7 +511,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.6.2", "object 0.30.3", "rustc-demangle", ] @@ -546,7 +564,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "hash-db", "log", @@ -697,9 +715,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "bounded-collections" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a071c348a5ef6da1d3a87166b408170b46002382b1dda83992b5c2208cefb370" +checksum = "e3888522b497857eb606bf51695988dba7096941822c1bcf676e3a929a9ae7a0" dependencies = [ "log", "parity-scale-codec", @@ -748,9 +766,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "9b1ce199063694f33ffb7dd4e0ee620741495c32833cde5aa08f02a0bf96f0c8" [[package]] name = "byte-slice-cast" @@ -802,9 +820,9 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.15.3" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a1ec454bc3eead8719cb56e15dbbfecdbc14e4b3a3ae4936cc6e31f5fc0d07" +checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", @@ -982,9 +1000,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.2.1" +version = "4.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"046ae530c528f252094e4a77886ee1374437744b2bff1497aa898bbddbbb29b3" +checksum = "34d21f9bf1b425d2968943631ec91202fe5e837264063503708b83013f8fc938" dependencies = [ "clap_builder", "clap_derive", @@ -993,9 +1011,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.2.1" +version = "4.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "223163f58c9a40c3b0a43e1c4b50a9ce09f007ea2cb1ec258a687945b4b7929f" +checksum = "914c8c79fb560f238ef6429439a30023c862f7a28e688c58f7203f12b29970bd" dependencies = [ "anstream", "anstyle", @@ -1013,7 +1031,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -1041,6 +1059,12 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "comfy-table" version = "6.1.4" @@ -1063,26 +1087,11 @@ dependencies = [ "ryu", ] -[[package]] -name = "concolor-override" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a855d4a1978dc52fb0536a04d384c2c0c1aa273597f08b77c8c4d3b2eec6037f" - -[[package]] -name = "concolor-query" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d11d52c3d7ca2e6d0040212be9e4dbbcd78b6447f535b6b561f449427944cf" -dependencies = [ - "windows-sys 0.45.0", -] - [[package]] name = "concurrent-queue" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] @@ -1117,9 +1126,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "core2" @@ -1141,27 +1150,27 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" +checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] [[package]] name = "cranelift-bforest" -version = "0.93.1" +version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7379abaacee0f14abf3204a7606118f0465785252169d186337bcb75030815a" +checksum = "2bc42ba2e232e5b20ff7dc299a812d53337dadce9a7e39a238e6a5cb82d2e57b" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.93.1" +version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9489fa336927df749631f1008007ced2871068544f40a202ce6d93fbf2366a7b" +checksum = "253531aca9b6f56103c9420369db3263e784df39aa1c90685a1f69cfbba0623e" dependencies = [ "arrayvec 0.7.2", "bumpalo", @@ -1180,33 +1189,33 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.93.1" +version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05bbb67da91ec721ed57cef2f7c5ef7728e1cd9bde9ffd3ef8601022e73e3239" +checksum = 
"72f2154365e2bff1b1b8537a7181591fdff50d8e27fa6e40d5c69c3bad0ca7c8" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.93.1" +version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418ecb2f36032f6665dc1a5e2060a143dbab41d83b784882e97710e890a7a16d" +checksum = "687e14e3f5775248930e0d5a84195abef8b829958e9794bf8d525104993612b4" [[package]] name = "cranelift-entity" -version = "0.93.1" +version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cf583f7b093f291005f9fb1323e2c37f6ee4c7909e39ce016b2e8360d461705" +checksum = "f42ea692c7b450ad18b8c9889661505d51c09ec4380cf1c2d278dbb2da22cae1" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.93.1" +version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b66bf9e916f57fbbd0f7703ec6286f4624866bf45000111627c70d272c8dda1" +checksum = "8483c2db6f45fe9ace984e5adc5d058102227e4c62e5aa2054e16b0275fd3a6e" dependencies = [ "cranelift-codegen", "log", @@ -1216,15 +1225,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.93.1" +version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "649782a39ce99798dd6b4029e2bb318a2fbeaade1b4fa25330763c10c65bc358" +checksum = "e9793158837678902446c411741d87b43f57dadfb944f2440db4287cda8cbd59" [[package]] name = "cranelift-native" -version = "0.93.1" +version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "937e021e089c51f9749d09e7ad1c4f255c2f8686cb8c3df63a34b3ec9921bc41" +checksum = "72668c7755f2b880665cb422c8ad2d56db58a88b9bebfef0b73edc2277c13c49" dependencies = [ "cranelift-codegen", "libc", @@ -1233,9 +1242,9 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.93.1" +version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d850cf6775477747c9dfda9ae23355dd70512ffebc70cf82b85a5b111ae668b5" +checksum = "3852ce4b088b44ac4e29459573943009a70d1b192c8d77ef949b4e814f656fc1" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -1273,9 +1282,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1501,7 +1510,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -1518,7 +1527,7 @@ checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -1778,6 +1787,7 @@ dependencies = [ name = "dkg-gadget" version = "0.0.1" dependencies = [ + "async-stream", "async-trait", "atomic", "auto_impl", @@ -1792,6 +1802,7 @@ dependencies = [ "hash-db", "hex", "itertools 0.10.5", + "lazy_static", "linked-hash-map", "multi-party-ecdsa", "parity-scale-codec", @@ -1818,6 +1829,7 @@ dependencies = [ "sp-runtime", "strum 0.21.0", "substrate-prometheus-endpoint", + "sync_wrapper", "thiserror", "tokio", "tokio-stream", @@ -1830,9 +1842,16 @@ dependencies = [ name = "dkg-logging" version = "0.1.0" dependencies = [ + "hex", + "lazy_static", + "parking_lot 0.12.1", + "serde", + "serde_json", + "sp-core", + "tokio", "tracing", 
"tracing-filter", - "tracing-subscriber 0.3.16", + "tracing-subscriber 0.3.17", ] [[package]] @@ -1843,6 +1862,8 @@ dependencies = [ "atomic", "bincode2", "bytes", + "dkg-logging", + "dkg-runtime-primitives", "futures", "humantime-serde", "log", @@ -1864,7 +1885,7 @@ name = "dkg-primitives" version = "0.0.1" dependencies = [ "chacha20poly1305", - "clap 4.2.1", + "clap 4.2.7", "curv-kzen", "dkg-runtime-primitives", "hex", @@ -1908,7 +1929,7 @@ dependencies = [ name = "dkg-standalone-node" version = "3.0.0" dependencies = [ - "clap 4.2.1", + "clap 4.2.7", "dkg-gadget", "dkg-logging", "dkg-primitives", @@ -2173,22 +2194,22 @@ dependencies = [ [[package]] name = "enumflags2" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e75d4cd21b95383444831539909fbb14b9dc3fdceb2a6f5d36577329a1f55ccb" +checksum = "c041f5090df68b32bcd905365fd51769c8b9d553fe87fde0b683534f10c01bd2" dependencies = [ "enumflags2_derive", ] [[package]] name = "enumflags2_derive" -version = "0.7.4" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58dc3c5e468259f19f2d46304a6b28f1c3d034442e14b322d2b850e36f6d5ae" +checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.15", ] [[package]] @@ -2225,24 +2246,13 @@ checksum = "e48c92028aaa870e83d51c64e5d4e0b6981b360c522198c23959f219a4e1b15b" [[package]] name = "errno" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" -dependencies = [ - "errno-dragonfly", - "libc", - "winapi", -] - -[[package]] -name = "errno" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2282,7 +2292,7 @@ dependencies = [ "parity-scale-codec", "rlp", "scale-info", - "sha3 0.10.6", + "sha3 0.10.7", "triehash", ] @@ -2384,9 +2394,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ace6ec7cc19c8ed33a32eaa9ea692d7faea05006b5356b9e2b668ec4bc3955" +checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" [[package]] name = "file-per-thread-logger" @@ -2400,14 +2410,14 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a3de6e8d11b22ff9edc6d916f890800597d60f8b2da1caf2955c274638d6412" +checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153" dependencies = [ "cfg-if", "libc", "redox_syscall 0.2.16", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2446,13 +2456,13 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide", + "miniz_oxide 
0.7.1", ] [[package]] @@ -2473,7 +2483,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", ] @@ -2496,7 +2506,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "frame-support-procedural", @@ -2521,12 +2531,12 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "Inflector", "array-bytes", "chrono", - "clap 4.2.1", + "clap 4.2.7", "comfy-table", "frame-benchmarking", "frame-support", @@ -2568,7 +2578,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2579,7 +2589,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2596,7 +2606,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "frame-system", @@ -2624,7 +2634,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "bitflags", "frame-metadata", @@ -2656,7 +2666,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "Inflector", "cfg-expr", @@ -2671,7 +2681,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2683,7 +2693,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "proc-macro2", "quote", @@ -2693,7 +2703,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "log", @@ -2711,7 +2721,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-support", @@ -2726,7 +2736,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "sp-api", @@ -2805,9 +2815,9 @@ checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ "fastrand", "futures-core", @@ -2826,7 +2836,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -2927,9 +2937,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "libc", @@ -2979,7 +2989,7 @@ version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" dependencies = [ - "aho-corasick", + "aho-corasick 0.7.20", "bstr", "fnv", "log", @@ -2999,9 +3009,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.16" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" +checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ "bytes", "fnv", @@ -3235,9 +3245,9 @@ dependencies = [ 
[[package]] name = "hyper" -version = "0.14.25" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes", "futures-channel", @@ -3274,16 +3284,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.55" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "716f12fbcfac6ffab0a5e9ec51d0a0ff70503742bb2dc7b99396394c9dc323f0" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows 0.47.0", + "windows 0.48.0", ] [[package]] @@ -3449,13 +3459,13 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09270fd4fa1111bc614ed2246c7ef56239a3063d5be0d1ec3b589c505d400aeb" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ "hermit-abi 0.3.1", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -3484,14 +3494,14 @@ checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "is-terminal" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "256017f749ab3117e93acb91063009e1f1bb56d03965b14c2c8df4eb02c524d8" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", - "rustix 0.37.5", - "windows-sys 0.45.0", + "rustix 0.37.18", + "windows-sys 0.48.0", ] [[package]] @@ -3689,9 +3699,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.140" +version = "0.2.142" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" +checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317" [[package]] name = "libm" @@ -3714,7 +3724,7 @@ dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.8", + "getrandom 0.2.9", "instant", "libp2p-core 0.38.0", "libp2p-dns", @@ -3775,9 +3785,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.39.1" +version = "0.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7f8b7d65c070a5a1b5f8f0510648189da08f787b8963f8e21219e0710733af" +checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2" dependencies = [ "either", "fnv", @@ -3838,18 +3848,18 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a8ea433ae0cea7e3315354305237b9897afe45278b2118a7a57ca744e70fd27" +checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1" dependencies = [ "bs58", "ed25519-dalek", "log", "multiaddr 0.17.1", "multihash 0.17.0", - "prost", "quick-protobuf", "rand 0.8.5", + "sha2 0.10.6", "thiserror", "zeroize", ] @@ -4069,7 +4079,7 @@ checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.39.1", + "libp2p-core 0.39.2", "libp2p-identity", "rcgen 0.10.0", "ring", @@ 
-4208,9 +4218,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" dependencies = [ "cc", "pkg-config", @@ -4258,9 +4268,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd550e73688e6d578f0ac2119e32b797a327631a42f9433e59d02e139c8df60d" +checksum = "b64f40e5e03e0d54f03845c8197d0291253cdbedfb1cb46b13c2c117554a9f4c" [[package]] name = "lock_api" @@ -4360,10 +4370,11 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matrixmultiply" -version = "0.3.2" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84" +checksum = "090126dc04f95dc0d1c1c91f61bdd474b3930ca064c1edc8a849da2c6cbe1e77" dependencies = [ + "autocfg 1.1.0", "rawpointer", ] @@ -4388,7 +4399,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffc89ccdc6e10d6907450f753537ebc5c5d3460d2e4e62ea74bd571db62c0f9e" dependencies = [ - "rustix 0.37.5", + "rustix 0.37.18", ] [[package]] @@ -4510,6 +4521,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + [[package]] name = "mio" version = "0.8.6" @@ -4552,7 +4572,7 @@ dependencies = [ [[package]] name = "multi-party-ecdsa" version = "0.8.2" -source = "git+https://github.com/webb-tools/multi-party-ecdsa.git#facf26da1bee74f6bf10ebfba58bc8828d74c6a9" +source = "git+https://github.com/webb-tools/multi-party-ecdsa.git#3d5926d78d9acbb12ebf94e2445e3a3afd5cee84" dependencies = [ "centipede", "curv-kzen", @@ -4629,7 +4649,7 @@ dependencies = [ "digest 0.10.6", "multihash-derive", "sha2 0.10.6", - "sha3 0.10.6", + "sha3 0.10.7", "unsigned-varint", ] @@ -4640,17 +4660,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" dependencies = [ "core2", - "digest 0.10.6", "multihash-derive", - "sha2 0.10.6", "unsigned-varint", ] [[package]] name = "multihash-derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" +checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" dependencies = [ "proc-macro-crate", "proc-macro-error", @@ -5047,7 +5065,7 @@ dependencies = [ [[package]] name = "pallet-aura" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "frame-system", @@ -5063,7 +5081,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "frame-system", @@ -5077,7 +5095,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-support", @@ -5101,7 +5119,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5121,7 +5139,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-support", @@ -5136,7 +5154,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "frame-system", @@ -5155,7 +5173,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5295,7 +5313,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5318,7 +5336,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5331,7 +5349,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-support", @@ -5354,7 
+5372,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5370,7 +5388,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-support", @@ -5390,7 +5408,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-support", @@ -5407,7 +5425,7 @@ dependencies = [ [[package]] name = "pallet-insecure-randomness-collective-flip" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "frame-system", @@ -5421,7 +5439,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-support", @@ -5438,7 +5456,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "frame-system", @@ -5455,7 +5473,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "frame-system", @@ -5476,7 +5494,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5499,7 +5517,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ 
"proc-macro-crate", "proc-macro2", @@ -5510,7 +5528,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "frame-system", @@ -5524,7 +5542,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-benchmarking", "frame-support", @@ -5542,7 +5560,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-support", "frame-system", @@ -5558,7 +5576,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -5574,7 +5592,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -5585,9 +5603,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00bfb81cf5c90a222db2fb7b3a7cbf8cc7f38dfb6647aca4d98edf8281f56ed5" +checksum = "bd4572a52711e2ccff02b4973ec7e4a5b5c23387ebbfbd6cd42b34755714cefc" dependencies = [ "blake2", "crc32fast", @@ -5644,9 +5662,9 @@ checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" [[package]] name = "parking" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" [[package]] name = "parking_lot" @@ -5689,10 +5707,13 @@ version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ + "backtrace", "cfg-if", "libc", + "petgraph", "redox_syscall 0.2.16", "smallvec", + "thread-id", "windows-sys 0.45.0", ] @@ -5746,9 +5767,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" +checksum = 
"e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" dependencies = [ "thiserror", "ucd-trie", @@ -5756,9 +5777,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" +checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb" dependencies = [ "pest", "pest_generator", @@ -5766,22 +5787,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" +checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.15", ] [[package]] name = "pest_meta" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" +checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" dependencies = [ "once_cell", "pest", @@ -5848,9 +5869,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "platforms" @@ -5921,9 +5942,9 @@ dependencies = [ [[package]] name = "polling" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg 1.1.0", "bitflags", @@ -5932,7 +5953,7 @@ dependencies = [ "libc", "log", "pin-project-lite 0.2.9", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -6032,12 +6053,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ - "once_cell", - "toml_edit", + "thiserror", + "toml 0.5.11", ] [[package]] @@ -6066,9 +6087,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.54" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e472a104799c74b514a57226160104aa483546de37e839ec50e3c2e41dd87534" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] @@ -6112,9 +6133,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", "prost-derive", @@ -6122,9 +6143,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2c828f93f5ca4826f97fedcbd3f9a536c16b12cff3dbbb4a007f932bbad95b12" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck 0.4.1", @@ -6157,9 +6178,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea9b0f8cbe5e15a8a042d030bd96668db28ecb567ec37d691971ff5731d2b1b" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", @@ -6170,9 +6191,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ "prost", ] @@ -6361,7 +6382,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -6539,7 +6560,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "redox_syscall 0.2.16", "thiserror", ] @@ -6561,7 +6582,7 @@ checksum = "8d2275aab483050ab2a7364c1a46604865ee7d6906684e08db0f090acf74f9e7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -6578,13 +6599,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.3" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ - "aho-corasick", + "aho-corasick 1.0.1", "memchr", - "regex-syntax", + "regex-syntax 0.7.1", ] [[package]] @@ -6593,7 +6614,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] @@ -6602,6 +6623,12 @@ version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" +[[package]] +name = "regex-syntax" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" + [[package]] name = "region" version = "3.0.0" @@ -6675,8 +6702,7 @@ dependencies = [ [[package]] name = "round-based" version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61d7da583ffbf4d938fb9dc60871b51769ff47e9836e323668fe2d791ca2fa06" +source = "git+https://github.com/webb-tools/round-based-protocol#959126f9f6edce16d4ee95954091b93e33a83140" dependencies = [ "async-stream", "futures", @@ -6773,9 +6799,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = 
"rustc-hash" @@ -6818,12 +6844,12 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.11" +version = "0.36.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4165c9963ab29e422d6c26fbc1d37f15bace6b2810221f9d925023480fcf0e" +checksum = "3a38f9520be93aba504e8ca974197f46158de5dcaa9fa04b57c57cd6a679d658" dependencies = [ "bitflags", - "errno 0.2.8", + "errno", "io-lifetimes", "libc", "linux-raw-sys 0.1.4", @@ -6832,16 +6858,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.5" +version = "0.37.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e78cc525325c06b4a7ff02db283472f3c042b7ff0c391f96c6d5ac6f4f91b75" +checksum = "8bbfc1d1c7c40c01715f47d71444744a81669ca84e8b63e25a55e169b1f86433" dependencies = [ "bitflags", - "errno 0.3.0", + "errno", "io-lifetimes", "libc", - "linux-raw-sys 0.3.0", - "windows-sys 0.45.0", + "linux-raw-sys 0.3.6", + "windows-sys 0.48.0", ] [[package]] @@ -6943,7 +6969,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "log", "sp-core", @@ -6954,7 +6980,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "futures", "futures-timer", @@ -6977,7 +7003,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -6993,7 +7019,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -7008,7 +7034,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -7019,11 +7045,11 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "chrono", - "clap 4.2.1", + "clap 4.2.7", "fdlimit", "futures", "libp2p", @@ -7059,7 +7085,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "fnv", "futures", @@ -7085,7 +7111,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "hash-db", "kvdb", @@ -7110,7 +7136,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "futures", @@ -7135,7 +7161,7 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "futures", @@ -7164,7 +7190,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "futures", @@ -7187,7 +7213,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "lru", "parity-scale-codec", @@ -7211,7 +7237,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -7224,7 +7250,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "log", "sc-allocator", @@ -7237,14 +7263,14 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "anyhow", "cfg-if", "libc", "log", "once_cell", - "rustix 0.36.11", + "rustix 0.36.13", "sc-allocator", "sc-executor-common", "sp-runtime-interface", @@ -7255,7 +7281,7 @@ dependencies = [ [[package]] name = 
"sc-finality-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "ahash 0.8.3", "array-bytes", @@ -7295,7 +7321,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "ansi_term", "futures", @@ -7310,7 +7336,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "async-trait", @@ -7325,7 +7351,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "async-trait", @@ -7368,7 +7394,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "cid", "futures", @@ -7387,7 +7413,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "bitflags", @@ -7413,7 +7439,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "ahash 0.8.3", "futures", @@ -7431,7 +7457,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "futures", @@ -7452,7 +7478,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "async-trait", @@ -7484,7 +7510,7 @@ dependencies = [ [[package]] name = "sc-network-test" version = "0.8.0" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "futures", @@ -7515,7 +7541,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "futures", @@ -7534,7 +7560,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "bytes", @@ -7564,7 +7590,7 @@ dependencies = [ [[package]] name = "sc-peerset" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "futures", "libp2p", @@ -7577,7 +7603,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -7586,7 +7612,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "futures", "jsonrpsee", @@ -7616,7 +7642,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -7635,7 +7661,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "http", "jsonrpsee", @@ -7650,7 +7676,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "futures", @@ -7676,7 +7702,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" 
+source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "directories", @@ -7742,7 +7768,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "log", "parity-scale-codec", @@ -7753,9 +7779,9 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ - "clap 4.2.1", + "clap 4.2.7", "futures", "log", "nix 0.26.2", @@ -7769,7 +7795,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "futures", "libc", @@ -7788,7 +7814,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "chrono", "futures", @@ -7807,7 +7833,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "ansi_term", "atty", @@ -7838,7 +7864,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -7849,7 +7875,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "futures", @@ -7876,7 +7902,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "futures", @@ -7890,7 +7916,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "backtrace", "futures", @@ -7903,9 +7929,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cfdffd972d76b22f3d7f81c8be34b2296afd3a25e0a547bd9abe340a4dbbe97" +checksum = "dfdef77228a4c05dc94211441595746732131ad7f6530c6c18f045da7b7ab937" dependencies = [ "bitvec", "cfg-if", @@ -7917,9 +7943,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" +checksum = "53012eae69e5aa5c14671942a5dd47de59d4cdcff8532a6dd0e081faf1119482" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -8152,9 +8178,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.159" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" +checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" dependencies = [ "serde_derive", ] @@ -8170,13 +8196,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.159" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" +checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -8192,9 +8218,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -8284,9 +8310,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +checksum = "54c2bb1a323307527314a36bfb73f24febb08ce2b8a554bf4ffd6f51ad15198c" dependencies = [ "digest 0.10.6", "keccak", @@ -8322,9 +8348,9 @@ dependencies = [ [[package]] name = "simba" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50582927ed6f77e4ac020c057f37a268fc6aebc29225050365aacbb9deeeddc4" +checksum = "061507c94fc6ab4ba1c9a0305018408e312e17c041eb63bef8aa726fa33aceae" dependencies = [ "approx", "num-complex", @@ -8419,7 +8445,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "hash-db", "log", @@ -8437,7 +8463,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "blake2", "proc-macro-crate", @@ -8449,7 +8475,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "scale-info", @@ -8462,7 +8488,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "integer-sqrt", "num-traits", @@ -8476,7 +8502,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "scale-info", @@ -8489,7 +8515,7 @@ dependencies = [ [[package]] name = "sp-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "lazy_static", "parity-scale-codec", @@ -8508,7 +8534,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "sp-api", @@ -8520,7 +8546,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "futures", "log", @@ -8538,7 +8564,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "futures", @@ -8556,7 +8582,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "parity-scale-codec", @@ -8574,7 +8600,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "merlin", @@ -8597,7 +8623,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "scale-info", @@ -8609,7 +8635,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "scale-info", @@ -8622,7 +8648,7 @@ dependencies = [ [[package]] name = "sp-core" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "base58", @@ -8665,13 +8691,13 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "blake2", "byteorder", "digest 0.10.6", "sha2 0.10.6", - "sha3 0.10.6", + "sha3 0.10.7", "sp-std", "twox-hash", ] @@ -8679,7 +8705,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "proc-macro2", "quote", @@ -8690,7 +8716,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -8699,7 +8725,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "proc-macro2", "quote", @@ -8709,7 +8735,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "environmental", "parity-scale-codec", @@ -8720,7 +8746,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" 
+source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "finality-grandpa", "log", @@ -8738,7 +8764,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -8753,7 +8779,7 @@ dependencies = [ [[package]] name = "sp-io" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "bytes", "ed25519", @@ -8778,7 +8804,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "lazy_static", "sp-core", @@ -8789,7 +8815,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "futures", @@ -8806,7 +8832,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "thiserror", "zstd", @@ -8815,7 +8841,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -8833,7 +8859,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "scale-info", @@ -8847,7 +8873,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "sp-api", "sp-core", @@ -8857,7 +8883,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "backtrace", "lazy_static", @@ -8867,7 +8893,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "rustc-hash", "serde", @@ -8877,7 +8903,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "either", "hash256-std-hasher", @@ -8899,7 +8925,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -8917,7 +8943,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "Inflector", "proc-macro-crate", @@ -8929,7 +8955,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "scale-info", @@ -8943,7 +8969,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "scale-info", @@ -8955,7 +8981,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "hash-db", "log", @@ -8975,12 +9001,12 @@ dependencies = [ [[package]] name = "sp-std" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" [[package]] name = "sp-storage" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8993,7 +9019,7 @@ dependencies 
= [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "futures-timer", @@ -9008,7 +9034,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "sp-std", @@ -9020,7 +9046,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "sp-api", "sp-runtime", @@ -9029,7 +9055,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "async-trait", "log", @@ -9045,7 +9071,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "ahash 0.8.3", "hash-db", @@ -9068,7 +9094,7 @@ dependencies = [ [[package]] name = "sp-version" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "impl-serde", "parity-scale-codec", @@ -9085,7 +9111,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -9096,7 +9122,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -9110,7 +9136,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "parity-scale-codec", "scale-info", @@ -9140,9 +9166,9 @@ dependencies = [ [[package]] name = "ss58-registry" -version = "1.39.0" +version = "1.40.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecf0bd63593ef78eca595a7fc25e9a443ca46fe69fd472f8f09f5245cdcd769d" +checksum = "eb47a8ad42e5fc72d5b1eb104a5546937eaf39843499948bb666d6e93c62423b" dependencies = [ "Inflector", "num-format", @@ -9301,7 +9327,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "platforms 2.0.0", ] @@ -9309,7 +9335,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -9328,7 +9354,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "hyper", "log", @@ -9340,7 +9366,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "array-bytes", "async-trait", @@ -9366,7 +9392,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "cfg-if", "frame-support", @@ -9409,7 +9435,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime-client" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "futures", "parity-scale-codec", @@ -9428,7 +9454,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#1837f423b494254e1d27834b1c9da34b2c0c2375" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c4b84520cee2d7de53cc33cb67605ce4efefba8" dependencies = [ "ansi_term", "build-helper", @@ -9498,15 +9524,21 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.11" +version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e3787bb71465627110e7d87ed4faaa36c1f61042ee67badb9e2ef173accc40" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "synstructure" version = "0.12.6" @@ -9548,9 +9580,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.6" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae9980cab1db3fceee2f6c6f643d5d8de2997c58ee8d25fb0cc8a9e9e7348e5" +checksum = "fd1ba337640d60c3e96bc6f0638a939b9c9a7f2c316a1598c279828b3d1dc8c5" [[package]] name = "tempfile" @@ -9561,7 +9593,7 @@ dependencies = [ "cfg-if", "fastrand", "redox_syscall 0.3.5", - "rustix 0.37.5", + "rustix 0.37.18", "windows-sys 0.45.0", ] @@ -9627,7 +9659,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -9636,6 +9668,17 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820" +[[package]] +name = "thread-id" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fdfe0627923f7411a43ec9ec9c39c3a9b4151be313e0922042581fb6c9b717f" +dependencies = [ + "libc", + "redox_syscall 0.2.16", + "winapi", +] + [[package]] name = "thread_local" version = "1.1.7" @@ -9748,9 +9791,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.27.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" +checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" dependencies = [ "autocfg 1.1.0", "bytes", @@ -9762,18 +9805,18 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -9789,9 +9832,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite 0.2.9", @@ -9801,9 +9844,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", @@ -9913,13 +9956,13 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.15", ] [[package]] @@ 
-9947,7 +9990,7 @@ dependencies = [ "thread_local", "tracing", "tracing-core", - "tracing-subscriber 0.3.16", + "tracing-subscriber 0.3.17", ] [[package]] @@ -10006,9 +10049,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers 0.1.0", "nu-ansi-term", @@ -10287,11 +10330,11 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "serde", ] @@ -10549,9 +10592,9 @@ dependencies = [ [[package]] name = "wasmtime" -version = "6.0.1" +version = "6.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6e89f9819523447330ffd70367ef4a18d8c832e24e8150fe054d1d912841632" +checksum = "76a222f5fa1e14b2cefc286f1b68494d7a965f4bf57ec04c59bb62673d639af6" dependencies = [ "anyhow", "bincode", @@ -10577,18 +10620,18 @@ dependencies = [ [[package]] name = "wasmtime-asm-macros" -version = "6.0.1" +version = "6.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bd3a5e46c198032da934469f3a6e48649d1f9142438e4fd4617b68a35644b8a" +checksum = "4407a7246e7d2f3d8fb1cf0c72fda8dbafdb6dd34d555ae8bea0e5ae031089cc" dependencies = [ "cfg-if", ] [[package]] name = "wasmtime-cache" -version = "6.0.1" +version = "6.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b389ae9b678b9c3851091a4804f4182d688d27aff7abc9aa37fa7be37d8ecffa" +checksum = "5ceb3adf61d654be0be67fffdce42447b0880481348785be5fe40b5dd7663a4c" dependencies = [ "anyhow", "base64 0.13.1", @@ -10596,7 +10639,7 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix 0.36.11", + "rustix 0.36.13", "serde", "sha2 0.10.6", "toml 0.5.11", @@ -10606,9 +10649,9 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "6.0.1" +version = "6.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b2c92a08c0db6efffd88fdc97d7aa9c7c63b03edb0971dbca745469f820e8c" +checksum = "3c366bb8647e01fd08cb5589976284b00abfded5529b33d7e7f3f086c68304a4" dependencies = [ "anyhow", "cranelift-codegen", @@ -10627,9 +10670,9 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "6.0.1" +version = "6.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a6db9fc52985ba06ca601f2ff0ff1f526c5d724c7ac267b47326304b0c97883" +checksum = "47b8b50962eae38ee319f7b24900b7cf371f03eebdc17400c1dc8575fc10c9a7" dependencies = [ "anyhow", "cranelift-entity", @@ -10646,9 +10689,9 @@ dependencies = [ [[package]] name = "wasmtime-jit" -version = "6.0.1" +version = "6.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b77e3a52cd84d0f7f18554afa8060cfe564ccac61e3b0802d3fd4084772fa5f6" +checksum = "ffaed4f9a234ba5225d8e64eac7b4a5d13b994aeb37353cde2cbeb3febda9eaa" dependencies = [ "addr2line 0.17.0", "anyhow", @@ -10670,20 +10713,20 @@ dependencies = [ [[package]] name = "wasmtime-jit-debug" -version = "6.0.1" +version = "6.0.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0245e8a9347017c7185a72e215218a802ff561545c242953c11ba00fccc930f" +checksum = "eed41cbcbf74ce3ff6f1d07d1b707888166dc408d1a880f651268f4f7c9194b2" dependencies = [ "object 0.29.0", "once_cell", - "rustix 0.36.11", + "rustix 0.36.13", ] [[package]] name = "wasmtime-jit-icache-coherence" -version = "6.0.1" +version = "6.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67d412e9340ab1c83867051d8d1d7c90aa8c9afc91da086088068e2734e25064" +checksum = "43a28ae1e648461bfdbb79db3efdaee1bca5b940872e4175390f465593a2e54c" dependencies = [ "cfg-if", "libc", @@ -10692,9 +10735,9 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "6.0.1" +version = "6.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d594e791b5fdd4dbaf8cf7ae62f2e4ff85018ce90f483ca6f42947688e48827d" +checksum = "e704b126e4252788ccfc3526d4d4511d4b23c521bf123e447ac726c14545217b" dependencies = [ "anyhow", "cc", @@ -10707,7 +10750,7 @@ dependencies = [ "memoffset 0.6.5", "paste", "rand 0.8.5", - "rustix 0.36.11", + "rustix 0.36.13", "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", @@ -10716,9 +10759,9 @@ dependencies = [ [[package]] name = "wasmtime-types" -version = "6.0.1" +version = "6.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6688d6f96d4dbc1f89fab626c56c1778936d122b5f4ae7a57c2eb42b8d982e2" +checksum = "83e5572c5727c1ee7e8f28717aaa8400e4d22dcbd714ea5457d85b5005206568" dependencies = [ "cranelift-entity", "serde", @@ -10917,18 +10960,15 @@ dependencies = [ [[package]] name = "webrtc-media" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a3c157a040324e5049bcbd644ffc9079e6738fa2cfab2bcff64e5cc4c00d7" +checksum = "f72e1650a8ae006017d1a5280efb49e2610c19ccc3c0905b03b648aee9554991" dependencies = [ "byteorder", "bytes", - "derive_builder", - "displaydoc", "rand 0.8.5", "rtp", "thiserror", - "webrtc-util", ] [[package]] @@ -11066,11 +11106,11 @@ dependencies = [ [[package]] name = "windows" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2649ff315bee4c98757f15dac226efe3d81927adbb6e882084bb1ee3e0c330a7" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.47.0", + "windows-targets 0.48.0", ] [[package]] @@ -11097,6 +11137,15 @@ dependencies = [ "windows-targets 0.42.2", ] +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -11114,17 +11163,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f8996d3f43b4b2d44327cd71b7b0efd1284ab60e6e9d0e8b630e18555d87d3e" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" dependencies = [ - "windows_aarch64_gnullvm 0.47.0", - "windows_aarch64_msvc 0.47.0", - "windows_i686_gnu 0.47.0", - "windows_i686_msvc 0.47.0", - "windows_x86_64_gnu 0.47.0", - "windows_x86_64_gnullvm 0.47.0", - "windows_x86_64_msvc 0.47.0", + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + 
"windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] @@ -11135,9 +11184,9 @@ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831d567d53d4f3cb1db332b68e6e2b6260228eb4d99a777d8b2e8ed794027c90" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" @@ -11153,9 +11202,9 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a42d54a417c60ce4f0e31661eed628f0fa5aca73448c093ec4d45fab4c51cdf" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" @@ -11171,9 +11220,9 @@ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1925beafdbb22201a53a483db861a5644123157c1c3cee83323a2ed565d71e3" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" @@ -11189,9 +11238,9 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a8ef8f2f1711b223947d9b69b596cf5a4e452c930fb58b6fc3fdae7d0ec6b31" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" @@ -11207,9 +11256,9 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acaa0c2cf0d2ef99b61c308a0c3dbae430a51b7345dedec470bd8f53f5a3642" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" @@ -11219,9 +11268,9 @@ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a0628f71be1d11e17ca4a0e9e15b3a5180f6fbf1c2d55e3ba3f850378052c1" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" @@ -11237,15 +11286,15 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6e62c256dc6d40b8c8707df17df8d774e60e39db723675241e7c15e910bce7" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.1" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae8970b36c66498d8ff1d66685dc86b91b29db0c7739899012f63a63814b4b28" +checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" 
dependencies = [ "memchr", ] @@ -11369,9 +11418,9 @@ dependencies = [ [[package]] name = "yasna" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aed2e7a52e3744ab4d0c05c20aa065258e84c49fd4226f5191b2ed29712710b4" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ "time 0.3.20", ] @@ -11387,13 +11436,13 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25588073e5216b50bca71d61cb8595cdb9745e87032a58c199730def2862c934" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -11434,9 +11483,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.7+zstd.1.5.4" +version = "2.0.8+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index acb1e5477..c651d24d5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,12 +48,12 @@ thiserror = "1.0" hex = { version = "0.4", default-features = false } strum = { version = "0.21", features = ["derive"] } linked-hash-map = "0.5.4" -round-based = { version = "0.1.7", features = [] } +round-based = { git = "https://github.com/webb-tools/round-based-protocol", features = [] } serde = { version = "1.0", default-features = false, features = ["derive"] } humantime-serde = { version = "1.1.1", default-features = false } serde_json = "1.0.59" multi-party-ecdsa = { git = "https://github.com/webb-tools/multi-party-ecdsa.git" } -tokio = { version = "1.17.0", default-features = false, features = ["sync", "macros"] } +tokio = { version = "1.28.0", default-features = false, features = ["sync", "macros"] } tokio-util = { version = "0.7.7", default-features = false, features = ["codec"] } tokio-stream = { version = "0.1.8", features = ["sync"] } atomic = "0.5.1" @@ -67,6 +67,9 @@ curv = { package = "curv-kzen", version = "0.10.0", default-features = false } libsecp256k1 = { version = "0.7.1", default-features = false } tracing = "0.1.37" tracing-subscriber = "0.3.5" +sync_wrapper = "0.1.2" +async-stream = "0.3.5" +lazy_static = "1.4.0" scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/README.md b/README.md index 09dcfd732..9bb0ac8d6 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ 🚀 Threshold ECDSA Distributed Key Generation Protocol 🔑

-[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/webb-tools/dkg-substrate/checks.yml?branch=master&style=flat-square)](https://github.com/webb-tools/dkg-substrate/actions) [![Codecov](https://img.shields.io/codecov/c/gh/webb-tools/dkg-substrate?style=flat-square&token=HNT1CEZ01E)](https://codecov.io/gh/webb-tools/dkg-substrate) [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![Twitter](https://img.shields.io/twitter/follow/webbprotocol.svg?style=flat-square&label=Twitter&color=1DA1F2)](https://twitter.com/webbprotocol) [![Telegram](https://img.shields.io/badge/Telegram-gray?logo=telegram)](https://t.me/webbprotocol) [![Discord](https://img.shields.io/discord/833784453251596298.svg?style=flat-square&label=Discord&logo=discord)](https://discord.gg/cv8EfJu3Tn) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/webb-tools/dkg-substrate/checks.yml?branch=master&style=flat-square)](https://github.com/webb-tools/dkg-substrate/actions) [![Codecov](https://img.shields.io/codecov/c/gh/webb-tools/dkg-substrate?style=flat-square&token=HNT1CEZ01E)](https://codecov.io/gh/webb-tools/dkg-substrate) [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![Telegram](https://img.shields.io/badge/Telegram-gray?logo=telegram)](https://t.me/webbprotocol) [![Discord](https://img.shields.io/discord/833784453251596298.svg?style=flat-square&label=Discord&logo=discord)](https://discord.gg/cv8EfJu3Tn)

📖 Table of Contents

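Among the new workspace dependencies added above, async-stream is the one most visible later in this patch: it lets a plain tokio mpsc receiver be exposed as a futures Stream using straight-line async code. Below is a minimal, self-contained sketch of that pattern; the evens function and u64 payload are illustrative only (not from this patch), and the standalone main needs tokio's "rt" and "macros" features, whereas the workspace manifest above enables only "sync" and "macros".

use async_stream::stream;
use futures::{Stream, StreamExt};
use tokio::sync::mpsc;

// Adapt an unbounded mpsc receiver into a Stream, filtering items inline.
// This mirrors (in spirit) how the gadget wraps its incoming message
// receiver; the u64 payload here is purely illustrative.
fn evens(mut rx: mpsc::UnboundedReceiver<u64>) -> impl Stream<Item = u64> {
    stream! {
        while let Some(item) = rx.recv().await {
            if item % 2 == 0 {
                yield item;
            }
        }
    }
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let (tx, rx) = mpsc::unbounded_channel();
    for i in 0..6 {
        tx.send(i).expect("receiver alive");
    }
    drop(tx); // close the channel so the stream terminates
    let collected: Vec<u64> = evens(rx).collect().await;
    assert_eq!(collected, vec![0, 2, 4]);
}

The gadget itself uses the fallible try_stream! variant of the same macro, yielding Result items so that transformation errors can be logged and skipped rather than terminating the stream.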
diff --git a/dkg-gadget/Cargo.toml b/dkg-gadget/Cargo.toml index 9df4fa070..341210f29 100644 --- a/dkg-gadget/Cargo.toml +++ b/dkg-gadget/Cargo.toml @@ -10,7 +10,7 @@ edition = { workspace = true } [features] outbound-inspection = [] debug-tracing = ["tracing"] -testing = [] +testing = ["parking_lot/deadlock_detection"] [dependencies] futures = { workspace = true } @@ -54,6 +54,9 @@ atomic = { workspace = true } async-trait = { workspace = true } auto_impl = { workspace = true } itertools = { workspace = true } +sync_wrapper = { workspace = true } +async-stream = { workspace = true } +lazy_static = { workspace = true } hash-db = { workspace = true, optional = true } webb-proposals = { workspace = true } diff --git a/dkg-gadget/src/async_protocols/blockchain_interface.rs b/dkg-gadget/src/async_protocols/blockchain_interface.rs index 4809a9837..dfa9ffff1 100644 --- a/dkg-gadget/src/async_protocols/blockchain_interface.rs +++ b/dkg-gadget/src/async_protocols/blockchain_interface.rs @@ -47,21 +47,16 @@ use webb_proposals::Proposal; use super::KeygenPartyId; +#[async_trait::async_trait] #[auto_impl::auto_impl(Arc,&,&mut)] -pub trait BlockchainInterface: Send + Sync { +pub trait BlockchainInterface: Send + Sync + Unpin { type Clock: Debug + AtLeast32BitUnsigned + Copy + Send + Sync; type GossipEngine: GossipEngineIface; - type MaxProposalLength: Get - + Clone - + Send - + Sync - + std::fmt::Debug - + 'static - + std::fmt::Debug; + type MaxProposalLength: Get + Clone + Send + Sync + std::fmt::Debug + 'static + Unpin; - fn verify_signature_against_authorities( + async fn verify_signature_against_authorities( &self, - message: Arc>, + message: SignedDKGMessage, ) -> Result, DKGError>; fn sign_and_send_msg(&self, unsigned_msg: DKGMessage) -> Result<(), DKGError>; fn process_vote_result( @@ -120,12 +115,12 @@ impl< MaxAuthorities: Get + Clone + Send + Sync + std::fmt::Debug + 'static, > DKGProtocolEngine { - fn send_result_to_test_client(&self, result: Result<(), String>) { + fn send_result_to_test_client(&self, result: Result<(), String>, pub_key: Option>) { if let Some(bundle) = self.test_bundle.as_ref() { if let Some(current_test_id) = *bundle.current_test_id.read() { let _ = bundle .to_test_client - .send((current_test_id, result)) + .send((current_test_id, result, pub_key)) .map_err(|err| format!("send_result_to_test_client failed with error: {err}")); } } @@ -154,22 +149,23 @@ impl< > HasLatestHeader for DKGProtocolEngine where B: Block, - BE: Backend, + BE: Backend + 'static, GE: GossipEngineIface, - C: Client, + C: Client + 'static, { fn get_latest_header(&self) -> &Arc>> { &self.latest_header } } +#[async_trait::async_trait] impl BlockchainInterface for DKGProtocolEngine where B: Block, C: Client + 'static, C::Api: DKGApi, MaxProposalLength, MaxAuthorities>, - BE: Backend + 'static, + BE: Backend + Unpin + 'static, MaxProposalLength: Get + Send + Sync + Clone + 'static + std::fmt::Debug, GE: GossipEngineIface + 'static, { @@ -177,18 +173,19 @@ where type GossipEngine = Arc; type MaxProposalLength = MaxProposalLength; - fn verify_signature_against_authorities( + async fn verify_signature_against_authorities( &self, - msg: Arc>, + msg: SignedDKGMessage, ) -> Result, DKGError> { let client = &self.client; DKGWorker::<_, _, _, GE>::verify_signature_against_authorities_inner( &self.logger, - (*msg).clone(), + msg, &self.latest_header, client, ) + .await } fn sign_and_send_msg(&self, unsigned_msg: DKGMessage) -> Result<(), DKGError> { @@ -233,7 +230,7 @@ where if proposals_for_this_batch.len() == 
batch_key.len { self.logger.info(format!("All proposals have resolved for batch {batch_key:?}")); - let proposals = lock.remove(&batch_key).expect("Cannot get lock on vote_resuls"); // safe unwrap since lock is held + let proposals = lock.remove(&batch_key).expect("Cannot get lock on vote_results"); // safe unwrap since lock is held std::mem::drop(lock); if let Some(metrics) = self.metrics.as_ref() { @@ -248,6 +245,8 @@ where proposals, &self.logger, ); + // send None to signify this was a signing result + self.send_result_to_test_client(Ok(()), None); } else { self.logger.info(format!( "{}/{} proposals have resolved for batch {:?}", @@ -262,6 +261,7 @@ where } fn gossip_public_key(&self, key: DKGPublicKeyMessage) -> Result<(), DKGError> { + let public_key = key.pub_key.clone(); gossip_public_key::( &self.keystore, self.gossip_engine.clone(), @@ -269,7 +269,7 @@ where key, ); - self.send_result_to_test_client(Ok(())); + self.send_result_to_test_client(Ok(()), Some(public_key)); Ok(()) } diff --git a/dkg-gadget/src/async_protocols/incoming.rs b/dkg-gadget/src/async_protocols/incoming.rs index 26c910f61..566342c72 100644 --- a/dkg-gadget/src/async_protocols/incoming.rs +++ b/dkg-gadget/src/async_protocols/incoming.rs @@ -18,11 +18,10 @@ use futures::Stream; use round_based::Msg; use sp_runtime::traits::Get; use std::{ + marker::PhantomData, pin::Pin, - sync::Arc, task::{Context, Poll}, }; -use tokio_stream::wrappers::BroadcastStream; use crate::debug_logger::DebugLogger; @@ -30,42 +29,57 @@ use super::{blockchain_interface::BlockchainInterface, AsyncProtocolParameters, /// Used to filter and transform incoming messages from the DKG worker pub struct IncomingAsyncProtocolWrapper< - T, + T: TransformIncoming, BI, MaxProposalLength: Get + Clone + Send + Sync + std::fmt::Debug + 'static, > { - pub receiver: BroadcastStream, - session_id: SessionId, - engine: Arc, + stream: IncomingStreamMapped, logger: DebugLogger, - ty: ProtocolType, + _pd: PhantomData<(BI, MaxProposalLength)>, } +pub type IncomingStreamMapped = + Pin, DKGError>> + Send + 'static>>; + impl< T: TransformIncoming, - BI: BlockchainInterface, - MaxProposalLength: Get + Clone + Send + Sync + std::fmt::Debug + 'static, + BI: BlockchainInterface + 'static, + MaxProposalLength: Get + Clone + Send + Sync + std::fmt::Debug + Unpin + 'static, > IncomingAsyncProtocolWrapper { pub fn new( - receiver: tokio::sync::broadcast::Receiver, + mut receiver: tokio::sync::mpsc::UnboundedReceiver, ty: ProtocolType, - params: &AsyncProtocolParameters, + params: AsyncProtocolParameters, ) -> Self { - Self { - receiver: BroadcastStream::new(receiver), - session_id: params.session_id, - engine: params.engine.clone(), - logger: params.logger.clone(), - ty, - } + let logger = params.logger.clone(); + + let stream = async_stream::try_stream! 
{ + while let Some(msg) = receiver.recv().await { + match msg.transform(¶ms.engine, &ty, params.session_id, ¶ms.logger).await { + Ok(Some(msg)) => yield msg, + + Ok(None) => continue, + + Err(err) => { + params.logger.warn(format!( + "While mapping signed message, received an error: {err:?}" + )); + continue + }, + } + } + }; + + Self { stream: Box::pin(stream), logger, _pd: Default::default() } } } +#[async_trait::async_trait] pub trait TransformIncoming: Clone + Send + 'static { - type IncomingMapped; + type IncomingMapped: Send; - fn transform< + async fn transform< BI: BlockchainInterface, MaxProposalLength: Get + Clone + Send + Sync + std::fmt::Debug + 'static, >( @@ -79,9 +93,10 @@ pub trait TransformIncoming: Clone + Send + 'static { Self: Sized; } -impl TransformIncoming for Arc> { +#[async_trait::async_trait] +impl TransformIncoming for SignedDKGMessage { type IncomingMapped = DKGMessage; - fn transform< + async fn transform< BI: BlockchainInterface, MaxProposalLength: Get + Clone + Send + Sync + std::fmt::Debug + 'static, >( @@ -94,21 +109,49 @@ impl TransformIncoming for Arc> { where Self: Sized, { + logger.checkpoint_message_raw(self.msg.payload.payload(), "CP-2-incoming"); match (stream_type, &self.msg.payload) { (ProtocolType::Keygen { .. }, DKGMsgPayload::Keygen(..)) | (ProtocolType::Offline { .. }, DKGMsgPayload::Offline(..)) | (ProtocolType::Voting { .. }, DKGMsgPayload::Vote(..)) => { + logger.checkpoint_message_raw(self.msg.payload.payload(), "CP-2.1-incoming"); // only clone if the downstream receiver expects this type + let associated_block_id = stream_type.get_associated_block_id(); let sender = self .msg .payload .async_proto_only_get_sender_id() .expect("Could not get sender id"); if sender != stream_type.get_i() { + logger.checkpoint_message_raw(self.msg.payload.payload(), "CP-2.2-incoming"); if self.msg.session_id == this_session_id { - verify - .verify_signature_against_authorities(self) - .map(|body| Some(Msg { sender, receiver: None, body })) + logger + .checkpoint_message_raw(self.msg.payload.payload(), "CP-2.3-incoming"); + if associated_block_id == &self.msg.associated_block_id { + logger.checkpoint_message_raw( + self.msg.payload.payload(), + "CP-2.4-incoming", + ); + let payload = self.msg.payload.payload().clone(); + match verify.verify_signature_against_authorities(self).await { + Ok(body) => { + logger.checkpoint_message_raw( + &payload, + "CP-2.4-verified-incoming", + ); + Ok(Some(Msg { sender, receiver: None, body })) + }, + Err(err) => { + let err_msg = format!("Unable to verify message: {err:?}"); + logger.error(&err_msg); + logger.checkpoint_message_raw(&payload, err_msg); + Err(err) + }, + } + } else { + logger.warn(format!("Will skip passing message to state machine since not for this associated block, msg block {:?} expected block {:?}", self.msg.associated_block_id, associated_block_id)); + Ok(None) + } } else { logger.warn(format!("Will skip passing message to state machine since not for this round, msg round {:?} this session {:?}", self.msg.session_id, this_session_id)); Ok(None) @@ -128,38 +171,29 @@ impl TransformIncoming for Arc> { } } -impl + Clone + Send + Sync + std::fmt::Debug + 'static> Stream - for IncomingAsyncProtocolWrapper -where - T: TransformIncoming, - BI: BlockchainInterface, +impl< + T: TransformIncoming, + BI: BlockchainInterface, + MaxProposalLength: Get + Clone + Send + Sync + std::fmt::Debug + 'static, + > Unpin for IncomingAsyncProtocolWrapper { - type Item = Msg; +} +impl< + T: TransformIncoming, + BI: 
BlockchainInterface, + MaxProposalLength: Get + Clone + Send + Sync + std::fmt::Debug + 'static, + > Stream for IncomingAsyncProtocolWrapper +{ + type Item = Msg; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let Self { receiver, ty, engine, session_id, logger } = &mut *self; - let mut receiver = Pin::new(receiver); - - loop { - match futures::ready!(receiver.as_mut().poll_next(cx)) { - Some(Ok(msg)) => match msg.transform(&**engine, &*ty, *session_id, &*logger) { - Ok(Some(msg)) => return Poll::Ready(Some(msg)), - - Ok(None) => continue, - - Err(err) => { - logger.warn(format!( - "While mapping signed message, received an error: {err:?}" - )); - continue - }, - }, - Some(Err(err)) => { - logger.error(format!("Stream RECV error: {err:?}")); - continue - }, - None => return Poll::Ready(None), - } + match futures::ready!(self.as_mut().stream.as_mut().poll_next(cx)) { + Some(Ok(msg)) => Poll::Ready(Some(msg)), + Some(Err(err)) => { + self.logger.error(format!("Error in incoming stream: {err:?}")); + self.poll_next(cx) + }, + None => Poll::Ready(None), } } } diff --git a/dkg-gadget/src/async_protocols/keygen/handler.rs b/dkg-gadget/src/async_protocols/keygen/handler.rs index faeffcdb2..72550815c 100644 --- a/dkg-gadget/src/async_protocols/keygen/handler.rs +++ b/dkg-gadget/src/async_protocols/keygen/handler.rs @@ -110,15 +110,15 @@ where DKGMsgStatus::QUEUED => KeygenRound::QUEUED, }; let i = params.party_i; + let associated_round_id = params.associated_block_id.clone(); let channel_type: ProtocolType<::MaxProposalLength> = - ProtocolType::Keygen { ty, i, t, n }; + ProtocolType::Keygen { ty, i, t, n, associated_block_id: associated_round_id }; new_inner( (), Keygen::new(*i.as_ref(), t, n) .map_err(|err| Self::map_keygen_error_to_dkg_error_keygen(err))?, params, channel_type, - 0, status, ) } diff --git a/dkg-gadget/src/async_protocols/keygen/state_machine.rs b/dkg-gadget/src/async_protocols/keygen/state_machine.rs index 30edb5258..d4196ff4c 100644 --- a/dkg-gadget/src/async_protocols/keygen/state_machine.rs +++ b/dkg-gadget/src/async_protocols/keygen/state_machine.rs @@ -78,7 +78,6 @@ impl StateMachineHandler for Keygen { local_key: ::Output, params: AsyncProtocolParameters, _: Self::AdditionalReturnParam, - _: u8, ) -> Result<::Output, DKGError> { params.logger.info_keygen("Completed keygen stage successfully!".to_string()); // PublicKeyGossip (we need meta handler to handle this) diff --git a/dkg-gadget/src/async_protocols/mod.rs b/dkg-gadget/src/async_protocols/mod.rs index 9680ef5a2..7e2244ca0 100644 --- a/dkg-gadget/src/async_protocols/mod.rs +++ b/dkg-gadget/src/async_protocols/mod.rs @@ -61,7 +61,9 @@ use self::{ state_machine::StateMachineHandler, state_machine_wrapper::StateMachineWrapper, }; use crate::{debug_logger::DebugLogger, utils::SendFuture, worker::KeystoreExt, DKGKeystore}; +use dkg_logging::debug_logger::AsyncProtocolType; use incoming::IncomingAsyncProtocolWrapper; +use multi_party_ecdsa::MessageRoundID; pub struct AsyncProtocolParameters< BI: BlockchainInterface, @@ -73,6 +75,7 @@ pub struct AsyncProtocolParameters< pub best_authorities: Arc>, pub authority_public_key: Arc, pub party_i: KeygenPartyId, + pub associated_block_id: Vec, pub batch_id_gen: Arc, pub handle: AsyncProtocolRemote, pub session_id: SessionId, @@ -116,13 +119,8 @@ impl< MaxAuthorities: Get + Clone + Send + Sync + std::fmt::Debug + 'static, > AsyncProtocolParameters { - pub fn get_next_batch_key< - MaxProposalLength: Get + Clone + Send + Sync + std::fmt::Debug + 'static, - 
>( - &self, - batch: &[UnsignedProposal], - ) -> BatchKey { - BatchKey { id: self.batch_id_gen.fetch_add(1, Ordering::SeqCst), len: batch.len() } + pub fn get_next_batch_key(&self) -> BatchKey { + BatchKey { id: self.batch_id_gen.fetch_add(1, Ordering::SeqCst), len: 1 } } } @@ -138,6 +136,7 @@ impl< engine: self.engine.clone(), keystore: self.keystore.clone(), current_validator_set: self.current_validator_set.clone(), + associated_block_id: self.associated_block_id.clone(), best_authorities: self.best_authorities.clone(), authority_public_key: self.authority_public_key.clone(), party_i: self.party_i, @@ -291,23 +290,34 @@ pub enum ProtocolType + Clone + Send + Sync + std::f i: KeygenPartyId, t: u16, n: u16, + associated_block_id: Vec, }, Offline { unsigned_proposal: Arc>, i: OfflinePartyId, s_l: Vec, local_key: Arc>, + associated_block_id: Vec, }, Voting { offline_stage: Arc, unsigned_proposal: Arc>, i: OfflinePartyId, + associated_block_id: Vec, }, } impl + Clone + Send + Sync + std::fmt::Debug + 'static> ProtocolType { + pub const fn get_associated_block_id(&self) -> &Vec { + match self { + Self::Keygen { associated_block_id: associated_round_id, .. } => associated_round_id, + Self::Offline { associated_block_id: associated_round_id, .. } => associated_round_id, + Self::Voting { associated_block_id: associated_round_id, .. } => associated_round_id, + } + } + pub const fn get_i(&self) -> u16 { match self { Self::Keygen { i, .. } => i.0, @@ -322,6 +332,10 @@ impl + Clone + Send + Sync + std::fmt::Debug + 'stat _ => None, } } + + pub fn get_unsigned_proposal_hash(&self) -> Option<[u8; 32]> { + self.get_unsigned_proposal().and_then(|x| x.hash()) + } } impl + Clone + Send + Sync + std::fmt::Debug + 'static + Debug> Debug @@ -329,13 +343,13 @@ impl + Clone + Send + Sync + std::fmt::Debug + 'stat { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { - ProtocolType::Keygen { ty, i, t, n } => { + ProtocolType::Keygen { ty, i, t, n, associated_block_id: associated_round_id } => { let ty = match ty { KeygenRound::ACTIVE => "ACTIVE", KeygenRound::QUEUED => "QUEUED", KeygenRound::UNKNOWN => "UNKNOWN", }; - write!(f, "{ty} | Keygen: (i, t, n) = ({i}, {t}, {n})") + write!(f, "{ty} | Keygen: (i, t, n, r) = ({i}, {t}, {n}, {associated_round_id:?})") }, ProtocolType::Offline { i, unsigned_proposal, .. } => { write!(f, "Offline: (i, proposal) = ({}, {:?})", i, &unsigned_proposal.proposal) @@ -375,13 +389,11 @@ pub fn new_inner + 'static, BI: BlockchainInterface sm: SM, params: AsyncProtocolParameters, channel_type: ProtocolType<::MaxProposalLength>, - async_index: u8, status: DKGMsgStatus, ) -> Result, DKGError> where ::Err: Send + Debug, - ::MessageBody: Send, - ::MessageBody: Serialize, + ::MessageBody: Send + Serialize + MessageRoundID, ::Output: Send, { let (incoming_tx_proto, incoming_rx_proto) = SM::generate_channel(); @@ -412,9 +424,7 @@ where logger.info(format!("Running AsyncProtocol with party_index: {}", params.party_i)); let res = async_proto.run().await; match res { - Ok(v) => - return SM::on_finish(v, params_for_end_of_proto, additional_param, async_index) - .await, + Ok(v) => return SM::on_finish(v, params_for_end_of_proto, additional_param).await, Err(err) => match err { async_runtime::Error::Recv(e) | async_runtime::Error::Proceed(e) | @@ -469,7 +479,6 @@ where params.clone(), outgoing_rx, channel_type.clone(), - async_index, status, ); @@ -483,17 +492,17 @@ where // Spawn the 3 tasks // 1. 
The inbound task (we will abort that task if the protocol finished) - let handle = tokio::spawn(inbound_signed_message_receiver); + let handle = + crate::utils::ExplicitPanicFuture::new(tokio::spawn(inbound_signed_message_receiver)); // 2. The outbound task (will stop if the protocol finished, after flushing the messages to the // network.) - let handle2 = tokio::spawn(outgoing_to_wire); + let handle2 = crate::utils::ExplicitPanicFuture::new(tokio::spawn(outgoing_to_wire)); // 3. The async protocol itself let protocol = async move { let res = async_proto.await; params .logger .info(format!("🕸️ Protocol {:?} Ended: {:?}", channel_type.clone(), res)); - // Abort the inbound task handle.abort(); // Wait for the outbound task to finish // TODO: We should probably have a timeout here, and if the outbound task doesn't finish @@ -515,16 +524,16 @@ fn generate_outgoing_to_wire_fn< params: AsyncProtocolParameters, outgoing_rx: UnboundedReceiver::MessageBody>>, proto_ty: ProtocolType<::MaxProposalLength>, - async_index: u8, status: DKGMsgStatus, ) -> impl SendFuture<'static, ()> where - ::MessageBody: Serialize, + ::MessageBody: Serialize + Send + MessageRoundID, ::MessageBody: Send, ::Output: Send, { Box::pin(async move { let mut outgoing_rx = outgoing_rx.fuse(); + let unsigned_proposal_hash = proto_ty.get_unsigned_proposal_hash(); // take all unsigned messages, then sign them and send outbound loop { // Here is a few explanations about the next few lines: @@ -540,10 +549,13 @@ where }, }; + let msg_hash = crate::debug_logger::message_to_string_hash(&unsigned_message); + params.logger.info(format!( - "Async proto sent outbound request in session={} from={:?} to={:?} | (ty: {:?})", - params.session_id, unsigned_message.sender, unsigned_message.receiver, &proto_ty + "Async proto about to send outbound message in session={} from={:?} to={:?} for round {:?}| (ty: {:?})", + params.session_id, unsigned_message.sender, unsigned_message.receiver, unsigned_message.body.round_id(), &proto_ty )); + let party_id = unsigned_message.sender; let serialized_body = match serde_json::to_vec(&unsigned_message) { Ok(value) => value, @@ -564,13 +576,19 @@ where .expect("message receiver should be a valid KeygenPartyId"); // try to find the authority id in the list of authorities by the // KeygenPartyId - params.best_authorities.iter().find_map(|(id, p)| { + let ret = params.best_authorities.iter().find_map(|(id, p)| { if id == &keygen_party_id { Some(p.clone()) } else { None } - }) + }); + if ret.is_none() { + params.logger.error(format!( + "Failed to find authority id for KeygenPartyId={keygen_party_id:?}" + )); + } + ret }, None => None, }; @@ -587,7 +605,8 @@ where ), signer_set_id: party_id as u64, offline_msg: serialized_body, - async_index, + unsigned_proposal_hash: unsigned_proposal_hash + .expect("Cannot hash unsigned proposal!"), }), _ => { unreachable!( @@ -598,6 +617,7 @@ where let id = params.authority_public_key.as_ref().clone(); let unsigned_dkg_message = DKGMessage { + associated_block_id: params.associated_block_id.clone(), sender_id: id, recipient_id: maybe_recipient_id, status, @@ -612,11 +632,24 @@ where params .logger .info(format!("🕸️ Async proto sent outbound message: {:?}", &proto_ty)); + params.logger.round_event( + &proto_ty, + crate::RoundsEventType::SentMessage { + session: params.session_id as _, + round: unsigned_message.body.round_id() as _, + sender: unsigned_message.sender as _, + receiver: unsigned_message.receiver as _, + msg_hash, + }, + ); + 
params.logger.checkpoint_message(&unsigned_message, "CP0"); } // check the status of the async protocol. // if it has completed or terminated then break out of the loop. if params.handle.is_completed() || params.handle.is_terminated() { + // TODO: consider telling the task running this to shut this off in 1s to allow time + // for additional messages to be sent params.logger.debug( "🕸️ Async proto is completed or terminated, breaking out of incoming loop", ); @@ -641,9 +674,14 @@ where { Box::pin(async move { // the below wrapper will map signed messages into unsigned messages - let incoming = params.handle.broadcaster.subscribe(); + let incoming = params + .handle + .rx_keygen_signing + .lock() + .take() + .expect("rx_keygen_signing already taken"); let incoming_wrapper = - IncomingAsyncProtocolWrapper::new(incoming, channel_type.clone(), ¶ms); + IncomingAsyncProtocolWrapper::new(incoming, channel_type.clone(), params.clone()); // we use fuse here, since normally, once a stream has returned `None` from calling // `next()` any further calls could exhibit bad behavior such as block forever, panic, never // return, etc. that's why we use fuse here to ensure that it has defined semantics, @@ -658,6 +696,10 @@ where }, }; + params + .logger + .checkpoint_message_raw(unsigned_message.body.payload.payload(), "CP-2.5-incoming"); + if SM::handle_unsigned_message( &to_async_proto, unsigned_message, @@ -781,3 +823,17 @@ mod tests { assert_eq!(authority_id, &my_authority_id); } } + +impl + Clone + Send + Sync + std::fmt::Debug + 'static> From<&'_ ProtocolType> + for AsyncProtocolType +{ + fn from(value: &ProtocolType) -> Self { + match value { + ProtocolType::Keygen { .. } => AsyncProtocolType::Keygen, + ProtocolType::Offline { unsigned_proposal, .. } => + AsyncProtocolType::Signing { hash: unsigned_proposal.hash().unwrap_or([0u8; 32]) }, + ProtocolType::Voting { unsigned_proposal, .. 
} => + AsyncProtocolType::Voting { hash: unsigned_proposal.hash().unwrap_or([0u8; 32]) }, + } + } +} diff --git a/dkg-gadget/src/async_protocols/remote.rs b/dkg-gadget/src/async_protocols/remote.rs index a7761cf0d..1e8e2cc97 100644 --- a/dkg-gadget/src/async_protocols/remote.rs +++ b/dkg-gadget/src/async_protocols/remote.rs @@ -14,7 +14,7 @@ use crate::{async_protocols::CurrentRoundBlame, debug_logger::DebugLogger}; use atomic::Atomic; -use dkg_primitives::types::{DKGError, SessionId, SignedDKGMessage}; +use dkg_primitives::types::{DKGError, DKGMsgPayload, SessionId, SignedDKGMessage}; use dkg_runtime_primitives::{crypto::Public, KEYGEN_TIMEOUT, SIGN_TIMEOUT}; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; @@ -23,7 +23,10 @@ use std::sync::{atomic::Ordering, Arc}; pub struct AsyncProtocolRemote { pub(crate) status: Arc>, - pub(crate) broadcaster: tokio::sync::broadcast::Sender>>, + tx_keygen_signing: tokio::sync::mpsc::UnboundedSender>, + tx_voting: tokio::sync::mpsc::UnboundedSender>, + pub(crate) rx_keygen_signing: MessageReceiverHandle, + pub(crate) rx_voting: MessageReceiverHandle, start_tx: Arc>>>, pub(crate) start_rx: Arc>>>, stop_tx: Arc>>>, @@ -37,11 +40,17 @@ pub struct AsyncProtocolRemote { status_history: Arc>>, } +type MessageReceiverHandle = + Arc>>>>; + impl Clone for AsyncProtocolRemote { fn clone(&self) -> Self { Self { status: self.status.clone(), - broadcaster: self.broadcaster.clone(), + tx_keygen_signing: self.tx_keygen_signing.clone(), + tx_voting: self.tx_voting.clone(), + rx_keygen_signing: self.rx_keygen_signing.clone(), + rx_voting: self.rx_voting.clone(), start_tx: self.start_tx.clone(), start_rx: self.start_rx.clone(), stop_tx: self.stop_tx.clone(), @@ -71,7 +80,8 @@ impl AsyncProtocolRemote { /// Create at the beginning of each meta handler instantiation pub fn new(at: C, session_id: SessionId, logger: DebugLogger) -> Self { let (stop_tx, stop_rx) = tokio::sync::mpsc::unbounded_channel(); - let (broadcaster, _) = tokio::sync::broadcast::channel(4096); + let (tx_keygen_signing, rx_keygen_signing) = tokio::sync::mpsc::unbounded_channel(); + let (tx_voting, rx_voting) = tokio::sync::mpsc::unbounded_channel(); let (start_tx, start_rx) = tokio::sync::oneshot::channel(); let (current_round_blame_tx, current_round_blame) = @@ -100,15 +110,18 @@ impl AsyncProtocolRemote { // } // logger_debug.debug(format!( - // "AsyncProtocolRemote status: {status:?} ||||| history: {status_history:?}", - // )); + // "AsyncProtocolRemote status: {status:?} ||||| history: {status_history:?} ||||| + // session_id: {session_id:?}", )); // } // }); Self { status, + tx_keygen_signing, + tx_voting, + rx_keygen_signing: Arc::new(Mutex::new(Some(rx_keygen_signing))), + rx_voting: Arc::new(Mutex::new(Some(rx_voting))), status_history, - broadcaster, started_at: at, start_tx: Arc::new(Mutex::new(Some(start_tx))), start_rx: Arc::new(Mutex::new(Some(start_rx))), @@ -117,27 +130,34 @@ impl AsyncProtocolRemote { current_round_blame, logger, current_round_blame_tx: Arc::new(current_round_blame_tx), - is_primary_remote: true, + is_primary_remote: false, session_id, } } + pub fn set_as_primary(&mut self) { + self.is_primary_remote = true; + } + pub fn keygen_has_stalled(&self, now: C) -> bool { - self.keygen_is_not_complete() && (now >= self.started_at + KEYGEN_TIMEOUT.into()) + self.has_stalled(now, KEYGEN_TIMEOUT) } pub fn signing_has_stalled(&self, now: C) -> bool { - self.signing_is_not_complete() && (now >= self.started_at + SIGN_TIMEOUT.into()) + self.has_stalled(now, SIGN_TIMEOUT) } 
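The MessageReceiverHandle alias above (an Arc<Mutex<Option<UnboundedReceiver<...>>>>) implements a take-once handoff: the receiver is created together with the remote, every clone of the remote shares the same slot, and exactly one consumer may move the receiver out. A minimal sketch of the idea, with illustrative names not taken from this patch:

use std::sync::Arc;
use parking_lot::Mutex;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};

// Illustrative take-once receiver handle: the handle is cheaply cloneable,
// but the receiver inside can be moved out exactly once.
struct TakeOnceRx<T> {
    inner: Arc<Mutex<Option<UnboundedReceiver<T>>>>,
}

impl<T> Clone for TakeOnceRx<T> {
    fn clone(&self) -> Self {
        Self { inner: Arc::clone(&self.inner) }
    }
}

impl<T> TakeOnceRx<T> {
    fn new() -> (UnboundedSender<T>, Self) {
        let (tx, rx) = unbounded_channel();
        (tx, Self { inner: Arc::new(Mutex::new(Some(rx))) })
    }

    // First call returns Some(receiver); every later call returns None.
    fn take(&self) -> Option<UnboundedReceiver<T>> {
        self.inner.lock().take()
    }
}

A second take() returns None, which is why the protocol code calls .take().expect(...): a double subscription becomes a loud logic error. Unlike the bounded broadcast channel this replaces, an unbounded mpsc receiver is also never force-lagged into dropping messages.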
- pub fn keygen_is_not_complete(&self) -> bool { - self.get_status() != MetaHandlerStatus::Complete || - self.get_status() == MetaHandlerStatus::Terminated - } + fn has_stalled(&self, now: C, timeout: u32) -> bool { + let state = self.get_status(); + + // if the state is terminated, preemptively assume we are stalled + // to allow other tasks to take this one's place + if state == MetaHandlerStatus::Terminated { + return true + } - pub fn signing_is_not_complete(&self) -> bool { - self.get_status() != MetaHandlerStatus::Complete || - self.get_status() == MetaHandlerStatus::Terminated + // otherwise, if we have timed out, no matter the state, we are stalled + now >= self.started_at + timeout.into() } } @@ -164,6 +184,10 @@ impl AsyncProtocolRemote { self.status_history.lock().push(status); self.status.store(status, Ordering::SeqCst); } else { + // for now, set the state anyway + self.status_history.lock().push(status); + self.status.store(status, Ordering::SeqCst); + self.logger.error(format!( "Invalid status update: {:?} -> {:?}", self.get_status(), @@ -174,29 +198,33 @@ impl AsyncProtocolRemote { pub fn is_active(&self) -> bool { let status = self.get_status(); - status != MetaHandlerStatus::Beginning && - status != MetaHandlerStatus::Complete && - status != MetaHandlerStatus::Terminated + status != MetaHandlerStatus::Complete && status != MetaHandlerStatus::Terminated } + #[allow(clippy::result_large_err)] pub fn deliver_message( &self, - msg: Arc>, - ) -> Result<(), tokio::sync::broadcast::error::SendError>>> { - if self.broadcaster.receiver_count() != 0 && self.is_active() { - self.broadcaster.send(msg).map(|_| ()) + msg: SignedDKGMessage, + ) -> Result<(), tokio::sync::mpsc::error::SendError>> { + let status = self.get_status(); + let can_deliver = + status != MetaHandlerStatus::Complete && status != MetaHandlerStatus::Terminated; + if can_deliver { + if matches!(msg.msg.payload, DKGMsgPayload::Vote(..)) { + self.tx_voting.send(msg) + } else { + self.tx_keygen_signing.send(msg) + } } else { - // do not forward the message + // do not forward the message (TODO: Consider enqueuing messages for rounds that are not yet + // active; other nodes may be active, but this node is still in the process of "waking 
+ self.logger.warn(format!("Did not deliver message {:?}", msg.msg.payload)); Ok(()) } } - /// Determines if there are any active listeners - #[allow(dead_code)] - pub fn is_receiving(&self) -> bool { - self.broadcaster.receiver_count() != 0 - } - /// Stops the execution of the meta handler, including all internal asynchronous subroutines pub fn start(&self) -> Result<(), DKGError> { let tx = self.start_tx.lock().take().ok_or_else(|| DKGError::GenericError { @@ -220,7 +248,7 @@ impl AsyncProtocolRemote { return Ok(()) }, }; - self.logger.error(format!("Shutting down meta handler: {}", reason.as_ref())); + self.logger.warn(format!("Shutting down meta handler: {}", reason.as_ref())); tx.send(()).map_err(|_| DKGError::GenericError { reason: "Unable to send shutdown signal (already shut down?)".to_string(), }) @@ -244,6 +272,10 @@ impl AsyncProtocolRemote { matches!(state, MetaHandlerStatus::Terminated) } + pub fn is_done(&self) -> bool { + self.is_terminated() || self.is_completed() + } + pub fn current_round_blame(&self) -> CurrentRoundBlame { self.current_round_blame.borrow().clone() } @@ -251,11 +283,7 @@ impl AsyncProtocolRemote { impl Drop for AsyncProtocolRemote { fn drop(&mut self) { - if Arc::strong_count(&self.status) == 2 || self.is_primary_remote { - // at this point, the only instances of this arc are this one, and, - // presumably the one in the DKG worker. This one is asserted to be the one - // belonging to the async proto. Signal as complete to allow the DKG worker to move - // forward + if Arc::strong_count(&self.status) == 1 || self.is_primary_remote { if self.get_status() != MetaHandlerStatus::Complete { self.logger.info(format!( "MetaAsyncProtocol is ending: {:?}, History: {:?}", diff --git a/dkg-gadget/src/async_protocols/sign/handler.rs b/dkg-gadget/src/async_protocols/sign/handler.rs index 02b71c2df..7f11b2e34 100644 --- a/dkg-gadget/src/async_protocols/sign/handler.rs +++ b/dkg-gadget/src/async_protocols/sign/handler.rs @@ -14,20 +14,20 @@ use curv::{arithmetic::Converter, elliptic::curves::Secp256k1, BigInt}; use dkg_runtime_primitives::UnsignedProposal; -use futures::{stream::FuturesUnordered, StreamExt, TryStreamExt}; +use futures::StreamExt; use multi_party_ecdsa::protocols::multi_party_ecdsa::gg_2020::state_machine::{ keygen::LocalKey, sign::{CompletedOfflineStage, OfflineStage, PartialSignature, SignManual}, }; -use std::{fmt::Debug, sync::Arc}; -use tokio::sync::broadcast::Receiver; +use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration}; use crate::async_protocols::{ blockchain_interface::BlockchainInterface, incoming::IncomingAsyncProtocolWrapper, new_inner, remote::MetaHandlerStatus, state_machine::StateMachineHandler, AsyncProtocolParameters, BatchKey, GenericAsyncHandler, KeygenPartyId, OfflinePartyId, ProtocolType, Threshold, }; +use dkg_logging::debug_logger::RoundsEventType; use dkg_primitives::types::{ DKGError, DKGMessage, DKGMsgPayload, DKGMsgStatus, DKGVoteMessage, SignedDKGMessage, }; @@ -46,9 +46,8 @@ where pub fn setup_signing( params: AsyncProtocolParameters, threshold: u16, - unsigned_proposals: Vec::MaxProposalLength>>, + unsigned_proposal: UnsignedProposal<::MaxProposalLength>, s_l: Vec, - async_index: u8, ) -> Result, DKGError> { assert!(threshold + 1 == s_l.len() as u16, "Signing set must be of size threshold + 1"); let status_handle = params.handle.clone(); @@ -75,36 +74,29 @@ where .map_err(|err| DKGError::StartOffline { reason: err.to_string() })?; params.handle.set_status(MetaHandlerStatus::OfflineAndVoting); - let 
count_in_batch = unsigned_proposals.len(); - let batch_key = params.get_next_batch_key(&unsigned_proposals); + let batch_key = params.get_next_batch_key(); - params - .logger - .debug_signing(format!("Got unsigned proposals count {}", unsigned_proposals.len())); + params.logger.debug_signing("Received unsigned proposal"); if let Ok(offline_i) = params.party_i.try_to_offline_party_id(&s_l) { + params.logger.info_signing(format!( + "Party Index converted to offline stage Index : {:?}", + params.party_i + )); params.logger.info_signing(format!("Offline stage index: {offline_i}")); - // create one offline stage for each unsigned proposal - let futures = FuturesUnordered::new(); - for unsigned_proposal in unsigned_proposals { - futures.push(Box::pin(GenericAsyncHandler::new_offline( - params.clone(), - unsigned_proposal, - offline_i, - s_l.clone(), - local_key.clone(), - t, - batch_key, - async_index, - )?)); - } - - // NOTE: this will block at each batch of unsigned proposals. - // TODO: Consider not blocking here and allowing processing of - // each batch of unsigned proposals concurrently - futures.try_collect::<()>().await.map(|_| ())?; - params.logger.info_signing(format!("🕸️ Concluded all Offline->Voting stages ({count_in_batch} total) for this batch for this node")); + GenericAsyncHandler::new_offline( + params.clone(), + unsigned_proposal, + offline_i, + s_l.clone(), + local_key.clone(), + t, + batch_key, + )? + .await?; + + params.logger.info_signing("Concluded Offline->Voting stage for this node"); } else { params.logger.warn_signing("🕸️ We are not among signers, skipping".to_string()); return Err(DKGError::GenericError { @@ -113,7 +105,7 @@ where } } else { return Err(DKGError::GenericError { - reason: "Will skip keygen since local key does not exist".to_string(), + reason: "Will skip signing since local key does not exist".to_string(), }) } @@ -150,7 +142,6 @@ where local_key: LocalKey, threshold: u16, batch_key: BatchKey, - async_index: u8, ) -> Result< GenericAsyncHandler<'static, >::Return>, DKGError, @@ -160,16 +151,15 @@ where i: offline_i, s_l: s_l.clone(), local_key: Arc::new(local_key.clone()), + associated_block_id: params.associated_block_id.clone(), }; - let early_handle = params.handle.broadcaster.subscribe(); let s_l_raw = s_l.into_iter().map(|party_i| *party_i.as_ref()).collect(); new_inner( - (unsigned_proposal, offline_i, early_handle, threshold, batch_key), + (unsigned_proposal, offline_i, threshold, batch_key), OfflineStage::new(*offline_i.as_ref(), s_l_raw, local_key) .map_err(|err| DKGError::CriticalError { reason: err.to_string() })?, params, channel_type, - async_index, DKGMsgStatus::ACTIVE, ) } @@ -180,21 +170,26 @@ where completed_offline_stage: CompletedOfflineStage, unsigned_proposal: UnsignedProposal<::MaxProposalLength>, offline_i: OfflinePartyId, - rx: Receiver>>, + rx: tokio::sync::mpsc::UnboundedReceiver>, threshold: Threshold, batch_key: BatchKey, - async_index: u8, ) -> Result, DKGError> { let protocol = Box::pin(async move { + let unsigned_proposal_hash = unsigned_proposal.hash().expect("Should not fail"); let ty = ProtocolType::Voting { offline_stage: Arc::new(completed_offline_stage.clone()), unsigned_proposal: Arc::new(unsigned_proposal.clone()), i: offline_i, + associated_block_id: params.associated_block_id.clone(), }; - // the below wrapper will map signed messages into unsigned messages - let incoming = rx; - let incoming_wrapper = &mut IncomingAsyncProtocolWrapper::new(incoming, ty, ¶ms); + params.logger.round_event( + &ty, + 
RoundsEventType::ProceededToRound { session: params.session_id, round: 0 }, + ); + + let mut incoming_wrapper = + IncomingAsyncProtocolWrapper::new(rx, ty.clone(), params.clone()); // the first step is to generate the partial sig based on the offline stage let number_of_parties = params.best_authorities.len(); @@ -225,12 +220,13 @@ where // are allowing for parallelism now round_key: Vec::from(&hash_of_proposal as &[u8]), partial_signature: partial_sig_bytes, - async_index, + unsigned_proposal_hash, }); let id = params.authority_public_key.as_ref().clone(); // now, broadcast the data let unsigned_dkg_message = DKGMessage { + associated_block_id: params.associated_block_id.clone(), sender_id: id, // No recipient for this message, it is broadcasted recipient_id: None, @@ -238,7 +234,14 @@ where payload, session_id: params.session_id, }; - params.engine.sign_and_send_msg(unsigned_dkg_message)?; + + // we have no synchronization mechanism after the offline stage, and messages sometimes + // fail to be delivered. Thus, we send the message multiple times and wait briefly between + // sends to let other nodes "show up" + for _ in 0..3 { + params.engine.sign_and_send_msg(unsigned_dkg_message.clone())?; + tokio::time::sleep(Duration::from_millis(100)).await; + } // we only need a threshold count of sigs let number_of_partial_sigs = threshold as usize; @@ -248,19 +251,55 @@ where "Must obtain {number_of_partial_sigs} partial sigs to continue ..." )); + let mut received_sigs = HashSet::new(); + while let Some(msg) = incoming_wrapper.next().await { + let payload = msg.body.payload.payload().clone(); + params.logger.checkpoint_message_raw(&payload, "CP-Voting-Received"); if let DKGMsgPayload::Vote(dkg_vote_msg) = msg.body.payload { // only process messages which are from the respective proposal if dkg_vote_msg.round_key.as_slice() == hash_of_proposal { + params.logger.checkpoint_message_raw(&payload, "CP-Voting-Received-2"); + if !received_sigs.insert(msg.sender) { + params.logger.warn_signing(format!( + "Received duplicate partial sig from {}", + msg.sender + )); + params.logger.clear_checkpoint_for_message_raw(&payload); + continue + } + + if msg.body.associated_block_id != params.associated_block_id { + params.logger.warn_signing(format!( + "Received partial sig from {} with wrong associated block id", + msg.sender + )); + params.logger.clear_checkpoint_for_message_raw(&payload); + continue + } + + params.logger.checkpoint_message_raw(&payload, "CP-Voting-Received-3"); params.logger.info_signing("Found matching round key!".to_string()); let partial = serde_json::from_slice::( &dkg_vote_msg.partial_signature, ) .map_err(|err| DKGError::GenericError { reason: err.to_string() })?; + params.logger.checkpoint_message_raw(&payload, "CP-Voting-Received-4"); + params.logger.debug(format!( + "[Sig] Received from {} sig {:?}", + dkg_vote_msg.party_ind, partial + )); sigs.push(partial); params .logger .info_signing(format!("There are now {} partial sigs ...", sigs.len())); + params.logger.round_event( + &ty, + RoundsEventType::ProceededToRound { + session: params.session_id, + round: sigs.len(), + }, + ); if sigs.len() == number_of_partial_sigs { break } @@ -303,7 +342,12 @@ where params.session_id, batch_key, message, - ) + )?; + params.logger.round_event( + &ty, + RoundsEventType::ProceededToRound { session: params.session_id, round: 9999999999 }, + ); + Ok(()) }); Ok(GenericAsyncHandler { protocol }) diff --git a/dkg-gadget/src/async_protocols/sign/state_machine.rs index 
9057d4be1..e5812fa26 100644 --- a/dkg-gadget/src/async_protocols/sign/state_machine.rs +++ b/dkg-gadget/src/async_protocols/sign/state_machine.rs @@ -20,7 +20,7 @@ use crate::{ debug_logger::DebugLogger, }; use async_trait::async_trait; -use dkg_primitives::types::{DKGError, DKGMessage, DKGMsgPayload, SignedDKGMessage}; +use dkg_primitives::types::{DKGError, DKGMessage, DKGMsgPayload}; use dkg_runtime_primitives::{crypto::Public, MaxAuthorities, UnsignedProposal}; use futures::channel::mpsc::UnboundedSender; use multi_party_ecdsa::protocols::multi_party_ecdsa::gg_2020::state_machine::sign::{ @@ -28,15 +28,11 @@ use multi_party_ecdsa::protocols::multi_party_ecdsa::gg_2020::state_machine::sig }; use round_based::{Msg, StateMachine}; -use std::sync::Arc; -use tokio::sync::broadcast::Receiver; - #[async_trait] impl StateMachineHandler for OfflineStage { type AdditionalReturnParam = ( UnsignedProposal<::MaxProposalLength>, OfflinePartyId, - Receiver>>, Threshold, BatchKey, ); @@ -48,11 +44,14 @@ impl StateMachineHandler for OfflineStage local_ty: &ProtocolType<::MaxProposalLength>, logger: &DebugLogger, ) -> Result<(), ::Err> { + let payload_raw = msg.body.payload.payload().clone(); + logger.checkpoint_message_raw(&payload_raw, "CP-2.6-incoming"); let DKGMessage { payload, .. } = msg.body; // Send the payload to the appropriate AsyncProtocols match payload { DKGMsgPayload::Offline(msg) => { + logger.checkpoint_message_raw(&payload_raw, "CP-2.7-incoming"); let message: Msg = match serde_json::from_slice(msg.offline_msg.as_slice()) { Ok(msg) => msg, @@ -64,9 +63,11 @@ impl StateMachineHandler for OfflineStage return Ok(()) }, }; + logger.checkpoint_message_raw(&payload_raw, "CP-2.8-incoming"); if let Some(recv) = message.receiver.as_ref() { if *recv != local_ty.get_i() { logger.info_signing("Skipping passing of message to async proto since not intended for local"); + logger.clear_checkpoint_for_message_raw(&payload_raw); return Ok(()) } } @@ -78,13 +79,15 @@ impl StateMachineHandler for OfflineStage .expect("Unsigned proposal hash failed") != msg.key.as_slice() { - //dkg_logging::info!("Skipping passing of message to async proto since not - // correct unsigned proposal"); + logger.warn_signing("Skipping passing of message to async proto since not correct unsigned proposal"); + logger.clear_checkpoint_for_message_raw(&payload_raw); return Ok(()) } if let Err(err) = to_async_proto.unbounded_send(message) { logger.error_signing(format!("Error sending message to async proto: {err}")); + } else { + logger.checkpoint_message_raw(&payload_raw, "CP-2.9-incoming"); } }, @@ -98,25 +101,24 @@ impl StateMachineHandler for OfflineStage offline_stage: ::Output, params: AsyncProtocolParameters, unsigned_proposal: Self::AdditionalReturnParam, - async_index: u8, ) -> Result<(), DKGError> { - params.logger.info_signing("Completed offline stage successfully!".to_string()); + params.logger.info_signing("Completed offline stage successfully!"); // Take the completed offline stage and immediately execute the corresponding voting // stage (this will allow parallelism between offline stages executing across the // network) // // NOTE: we pass the generated offline stage id for the i in voting to keep // consistency + let rx_handle = params.handle.rx_voting.lock().take().expect("rx_voting not found"); let logger = params.logger.clone(); match GenericAsyncHandler::new_voting( params, offline_stage, unsigned_proposal.0, unsigned_proposal.1, + rx_handle, unsigned_proposal.2, unsigned_proposal.3, - unsigned_proposal.4, - 
async_index, ) { Ok(voting_stage) => { logger.info_signing("Starting voting stage...".to_string()); diff --git a/dkg-gadget/src/async_protocols/state_machine.rs b/dkg-gadget/src/async_protocols/state_machine.rs index 4b8d621f2..d31accd9f 100644 --- a/dkg-gadget/src/async_protocols/state_machine.rs +++ b/dkg-gadget/src/async_protocols/state_machine.rs @@ -30,7 +30,7 @@ pub(crate) type StateMachineTxRx = ( #[async_trait] /// Trait for interfacing between the meta handler and the individual state machines pub trait StateMachineHandler: - StateMachine + RoundBlame + Send + StateMachine + RoundBlame + Send + Debug where ::Output: Send, { @@ -54,6 +54,5 @@ where result: ::Output, params: AsyncProtocolParameters, additional_param: Self::AdditionalReturnParam, - async_index: u8, ) -> Result; } diff --git a/dkg-gadget/src/async_protocols/state_machine_wrapper.rs b/dkg-gadget/src/async_protocols/state_machine_wrapper.rs index aaf1f6aaf..3a91d7f4a 100644 --- a/dkg-gadget/src/async_protocols/state_machine_wrapper.rs +++ b/dkg-gadget/src/async_protocols/state_machine_wrapper.rs @@ -12,14 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +use super::{CurrentRoundBlame, ProtocolType}; +use crate::{async_protocols::MessageRoundID, debug_logger::DebugLogger}; use dkg_primitives::types::SessionId; use multi_party_ecdsa::protocols::multi_party_ecdsa::gg_2020::state_machine::traits::RoundBlame; use round_based::{Msg, StateMachine}; use sp_runtime::traits::Get; -use std::{collections::HashSet, sync::Arc}; - -use super::{CurrentRoundBlame, ProtocolType}; -use crate::debug_logger::DebugLogger; +use std::{collections::HashSet, fmt::Debug, sync::Arc}; pub(crate) struct StateMachineWrapper< T: StateMachine, @@ -32,10 +31,12 @@ pub(crate) struct StateMachineWrapper< // stores a list of received messages received_messages: HashSet>, logger: DebugLogger, + #[allow(dead_code)] + outgoing_history: Vec>, } impl< - T: StateMachine + RoundBlame, + T: StateMachine + RoundBlame + Debug, MaxProposalLength: Get + Clone + Send + Sync + std::fmt::Debug + 'static, > StateMachineWrapper { @@ -53,11 +54,13 @@ impl< current_round_blame, logger, received_messages: HashSet::new(), + outgoing_history: Vec::new(), } } fn collect_round_blame(&self) { let (unreceived_messages, blamed_parties) = self.round_blame(); + self.logger.debug(format!("Not received messages from : {blamed_parties:?}")); let _ = self .current_round_blame .send(CurrentRoundBlame { unreceived_messages, blamed_parties }); @@ -67,51 +70,101 @@ impl< impl + Clone + Send + Sync + std::fmt::Debug + 'static> StateMachine for StateMachineWrapper where - T: StateMachine + RoundBlame, + T: StateMachine + RoundBlame + Debug, ::Err: std::fmt::Debug, - ::MessageBody: serde::Serialize, + ::MessageBody: serde::Serialize + MessageRoundID, { type Err = T::Err; type Output = T::Output; type MessageBody = T::MessageBody; fn handle_incoming(&mut self, msg: Msg) -> Result<(), Self::Err> { + let (session, round, sender, receiver) = + (self.session_id as _, msg.body.round_id() as _, msg.sender as _, msg.receiver as _); + self.logger.trace(format!( "Handling incoming message for {:?} from session={}, round={}, sender={}", - self.channel_type, - self.session_id, - self.current_round(), - msg.sender + self.channel_type, session, round, sender )); + let msg_hash = crate::debug_logger::message_to_string_hash(&msg); + self.logger.round_event( + &self.channel_type, + crate::RoundsEventType::ReceivedMessage { + session, + round, + 
sender, + receiver, + msg_hash: msg_hash.clone(), + }, + ); + self.logger.trace(format!("SM Before: {:?}", &self.sm)); + + self.collect_round_blame(); + + if round < self.current_round().into() { + self.logger.trace(format!( + "Message for {:?} from session={}, round={} is outdated, ignoring", + self.channel_type, session, round + )); + self.logger.clear_checkpoint_for_message(&msg); + return Ok(()) + } // Before passing to the state machine, make sure that we haven't already received the same // message (this is needed as we use a gossiping protocol to send messages, and we don't // want to process the same message twice) let msg_serde = bincode2::serialize(&msg).expect("Failed to serialize message"); - if self.received_messages.contains(&msg_serde) { + if !self.received_messages.insert(msg_serde) { self.logger.trace(format!( "Already received message for {:?} from session={}, round={}, sender={}", - self.channel_type, - self.session_id, - self.current_round(), - msg.sender + self.channel_type, session, round, sender )); + self.logger.clear_checkpoint_for_message(&msg); return Ok(()) - } else { - self.received_messages.insert(msg_serde); } - let result = self.sm.handle_incoming(msg); + let result = self.sm.handle_incoming(msg.clone()); if let Some(err) = result.as_ref().err() { self.logger.error(format!("StateMachine error: {err:?}")); + self.logger.checkpoint_message(&msg, format!("IN-STATE-MACHINE-ERR: {err:?}")) + } else { + self.logger.round_event( + &self.channel_type, + crate::RoundsEventType::ProcessedMessage { + session, + round, + sender, + receiver, + msg_hash, + }, + ); + self.logger.clear_checkpoint_for_message(&msg); } - self.collect_round_blame(); + self.logger.trace(format!("SM After: {:?}", &self.sm)); + result } fn message_queue(&mut self) -> &mut Vec> { - if !self.sm.message_queue().is_empty() { + // only send current round + previous round messages if we're running the signing protocol + /* + if !self.sm.message_queue().is_empty() && + matches!(self.channel_type, ProtocolType::Offline { .. 
}) + { + // store outgoing messages in history + let mut last_2_rounds = vec![]; + let current_round = self.current_round(); + let current_round_minus_1 = current_round.saturating_sub(1); + self.outgoing_history.extend(self.sm.message_queue().clone()); + for message in &self.outgoing_history { + let message_round = message.body.round_id(); + if message_round >= current_round_minus_1 && message_round <= current_round { + last_2_rounds.push(message.clone()); + } + } + // pass all messages in outgoing_history to the state machine + *self.sm.message_queue() = last_2_rounds; self.logger.trace(format!( "Preparing to drain message queue for {:?} in session={}, round={}, queue size={}", self.channel_type, @@ -119,7 +172,8 @@ where self.current_round(), self.sm.message_queue().len(), )); - } + }*/ + self.sm.message_queue() } @@ -140,6 +194,14 @@ where self.current_round(), self.round_blame(), )); + self.logger.round_event( + &self.channel_type, + crate::RoundsEventType::ProceededToRound { + session: self.session_id as _, + round: self.current_round() as _, + }, + ); + self.collect_round_blame(); result } diff --git a/dkg-gadget/src/async_protocols/test_utils.rs b/dkg-gadget/src/async_protocols/test_utils.rs index 3df297846..472e6022f 100644 --- a/dkg-gadget/src/async_protocols/test_utils.rs +++ b/dkg-gadget/src/async_protocols/test_utils.rs @@ -34,14 +34,15 @@ pub struct TestDummyIface { pub keygen_key: Arc>>>, } +#[async_trait::async_trait] impl BlockchainInterface for TestDummyIface { type Clock = u32; type GossipEngine = (); type MaxProposalLength = MaxProposalLength; - fn verify_signature_against_authorities( + async fn verify_signature_against_authorities( &self, - message: Arc>, + message: SignedDKGMessage, ) -> Result, DKGError> { Ok(message.msg.clone()) } diff --git a/dkg-gadget/src/db/offchain_storage.rs b/dkg-gadget/src/db/offchain_storage.rs index 5e4a692ba..c8af2013f 100644 --- a/dkg-gadget/src/db/offchain_storage.rs +++ b/dkg-gadget/src/db/offchain_storage.rs @@ -75,7 +75,7 @@ mod keys { impl super::DKGDbBackend for DKGOffchainStorageDb where B: Block, - BE: Backend + 'static, + BE: Backend + Unpin + 'static, { fn get_local_key( &self, diff --git a/dkg-gadget/src/debug_logger.rs b/dkg-gadget/src/debug_logger.rs deleted file mode 100644 index 18bfe2a62..000000000 --- a/dkg-gadget/src/debug_logger.rs +++ /dev/null @@ -1,111 +0,0 @@ -#![allow(clippy::unwrap_used)] -use dkg_logging::{debug, error, info, trace, warn}; -use std::{io::Write, sync::Arc}; - -#[derive(Clone, Debug)] -pub struct DebugLogger { - identifier: Arc, - to_file_io: Option>, -} - -impl DebugLogger { - pub fn new(identifier: T, file: Option) -> Self { - // use a channel for sending file I/O requests to a dedicated thread to avoid blocking the - // DKG workers - if let Some(mut file) = file { - let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel(); - - tokio::task::spawn(async move { - while let Some(message) = rx.recv().await { - writeln!(file, "{message:?}").unwrap(); - } - }); - - Self { identifier: Arc::new(identifier.to_string()), to_file_io: Some(tx) } - } else { - Self { identifier: Arc::new(identifier.to_string()), to_file_io: None } - } - } - - pub fn trace(&self, message: T) { - self.log_to_file("dkg_gadget", "trace", &message); - trace!(target: "dkg_gadget", "[{}]: {message:?}", self.identifier); - } - - pub fn debug(&self, message: T) { - self.log_to_file("dkg_gadget", "debug", &message); - debug!(target: "dkg_gadget", "[{}]: {message:?}", self.identifier); - } - - pub fn info(&self, message: T) { - 
self.log_to_file("dkg_gadget", "info", &message); - info!(target: "dkg_gadget", "[{}]: {message:?}", self.identifier); - } - - pub fn warn(&self, message: T) { - self.log_to_file("dkg_gadget", "warn", &message); - warn!(target: "dkg_gadget", "[{}]: {message:?}", self.identifier); - } - - pub fn error(&self, message: T) { - self.log_to_file("dkg_gadget", "error", &message); - error!(target: "dkg_gadget", "[{}]: {message:?}", self.identifier); - } - - pub fn trace_signing(&self, message: T) { - self.log_to_file("dkg_gadget::async_protocol::signing", "trace", &message); - trace!(target: "dkg_gadget::signing", "[{}]: {message:?}", self.identifier); - } - - pub fn debug_signing(&self, message: T) { - self.log_to_file("dkg_gadget::async_protocol::signing", "debug", &message); - debug!(target: "dkg_gadget::signing", "[{}]: {message:?}", self.identifier); - } - - pub fn info_signing(&self, message: T) { - self.log_to_file("dkg_gadget::async_protocol::signing", "info", &message); - info!(target: "dkg_gadget::signing", "[{}]: {message:?}", self.identifier); - } - - pub fn warn_signing(&self, message: T) { - self.log_to_file("dkg_gadget::async_protocol::signing", "warn", &message); - warn!(target: "dkg_gadget::signing", "[{}]: {message:?}", self.identifier); - } - - pub fn error_signing(&self, message: T) { - self.log_to_file("dkg_gadget::async_protocol::signing", "error", &message); - error!(target: "dkg_gadget::signing", "[{}]: {message:?}", self.identifier); - } - - pub fn trace_keygen(&self, message: T) { - self.log_to_file("dkg_gadget::async_protocol::keygen", "trace", &message); - trace!(target: "dkg_gadget::keygen", "[{}]: {message:?}", self.identifier); - } - - pub fn debug_keygen(&self, message: T) { - self.log_to_file("dkg_gadget::async_protocol::keygen", "debug", &message); - debug!(target: "dkg_gadget::keygen", "[{}]: {message:?}", self.identifier); - } - - pub fn info_keygen(&self, message: T) { - self.log_to_file("dkg_gadget::async_protocol::keygen", "info", &message); - info!(target: "dkg_gadget::keygen", "[{}]: {message:?}", self.identifier); - } - - pub fn warn_keygen(&self, message: T) { - self.log_to_file("dkg_gadget::async_protocol::keygen", "warn", &message); - warn!(target: "dkg_gadget::keygen", "[{}]: {message:?}", self.identifier); - } - - pub fn error_keygen(&self, message: T) { - self.log_to_file("dkg_gadget::async_protocol::keygen", "error", &message); - error!(target: "dkg_gadget::keygen", "[{}]: {message:?}", self.identifier); - } - - fn log_to_file(&self, target: &str, level: &str, message: T) { - if let Some(file) = &self.to_file_io { - let message = format!("[{target}] [{level}]: {message:?}"); - file.send(message).unwrap(); - } - } -} diff --git a/dkg-gadget/src/gossip_engine/mod.rs b/dkg-gadget/src/gossip_engine/mod.rs index 3e3eb1434..d2c4fda84 100644 --- a/dkg-gadget/src/gossip_engine/mod.rs +++ b/dkg-gadget/src/gossip_engine/mod.rs @@ -14,18 +14,16 @@ //! Webb Custom DKG Gossip Engine. -use std::pin::Pin; - use auto_impl::auto_impl; use dkg_primitives::{ crypto::AuthoritySignature, types::{DKGError, SignedDKGMessage}, }; use dkg_runtime_primitives::crypto::AuthorityId; -use futures::{Stream, StreamExt}; use sc_network::PeerId; use sp_application_crypto::RuntimeAppPublic; use sp_arithmetic::traits::AtLeast32BitUnsigned; +use tokio::sync::mpsc::UnboundedReceiver; /// A Gossip Engine for DKG, that uses [`sc_network::NetworkService`] as a backend. 
mod network; @@ -51,21 +49,9 @@ pub trait GossipEngineIface: Send + Sync + 'static { ) -> Result<(), DKGError>; /// Send a DKG message to all peers. fn gossip(&self, message: SignedDKGMessage) -> Result<(), DKGError>; - /// A stream that sends messages when they are ready to be polled from the message queue. - fn message_available_notification(&self) -> Pin + Send>>; - /// Peek the front of the message queue. - /// - /// Note that this will not remove the message from the queue, it will only return it. For - /// removing the message from the queue, use `acknowledge_last_message`. - /// - /// Returns `None` if there are no messages in the queue. - fn peek_last_message(&self) -> Option>; - /// Acknowledge the last message (the front of the queue) and mark it as processed, then removes - /// it from the queue. - fn acknowledge_last_message(&self); - - /// Clears the Message Queue. - fn clear_queue(&self); + /// Returns the message stream. Yields `Some` only on the first call and `None` thereafter, + /// enforcing a single consumer of the stream rather than multiple read points in the codebase. + fn get_stream(&self) -> Option>>; fn local_peer_id(&self) -> PeerId; fn logger(&self) -> &DebugLogger; @@ -87,18 +73,10 @@ impl GossipEngineIface for () { Ok(()) } - fn message_available_notification(&self) -> Pin + Send>> { - futures::stream::pending().boxed() - } - - fn peek_last_message(&self) -> Option> { + fn get_stream(&self) -> Option>> { None } - fn acknowledge_last_message(&self) {} - - fn clear_queue(&self) {} - fn local_peer_id(&self) -> PeerId { PeerId::random() } diff --git a/dkg-gadget/src/gossip_engine/network.rs b/dkg-gadget/src/gossip_engine/network.rs index 6d9b4cccd..6a2bf69ae 100644 @@ -44,9 +44,9 @@ use crate::{debug_logger::DebugLogger, metrics::Metrics, worker::HasLatestHeader use codec::{Decode, Encode}; use dkg_primitives::types::{DKGError, SignedDKGMessage}; use dkg_runtime_primitives::crypto::AuthorityId; -use futures::{Stream, StreamExt}; +use futures::StreamExt; use linked_hash_map::LinkedHashMap; -use parking_lot::RwLock; +use parking_lot::{Mutex, RwLock}; use sc_network::{multiaddr, Event, NetworkService, NetworkStateInfo, PeerId, ProtocolName}; use sc_network_common::{ config, error, @@ -54,18 +54,17 @@ use sc_network_common::{ }; use sp_runtime::traits::{Block, NumberFor}; use std::{ - collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + collections::{hash_map::Entry, HashMap, HashSet}, hash::Hash, iter, marker::PhantomData, num::NonZeroUsize, - pin::Pin, sync::{ - atomic::{AtomicBool, AtomicU8, Ordering}, + atomic::{AtomicBool, Ordering}, Arc, }, }; -use tokio::sync::broadcast; +use tokio::sync::mpsc::UnboundedReceiver; #[derive(Clone)] pub struct NetworkGossipEngineBuilder { @@ -115,23 +114,18 @@ impl NetworkGossipEngineBuilder { // background task and the controller. // since we have two things here we will need two channels: // 1. a channel to send commands to the background task (Controller -> Background). 
- let (handler_channel, _) = broadcast::channel(MAX_PENDING_MESSAGES); - let (message_notifications_channel, _) = broadcast::channel(MAX_PENDING_MESSAGES); + let (handler_channel, handler_channel_rx) = tokio::sync::mpsc::unbounded_channel(); + let (message_channel_tx, message_channel_rx) = tokio::sync::mpsc::unbounded_channel(); let gossip_enabled = Arc::new(AtomicBool::new(false)); - let processing_already_seen_messages_enabled = Arc::new(AtomicBool::new(false)); - let message_queue = Arc::new(RwLock::new(VecDeque::new())); let handler = GossipHandler { latest_header, keystore: self.keystore, protocol_name: self.protocol_name.clone(), - my_channel: handler_channel.clone(), - message_queue: message_queue.clone(), - message_notifications_channel: message_notifications_channel.clone(), + to_receiver: message_channel_tx, + incoming_messages_stream: Arc::new(Mutex::new(Some(handler_channel_rx))), pending_messages_peers: Arc::new(RwLock::new(HashMap::new())), authority_id_to_peer_id: Arc::new(RwLock::new(HashMap::new())), gossip_enabled: gossip_enabled.clone(), - processing_already_seen_messages_enabled: processing_already_seen_messages_enabled - .clone(), service, peers: Arc::new(RwLock::new(HashMap::new())), logger: logger.clone(), @@ -144,10 +138,8 @@ impl NetworkGossipEngineBuilder { local_peer_id, protocol_name: self.protocol_name, handler_channel, - message_notifications_channel, + message_notifications_channel: Arc::new(Mutex::new(Some(message_channel_rx))), gossip_enabled, - processing_already_seen_messages_enabled, - message_queue, logger, _pd: Default::default(), }; @@ -162,9 +154,6 @@ const MAX_KNOWN_MESSAGES: usize = 10240; // ~300kb per peer + overhead. /// Maximum allowed size for a DKG Signed Message notification. const MAX_MESSAGE_SIZE: u64 = 16 * 1024 * 1024; -/// Maximum number of messages request we keep at any moment. -const MAX_PENDING_MESSAGES: usize = 8192; - /// Maximum number of duplicate messages that a single peer can send us. /// /// This is to prevent a malicious peer from spamming us with messages. @@ -183,66 +172,18 @@ mod rep { pub const DUPLICATE_MESSAGE: Rep = Rep::new(-(1 << 12), "Duplicate message"); } -/// A wrapper around a [`SignedDKGMessage`] that also keeps track of how many time we tried to -/// process (read from the queue) that message and depending on that counter, we can decide to -/// ignore the message if it has been processed too many times but no progress has been made. -#[derive(Debug)] -struct DKGMessageWrapper { - inner: SignedDKGMessage, - read_count: AtomicU8, -} - -impl DKGMessageWrapper { - /// Maximum number of times we try to process a message before we ignore it. - const MAX_READ_COUNT: u8 = 5; - /// Create a new [`DKGMessageWrapper`]. - fn new(inner: SignedDKGMessage) -> Self { - Self { inner, read_count: AtomicU8::new(0) } - } - - /// Returns the current read count. - fn read_count(&self) -> u8 { - self.read_count.load(Ordering::Relaxed) - } - - /// Returns a clone of the inner [`SignedDKGMessage`] - /// and increases the read count by one, ignoring the message if the read count is too high. - fn read(&self) -> SignedDKGMessage { - self.read_count.fetch_add(1, Ordering::Relaxed); - self.inner.clone() - } - - /// Returns a clone of the inner [`SignedDKGMessage`] if the read count is less than the - /// [`Self::MAX_READ_COUNT`]. - fn try_read(&self) -> Option> { - if self.read_count() < Self::MAX_READ_COUNT { - Some(self.read()) - } else { - None - } - } - - /// Unwraps the inner [`SignedDKGMessage`]. 
- fn unwrap(self) -> SignedDKGMessage { - self.inner - } -} - /// Controls the behaviour of a [`GossipHandler`] it is connected to. #[derive(Clone)] pub struct GossipHandlerController { local_peer_id: PeerId, protocol_name: ProtocolName, /// a channel to send commands to the background task (Controller -> Background). - handler_channel: broadcast::Sender, - /// A simple channel to send notifications whenever we receive a message from a peer. - message_notifications_channel: broadcast::Sender<()>, - /// A Buffer of messages that we have received from the network, but not yet processed. - message_queue: Arc>>>, + handler_channel: tokio::sync::mpsc::UnboundedSender, + /// where messages are received + message_notifications_channel: + Arc>>>>, /// Whether the gossip mechanism is enabled or not. gossip_enabled: Arc, - /// Whether we should process already seen messages or not. - processing_already_seen_messages_enabled: Arc, logger: DebugLogger, /// Used to keep type information about the block. May /// be useful for the future, so keeping it here @@ -271,7 +212,6 @@ impl super::GossipEngineIface for GossipHandlerController { )); self.handler_channel .send(ToHandler::SendMessage { recipient, message }) - .map(|_| ()) .map_err(|_| DKGError::GenericError { reason: "Failed to send message to handler".into(), }) @@ -285,70 +225,8 @@ impl super::GossipEngineIface for GossipHandlerController { }) } - fn message_available_notification(&self) -> Pin + Send>> { - // We need to create a new receiver of the channel, so that we can receive messages - // from anywhere, without actually fight the rustc borrow checker. - let stream = self.message_notifications_channel.subscribe(); - tokio_stream::wrappers::BroadcastStream::new(stream) - .filter_map(|m| futures::future::ready(m.ok())) - .boxed() - } - - fn peek_last_message(&self) -> Option> { - let mut lock = self.message_queue.write(); - let msg = match lock.front() { - Some(value) => value.try_read(), - None => { - self.logger.debug("No message to dequeue"); - return None - }, - }; - match msg { - Some(msg) => { - self.logger.debug(format!( - "Protocol : {:?} | Dequeuing message: {}", - self.protocol_name, - msg.message_hash::() - )); - Some(msg) - }, - None => { - self.logger.debug(format!( - "Protocol : {:?} | Message already read too many times, ignoring", - self.protocol_name - )); - // We have already read this message too many times, so we remove it from the queue. - let _ = lock.pop_front(); - None - }, - } - } - - fn acknowledge_last_message(&self) { - let mut lock = self.message_queue.write(); - let msg = lock.pop_front().map(|m| m.unwrap()); - match msg { - Some(msg) => { - self.logger.debug(format!( - "Protocol : {:?} | Acknowledging message: {}", - self.protocol_name, - msg.message_hash::() - )); - }, - None => { - self.logger.debug(format!( - "Protocol : {:?} | No message to acknowledge", - self.protocol_name - )); - }, - } - } - - fn clear_queue(&self) { - self.logger - .debug(format!("Protocol : {:?} | Clearing message queue", self.protocol_name)); - let mut lock = self.message_queue.write(); - lock.clear(); + fn get_stream(&self) -> Option>> { + self.message_notifications_channel.lock().take() } } /// an Enum Representing the commands that can be sent to the background task. @@ -365,11 +243,6 @@ impl GossipHandlerController { pub fn set_gossip_enabled(&self, enabled: bool) { self.gossip_enabled.store(enabled, Ordering::Relaxed); } - - /// Controls whether we process already seen messages or not. 
- pub fn set_processing_already_seen_messages_enabled(&self, enabled: bool) { - self.processing_already_seen_messages_enabled.store(enabled, Ordering::Relaxed); - } } /// Handler for gossiping messages. Call [`GossipHandler::run`] to start the processing. @@ -383,10 +256,9 @@ pub struct GossipHandler { latest_header: Arc>>, /// The DKG Keystore. keystore: DKGKeystore, - /// A Buffer of messages that we have received from the network, but not yet processed. - message_queue: Arc>>>, /// A Simple notification stream to notify the caller that we have messages in the queue. - message_notifications_channel: broadcast::Sender<()>, + to_receiver: tokio::sync::mpsc::UnboundedSender>, + incoming_messages_stream: Arc>>>, /// As multiple peers can send us the same message, we group /// these peers using the message hash while the message is /// received. This prevents that we receive the same message @@ -402,10 +274,6 @@ pub struct GossipHandler { authority_id_to_peer_id: Arc>>, /// Whether the gossip mechanism is enabled or not. gossip_enabled: Arc, - /// Whether we should process already seen messages or not. - processing_already_seen_messages_enabled: Arc, - /// A Channel to receive commands from the controller. - my_channel: broadcast::Sender, logger: DebugLogger, /// Prometheus metrics. metrics: Arc>, @@ -417,17 +285,13 @@ impl Clone for GossipHandler { protocol_name: self.protocol_name.clone(), latest_header: self.latest_header.clone(), keystore: self.keystore.clone(), - message_queue: self.message_queue.clone(), - message_notifications_channel: self.message_notifications_channel.clone(), + to_receiver: self.to_receiver.clone(), + incoming_messages_stream: self.incoming_messages_stream.clone(), pending_messages_peers: self.pending_messages_peers.clone(), service: self.service.clone(), peers: self.peers.clone(), authority_id_to_peer_id: self.authority_id_to_peer_id.clone(), gossip_enabled: self.gossip_enabled.clone(), - processing_already_seen_messages_enabled: self - .processing_already_seen_messages_enabled - .clone(), - my_channel: self.my_channel.clone(), logger: self.logger.clone(), metrics: self.metrics.clone(), } @@ -466,8 +330,11 @@ impl GossipHandler { /// Turns the [`GossipHandler`] into a future that should run forever and not be /// interrupted. pub async fn run(self) { - let stream = self.my_channel.subscribe(); - let mut incoming_messages = tokio_stream::wrappers::BroadcastStream::new(stream); + let mut incoming_messages = self + .incoming_messages_stream + .lock() + .take() + .expect("incoming message stream already taken"); let mut event_stream = self.service.event_stream("dkg-handler"); self.logger.debug("Starting the DKG Gossip Handler"); @@ -477,38 +344,25 @@ impl GossipHandler { // first task, handles the incoming messages/Commands from the controller. let self0 = self.clone(); - let incoming_messages_task = tokio::spawn(async move { - while let Some(message) = incoming_messages.next().await { - match message { - Ok(ToHandler::SendMessage { recipient, message }) => - self0.send_signed_dkg_message(recipient, message), - Ok(ToHandler::Gossip(v)) => self0.gossip_dkg_signed_message(v), - _ => {}, - } - } - }); - - // a timer that fires every few ms to check if there are messages in the queue, and if so, - // notify the listener. 
- let self1 = self.clone(); - let mut timer = tokio::time::interval(core::time::Duration::from_millis(100)); - let timer_task = tokio::spawn(async move { - loop { - timer.tick().await; - let queue = self1.message_queue.read(); - if !queue.is_empty() { - let _ = self1.message_notifications_channel.send(()); + let incoming_messages_task = + crate::utils::ExplicitPanicFuture::new(tokio::spawn(async move { + while let Some(message) = incoming_messages.recv().await { + match message { + ToHandler::SendMessage { recipient, message } => + self0.send_signed_dkg_message(recipient, message), + ToHandler::Gossip(v) => self0.gossip_dkg_signed_message(v), + } } - } - }); + })); let self2 = self.clone(); // second task, handles the incoming messages/events from the network stream. - let network_events_task = tokio::spawn(async move { - while let Some(event) = event_stream.next().await { - self2.handle_network_event(event).await; - } - }); + let network_events_task = + crate::utils::ExplicitPanicFuture::new(tokio::spawn(async move { + while let Some(event) = event_stream.next().await { + self2.handle_network_event(event).await; + } + })); // wait for the first task to finish or error out. // @@ -526,12 +380,8 @@ impl GossipHandler { // events task should have finished as well. // 3. The timer task, however, will never finish, unless the node is shutting down, in which // case the network events task should have finished as well. - let _result = futures::future::select_all(vec![ - network_events_task, - incoming_messages_task, - timer_task, - ]) - .await; + let _result = + futures::future::select_all(vec![network_events_task, incoming_messages_task]).await; self.logger.error("The DKG Gossip Handler has finished!!".to_string()); } @@ -652,7 +502,7 @@ impl GossipHandler { match message.is_valid(who) { Ok(v) => if v { - self.logger.debug("Handshake message from peer {who} is valid"); + self.logger.debug(format!("Handshake message from peer {who} is valid")); } else { self.logger.warn(format!("Handshake message from peer {who} is invalid")); self.service.report_peer(who, rep::PEER_IMPERSONATED); @@ -692,16 +542,8 @@ impl GossipHandler { if let Some(ref mut peer) = self.peers.write().get_mut(&who) { peer.known_messages.insert(message.message_hash::()); let mut pending_messages_peers = self.pending_messages_peers.write(); - let enqueue_the_message = || { - let mut queue_lock = self.message_queue.write(); - queue_lock.push_back(DKGMessageWrapper::new(message.clone())); - drop(queue_lock); - let recv_count = self.message_notifications_channel.receiver_count(); - if recv_count == 0 { - self.logger - .warn("No one is going to process the message notification!!!".to_string()); - } - if let Err(e) = self.message_notifications_channel.send(()) { + let send_the_message = |message: SignedDKGMessage| { + if let Err(e) = self.to_receiver.send(message) { self.logger.error(format!( "Failed to send message notification to DKG controller: {e:?}" )); @@ -717,7 +559,7 @@ impl GossipHandler { if let Some(metrics) = self.metrics.as_ref() { metrics.dkg_new_signed_messages.inc(); } - enqueue_the_message(); + send_the_message(message.clone()); entry.insert(HashSet::from([who])); // This good, this peer is good, they sent us a message we didn't know about. // we should add some good reputation to them. @@ -752,10 +594,8 @@ impl GossipHandler { } } - // check if we shall process this old message or not. 
- if self.processing_already_seen_messages_enabled.load(Ordering::Relaxed) { - enqueue_the_message(); - } + // send the old message anyways + send_the_message(message.clone()); }, } } @@ -794,8 +634,9 @@ impl GossipHandler { // If we have a peer id, we send the message to that peer directly. if let Some(peer_id) = maybe_peer_id { self.logger.debug(format!("Sending message to recipient {peer_id} using p2p")); - self.send_signed_dkg_message(peer_id, message); - return + self.send_signed_dkg_message(peer_id, message.clone()); + // potential bug "fix" + //return } else if let Some(recipient_id) = &message.msg.recipient_id { self.logger.debug(format!( "No direct connection to {recipient_id}, falling back to gossiping" diff --git a/dkg-gadget/src/gossip_messages/misbehaviour_report.rs b/dkg-gadget/src/gossip_messages/misbehaviour_report.rs index b948defa2..c8762a6ee 100644 --- a/dkg-gadget/src/gossip_messages/misbehaviour_report.rs +++ b/dkg-gadget/src/gossip_messages/misbehaviour_report.rs @@ -29,13 +29,13 @@ use dkg_runtime_primitives::{ use sc_client_api::Backend; use sp_runtime::traits::{Block, Get, NumberFor}; -pub(crate) fn handle_misbehaviour_report( +pub(crate) async fn handle_misbehaviour_report( dkg_worker: &DKGWorker, dkg_msg: DKGMessage, ) -> Result<(), DKGError> where B: Block, - BE: Backend + 'static, + BE: Backend + Unpin + 'static, GE: GossipEngineIface + 'static, C: Client + 'static, MaxProposalLength: Get + Clone + Send + Sync + 'static + std::fmt::Debug, @@ -44,7 +44,10 @@ where { // Get authority accounts let header = &(dkg_worker.latest_header.read().clone().ok_or(DKGError::NoHeader)?); - let authorities = dkg_worker.validator_set(header).map(|a| (a.0.authorities, a.1.authorities)); + let authorities = dkg_worker + .validator_set(header) + .await + .map(|a| (a.0.authorities, a.1.authorities)); if authorities.is_none() { return Err(DKGError::NoAuthorityAccounts) } @@ -80,42 +83,44 @@ where )?; dkg_worker.logger.debug(format!("Reporter: {reporter:?}")); // Add new report to the aggregated reports - let mut lock = dkg_worker.aggregated_misbehaviour_reports.write(); - let reports = lock - .entry((msg.misbehaviour_type, msg.session_id, msg.offender.clone())) - .or_insert_with(|| AggregatedMisbehaviourReports { - misbehaviour_type: msg.misbehaviour_type, - session_id: msg.session_id, - offender: msg.offender.clone(), - reporters: Default::default(), - signatures: Default::default(), - }); - dkg_worker.logger.debug(format!("Reports: {reports:?}")); - if !reports.reporters.contains(&reporter) { - reports.reporters.try_push(reporter).map_err(|_| DKGError::InputOutOfBounds)?; - let bounded_signature = - msg.signature.try_into().map_err(|_| DKGError::InputOutOfBounds)?; - reports - .signatures - .try_push(bounded_signature) - .map_err(|_| DKGError::InputOutOfBounds)?; - } + let reports = { + let mut lock = dkg_worker.aggregated_misbehaviour_reports.write(); + let reports = lock + .entry((msg.misbehaviour_type, msg.session_id, msg.offender.clone())) + .or_insert_with(|| AggregatedMisbehaviourReports { + misbehaviour_type: msg.misbehaviour_type, + session_id: msg.session_id, + offender: msg.offender.clone(), + reporters: Default::default(), + signatures: Default::default(), + }); + dkg_worker.logger.debug(format!("Reports: {reports:?}")); + if !reports.reporters.contains(&reporter) { + reports.reporters.try_push(reporter).map_err(|_| DKGError::InputOutOfBounds)?; + let bounded_signature = + msg.signature.try_into().map_err(|_| DKGError::InputOutOfBounds)?; + reports + .signatures + 
.try_push(bounded_signature) + .map_err(|_| DKGError::InputOutOfBounds)?; + } - // Try to store reports offchain - let reports = reports.clone(); - try_store_offchain(dkg_worker, &reports)?; + reports.clone() + }; + + try_store_offchain(dkg_worker, &reports).await?; } Ok(()) } -pub(crate) fn gossip_misbehaviour_report( +pub(crate) async fn gossip_misbehaviour_report( dkg_worker: &DKGWorker, report: DKGMisbehaviourMessage, ) -> Result<(), DKGError> where B: Block, - BE: Backend + 'static, + BE: Backend + Unpin + 'static, GE: GossipEngineIface + 'static, C: Client + 'static, MaxProposalLength: Get + Clone + Send + Sync + 'static + std::fmt::Debug, @@ -143,6 +148,7 @@ where let status = if report.session_id == 0 { DKGMsgStatus::ACTIVE } else { DKGMsgStatus::QUEUED }; let message = DKGMessage:: { + associated_block_id: vec![], sender_id: public.clone(), // We need to gossip this misbehaviour, so no specific recipient. recipient_id: None, @@ -173,36 +179,43 @@ where Err(e) => dkg_worker.logger.error(format!("🕸️ Error signing DKG message: {e:?}")), } - let mut lock = dkg_worker.aggregated_misbehaviour_reports.write(); - let reports = lock - .entry((report.misbehaviour_type, report.session_id, report.offender.clone())) - .or_insert_with(|| AggregatedMisbehaviourReports { - misbehaviour_type: report.misbehaviour_type, - session_id: report.session_id, - offender: report.offender.clone(), - reporters: Default::default(), - signatures: Default::default(), - }); - - if reports.reporters.contains(&public) { - return Ok(()) - } + let reports = { + let mut lock = dkg_worker.aggregated_misbehaviour_reports.write(); + let reports = lock + .entry((report.misbehaviour_type, report.session_id, report.offender.clone())) + .or_insert_with(|| AggregatedMisbehaviourReports { + misbehaviour_type: report.misbehaviour_type, + session_id: report.session_id, + offender: report.offender.clone(), + reporters: Default::default(), + signatures: Default::default(), + }); + + if reports.reporters.contains(&public) { + return Ok(()) + } + + reports.reporters.try_push(public).map_err(|_| DKGError::InputOutOfBounds)?; + reports + .signatures + .try_push(encoded_signature.try_into().map_err(|_| DKGError::InputOutOfBounds)?) + .map_err(|_| DKGError::InputOutOfBounds)?; - reports.reporters.try_push(public).map_err(|_| DKGError::InputOutOfBounds)?; - reports - .signatures - .try_push(encoded_signature.try_into().map_err(|_| DKGError::InputOutOfBounds)?) 
- .map_err(|_| DKGError::InputOutOfBounds)?; + dkg_worker + .logger + .debug("Gossiping misbehaviour report and signature".to_string()); - dkg_worker - .logger - .debug("Gossiping misbehaviour report and signature".to_string()); + (*reports).clone() + }; - let reports = (*reports).clone(); // Try to store reports offchain - if try_store_offchain(dkg_worker, &reports).is_ok() { + if try_store_offchain(dkg_worker, &reports).await.is_ok() { // remove the report from the queue - lock.remove(&(report.misbehaviour_type, report.session_id, report.offender)); + dkg_worker.aggregated_misbehaviour_reports.write().remove(&( + report.misbehaviour_type, + report.session_id, + report.offender, + )); } Ok(()) } else { @@ -211,13 +224,13 @@ where } } -pub(crate) fn try_store_offchain( +pub(crate) async fn try_store_offchain( dkg_worker: &DKGWorker, reports: &AggregatedMisbehaviourReports, ) -> Result<(), DKGError> where B: Block, - BE: Backend + 'static, + BE: Backend + Unpin + 'static, GE: GossipEngineIface + 'static, C: Client + 'static, C::Api: DKGApi, MaxProposalLength, MaxAuthorities>, @@ -226,7 +239,7 @@ where // Fetch the current threshold for the DKG. We will use the // current threshold to determine if we have enough signatures // to submit the next DKG public key. - let threshold = dkg_worker.get_signature_threshold(header) as usize; + let threshold = dkg_worker.get_signature_threshold(header).await as usize; dkg_worker.logger.debug(format!( "DKG threshold: {}, reports: {}", threshold, diff --git a/dkg-gadget/src/gossip_messages/public_key_gossip.rs b/dkg-gadget/src/gossip_messages/public_key_gossip.rs index 8f902194a..078bbcbd7 100644 --- a/dkg-gadget/src/gossip_messages/public_key_gossip.rs +++ b/dkg-gadget/src/gossip_messages/public_key_gossip.rs @@ -32,13 +32,13 @@ use sc_client_api::Backend; use sp_runtime::traits::{Block, Get, Header, NumberFor}; use std::{collections::HashMap, sync::Arc}; -pub(crate) fn handle_public_key_broadcast( +pub(crate) async fn handle_public_key_broadcast( dkg_worker: &DKGWorker, dkg_msg: DKGMessage, ) -> Result<(), DKGError> where B: Block, - BE: Backend + 'static, + BE: Backend + Unpin + 'static, GE: GossipEngineIface + 'static, C: Client + 'static, C::Api: DKGApi, MaxProposalLength, MaxAuthorities>, @@ -46,7 +46,10 @@ where // Get authority accounts let header = &dkg_worker.latest_header.read().clone().ok_or(DKGError::NoHeader)?; let current_block_number = *header.number(); - let authorities = dkg_worker.validator_set(header).map(|a| (a.0.authorities, a.1.authorities)); + let authorities = dkg_worker + .validator_set(header) + .await + .map(|a| (a.0.authorities, a.1.authorities)); if authorities.is_none() { return Err(DKGError::NoAuthorityAccounts) } @@ -76,16 +79,19 @@ where let key_and_sig = (msg.pub_key, msg.signature); let session_id = msg.session_id; + + // Fetch the current threshold for the DKG. We will use the + // current threshold to determine if we have enough signatures + // to submit the next DKG public key. + let threshold = dkg_worker.get_next_signature_threshold(header).await as usize; + let mut lock = dkg_worker.aggregated_public_keys.write(); let aggregated_public_keys = lock.entry(session_id).or_default(); if !aggregated_public_keys.keys_and_signatures.contains(&key_and_sig) { aggregated_public_keys.keys_and_signatures.push(key_and_sig); } - // Fetch the current threshold for the DKG. We will use the - // current threshold to determine if we have enough signatures - // to submit the next DKG public key. 
- let threshold = dkg_worker.get_next_signature_threshold(header) as usize; + dkg_worker.logger.debug(format!( "SESSION {} | Threshold {} | Aggregated pubkeys {}", msg.session_id, @@ -146,6 +152,7 @@ pub(crate) fn gossip_public_key( let status = if msg.session_id == 0u64 { DKGMsgStatus::ACTIVE } else { DKGMsgStatus::QUEUED }; let message = DKGMessage:: { + associated_block_id: vec![], // we don't need to associate this message with a block sender_id: public.clone(), // we need to gossip the final public key to all parties, so no specific recipient in // this case. diff --git a/dkg-gadget/src/keystore.rs b/dkg-gadget/src/keystore.rs index 4ae07ffb9..f7fb49e8b 100644 --- a/dkg-gadget/src/keystore.rs +++ b/dkg-gadget/src/keystore.rs @@ -194,7 +194,7 @@ impl DKGKeystore { impl From> for DKGKeystore { fn from(store: Option) -> Self { - Self(store, DebugLogger::new("DKGKeystore", None)) + Self(store, DebugLogger::new("DKGKeystore", None).expect("Should not fail")) } } diff --git a/dkg-gadget/src/lib.rs b/dkg-gadget/src/lib.rs index 6f76b2b54..00db28c35 100644 --- a/dkg-gadget/src/lib.rs +++ b/dkg-gadget/src/lib.rs @@ -20,7 +20,7 @@ use parking_lot::RwLock; use prometheus::Registry; use sc_client_api::{Backend, BlockchainEvents}; use sc_keystore::LocalKeystore; -use sc_network::{NetworkService, NetworkStateInfo, ProtocolName}; +use sc_network::{NetworkService, ProtocolName}; use sc_network_common::ExHashT; use sp_api::{NumberFor, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; @@ -33,6 +33,7 @@ pub mod keyring; pub mod keystore; pub mod gossip_engine; +mod signing_manager; // mod meta_async_rounds; pub mod db; mod metrics; @@ -41,10 +42,11 @@ mod utils; pub mod worker; pub mod async_protocols; -pub mod debug_logger; +pub use dkg_logging::debug_logger; pub mod gossip_messages; pub mod storage; +pub use debug_logger::RoundsEventType; use gossip_engine::NetworkGossipEngineBuilder; pub use keystore::DKGKeystore; @@ -99,9 +101,10 @@ where pub local_keystore: Option>, /// Gossip network pub network: Arc>, - /// Prometheus metric registry pub prometheus_registry: Option, + /// For logging + pub debug_logger: DebugLogger, /// Phantom block type pub _block: PhantomData, } @@ -112,7 +115,7 @@ where pub async fn start_dkg_gadget(dkg_params: DKGParams) where B: Block, - BE: Backend + 'static, + BE: Backend + Unpin + 'static, C: Client + 'static, C::Api: DKGApi, MaxProposalLength, MaxAuthorities>, { @@ -127,12 +130,9 @@ where prometheus_registry, local_keystore, _block, + debug_logger, } = dkg_params; - // setup debug logging - let local_peer_id = network.local_peer_id(); - let debug_logger = DebugLogger::new(local_peer_id, None); - let dkg_keystore: DKGKeystore = DKGKeystore::new(key_store, debug_logger.clone()); let keygen_gossip_protocol = NetworkGossipEngineBuilder::new( DKG_KEYGEN_PROTOCOL_NAME.to_string().into(), @@ -174,11 +174,13 @@ where keygen_gossip_engine.set_gossip_enabled(true); signing_gossip_engine.set_gossip_enabled(true); - keygen_gossip_engine.set_processing_already_seen_messages_enabled(false); - signing_gossip_engine.set_processing_already_seen_messages_enabled(false); + // keygen_gossip_engine.set_processing_already_seen_messages_enabled(false); + // signing_gossip_engine.set_processing_already_seen_messages_enabled(false); - let keygen_handle = tokio::spawn(keygen_gossip_handler.run()); - let signing_handle = tokio::spawn(signing_gossip_handler.run()); + let keygen_handle = + crate::utils::ExplicitPanicFuture::new(tokio::spawn(keygen_gossip_handler.run())); + let signing_handle = 
+		crate::utils::ExplicitPanicFuture::new(tokio::spawn(signing_gossip_handler.run()));
 
 	// In memory backend, not used for now
 	// let db_backend = Arc::new(db::DKGInMemoryDb::new());
@@ -210,3 +212,49 @@ where
 	keygen_handle.abort();
 	signing_handle.abort();
 }
+
+pub mod deadlock_detection {
+	#[cfg(not(feature = "testing"))]
+	pub fn deadlock_detect() {}
+
+	#[cfg(feature = "testing")]
+	pub fn deadlock_detect() {
+		static HAS_STARTED: AtomicBool = AtomicBool::new(false);
+		use parking_lot::deadlock;
+		use std::{sync::atomic::AtomicBool, thread, time::Duration};
+
+		// Create a background thread which checks for deadlocks every 5s
+		thread::spawn(move || {
+			if HAS_STARTED
+				.compare_exchange(
+					false,
+					true,
+					std::sync::atomic::Ordering::SeqCst,
+					std::sync::atomic::Ordering::SeqCst,
+				)
+				.unwrap_or(true)
+			{
+				println!("Deadlock detector already started");
+				return
+			}
+
+			println!("Deadlock detector started");
+			loop {
+				thread::sleep(Duration::from_secs(5));
+				let deadlocks = deadlock::check_deadlock();
+				if deadlocks.is_empty() {
+					continue
+				}
+
+				println!("{} deadlocks detected", deadlocks.len());
+				for (i, threads) in deadlocks.iter().enumerate() {
+					println!("Deadlock #{i}");
+					for t in threads {
+						println!("Thread Id {:#?}", t.thread_id());
+						println!("{:#?}", t.backtrace());
+					}
+				}
+			}
+		});
+	}
+}
diff --git a/dkg-gadget/src/signing_manager/mod.rs b/dkg-gadget/src/signing_manager/mod.rs
new file mode 100644
index 000000000..b0be2677a
--- /dev/null
+++ b/dkg-gadget/src/signing_manager/mod.rs
@@ -0,0 +1,301 @@
+use std::marker::PhantomData;
+
+use dkg_primitives::{
+	types::{DKGError, SignedDKGMessage},
+	MaxProposalLength, UnsignedProposal,
+};
+
+use self::work_manager::WorkManager;
+use crate::{
+	async_protocols::{remote::AsyncProtocolRemote, GenericAsyncHandler, KeygenPartyId},
+	gossip_engine::GossipEngineIface,
+	metric_inc,
+	utils::SendFuture,
+	worker::{DKGWorker, HasLatestHeader, KeystoreExt, ProtoStageType},
+	*,
+};
+use codec::Encode;
+use dkg_primitives::{utils::select_random_set, SessionId};
+use dkg_runtime_primitives::crypto::Public;
+use sp_api::HeaderT;
+use std::pin::Pin;
+
+/// For balancing the amount of work done by each node
+pub mod work_manager;
+
+/// The signing manager is triggered each time a new block is finalized.
+/// It will then start a signing process for each of the proposals. SigningManagerV2 uses
+/// 1 signing set per proposal for simplicity.
+///
+/// The steps:
+/// Fetch the current DKG PublicKey pk
+/// Fetch all the Unsigned Proposals from on-chain unsignedProposals
+/// for each unsigned proposal unsignedProposal, do the following:
+/// create a seed s where s is keccak256(pk, fN, keccak256(unsignedProposal))
+/// you take this seed and use it as the seed for a random number generator.
+/// generate a t+1 signing set from this RNG
+/// if we are in this set, we send it to the work manager, and continue.
+/// if we are not, we continue the loop.
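+///
+/// A minimal sketch of the seed derivation and deterministic signer selection described
+/// above. This is purely illustrative (the names mirror the locals used in
+/// `on_block_finalized` below), not a second implementation:
+/// ```ignore
+/// let concat_data = dkg_pub_key
+/// 	.clone()
+/// 	.into_iter()
+/// 	.chain(at.encode())
+/// 	.chain(unsigned_proposal.encode())
+/// 	.collect::<Vec<u8>>();
+/// let seed = sp_core::keccak_256(&concat_data);
+/// // the seed then drives `select_random_set(&seed, candidates, t + 1)`, so every
+/// // honest node derives the same t+1 signing set for a given proposal.
+/// ```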
+pub struct SigningManager { + // governs the workload for each node + work_manager: WorkManager, + _pd: PhantomData<(B, BE, C, GE)>, +} + +impl Clone for SigningManager { + fn clone(&self) -> Self { + Self { work_manager: self.work_manager.clone(), _pd: self._pd } + } +} + +// the maximum number of tasks that the work manager tries to assign +const MAX_RUNNING_TASKS: usize = 4; + +impl SigningManager +where + B: Block, + BE: Backend + Unpin + 'static, + GE: GossipEngineIface + 'static, + C: Client + 'static, + C::Api: DKGApi, MaxProposalLength, MaxAuthorities>, +{ + pub fn new(logger: DebugLogger, clock: impl HasLatestHeader) -> Self { + Self { + work_manager: WorkManager::::new(logger, clock, MAX_RUNNING_TASKS), + _pd: Default::default(), + } + } + + pub fn deliver_message(&self, message: SignedDKGMessage) { + self.work_manager.deliver_message(message) + } + + /// This function is called each time a new block is finalized. + /// It will then start a signing process for each of the proposals. + pub async fn on_block_finalized( + &self, + header: &B::Header, + dkg_worker: &DKGWorker, + ) -> Result<(), DKGError> { + let on_chain_dkg = dkg_worker.get_dkg_pub_key(header).await; + let session_id = on_chain_dkg.0; + let dkg_pub_key = on_chain_dkg.1; + let at = header.hash(); + // Check whether the worker is in the best set or return + let party_i = match dkg_worker.get_party_index(header).await { + Some(party_index) => { + dkg_worker.logger.info(format!("🕸️ PARTY {party_index} | SESSION {session_id} | IN THE SET OF BEST AUTHORITIES")); + KeygenPartyId::try_from(party_index)? + }, + None => { + dkg_worker + .logger + .info(format!("🕸️ NOT IN THE SET OF BEST AUTHORITIES: session {session_id}")); + return Ok(()) + }, + }; + + dkg_worker.logger.info_signing("About to get unsigned proposals ..."); + + let unsigned_proposals = match dkg_worker + .exec_client_function(move |client| client.runtime_api().get_unsigned_proposals(at)) + .await + { + Ok(mut res) => { + // sort proposals by timestamp, we want to pick the oldest proposal to sign + res.sort_by(|a, b| a.1.cmp(&b.1)); + let mut filtered_unsigned_proposals = Vec::new(); + for proposal in res { + if let Some(hash) = proposal.0.hash() { + // only submit the job if it isn't already running + if !self.work_manager.job_exists(&hash) { + // update unsigned proposal counter + metric_inc!(dkg_worker, dkg_unsigned_proposal_counter); + filtered_unsigned_proposals.push(proposal); + } + } + } + filtered_unsigned_proposals + }, + Err(e) => { + dkg_worker + .logger + .error(format!("🕸️ PARTY {party_i} | Failed to get unsigned proposals: {e:?}")); + return Err(DKGError::GenericError { + reason: format!("Failed to get unsigned proposals: {e:?}"), + }) + }, + }; + if unsigned_proposals.is_empty() { + return Ok(()) + } else { + dkg_worker.logger.debug(format!( + "🕸️ PARTY {party_i} | Got unsigned proposals count {}", + unsigned_proposals.len() + )); + } + + let best_authorities: Vec<_> = dkg_worker + .get_best_authorities(header) + .await + .into_iter() + .flat_map(|(i, p)| KeygenPartyId::try_from(i).map(|i| (i, p))) + .collect(); + let threshold = dkg_worker.get_signature_threshold(header).await; + let authority_public_key = dkg_worker.get_authority_public_key(); + + for unsigned_proposal in unsigned_proposals { + /* + create a seed s where s is keccak256(pk, fN=at, unsignedProposal) + you take this seed and use it as a seed to random number generator. 
+			generate a t+1 signing set from this RNG
+			if we are in this set, we send it to the signing manager, and continue.
+			if we are not, we continue the loop.
+			*/
+			let unsigned_proposal_bytes = unsigned_proposal.encode();
+			let concat_data = dkg_pub_key
+				.clone()
+				.into_iter()
+				.chain(at.encode())
+				.chain(unsigned_proposal_bytes)
+				.collect::>();
+			let seed = sp_core::keccak_256(&concat_data);
+			let unsigned_proposal_hash =
+				unsigned_proposal.0.hash().expect("unable to hash proposal");
+
+			let maybe_set = self
+				.generate_signers(&seed, threshold, best_authorities.clone(), dkg_worker)
+				.ok();
+			if let Some(signing_set) = maybe_set {
+				// if we are in the set, send to work manager
+				if signing_set.contains(&party_i) {
+					dkg_worker.logger.info(format!(
+						"🕸️ Session Id {:?} | {}-out-of-{} signers: ({:?})",
+						session_id,
+						threshold,
+						best_authorities.len(),
+						signing_set,
+					));
+					match self.create_signing_protocol(
+						dkg_worker,
+						best_authorities.clone(),
+						authority_public_key.clone(),
+						party_i,
+						session_id,
+						threshold,
+						ProtoStageType::Signing { unsigned_proposal_hash },
+						unsigned_proposal.0,
+						signing_set,
+						*header.number(),
+					) {
+						Ok((handle, task)) => {
+							// send task to the work manager
+							self.work_manager.push_task(unsigned_proposal_hash, handle, task)?;
+						},
+						Err(err) => {
+							dkg_worker
+								.logger
+								.error(format!("Error creating signing protocol: {:?}", &err));
+							dkg_worker.handle_dkg_error(err.clone()).await;
+							return Err(err)
+						},
+					}
+				}
+			}
+		}
+
+		Ok(())
+	}
+
+	#[allow(clippy::too_many_arguments, clippy::type_complexity)]
+	#[cfg_attr(
+		feature = "debug-tracing",
+		dkg_logging::instrument(
+			target = "dkg",
+			skip_all,
+			err,
+			fields(session_id, threshold, stage, party_i)
+		)
+	)]
+	fn create_signing_protocol(
+		&self,
+		dkg_worker: &DKGWorker,
+		best_authorities: Vec<(KeygenPartyId, Public)>,
+		authority_public_key: Public,
+		party_i: KeygenPartyId,
+		session_id: SessionId,
+		threshold: u16,
+		stage: ProtoStageType,
+		unsigned_proposal: UnsignedProposal,
+		signing_set: Vec,
+		associated_block_id: NumberFor,
+	) -> Result<(AsyncProtocolRemote>, Pin>>), DKGError>
+	{
+		let async_proto_params = dkg_worker.generate_async_proto_params(
+			best_authorities,
+			authority_public_key,
+			party_i,
+			session_id,
+			stage,
+			crate::DKG_SIGNING_PROTOCOL_NAME,
+			associated_block_id,
+		)?;
+
+		let handle = async_proto_params.handle.clone();
+
+		let err_handler_tx = dkg_worker.error_handler.clone();
+		let meta_handler = GenericAsyncHandler::setup_signing(
+			async_proto_params,
+			threshold,
+			unsigned_proposal,
+			signing_set,
+		)?;
+		let logger = dkg_worker.logger.clone();
+		let task = async move {
+			match meta_handler.await {
+				Ok(_) => {
+					logger.info("The meta handler has executed successfully".to_string());
+					Ok(())
+				},
+
+				Err(err) => {
+					logger.error(format!("Error executing meta handler {:?}", &err));
+					let _ = err_handler_tx.send(err.clone());
+					Err(err)
+				},
+			}
+		};
+
+		Ok((handle, Box::pin(task)))
+	}
+
+	/// After keygen, this should be called to generate a random set of signers.
+	/// NOTE: since the set is generated by feeding a deterministic seed to an RNG,
+	/// the resulting set is deterministic
+	fn generate_signers(
+		&self,
+		seed: &[u8],
+		t: u16,
+		best_authorities: Vec<(KeygenPartyId, Public)>,
+		dkg_worker: &DKGWorker,
+	) -> Result, DKGError> {
+		let only_public_keys = best_authorities.iter().map(|(_, p)| p).cloned().collect::>();
+		let mut final_set = dkg_worker.get_unjailed_signers(&only_public_keys)?;
+		// Mutate the final set if we don't have enough
unjailed signers + if final_set.len() <= t as usize { + let jailed_set = dkg_worker.get_jailed_signers(&only_public_keys)?; + let diff = t as usize + 1 - final_set.len(); + final_set = final_set + .iter() + .chain(jailed_set.iter().take(diff)) + .cloned() + .collect::>(); + } + + select_random_set(seed, final_set, t + 1) + .map(|set| set.into_iter().flat_map(KeygenPartyId::try_from).collect::>()) + .map_err(|err| DKGError::CreateOfflineStage { + reason: format!("generate_signers failed, reason: {err}"), + }) + } +} diff --git a/dkg-gadget/src/signing_manager/work_manager.rs b/dkg-gadget/src/signing_manager/work_manager.rs new file mode 100644 index 000000000..3e0a45f93 --- /dev/null +++ b/dkg-gadget/src/signing_manager/work_manager.rs @@ -0,0 +1,302 @@ +use crate::{ + async_protocols::remote::AsyncProtocolRemote, debug_logger::DebugLogger, utils::SendFuture, + worker::HasLatestHeader, NumberFor, +}; +use dkg_primitives::{ + crypto::Public, + types::{DKGError, SignedDKGMessage}, +}; +use parking_lot::RwLock; +use sp_api::BlockT; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + hash::{Hash, Hasher}, + pin::Pin, + sync::Arc, +}; +use sync_wrapper::SyncWrapper; + +// How often to poll the jobs to check completion status +const JOB_POLL_INTERVAL_IN_MILLISECONDS: u64 = 500; + +#[derive(Clone)] +pub struct WorkManager { + inner: Arc>>, + clock: Arc>, + // for now, use a hard-coded value for the number of tasks + max_tasks: usize, + logger: DebugLogger, + to_handler: tokio::sync::mpsc::UnboundedSender<[u8; 32]>, +} + +pub struct WorkManagerInner { + pub active_tasks: HashSet>, + pub enqueued_tasks: VecDeque>, + pub enqueued_messages: HashMap<[u8; 32], VecDeque>>, +} + +impl WorkManager { + pub fn new(logger: DebugLogger, clock: impl HasLatestHeader, max_tasks: usize) -> Self { + let (to_handler, mut rx) = tokio::sync::mpsc::unbounded_channel(); + let this = Self { + inner: Arc::new(RwLock::new(WorkManagerInner { + active_tasks: HashSet::new(), + enqueued_tasks: VecDeque::new(), + enqueued_messages: HashMap::new(), + })), + clock: Arc::new(clock), + max_tasks, + logger, + to_handler, + }; + + let this_worker = this.clone(); + let handler = async move { + let job_receiver_worker = this_worker.clone(); + let logger = job_receiver_worker.logger.clone(); + + let job_receiver = async move { + while let Some(task_hash) = rx.recv().await { + job_receiver_worker + .logger + .info_signing(format!("[worker] Received job {task_hash:?}",)); + job_receiver_worker.poll(); + } + }; + + let periodic_poller = async move { + let mut interval = tokio::time::interval(std::time::Duration::from_millis( + JOB_POLL_INTERVAL_IN_MILLISECONDS, + )); + loop { + interval.tick().await; + this_worker.poll(); + } + }; + + tokio::select! 
{
+				_ = job_receiver => {
+					logger.error_signing("[worker] job_receiver exited");
+				},
+				_ = periodic_poller => {
+					logger.error_signing("[worker] periodic_poller exited");
+				}
+			}
+		};
+
+		tokio::task::spawn(handler);
+
+		this
+	}
+
+	/// Pushes the task, but does not necessarily start it.
+	pub fn push_task(
+		&self,
+		task_hash: [u8; 32],
+		mut handle: AsyncProtocolRemote>,
+		task: Pin>>,
+	) -> Result<(), DKGError> {
+		let mut lock = self.inner.write();
+		// set as primary; that way, on drop, the async protocol ends
+		handle.set_as_primary();
+		let job = Job {
+			task: Arc::new(RwLock::new(Some(task.into()))),
+			handle,
+			task_hash,
+			logger: self.logger.clone(),
+		};
+		lock.enqueued_tasks.push_back(job);
+
+		self.to_handler.send(task_hash).map_err(|_| DKGError::GenericError {
+			reason: "Failed to send job to worker".to_string(),
+		})
+	}
+
+	fn poll(&self) {
+		// go through each task and see if it's done
+		// finally, see if we can start a new task
+		let now = self.clock.get_latest_block_number();
+		let mut lock = self.inner.write();
+		let cur_count = lock.active_tasks.len();
+		lock.active_tasks.retain(|job| {
+			let is_stalled = job.handle.signing_has_stalled(now);
+			if is_stalled {
+				// if stalled, let's log the start block and the current block
+				self.logger.info_signing(format!(
+					"[worker] Job {:?} | Started at {:?} | Now {:?} | is stalled, shutting down",
+					hex::encode(job.task_hash),
+					job.handle.started_at,
+					now
+				));
+
+				// the task is stalled, so let's be pedantic and shut it down
+				let _ = job.handle.shutdown("Stalled!");
+				// return false so that the proposals are released from the currently signing
+				// proposals
+				return false
+			}
+
+			let is_done = job.handle.is_done();
+			/*self.logger.info_signing(format!(
+				"[worker] Job {:?} is done: {}",
+				hex::encode(job.task_hash),
+				is_done
+			));*/
+
+			!is_done
+		});
+
+		let new_count = lock.active_tasks.len();
+		if cur_count != new_count {
+			self.logger
+				.info_signing(format!("[worker] {} jobs dropped", cur_count - new_count));
+		}
+
+		// now, check to see if there is room to start a new task
+		let tasks_to_start = self.max_tasks - lock.active_tasks.len();
+		for _ in 0..tasks_to_start {
+			if let Some(job) = lock.enqueued_tasks.pop_front() {
+				self.logger.info_signing(format!(
+					"[worker] Starting job {:?}",
+					hex::encode(job.task_hash)
+				));
+				if let Err(err) = job.handle.start() {
+					self.logger.error_signing(format!(
+						"Failed to start job {:?}: {err:?}",
+						hex::encode(job.task_hash)
+					));
+				} else {
+					// deliver all the enqueued messages to the protocol now
+					if let Some(mut enqueued_messages) =
+						lock.enqueued_messages.remove(&job.task_hash)
+					{
+						self.logger.info_signing(format!(
+							"Will now deliver {} enqueued message(s) to the async protocol for {:?}",
+							enqueued_messages.len(),
+							hex::encode(job.task_hash)
+						));
+						while let Some(message) = enqueued_messages.pop_front() {
+							if let Err(err) = job.handle.deliver_message(message) {
+								self.logger.error_signing(format!(
+									"Unable to deliver message for job {:?}: {err:?}",
+									hex::encode(job.task_hash)
+								));
+							}
+						}
+					}
+				}
+				let task = job.task.clone();
+				// Keep the job stored here so that its drop code does not run right away and
+				// kill the process
+				lock.active_tasks.insert(job);
+				// run the task
+				let task = async move {
+					let task = task.write().take().expect("Should not happen");
+					task.into_inner().await
+				};
+
+				// Spawn the task. When it finishes, it will clean itself up
+				tokio::task::spawn(task);
+			}
+		}
+	}
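+
+	/// Returns `true` if a job with the given task hash is either currently running or
+	/// still waiting in the enqueued-tasks queue.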
+	pub fn job_exists(&self, job: &[u8; 32]) -> bool {
+		let lock = self.inner.read();
+		lock.active_tasks.contains(job) || lock.enqueued_tasks.iter().any(|j| &j.task_hash == job)
+	}
+
+	pub fn deliver_message(&self, msg: SignedDKGMessage) {
+		self.logger.debug_signing(format!(
+			"Delivered message is intended for session_id = {}",
+			msg.msg.session_id
+		));
+		let mut lock = self.inner.write();
+
+		let msg_unsigned_proposal_hash =
+			msg.msg.payload.unsigned_proposal_hash().expect("Bad message type");
+
+		// check the enqueued tasks first
+		for task in lock.enqueued_tasks.iter() {
+			if task.handle.session_id == msg.msg.session_id &&
+				&task.task_hash == msg_unsigned_proposal_hash
+			{
+				self.logger.debug(format!(
+					"Message is for this ENQUEUED signing execution in session: {}",
+					task.handle.session_id
+				));
+				if let Err(_err) = task.handle.deliver_message(msg) {
+					self.logger.warn_signing("Failed to deliver message to signing task");
+				}
+				return
+			}
+		}
+
+		// then check the currently signing tasks
+		for task in lock.active_tasks.iter() {
+			if task.handle.session_id == msg.msg.session_id &&
+				&task.task_hash == msg_unsigned_proposal_hash
+			{
+				self.logger.debug(format!(
+					"Message is for this CURRENT signing execution in session: {}",
+					task.handle.session_id
+				));
+				if let Err(_err) = task.handle.deliver_message(msg) {
+					self.logger.warn_signing("Failed to deliver message to signing task");
+				}
+				return
+			}
+		}
+
+		// if the protocol is neither started nor enqueued, this message may be for a future
+		// async protocol. Store the message for later delivery
+		self.logger.info_signing(format!(
+			"Enqueuing message for {:?}",
+			hex::encode(msg_unsigned_proposal_hash)
+		));
+		lock.enqueued_messages
+			.entry(*msg_unsigned_proposal_hash)
+			.or_default()
+			.push_back(msg)
+	}
+}
+
+pub struct Job {
+	// wrap in an arc to get the strong count for this job
+	task_hash: [u8; 32],
+	logger: DebugLogger,
+	handle: AsyncProtocolRemote>,
+	task: Arc>>>,
+}
+
+pub type SyncFuture = SyncWrapper>>>;
+
+impl std::borrow::Borrow<[u8; 32]> for Job {
+	fn borrow(&self) -> &[u8; 32] {
+		&self.task_hash
+	}
+}
+
+impl PartialEq for Job {
+	fn eq(&self, other: &Self) -> bool {
+		self.task_hash == other.task_hash
+	}
+}
+
+impl Eq for Job {}
+
+impl Hash for Job {
+	fn hash(&self, state: &mut H) {
+		self.task_hash.hash(state);
+	}
+}
+
+impl Drop for Job {
+	fn drop(&mut self) {
+		self.logger.info_signing(format!(
+			"Will remove job {:?} from currently_signing_proposals",
+			hex::encode(self.task_hash)
+		));
+		let _ = self.handle.shutdown("shutdown from Job::drop");
+	}
+}
diff --git a/dkg-gadget/src/storage/proposals.rs b/dkg-gadget/src/storage/proposals.rs
index 2fea78b85..a6aee6415 100644
--- a/dkg-gadget/src/storage/proposals.rs
+++ b/dkg-gadget/src/storage/proposals.rs
@@ -37,13 +37,14 @@ pub(crate) fn save_signed_proposals_in_storage>>,
 	latest_header: &Arc>>,
 	backend: &Arc,
-	signed_proposals: Vec>,
+	mut signed_proposals: Vec>,
 	logger: &DebugLogger,
 ) where
 	B: Block,
 	BE: Backend,
 	C: Client,
-	MaxProposalLength: Get + Clone + Send + Sync + 'static + std::fmt::Debug,
+	MaxProposalLength:
+		Get + Clone + Send + Sync + 'static + std::fmt::Debug + std::cmp::PartialEq,
 	MaxAuthorities: Get + Clone + Send + Sync + 'static + std::fmt::Debug,
 	C::Api: DKGApi<
 		B,
@@ -97,6 +98,21 @@ pub(crate) fn save_signed_proposals_in_storage(*current_block_number, MAX_SUBMISSION_DELAY);
+
+	// let's create a vector of the data of all the proposals currently in offchain storage
+	let current_list_of_saved_signed_proposals_data: Vec> = prop_wrapper
+		.clone()
+		.proposals
+		.into_iter()
+		.flat_map(|prop| prop.0)
+		.map(|prop| prop.data().clone())
+		.collect::>();
+
+	// let's remove any duplicates
+	// we need to compare the data to ensure that the proposal is a duplicate, otherwise the
+	// signatures can be different for the same proposal
+	signed_proposals
+		.retain(|prop| !current_list_of_saved_signed_proposals_data.contains(prop.data()));
+
 	if let Some(submit_at) = submit_at {
 		prop_wrapper.proposals.push((signed_proposals, submit_at))
 	};
diff --git a/dkg-gadget/src/utils.rs b/dkg-gadget/src/utils.rs
index a57369753..2f0ce35c9 100644
--- a/dkg-gadget/src/utils.rs
+++ b/dkg-gadget/src/utils.rs
@@ -98,3 +98,41 @@ pub fn convert_u16_vec_to_usize_vec(input: Vec) -> Vec {
 	}
 	usize_vec
 }
+
+use futures::task::Context;
+use tokio::{
+	macros::support::{Pin, Poll},
+	task::{JoinError, JoinHandle},
+};
+
+/// Ensures that if a panic occurs in a task, the panic backtrace prints
+pub struct ExplicitPanicFuture {
+	future: JoinHandle,
+}
+
+impl ExplicitPanicFuture {
+	pub fn new(future: JoinHandle) -> Self {
+		Self { future }
+	}
+
+	pub fn abort(&self) {
+		self.future.abort();
+	}
+}
+
+impl Future for ExplicitPanicFuture {
+	type Output = Result;
+
+	fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
+		match futures::ready!(Pin::new(&mut self.future).poll(cx)) {
+			Err(err) =>
+				if err.is_panic() {
+					std::panic::panic_any(err.into_panic())
+				} else {
+					Poll::Ready(Err(err))
+				},
+
+			res => Poll::Ready(res),
+		}
+	}
+}
diff --git a/dkg-gadget/src/worker.rs b/dkg-gadget/src/worker.rs
index f787f2205..6356c6af3 100644
--- a/dkg-gadget/src/worker.rs
+++ b/dkg-gadget/src/worker.rs
@@ -21,24 +21,20 @@ use crate::{
 };
 use codec::{Codec, Encode};
 use curv::elliptic::curves::Secp256k1;
-use dkg_primitives::utils::select_random_set;
 use sc_network::NetworkService;
 use sp_consensus::SyncOracle;
+use crate::signing_manager::SigningManager;
 use futures::StreamExt;
-use itertools::Itertools;
 use multi_party_ecdsa::protocols::multi_party_ecdsa::gg_2020::state_machine::keygen::LocalKey;
 use parking_lot::RwLock;
 use sc_client_api::{Backend, FinalityNotification};
 use sc_keystore::LocalKeystore;
-use sp_arithmetic::traits::CheckedRem;
 use sp_core::ecdsa;
-use sp_runtime::traits::{Block, Get, Header, NumberFor, Zero};
+use sp_runtime::traits::{Block, Get, Header, NumberFor};
 use std::{
 	collections::{BTreeSet, HashMap, HashSet},
-	future::Future,
 	marker::PhantomData,
-	pin::Pin,
 	sync::{
 		atomic::{AtomicUsize, Ordering},
 		Arc,
@@ -57,8 +53,7 @@ use dkg_runtime_primitives::{
 	crypto::{AuthorityId, Public},
 	utils::to_slice_33,
 	AggregatedMisbehaviourReports, AggregatedPublicKeys, AuthoritySet, DKGApi, MaxAuthorities,
-	MaxProposalLength, MaxReporters, MaxSignatureLength, UnsignedProposal,
-	GENESIS_AUTHORITY_SET_ID, KEYGEN_TIMEOUT,
+	MaxProposalLength, MaxReporters, MaxSignatureLength, GENESIS_AUTHORITY_SET_ID, KEYGEN_TIMEOUT,
 };
 
 use crate::{
@@ -82,12 +77,8 @@ pub const STORAGE_SET_RETRY_NUM: usize = 5;
 
 pub const MAX_SUBMISSION_DELAY: u32 = 3;
 
-pub const MAX_SIGNING_SETS: u64 = 2;
-
 pub const MAX_KEYGEN_RETRIES: usize = 5;
 
-pub const MAX_UNSIGNED_PROPOSALS_PER_SIGNING_SET: usize = 2;
-
 /// How many blocks to keep the proposal hash in our local cache.
pub const PROPOSAL_HASH_LIFETIME: u32 = 10;
@@ -131,8 +122,6 @@ where
 	pub rounds: Shared>>>,
 	// Next keygen round, always taken and restarted each session
 	pub next_rounds: Shared>>>,
-	// Signing rounds, created everytime there are unique unsigned proposals
-	pub signing_rounds: Shared>>>>,
 	/// Cached best authorities
 	pub best_authorities: Shared>,
 	/// Cached next best authorities
@@ -161,6 +150,7 @@ where
 	pub network: Option>>,
 	pub test_bundle: Option,
 	pub logger: DebugLogger,
+	pub signing_manager: SigningManager,
 	// keep rustc happy
 	_backend: PhantomData<(BE, MaxProposalLength)>,
 }
@@ -168,10 +158,12 @@ where
 /// Used only for tests
 #[derive(Clone)]
 pub struct TestBundle {
-	pub to_test_client: UnboundedSender<(uuid::Uuid, Result<(), String>)>,
+	pub to_test_client: UnboundedSender,
 	pub current_test_id: Arc>>,
 }
 
+pub type TestClientPayload = (uuid::Uuid, Result<(), String>, Option>);
+
 // Implementing Clone for DKGWorker is required for the async protocol
 impl Clone for DKGWorker
 where
@@ -191,7 +183,6 @@ where
 			metrics: self.metrics.clone(),
 			rounds: self.rounds.clone(),
 			next_rounds: self.next_rounds.clone(),
-			signing_rounds: self.signing_rounds.clone(),
 			best_authorities: self.best_authorities.clone(),
 			next_best_authorities: self.next_best_authorities.clone(),
 			latest_header: self.latest_header.clone(),
@@ -207,6 +198,7 @@ where
 			keygen_retry_count: self.keygen_retry_count.clone(),
 			network: self.network.clone(),
 			logger: self.logger.clone(),
+			signing_manager: self.signing_manager.clone(),
 			_backend: PhantomData,
 		}
 	}
@@ -220,7 +212,7 @@ pub type AggregatedMisbehaviourReportStore = HashMap<
 impl DKGWorker
 where
 	B: Block + Codec,
-	BE: Backend + 'static,
+	BE: Backend + Unpin + 'static,
 	GE: GossipEngineIface + 'static,
 	C: Client + 'static,
 	C::Api: DKGApi, MaxProposalLength, MaxAuthorities>,
@@ -248,7 +240,8 @@ where
 		} = worker_params;
 
 		let (error_handler, _) = tokio::sync::broadcast::channel(1024);
-
+		let clock = Clock { latest_header: latest_header.clone() };
+		let signing_manager = SigningManager::::new(logger.clone(), clock);
 		DKGWorker {
 			client,
 			misbehaviour_tx: None,
@@ -260,7 +253,6 @@ where
 			metrics: Arc::new(metrics),
 			rounds: Arc::new(RwLock::new(None)),
 			next_rounds: Arc::new(RwLock::new(None)),
-			signing_rounds: Arc::new(RwLock::new(vec![None; MAX_SIGNING_SETS as _])),
 			best_authorities: Arc::new(RwLock::new(vec![])),
 			next_best_authorities: Arc::new(RwLock::new(vec![])),
 			current_validator_set: Arc::new(RwLock::new(AuthoritySet::empty())),
@@ -275,22 +267,23 @@ where
 			keygen_retry_count: Arc::new(AtomicUsize::new(0)),
 			logger,
 			network,
+			signing_manager,
 			_backend: PhantomData,
 		}
 	}
 }
 
 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
-enum ProtoStageType {
+pub enum ProtoStageType {
 	Genesis,
 	Queued,
-	Signing,
+	Signing { unsigned_proposal_hash: [u8; 32] },
 }
 
 impl DKGWorker
 where
 	B: Block,
-	BE: Backend + 'static,
+	BE: Backend + Unpin + 'static,
 	GE: GossipEngineIface + 'static,
 	C: Client + 'static,
 	C::Api: DKGApi, MaxProposalLength, MaxAuthorities>,
@@ -299,15 +292,15 @@ where
 	// if "current" is true, this will set the "rounds" field in the dkg worker, otherwise,
 	// it will set the "next_rounds" field
 	#[allow(clippy::too_many_arguments, clippy::type_complexity)]
-	fn generate_async_proto_params(
+	pub(crate) fn generate_async_proto_params(
 		&self,
 		best_authorities: Vec<(KeygenPartyId, Public)>,
 		authority_public_key: Public,
 		party_i: KeygenPartyId,
 		session_id: SessionId,
 		stage: ProtoStageType,
-		async_index: u8,
 		protocol_name: &str,
+		associated_block: NumberFor,
 	) -> Result<
AsyncProtocolParameters< DKGProtocolEngine, @@ -319,13 +312,13 @@ where let authority_public_key = Arc::new(authority_public_key); let now = self.get_latest_block_number(); - let status_handle = AsyncProtocolRemote::new(now, session_id, self.logger.clone()); + let mut status_handle = AsyncProtocolRemote::new(now, session_id, self.logger.clone()); // Fetch the active key. This requires rotating the key to have happened with // full certainty in order to ensure the right key is being used to make signatures. let active_local_key = match stage { ProtoStageType::Genesis => None, ProtoStageType::Queued => None, - ProtoStageType::Signing => { + ProtoStageType::Signing { .. } => { let optional_session_id = Some(session_id); let (active_local_key, _) = self.fetch_local_keys(optional_session_id); active_local_key @@ -368,9 +361,17 @@ where handle: status_handle.clone(), logger: self.logger.clone(), local_key: active_local_key, + associated_block_id: associated_block.encode(), }; - // Start the respective protocol - status_handle.start()?; + + if let ProtoStageType::Signing { unsigned_proposal_hash } = &stage { + self.logger.debug(format!("Signing protocol for proposal hash {unsigned_proposal_hash:?} will start later in the work manager")); + return Ok(params) + } + + // Set the status handle as primary, implying that once it drops, it will stop the async + // protocol + status_handle.set_as_primary(); // Cache the rounds, respectively match stage { ProtoStageType::Genesis => { @@ -397,40 +398,8 @@ where } *lock = Some(status_handle); }, - // When we are at signing stage, it is using the active rounds. - ProtoStageType::Signing => { - self.logger - .debug(format!("Starting signing protocol (async_index #{async_index})")); - let mut lock = self.signing_rounds.write(); - // first, check if the async_index is already in use and if so, and it is still - // running, return an error and print a warning that we will overwrite the previous - // round. - if let Some(Some(current_round)) = lock.get(async_index as usize) { - // check if it has stalled or not, if so, we can overwrite it - // TODO: Write more on what we should be going here since it's all the same - if current_round.signing_has_stalled(now) { - // the round has stalled, so we can overwrite it - self.logger.warn(format!( - "signing round async index #{async_index} has stalled, overwriting it" - )); - lock[async_index as usize] = Some(status_handle) - } else if current_round.is_active() { - self.logger.warn( - "Overwriting rounds will result in termination of previous rounds!" - .to_string(), - ); - lock[async_index as usize] = Some(status_handle) - } else { - // the round is not active, nor has it stalled, so we can overwrite it. - self.logger.debug(format!( - "signing round async index #{async_index} is not active, overwriting it" - )); - lock[async_index as usize] = Some(status_handle) - } - } else { - // otherwise, we can safely write to this slot. - lock[async_index as usize] = Some(status_handle); - } + ProtoStageType::Signing { .. 
} => { + unreachable!("Signing stage should not be handled here!") }, } @@ -446,12 +415,14 @@ where } } - fn spawn_keygen_protocol( + #[allow(clippy::too_many_arguments)] + async fn spawn_keygen_protocol( &self, best_authorities: Vec<(KeygenPartyId, Public)>, authority_public_key: Public, party_i: KeygenPartyId, session_id: SessionId, + associated_block: NumberFor, threshold: u16, stage: ProtoStageType, ) { @@ -461,8 +432,8 @@ where party_i, session_id, stage, - 0u8, crate::DKG_KEYGEN_PROTOCOL_NAME, + associated_block, ) { Ok(async_proto_params) => { let err_handler_tx = self.error_handler.clone(); @@ -483,10 +454,18 @@ where // so we can safely assume that we are in the queued state. DKGMsgStatus::QUEUED }; + let start_handle = async_proto_params.handle.clone(); match GenericAsyncHandler::setup_keygen(async_proto_params, threshold, status) { Ok(meta_handler) => { let logger = self.logger.clone(); let task = async move { + if let Err(err) = start_handle.start() { + logger.error_keygen(format!( + "Error starting keygen protocol: {err:?}" + )); + return + } + match meta_handler.await { Ok(_) => { logger.info( @@ -502,6 +481,7 @@ where } }; + self.logger.debug(format!("Started Keygen Protocol for session {session_id} with status {status:?}")); // spawn on parallel thread self.logger.info("Started a new thread for task".to_string()); let _handle = tokio::task::spawn(task); @@ -509,88 +489,17 @@ where Err(err) => { self.logger.error(format!("Error starting meta handler {:?}", &err)); - self.handle_dkg_error(err); + self.handle_dkg_error(err).await; }, } }, Err(err) => { - self.handle_dkg_error(err); + self.handle_dkg_error(err).await; }, } } - #[allow(clippy::too_many_arguments, clippy::type_complexity)] - #[cfg_attr( - feature = "debug-tracing", - dkg_logging::instrument( - target = "dkg", - skip_all, - err, - fields(session_id, threshold, stage, async_index) - ) - )] - fn create_signing_protocol( - &self, - best_authorities: Vec<(KeygenPartyId, Public)>, - authority_public_key: Public, - party_i: KeygenPartyId, - session_id: SessionId, - threshold: u16, - stage: ProtoStageType, - unsigned_proposals: Vec>, - signing_set: Vec, - async_index: u8, - ) -> Result> + Send + 'static>>, DKGError> { - let async_proto_params = self.generate_async_proto_params( - best_authorities, - authority_public_key, - party_i, - session_id, - stage, - async_index, - crate::DKG_SIGNING_PROTOCOL_NAME, - )?; - - let err_handler_tx = self.error_handler.clone(); - let proposal_hashes = - unsigned_proposals.iter().filter_map(|p| p.hash()).collect::>(); - let meta_handler = GenericAsyncHandler::setup_signing( - async_proto_params, - threshold, - unsigned_proposals, - signing_set, - async_index, - )?; - let logger = self.logger.clone(); - let currently_signing_proposals = self.currently_signing_proposals.clone(); - let task = async move { - match meta_handler.await { - Ok(_) => { - logger.info("The meta handler has executed successfully".to_string()); - Ok(async_index) - }, - - Err(err) => { - logger.error(format!("Error executing meta handler {:?}", &err)); - let _ = err_handler_tx.send(err.clone()); - // remove proposal hashes, so that they can be reprocessed - let mut lock = currently_signing_proposals.write(); - proposal_hashes.iter().for_each(|h| { - lock.remove(h); - }); - logger.info(format!( - "Removed {:?} proposal hashes from currently signing queue", - proposal_hashes.len() - )); - Err(err) - }, - } - }; - - Ok(Box::pin(task)) - } - /// Fetch the stored local keys if they exist. 
 	///
 	/// The `optional_session_id` is used to fetch the keys for a specific session, only in case
@@ -600,8 +509,12 @@ where
 		&self,
 		optional_session_id: Option<SessionId>,
 	) -> (Option<LocalKey<Secp256k1>>, Option<LocalKey<Secp256k1>>) {
-		let current_session_id =
-			self.rounds.read().as_ref().map(|r| r.session_id).or(optional_session_id);
+		let current_session_id =
+			optional_session_id.or_else(|| self.rounds.read().as_ref().map(|r| r.session_id));
+
 		let next_session_id = current_session_id.map(|s| s + 1);
 		let active_local_key =
 			current_session_id.and_then(|s| self.db.get_local_key(s).ok().flatten());
@@ -612,9 +525,9 @@ where
 	/// Get the party index of our worker
 	///
 	/// Returns `None` if we are not in the best authority set
-	pub fn get_party_index(&self, header: &B::Header) -> Option<u16> {
+	pub async fn get_party_index(&self, header: &B::Header) -> Option<u16> {
 		let public = self.get_authority_public_key();
-		let best_authorities = self.get_best_authorities(header);
+		let best_authorities = self.get_best_authorities(header).await;
 		for elt in best_authorities {
 			if elt.1 == public {
 				return Some(elt.0)
 			}
@@ -627,9 +540,9 @@ where
 	/// Get the next party index of our worker for possible queued keygen
 	///
 	/// Returns `None` if we are not in the next best authority set
-	pub fn get_next_party_index(&self, header: &B::Header) -> Option<u16> {
+	pub async fn get_next_party_index(&self, header: &B::Header) -> Option<u16> {
 		let public = self.get_authority_public_key();
-		let next_best_authorities = self.get_next_best_authorities(header);
+		let next_best_authorities = self.get_next_best_authorities(header).await;
 		for elt in next_best_authorities {
 			if elt.1 == public {
 				return Some(elt.0)
 			}
 		}
 	}
 
 	/// Get the signature threshold at a specific block
-	pub fn get_signature_threshold(&self, header: &B::Header) -> u16 {
+	pub async fn get_signature_threshold(&self, header: &B::Header) -> u16 {
 		let at = header.hash();
-		return self.client.runtime_api().signature_threshold(at).unwrap_or_default()
+		self.exec_client_function(move |client| {
+			client.runtime_api().signature_threshold(at).unwrap_or_default()
+		})
+		.await
 	}
 
 	/// Get the next signature threshold at a specific block
-	pub fn get_next_signature_threshold(&self, header: &B::Header) -> u16 {
+	pub async fn get_next_signature_threshold(&self, header: &B::Header) -> u16 {
 		let at = header.hash();
-		return self.client.runtime_api().next_signature_threshold(at).unwrap_or_default()
+		self.exec_client_function(move |client| {
+			client.runtime_api().next_signature_threshold(at).unwrap_or_default()
+		})
+		.await
 	}
 
 	/// Get the active DKG public key
-	pub fn get_dkg_pub_key(&self, header: &B::Header) -> (AuthoritySetId, Vec<u8>) {
+	pub async fn get_dkg_pub_key(&self, header: &B::Header) -> (AuthoritySetId, Vec<u8>) {
 		let at = header.hash();
-		return self.client.runtime_api().dkg_pub_key(at).ok().unwrap_or_default()
+		self.exec_client_function(move |client| {
+			client.runtime_api().dkg_pub_key(at).unwrap_or_default()
+		})
+		.await
 	}
 
 	/// Get the next DKG public key
-	#[allow(dead_code)]
-	pub fn get_next_dkg_pub_key(&self, header: &B::Header) -> Option<(AuthoritySetId, Vec<u8>)> {
+	pub async fn get_next_dkg_pub_key(
+		&self,
+		header: &B::Header,
+	) -> Option<(AuthoritySetId, Vec<u8>)> {
 		let at = header.hash();
-		return self.client.runtime_api().next_dkg_pub_key(at).ok().unwrap_or_default()
+		self.exec_client_function(move |client| {
+			client.runtime_api().next_dkg_pub_key(at).unwrap_or_default()
+		})
+		.await
 	}
 
 	/// Get the jailed keygen authorities
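 	// Illustrative usage (mirrors call sites later in this file): the getters above
 	// are now async, so callers await them instead of calling inline, e.g.
 	//     let threshold = self.get_signature_threshold(header).await;
 	//     let party_i = self.get_party_index(header).await;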
#[allow(dead_code)] - pub fn get_keygen_jailed(&self, header: &B::Header, set: &[AuthorityId]) -> Vec { + pub async fn get_keygen_jailed( + &self, + header: &B::Header, + set: &[AuthorityId], + ) -> Vec { let at = header.hash(); - return self - .client - .runtime_api() - .get_keygen_jailed(at, set.to_vec()) - .unwrap_or_default() + let set = set.to_vec(); + self.exec_client_function(move |client| { + client.runtime_api().get_keygen_jailed(at, set).unwrap_or_default() + }) + .await } /// Get the best authorities for keygen - pub fn get_best_authorities(&self, header: &B::Header) -> Vec<(u16, AuthorityId)> { + pub async fn get_best_authorities(&self, header: &B::Header) -> Vec<(u16, AuthorityId)> { let at = header.hash(); - return self.client.runtime_api().get_best_authorities(at).unwrap_or_default() + self.exec_client_function(move |client| { + client.runtime_api().get_best_authorities(at).unwrap_or_default() + }) + .await } /// Get the next best authorities for keygen - pub fn get_next_best_authorities(&self, header: &B::Header) -> Vec<(u16, AuthorityId)> { + pub async fn get_next_best_authorities(&self, header: &B::Header) -> Vec<(u16, AuthorityId)> { let at = header.hash(); - return self.client.runtime_api().get_next_best_authorities(at).unwrap_or_default() + self.exec_client_function(move |client| { + client.runtime_api().get_next_best_authorities(at).unwrap_or_default() + }) + .await } /// Return the next and queued validator set at header `header`. @@ -697,14 +634,14 @@ where /// /// If the validators are None, we use the arbitrary validators gotten from the authority set /// and queued authority set in the given header - pub fn validator_set( + pub async fn validator_set( &self, header: &B::Header, ) -> Option<(AuthoritySet, AuthoritySet)> { - Self::validator_set_inner(&self.logger, header, &self.client) + Self::validator_set_inner(&self.logger, header, &self.client).await } - fn validator_set_inner( + async fn validator_set_inner( logger: &DebugLogger, header: &B::Header, client: &Arc, @@ -713,8 +650,16 @@ where Some((new, queued)) } else { let at = header.hash(); - let current_authority_set = client.runtime_api().authority_set(at).ok(); - let queued_authority_set = client.runtime_api().queued_authority_set(at).ok(); + let current_authority_set = exec_client_function(client, move |client| { + client.runtime_api().authority_set(at).ok() + }) + .await; + + let queued_authority_set = exec_client_function(client, move |client| { + client.runtime_api().queued_authority_set(at).ok() + }) + .await; + match (current_authority_set, queued_authority_set) { (Some(current), Some(queued)) => Some((current, queued)), _ => None, @@ -753,7 +698,7 @@ where Ok(()) } - fn handle_genesis_dkg_setup( + async fn handle_genesis_dkg_setup( &self, header: &B::Header, genesis_authority_set: AuthoritySet, @@ -806,7 +751,7 @@ where // DKG keygen authorities are always taken from the best set of authorities let session_id = genesis_authority_set.id; // Check whether the worker is in the best set or return - let party_i = match self.get_party_index(header) { + let party_i = match self.get_party_index(header).await { Some(party_index) => { self.logger.info(format!("🕸️ PARTY {party_index} | SESSION {session_id} | IN THE SET OF BEST GENESIS AUTHORITIES: session: {session_id}")); KeygenPartyId::try_from(party_index)? 
@@ -822,10 +767,11 @@ where let best_authorities = self .get_best_authorities(header) + .await .into_iter() .flat_map(|(i, p)| KeygenPartyId::try_from(i).map(|i| (i, p))) .collect(); - let threshold = self.get_signature_threshold(header); + let threshold = self.get_signature_threshold(header).await; let authority_public_key = self.get_authority_public_key(); self.logger.debug(format!("🕸️ PARTY {party_i} | SPAWNING KEYGEN SESSION {session_id} | BEST AUTHORITIES: {best_authorities:?}")); self.spawn_keygen_protocol( @@ -833,13 +779,15 @@ where authority_public_key, party_i, session_id, + *header.number(), threshold, ProtoStageType::Genesis, - ); + ) + .await; Ok(()) } - fn handle_queued_dkg_setup( + async fn handle_queued_dkg_setup( &self, header: &B::Header, queued: AuthoritySet, @@ -867,7 +815,7 @@ where // Get the best next authorities using the keygen threshold let session_id = queued.id; // Check whether the worker is in the best set or return - let party_i = match self.get_next_party_index(header) { + let party_i = match self.get_next_party_index(header).await { Some(party_index) => { self.logger.info(format!("🕸️ PARTY {party_index} | SESSION {session_id} | IN THE SET OF BEST NEXT AUTHORITIES")); KeygenPartyId::try_from(party_index)? @@ -881,13 +829,14 @@ where }, }; - *self.next_best_authorities.write() = self.get_next_best_authorities(header); + *self.next_best_authorities.write() = self.get_next_best_authorities(header).await; let next_best_authorities = self .get_next_best_authorities(header) + .await .into_iter() .flat_map(|(i, p)| KeygenPartyId::try_from(i).map(|i| (i, p))) .collect(); - let threshold = self.get_next_signature_threshold(header); + let threshold = self.get_next_signature_threshold(header).await; let authority_public_key = self.get_authority_public_key(); // spawn the Keygen protocol for the Queued DKG. @@ -897,19 +846,24 @@ where authority_public_key, party_i, session_id, + *header.number(), threshold, ProtoStageType::Queued, - ); + ) + .await; Ok(()) } // *** Block notifications *** - fn process_block_notification(&self, header: &B::Header) { + async fn process_block_notification(&self, header: &B::Header) { if let Some(latest_header) = self.latest_header.read().clone() { if latest_header.number() >= header.number() { // We've already seen this block, ignore it. self.logger.debug( - "🕸️ Latest header is greater than or equal to current header, returning...", + format!("🕸️ Latest header {} is greater than or equal to current header {}, returning...", + latest_header.number(), + header.number() + ) ); return } @@ -935,22 +889,22 @@ where // 2. if yes, we start enacting authorities on genesis flow. // 3. if no, we start enacting authorities on queued flow and submit any unsigned // proposals. 
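 		// Step 3's proposal submission is delegated to the signing manager:
 		// `handle_unsigned_proposals` (further below) forwards to
 		// `self.signing_manager.on_block_finalized(header, self)` rather than this
 		// worker spawning signing protocols directly.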
-		if self.get_dkg_pub_key(header).1.is_empty() {
+		if self.get_dkg_pub_key(header).await.1.is_empty() {
 			self.logger
 				.debug("🕸️ Maybe enacting genesis authorities since dkg pub key is empty");
-			self.maybe_enact_genesis_authorities(header);
+			self.maybe_enact_genesis_authorities(header).await;
 		} else {
-			self.maybe_enact_next_authorities(header);
-			self.maybe_rotate_local_sessions(header);
-			if let Err(e) = self.submit_unsigned_proposals(header) {
-				self.logger.error(format!("🕸️ Error submitting unsigned proposals: {e:?}"));
+			self.maybe_enact_next_authorities(header).await;
+			self.maybe_rotate_local_sessions(header).await;
+			if let Err(e) = self.handle_unsigned_proposals(header).await {
+				self.logger.error(format!("🕸️ Error running handle_unsigned_proposals: {e:?}"));
 			}
 		}
 	}
 
-	fn maybe_enact_genesis_authorities(&self, header: &B::Header) {
+	async fn maybe_enact_genesis_authorities(&self, header: &B::Header) {
 		// Get the active and queued validators to check for updates
-		if let Some((active, _queued)) = self.validator_set(header) {
+		if let Some((active, _queued)) = self.validator_set(header).await {
 			// If we are in the genesis state, we need to enact the genesis authorities
 			if active.id == GENESIS_AUTHORITY_SET_ID {
 				self.logger.debug(format!("🕸️ GENESIS SESSION ID {:?}", active.id));
@@ -959,10 +913,10 @@ where
 				let _ = self.verify_validator_set(header.number(), active.clone());
 				// Setting new validator set id as current
 				*self.current_validator_set.write() = active.clone();
-				*self.best_authorities.write() = self.get_best_authorities(header);
-				*self.next_best_authorities.write() = self.get_next_best_authorities(header);
+				*self.best_authorities.write() = self.get_best_authorities(header).await;
+				*self.next_best_authorities.write() = self.get_next_best_authorities(header).await;
 				// Setting up the DKG
-				if let Err(e) = self.handle_genesis_dkg_setup(header, active) {
+				if let Err(e) = self.handle_genesis_dkg_setup(header, active).await {
 					self.logger.error(format!("🕸️ Error handling genesis DKG setup: {e:?}"));
 				}
 			} else {
@@ -985,14 +939,16 @@ where
 	/// Edge cases:
 	/// 1. If we are already running a keygen protocol and detect that we are stalled, this
 	///    method will try to restart the keygen protocol.
-	fn maybe_enact_next_authorities(&self, header: &B::Header) {
-		if !self.should_execute_new_keygen(header) {
+	async fn maybe_enact_next_authorities(&self, header: &B::Header) {
+		if !self.should_execute_new_keygen(header).await {
 			self.logger.debug("🕸️ Not executing new keygen protocol");
 			return
 		}
 
+		self.logger.debug("Running maybe_enact_next_authorities");
+
 		// Get the active and queued validators to check for updates
-		if let Some((_active, queued)) = self.validator_set(header) {
+		if let Some((_active, queued)) = self.validator_set(header).await {
 			self.logger.debug("🕸️ Session progress percentage above threshold, proceed with enact new authorities");
 			// Check whether the queued keygen has finished:
 			let queued_keygen_finished = self
 				.debug(format!(
 					"🕸️ QUEUED DKG STATUS: {:?}",
 					self.next_rounds.read().as_ref().map(|r| r.status.clone())
 				));
-			if queued_keygen_finished {
+			let test_harness_mode = self.test_bundle.is_some();
+
+			if queued_keygen_finished && !test_harness_mode {
 				return
 			}
 
 			let has_next_rounds = self.next_rounds.read().is_some();
 			self.logger.debug(format!("🕸️ HAS NEXT ROUND KEYGEN: {has_next_rounds:?}"));
 			// Check if there is a next DKG Key on-chain.
- let next_dkg_key = self.get_next_dkg_pub_key(header); + let next_dkg_key = self.get_next_dkg_pub_key(header).await; self.logger .debug(format!("🕸️ NEXT DKG KEY ON CHAIN: {}", next_dkg_key.is_some())); - let test_harness_mode = self.test_bundle.is_some(); // Start a keygen if we don't have one OR if there is no queued key on chain. if (!has_next_rounds && next_dkg_key.is_none()) || test_harness_mode { self.logger.debug(format!( @@ -1026,7 +983,7 @@ where next_dkg_key.is_some() )); // Start the queued DKG setup for the new queued authorities - if let Err(e) = self.handle_queued_dkg_setup(header, queued) { + if let Err(e) = self.handle_queued_dkg_setup(header, queued).await { self.logger.error(format!("🕸️ Error handling queued DKG setup: {e:?}")); } // Reset the Retry counter. @@ -1040,9 +997,11 @@ where // Check if we are stalled: // a read only clone, to avoid holding the lock for the whole duration of the function - let lock = self.next_rounds.read(); - let next_rounds_clone = (*lock).clone(); - drop(lock); + let next_rounds_clone = { + let lock = self.next_rounds.read(); + (*lock).clone() + }; + if let Some(ref rounds) = next_rounds_clone { self.logger.debug(format!( "🕸️ Status: {:?}, Now: {:?}, Started At: {:?}, Timeout length: {:?}", @@ -1057,9 +1016,9 @@ where // and then check the signature threshold `t`, if `t+1` is greater than the // number of authorities and we still have not reached the maximum number of // retries, we should retry the keygen - let next_best = self.get_next_best_authorities(header); + let next_best = self.get_next_best_authorities(header).await; let n = next_best.len(); - let t = self.get_next_signature_threshold(header) as usize; + let t = self.get_next_signature_threshold(header).await as usize; // in this case, if t + 1 is equal to n, we should retry the keygen // indefinitely. // For example, if we are running a 3 node network, with 1-of-2 DKG, it will not @@ -1080,7 +1039,7 @@ where )); metric_inc!(self, dkg_keygen_retry_counter); // Start the queued Keygen protocol again. - if let Err(e) = self.handle_queued_dkg_setup(header, queued) { + if let Err(e) = self.handle_queued_dkg_setup(header, queued).await { self.logger.error(format!("🕸️ Error handling queued DKG setup: {e:?}")); } // Increment the retry count @@ -1093,24 +1052,26 @@ where ), session_id: rounds.session_id, }) + .await } } } } - fn maybe_rotate_local_sessions(&self, header: &B::Header) { - if let Some((active, queued)) = self.validator_set(header) { + async fn maybe_rotate_local_sessions(&self, header: &B::Header) { + if let Some((active, queued)) = self.validator_set(header).await { self.logger.debug(format!("🕸️ ACTIVE SESSION ID {:?}", active.id)); metric_set!(self, dkg_validator_set_id, active.id); // verify the new validator set let _ = self.verify_validator_set(header.number(), active.clone()); // Check if the on chain authority_set_id is the same as the queued_authority_set_id. 
- let (set_id, _) = self.get_dkg_pub_key(header); + let (set_id, _) = self.get_dkg_pub_key(header).await; let queued_authority_set_id = self.queued_validator_set.read().id; self.logger.debug(format!("🕸️ CURRENT SET ID: {set_id:?}")); self.logger .debug(format!("🕸️ QUEUED AUTHORITY SET ID: {queued_authority_set_id:?}")); if set_id != queued_authority_set_id { + self.logger.debug(format!("🕸️ Queued authority set id {queued_authority_set_id} is not the same as the on chain authority set id {set_id}, will not rotate the local sessions.")); return } // Update the validator sets @@ -1133,18 +1094,11 @@ where *self.rounds.write() = self.next_rounds.write().take(); // We also rotate the best authority caches *self.best_authorities.write() = self.next_best_authorities.read().clone(); - *self.next_best_authorities.write() = self.get_next_best_authorities(header); + *self.next_best_authorities.write() = self.get_next_best_authorities(header).await; // since we just rotate, we reset the keygen retry counter self.keygen_retry_count.store(0, Ordering::Relaxed); // clear the currently being signing proposals cache. self.currently_signing_proposals.write().clear(); - // Reset all the signing rounds. - self.signing_rounds.write().iter_mut().for_each(|v| { - if let Some(r) = v.as_mut() { - let _ = r.shutdown("Rotating next round"); - } - *v = None; - }); // Reset per session metrics if let Some(metrics) = self.metrics.as_ref() { metrics.reset_session_metrics(); @@ -1156,17 +1110,17 @@ where } } - fn handle_finality_notification(&self, notification: FinalityNotification) { + async fn handle_finality_notification(&self, notification: FinalityNotification) { self.logger.trace(format!("🕸️ Finality notification: {notification:?}")); // Handle finality notifications - self.process_block_notification(¬ification.header); + self.process_block_notification(¬ification.header).await; } #[cfg_attr( feature = "debug-tracing", dkg_logging::instrument(target = "dkg", skip_all, ret, err, fields(signed_dkg_message)) )] - fn verify_signature_against_authorities( + async fn verify_signature_against_authorities( &self, signed_dkg_msg: SignedDKGMessage, ) -> Result, DKGError> { @@ -1176,9 +1130,10 @@ where &self.latest_header, &self.client, ) + .await } - pub fn verify_signature_against_authorities_inner( + pub async fn verify_signature_against_authorities_inner( logger: &DebugLogger, signed_dkg_msg: SignedDKGMessage, latest_header: &Arc>>, @@ -1191,8 +1146,11 @@ where })?; // Get authority accounts let mut authorities: Option<(Vec, Vec)> = None; - if let Some(header) = latest_header.read().clone() { + let latest_header = { latest_header.read().clone() }; + + if let Some(header) = latest_header { authorities = Self::validator_set_inner(logger, &header, client) + .await .map(|a| (a.0.authorities.into(), a.1.authorities.into())); } @@ -1231,7 +1189,7 @@ where feature = "debug-tracing", dkg_logging::instrument(target = "dkg", skip_all, fields(dkg_error)) )] - pub fn handle_dkg_error(&self, dkg_error: DKGError) { + pub async fn handle_dkg_error(&self, dkg_error: DKGError) { self.logger.error(format!("Received error: {dkg_error:?}")); metric_inc!(self, dkg_error_counter); let authorities: Vec = @@ -1270,11 +1228,14 @@ where for offender in offenders { match dkg_error { DKGError::KeygenMisbehaviour { bad_actors: _, .. } => - self.handle_dkg_report(DKGReport::KeygenMisbehaviour { offender, session_id }), + self.handle_dkg_report(DKGReport::KeygenMisbehaviour { offender, session_id }) + .await, DKGError::KeygenTimeout { .. 
} => - self.handle_dkg_report(DKGReport::KeygenMisbehaviour { offender, session_id }), + self.handle_dkg_report(DKGReport::KeygenMisbehaviour { offender, session_id }) + .await, DKGError::SignMisbehaviour { bad_actors: _, .. } => - self.handle_dkg_report(DKGReport::SignMisbehaviour { offender, session_id }), + self.handle_dkg_report(DKGReport::SignMisbehaviour { offender, session_id }) + .await, _ => (), } } @@ -1285,14 +1246,24 @@ where feature = "debug-tracing", dkg_logging::instrument(target = "dkg", skip_all, ret, err, fields(dkg_msg)) )] - fn process_incoming_dkg_message( + async fn process_incoming_dkg_message( &self, dkg_msg: SignedDKGMessage, ) -> Result<(), DKGError> { metric_inc!(self, dkg_inbound_messages); - // discard the message if from previous round - if let Some(current_round) = self.rounds.read().as_ref() { - if dkg_msg.msg.session_id < current_round.session_id { + let rounds = self.rounds.read().clone(); + let next_rounds = self.next_rounds.read().clone(); + let is_keygen_type = matches!(dkg_msg.msg.payload, DKGMsgPayload::Keygen { .. }); + self.logger.info(format!( + "Processing incoming DKG message: {:?} | {:?}", + dkg_msg.msg.session_id, + rounds.as_ref().map(|x| x.session_id) + )); + + // discard the message if from previous round (keygen checking only. SigningManagerV2 + // internally handles session checks) + if let Some(current_round) = &rounds { + if dkg_msg.msg.session_id < current_round.session_id && is_keygen_type { self.logger.warn(format!( "Message is for already completed round: {}, Discarding message", dkg_msg.msg.session_id @@ -1301,69 +1272,51 @@ where } } - match &dkg_msg.msg.payload { + let is_delivery_type = matches!( + dkg_msg.msg.payload, + DKGMsgPayload::Keygen(..) | DKGMsgPayload::Offline(..) | DKGMsgPayload::Vote(..) + ); + + let res = match &dkg_msg.msg.payload { DKGMsgPayload::Keygen(_) => { - let msg = Arc::new(dkg_msg); - if let Some(rounds) = self.rounds.read().as_ref() { - if rounds.session_id == msg.msg.session_id { - if let Err(err) = rounds.deliver_message(msg) { + if let Some(rounds) = &rounds { + if rounds.session_id == dkg_msg.msg.session_id { + if let Err(err) = rounds.deliver_message(dkg_msg) { self.handle_dkg_error(DKGError::CriticalError { reason: err.to_string(), }) + .await } return Ok(()) } } - if let Some(rounds) = self.next_rounds.read().as_ref() { - if rounds.session_id == msg.msg.session_id { - if let Err(err) = rounds.deliver_message(msg) { + if let Some(next_rounds) = next_rounds { + if next_rounds.session_id == dkg_msg.msg.session_id { + if let Err(err) = next_rounds.deliver_message(dkg_msg) { self.handle_dkg_error(DKGError::CriticalError { reason: err.to_string(), }) + .await } return Ok(()) } } + // TODO: if the message belongs to neither, investigate if we maybe need to enqueue + // the message (did someone else's protocol start before ours, and neither of our + // rounds are set-up?) + Ok(()) }, DKGMsgPayload::Offline(..) | DKGMsgPayload::Vote(..) 
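 			// Offline and Vote messages are routed to the signing manager, which
 			// performs its own session checks and round matching internally.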
=> { - let msg = Arc::new(dkg_msg); - let async_index = msg.msg.payload.get_async_index(); - self.logger.debug(format!("Received message for async index {async_index}")); - if let Some(Some(rounds)) = self.signing_rounds.read().get(async_index as usize) { - self.logger.debug(format!( - "Message is for signing execution in session {}", - rounds.session_id - )); - if rounds.session_id == msg.msg.session_id { - self.logger.debug(format!( - "Message is for this signing execution in session: {}", - rounds.session_id - )); - if let Err(err) = rounds.deliver_message(msg) { - self.handle_dkg_error(DKGError::CriticalError { - reason: err.to_string(), - }) - } - } else { - let message = - format!("Message is for another signing round: {}", rounds.session_id); - self.logger.error(&message); - return Err(DKGError::GenericError { reason: message }) - } - } else { - let message = format!("No signing rounds for async index {async_index}"); - self.logger.error(&message); - return Err(DKGError::GenericError { reason: message }) - } - Ok(()) + self.signing_manager.deliver_message(dkg_msg); + return Ok(()) }, DKGMsgPayload::PublicKeyBroadcast(_) => { - match self.verify_signature_against_authorities(dkg_msg) { + match self.verify_signature_against_authorities(dkg_msg).await { Ok(dkg_msg) => { - match handle_public_key_broadcast(self, dkg_msg) { + match handle_public_key_broadcast(self, dkg_msg).await { Ok(()) => (), Err(err) => self .logger @@ -1378,9 +1331,9 @@ where Ok(()) }, DKGMsgPayload::MisbehaviourBroadcast(_) => { - match self.verify_signature_against_authorities(dkg_msg) { + match self.verify_signature_against_authorities(dkg_msg).await { Ok(dkg_msg) => { - match handle_misbehaviour_report(self, dkg_msg) { + match handle_misbehaviour_report(self, dkg_msg).await { Ok(()) => (), Err(err) => self .logger @@ -1395,10 +1348,16 @@ where Ok(()) }, + }; + + if is_delivery_type { + self.logger.warn(format!("Did not deliver message! res: {res:?}")); } + + res } - fn handle_dkg_report(&self, dkg_report: DKGReport) { + async fn handle_dkg_report(&self, dkg_report: DKGReport) { let (offender, session_id, misbehaviour_type) = match dkg_report { // Keygen misbehaviour possibly leads to keygen failure. This should be slashed // more severely than sign misbehaviour events. @@ -1418,7 +1377,7 @@ where let misbehaviour_msg = DKGMisbehaviourMessage { misbehaviour_type, session_id, offender, signature: vec![] }; - let gossip = gossip_misbehaviour_report(self, misbehaviour_msg); + let gossip = gossip_misbehaviour_report(self, misbehaviour_msg).await; if gossip.is_err() { self.logger.info("🕸️ DKG gossip_misbehaviour_report failed!"); } @@ -1464,230 +1423,8 @@ where Ok(Public::from(signer)) } - fn submit_unsigned_proposals(&self, header: &B::Header) -> Result<(), DKGError> { - let on_chain_dkg = self.get_dkg_pub_key(header); - let session_id = on_chain_dkg.0; - let dkg_pub_key = on_chain_dkg.1; - let at = header.hash(); - // Check whether the worker is in the best set or return - let party_i = match self.get_party_index(header) { - Some(party_index) => { - self.logger.info(format!("🕸️ PARTY {party_index} | SESSION {session_id} | IN THE SET OF BEST AUTHORITIES")); - KeygenPartyId::try_from(party_index)? 
- }, - None => { - self.logger - .info(format!("🕸️ NOT IN THE SET OF BEST AUTHORITIES: session {session_id}")); - return Ok(()) - }, - }; - - // check if we should clear our proposal hash cache, - // the condition is that `PROPOSAL_HASH_LIFETIME` blocks have passed since the last - // block time we cached a proposal hash for. - // this could be done without actually keeping track of the last block time we cached a - // proposal hash for, by taking the modulo of the block number with - // `PROPOSAL_HASH_LIFETIME`, - let should_clear_proposals_cache = { - // take the modulo of the block number with `PROPOSAL_HASH_LIFETIME` - // if the result is 0, then `PROPOSAL_HASH_LIFETIME` blocks have passed since the last - // block time we cached a proposal hash for. - header - .number() - .checked_rem(&PROPOSAL_HASH_LIFETIME.into()) - .map(|x| x.is_zero()) - .unwrap_or(false) - }; - - if should_clear_proposals_cache { - self.currently_signing_proposals.write().clear(); - } - - let unsigned_proposals = match self.client.runtime_api().get_unsigned_proposals(at) { - Ok(res) => { - let mut filtered_unsigned_proposals = Vec::new(); - for proposal in res { - if let Some(hash) = proposal.hash() { - if !self.currently_signing_proposals.read().contains(&hash) { - // update unsigned proposal counter - metric_inc!(self, dkg_unsigned_proposal_counter); - filtered_unsigned_proposals.push(proposal); - } - - // lets limit the max proposals we sign at one time to prevent overflow - if filtered_unsigned_proposals.len() >= - MAX_UNSIGNED_PROPOSALS_PER_SIGNING_SET - { - break - } - } - } - filtered_unsigned_proposals - }, - Err(e) => { - self.logger - .error(format!("🕸️ PARTY {party_i} | Failed to get unsigned proposals: {e:?}")); - return Err(DKGError::GenericError { - reason: format!("Failed to get unsigned proposals: {e:?}"), - }) - }, - }; - if unsigned_proposals.is_empty() { - return Ok(()) - } else { - self.logger.debug(format!( - "🕸️ PARTY {party_i} | Got unsigned proposals count {}", - unsigned_proposals.len() - )); - } - - let best_authorities: Vec<_> = self - .get_best_authorities(header) - .into_iter() - .flat_map(|(i, p)| KeygenPartyId::try_from(i).map(|i| (i, p))) - .collect(); - let threshold = self.get_signature_threshold(header); - let authority_public_key = self.get_authority_public_key(); - let mut count = 0; - let mut seed = dkg_pub_key; - - // Generate multiple signing sets for signing the same unsigned proposals. - // The goal is to successfully sign proposals immediately in the event that - // some authorities are not present. - // - // For example, if we have authorities: [1,2,3] and we only generate a single - // signing set (1,2), then if either party is absent, we will not be able to sign - // until we handle a misbehaviour. Instead, we brute force sign with multiple sets. - // For `n` authorities, to cover all signing sets of size `t+1`, we need to generate - // (n choose (t+1)) sets. - // - // Sets with the same values are not unique. We only care about all unique, unordered - // permutations of size `t+1`. i.e. (1,2), (2,3), (1,3) === (2,1), (3,2), (3,1) - let factorial = |num: u64| match num { - 0 => 1, - 1.. 
=> (1..=num).product(), - }; - let mut signing_sets = Vec::new(); - let n = factorial(best_authorities.len() as u64); - let k = factorial((threshold + 1) as u64); - let n_minus_k = factorial((best_authorities.len() - threshold as usize - 1) as u64); - let num_combinations = std::cmp::min(n / (k * n_minus_k), MAX_SIGNING_SETS); - self.logger.debug(format!("Generating {num_combinations} signing sets")); - while signing_sets.len() < num_combinations as usize { - if count > 0 { - seed = sp_core::keccak_256(&seed).to_vec(); - } - let maybe_set = self.generate_signers(&seed, threshold, best_authorities.clone()).ok(); - if let Some(set) = maybe_set { - let set = HashSet::<_>::from_iter(set.iter().cloned()); - if !signing_sets.contains(&set) { - signing_sets.push(set); - } - } - - count += 1; - } - metric_set!(self, dkg_signing_sets, signing_sets.len()); - - let mut futures = Vec::with_capacity(signing_sets.len()); - #[allow(clippy::needless_range_loop)] - for i in 0..signing_sets.len() { - // Filter for only the signing sets that contain our party index. - if signing_sets[i].contains(&party_i) { - self.logger.info(format!( - "🕸️ Session Id {:?} | Async index {:?} | {}-out-of-{} signers: ({:?})", - session_id, - i, - threshold, - best_authorities.len(), - signing_sets[i].clone(), - )); - match self.create_signing_protocol( - best_authorities.clone(), - authority_public_key.clone(), - party_i, - session_id, - threshold, - ProtoStageType::Signing, - unsigned_proposals.clone(), - signing_sets[i].clone().into_iter().sorted().collect::>(), - // using i here as the async index is not correct at all, - // instead we should find a free index in the `signing_rounds` and use that - // - // FIXME: use a free index in the `signing_rounds` instead of `i` - i as _, - ) { - Ok(task) => futures.push(task), - Err(err) => { - self.logger.error(format!("Error creating signing protocol: {:?}", &err)); - self.handle_dkg_error(err) - }, - } - } - } - - if futures.is_empty() { - self.logger - .error("While creating the signing protocol, 0 were created".to_string()); - Err(DKGError::GenericError { - reason: "While creating the signing protocol, 0 were created".to_string(), - }) - } else { - let proposal_hashes = - unsigned_proposals.iter().filter_map(|x| x.hash()).collect::>(); - // save the proposal hashes in the currently_signing_proposals. - // this is used to check if we have already signed a proposal or not. 
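// Worked example of the signing-set count computed above: with n = 3 authorities
// and threshold t = 1, the number of unique unordered sets of size t + 1 = 2 is
// C(3, 2) = 3! / (2! * 1!) = 3, i.e. (1,2), (1,3), (2,3).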
- let logger = self.logger.clone(); - self.currently_signing_proposals.write().extend(proposal_hashes.clone()); - logger.info(format!("Signing protocol created, added {:?} proposals to currently_signing_proposals list", proposal_hashes.len())); - // the goal of the meta task is to select the first winner - let meta_signing_protocol = async move { - // select the first future to return Ok(()), ignoring every failure - // (note: the errors are not truly ignored since each individual future - // has logic to handle errors internally, including misbehaviour monitors - let mut results = futures::future::select_ok(futures).await.into_iter(); - if let Some((_success, _losing_futures)) = results.next() { - logger.info(format!( - "*** SUCCESSFULLY EXECUTED meta signing protocol {_success:?} ***" - )); - } else { - logger.warn("*** UNSUCCESSFULLY EXECUTED meta signing protocol".to_string()); - } - }; - - // spawn in parallel - let _handle = tokio::task::spawn(meta_signing_protocol); - Ok(()) - } - } - - /// After keygen, this should be called to generate a random set of signers - /// NOTE: since the random set is called using a deterministic seed to and RNG, - /// the resulting set is deterministic - fn generate_signers( - &self, - seed: &[u8], - t: u16, - best_authorities: Vec<(KeygenPartyId, Public)>, - ) -> Result, DKGError> { - let only_public_keys = best_authorities.iter().map(|(_, p)| p).cloned().collect::>(); - let mut final_set = self.get_unjailed_signers(&only_public_keys)?; - // Mutate the final set if we don't have enough unjailed signers - if final_set.len() <= t as usize { - let jailed_set = self.get_jailed_signers(&only_public_keys)?; - let diff = t as usize + 1 - final_set.len(); - final_set = final_set - .iter() - .chain(jailed_set.iter().take(diff)) - .cloned() - .collect::>(); - } - - select_random_set(seed, final_set, t + 1) - .map(|set| set.into_iter().flat_map(KeygenPartyId::try_from).collect::>()) - .map_err(|err| DKGError::CreateOfflineStage { - reason: format!("generate_signers failed, reason: {err}"), - }) + async fn handle_unsigned_proposals(&self, header: &B::Header) -> Result<(), DKGError> { + self.signing_manager.on_block_finalized(header, self).await } fn get_jailed_signers_inner( @@ -1704,7 +1441,10 @@ where .get_signing_jailed(at, best_authorities.to_vec()) .unwrap_or_default()) } - fn get_unjailed_signers(&self, best_authorities: &[Public]) -> Result, DKGError> { + pub(crate) fn get_unjailed_signers( + &self, + best_authorities: &[Public], + ) -> Result, DKGError> { let jailed_signers = self.get_jailed_signers_inner(best_authorities)?; Ok(best_authorities .iter() @@ -1715,7 +1455,10 @@ where } /// Get the jailed signers - fn get_jailed_signers(&self, best_authorities: &[Public]) -> Result, DKGError> { + pub(crate) fn get_jailed_signers( + &self, + best_authorities: &[Public], + ) -> Result, DKGError> { let jailed_signers = self.get_jailed_signers_inner(best_authorities)?; Ok(best_authorities .iter() @@ -1725,40 +1468,47 @@ where .collect()) } - fn should_execute_new_keygen(&self, header: &B::Header) -> bool { + async fn should_execute_new_keygen(&self, header: &B::Header) -> bool { // query runtime api to check if we should execute new keygen. 
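 		// `at` is computed first so the `move` closure below owns it;
 		// `exec_client_function` (defined just below) runs the closure via
 		// `tokio::task::spawn_blocking`, keeping runtime-api calls off the async
 		// executor threads.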
 		let at = header.hash();
-		self.client.runtime_api().should_execute_new_keygen(at).unwrap_or_default()
+		self.exec_client_function(move |client| {
+			client.runtime_api().should_execute_new_keygen(at).unwrap_or_default()
+		})
+		.await
+	}
+
+	/// Wraps the call in a `spawn_blocking` task
+	pub async fn exec_client_function<F, T>(&self, function: F) -> T
+	where
+		for<'a> F: FnOnce(&'a C) -> T,
+		T: Send + 'static,
+		F: Send + 'static,
+	{
+		let client = &self.client;
+		exec_client_function(client, function).await
 	}
 
 	/// Wait for initial finalized block
 	async fn initialization(&mut self) {
-		use futures::future;
-		self.client
-			.finality_notification_stream()
-			.take_while(|notif| {
-				if let Some((active, queued)) = self.validator_set(&notif.header) {
-					// Cache the authority sets and best authorities
-					*self.best_authorities.write() = self.get_best_authorities(&notif.header);
-					*self.current_validator_set.write() = active;
-					*self.queued_validator_set.write() = queued;
-					// Route this to the finality notification handler
-					self.handle_finality_notification(notif.clone());
-					self.logger.debug("Initialization complete");
-					// End the initialization stream
-					future::ready(false)
-				} else {
-					future::ready(true)
-				}
-			})
-			.for_each(|_| future::ready(()))
-			.await;
+		let mut stream = self.client.finality_notification_stream();
+		while let Some(notif) = stream.next().await {
+			if let Some((active, queued)) = self.validator_set(&notif.header).await {
+				// Cache the authority sets and best authorities
+				*self.best_authorities.write() = self.get_best_authorities(&notif.header).await;
+				*self.current_validator_set.write() = active;
+				*self.queued_validator_set.write() = queued;
+				// Route this to the finality notification handler
+				self.handle_finality_notification(notif.clone()).await;
+				self.logger.debug("Initialization complete");
+				// End the initialization stream
+				return
+			}
+		}
 	}
 
 	// *** Main run loop ***
 
 	pub async fn run(mut self) {
-		let _tag = self.keygen_gossip_engine.local_peer_id().to_string();
-		dkg_logging::define_span!("DKG Client", _tag);
+		crate::deadlock_detection::deadlock_detect();
 		let (misbehaviour_tx, misbehaviour_rx) = tokio::sync::mpsc::unbounded_channel();
 		self.misbehaviour_tx = Some(misbehaviour_tx);
 		self.initialization().await;
@@ -1783,28 +1533,29 @@ where
 		let self_ = self.clone();
 		tokio::spawn(async move {
 			while let Some(notification) = stream.next().await {
-				self_.logger.debug("Going to handle Finality notification");
-				self_.handle_finality_notification(notification);
+				dkg_logging::debug!("Going to handle Finality notification");
+				self_.handle_finality_notification(notification).await;
 			}
+
+			self_.logger.error("Finality notification stream ended");
 		})
 	}
 
 	fn spawn_keygen_messages_stream_task(&self) -> tokio::task::JoinHandle<()> {
 		let keygen_gossip_engine = self.keygen_gossip_engine.clone();
-		let mut keygen_stream = keygen_gossip_engine
-			.message_available_notification()
-			.filter_map(move |_| futures::future::ready(keygen_gossip_engine.peek_last_message()));
+		let mut keygen_stream =
+			keygen_gossip_engine.get_stream().expect("keygen gossip stream already taken");
 		let self_ = self.clone();
 		tokio::spawn(async move {
-			while let Some(msg) = keygen_stream.next().await {
+			while let Some(msg) = keygen_stream.recv().await {
+				let msg_hash = crate::debug_logger::raw_message_to_hash(msg.msg.payload.payload());
 				self_.logger.debug(format!(
-					"Going to handle keygen message for session {}",
+					"Going to handle keygen message for session {} | hash: {msg_hash}",
 					msg.msg.session_id
 				));
-				match self_.process_incoming_dkg_message(msg) {
-					Ok(_) => {
-						self_.keygen_gossip_engine.acknowledge_last_message();
-					},
+				self_.logger.checkpoint_message_raw(msg.msg.payload.payload(), "CP1-keygen");
+				match self_.process_incoming_dkg_message(msg).await {
+					Ok(_) => {},
 					Err(e) => {
 						self_.logger.error(format!("Error processing keygen message: {e:?}"));
 					},
@@ -1815,20 +1566,18 @@ where
 
 	fn spawn_signing_messages_stream_task(&self) -> tokio::task::JoinHandle<()> {
 		let signing_gossip_engine = self.signing_gossip_engine.clone();
-		let mut signing_stream = signing_gossip_engine
-			.message_available_notification()
-			.filter_map(move |_| futures::future::ready(signing_gossip_engine.peek_last_message()));
+		let mut signing_stream =
+			signing_gossip_engine.get_stream().expect("signing gossip stream already taken");
 		let self_ = self.clone();
 		tokio::spawn(async move {
-			while let Some(msg) = signing_stream.next().await {
+			while let Some(msg) = signing_stream.recv().await {
 				self_.logger.debug(format!(
 					"Going to handle signing message for session {}",
 					msg.msg.session_id
 				));
-				match self_.process_incoming_dkg_message(msg) {
-					Ok(_) => {
-						self_.signing_gossip_engine.acknowledge_last_message();
-					},
+				self_.logger.checkpoint_message_raw(msg.msg.payload.payload(), "CP1-signing");
+				match self_.process_incoming_dkg_message(msg).await {
+					Ok(_) => {},
 					Err(e) => {
 						self_.logger.error(format!("Error processing signing message: {e:?}"));
 					},
@@ -1845,7 +1594,7 @@ where
 		tokio::spawn(async move {
 			while let Some(misbehaviour) = misbehaviour_rx.recv().await {
 				self_.logger.debug("Going to handle Misbehaviour");
-				let gossip = gossip_misbehaviour_report(&self_, misbehaviour);
+				let gossip = gossip_misbehaviour_report(&self_, misbehaviour).await;
 				if gossip.is_err() {
 					self_.logger.info("🕸️ DKG gossip_misbehaviour_report failed!");
 				}
@@ -1860,7 +1609,7 @@ where
 		tokio::spawn(async move {
 			while let Ok(error) = error_handler_rx.recv().await {
 				logger.debug("Going to handle Error");
-				self_.handle_dkg_error(error);
+				self_.handle_dkg_error(error).await;
 			}
 		})
 	}
@@ -1906,7 +1655,7 @@ impl KeystoreExt for DKGKeystore {
 }
 
 #[auto_impl::auto_impl(&mut, &, Arc)]
-pub trait HasLatestHeader<B: Block> {
+pub trait HasLatestHeader<B: Block>: Send + Sync + 'static {
 	fn get_latest_header(&self) -> &Arc<RwLock<Option<B::Header>>>;
 	/// Gets latest block number from latest block header
 	fn get_latest_block_number(&self) -> NumberFor<B> {
@@ -1921,9 +1670,9 @@ impl<B, BE, C, GE> HasLatestHeader<B> for DKGWorker<B, BE, C, GE>
 where
 	B: Block,
-	BE: Backend<B>,
+	BE: Backend<B> + 'static,
 	GE: GossipEngineIface,
-	C: Client<B, BE>,
+	C: Client<B, BE> + 'static,
 	MaxProposalLength: Get<u32>,
 	MaxAuthorities: Get<u32>,
 {
@@ -1931,3 +1680,29 @@ where
 		&self.latest_header
 	}
 }
+
+pub struct Clock<B: Block> {
+	pub latest_header: Arc<RwLock<Option<B::Header>>>,
+}
+
+impl<B: Block> HasLatestHeader<B> for Clock<B> {
+	fn get_latest_header(&self) -> &Arc<RwLock<Option<B::Header>>> {
+		&self.latest_header
+	}
+}
+
+/// Wraps the call in a `spawn_blocking` task
+async fn exec_client_function<B, BE, C, F, T>(client: &Arc<C>, function: F) -> T
+where
+	for<'a> F: FnOnce(&'a C) -> T,
+	B: Block,
+	BE: Backend<B>,
+	C: Client<B, BE> + 'static,
+	T: Send + 'static,
+	F: Send + 'static,
+{
+	let client = client.clone();
+	tokio::task::spawn_blocking(move || function(&client))
+		.await
+		.expect("Failed to spawn blocking task")
+}
diff --git a/dkg-logging/Cargo.toml b/dkg-logging/Cargo.toml
index 8e1541bcb..b34394070 100644
--- a/dkg-logging/Cargo.toml
+++ b/dkg-logging/Cargo.toml
@@ -12,4 +12,11 @@ edition = { workspace = true }
 
 [dependencies]
 tracing = { workspace = true }
 tracing-subscriber = { workspace = true, features = ["env-filter", "json"] }
-tracing-filter = "0.1.0-alpha.2"
\ No newline at end of file
+tracing-filter = "0.1.0-alpha.2"
+parking_lot = { workspace = true }
+serde = { workspace = true }
+sp-core = { workspace = true }
+lazy_static = { workspace = true }
+tokio = { workspace = true }
+serde_json = { workspace = true }
+hex = { workspace = true }
diff --git a/dkg-logging/src/debug_logger.rs b/dkg-logging/src/debug_logger.rs
new file mode 100644
index 000000000..980024213
--- /dev/null
+++ b/dkg-logging/src/debug_logger.rs
@@ -0,0 +1,438 @@
+#![allow(clippy::unwrap_used)]
+use crate::{debug, error, info, trace, warn};
+use lazy_static::lazy_static;
+use parking_lot::RwLock;
+use serde::Serialize;
+use sp_core::{bytes::to_hex, hashing::sha2_256};
+use std::{collections::HashMap, fmt::Debug, io::Write, sync::Arc, time::Instant};
+
+#[derive(Clone, Debug)]
+pub struct DebugLogger {
+	identifier: Arc<RwLock<String>>,
+	to_file_io: tokio::sync::mpsc::UnboundedSender<MessageType>,
+	file_handle: Arc<RwLock<Option<std::fs::File>>>,
+	events_file_handle_keygen: Arc<RwLock<Option<std::fs::File>>>,
+	events_file_handle_signing: Arc<RwLock<Option<std::fs::File>>>,
+	events_file_handle_voting: Arc<RwLock<Option<std::fs::File>>>,
+	checkpoints_enabled: bool,
+}
+
+struct Checkpoint {
+	checkpoint: String,
+	message_hash: String,
+}
+
+lazy_static! {
+	static ref CHECKPOINTS: RwLock<HashMap<String, Checkpoint>> = RwLock::new(HashMap::new());
+}
+
+#[derive(Debug, Copy, Clone)]
+pub enum AsyncProtocolType {
+	Keygen,
+	Signing { hash: [u8; 32] },
+	Voting { hash: [u8; 32] },
+}
+
+#[derive(Debug)]
+enum MessageType {
+	Default(String),
+	Event(RoundsEvent),
+}
+
+lazy_static::lazy_static! {
+	static ref INIT_TIME: Instant = Instant::now();
+	static ref NAMES_MAP: RwLock<HashMap<String, &'static str>> = RwLock::new(HashMap::new());
+	static ref PARTY_I_MAP: RwLock<HashMap<usize, String>> = RwLock::new(HashMap::new());
+}
+
+// names for mapping the uuids to a human-readable name
+const NAMES: &[&str] = &[
+	"Alice", "Bob", "Charlie", "Dave", "Eve", "Faythe", "Grace", "Heidi", "Ivan", "Judy",
+	"Mallory", "Niaj", "Olivia", "Peggy", "Rupert", "Sybil", "Trent", "Walter", "Wendy", "Zach",
+];
+
+pub struct RoundsEvent {
+	name: String,
+	event: RoundsEventType,
+	proto: AsyncProtocolType,
+}
+pub enum RoundsEventType {
+	SentMessage {
+		session: usize,
+		round: usize,
+		sender: u16,
+		receiver: Option<u16>,
+		msg_hash: String,
+	},
+	ReceivedMessage {
+		session: usize,
+		round: usize,
+		sender: u16,
+		receiver: Option<u16>,
+		msg_hash: String,
+	},
+	ProcessedMessage {
+		session: usize,
+		round: usize,
+		sender: u16,
+		receiver: Option<u16>,
+		msg_hash: String,
+	},
+	ProceededToRound {
+		session: u64,
+		round: usize,
+	},
+	// this probably shouldn't happen, but just in case, we will emit events if this does occur
+	PartyIndexChanged {
+		previous: usize,
+		new: usize,
+	},
+}
+
+impl RoundsEventType {
+	fn sender(&self) -> Option<usize> {
+		match self {
+			RoundsEventType::SentMessage { sender, .. } => Some(*sender as usize),
+			RoundsEventType::ReceivedMessage { sender, .. } => Some(*sender as usize),
+			RoundsEventType::ProcessedMessage { sender, .. } => Some(*sender as usize),
+			_ => None,
+		}
+	}
+}
+
+impl AsyncProtocolType {
+	fn hash(&self) -> Option<&[u8; 32]> {
+		match self {
+			AsyncProtocolType::Keygen => None,
+			AsyncProtocolType::Signing { hash } => Some(hash),
+			AsyncProtocolType::Voting { hash } => Some(hash),
+		}
+	}
+}
+
+fn get_legible_name(idx: Option<u16>) -> String {
+	if let Some(party_i) = idx {
+		let party_i = party_i as usize;
+		if let Some(uuid) = PARTY_I_MAP.read().get(&party_i).cloned() {
+			if let Some(name) = NAMES_MAP.read().get(&uuid).cloned() {
+				name.to_string()
+			} else {
+				party_i.to_string()
+			}
+		} else {
+			party_i.to_string()
+		}
+	} else {
+		"everyone".to_string()
+	}
+}
+
+impl Debug for RoundsEvent {
+	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		let me = &self.name;
+		let hash_opt = self.proto.hash().map(hex::encode);
+		let hash_str =
+			hash_opt.map(|hash| format!(" unsigned proposal {hash}")).unwrap_or_default();
+		match &self.event {
+			RoundsEventType::SentMessage { session, round, receiver, msg_hash, .. } => {
+				let receiver = get_legible_name(*receiver);
+				writeln!(f, "{me} sent a message to {receiver} for session {session} round {round}{hash_str} | {msg_hash}")
+			},
+			RoundsEventType::ReceivedMessage { session, round, sender, receiver, msg_hash } => {
+				let msg_type = receiver.map(|_| "direct").unwrap_or("broadcast");
+				let sender = get_legible_name(Some(*sender));
+				writeln!(f, "{me} received a {msg_type} message from {sender} for session {session} round {round}{hash_str}| {msg_hash}")
+			},
+			RoundsEventType::ProcessedMessage { session, round, sender, receiver, msg_hash } => {
+				let msg_type = receiver.map(|_| "direct").unwrap_or("broadcast");
+				let sender = get_legible_name(Some(*sender));
+				writeln!(f, "{me} processed a {msg_type} message from {sender} for session {session} round {round}{hash_str}| {msg_hash}")
+			},
+			RoundsEventType::ProceededToRound { session, round } => {
+				writeln!(f, "\n~~~~~~~~~~~~~~~~~ {me} Proceeded to round {round} for session {session} {hash_str} ~~~~~~~~~~~~~~~~~")
+			},
+			RoundsEventType::PartyIndexChanged { previous, new } => {
+				writeln!(f, "!!!! Party index changed from {previous} to {new} !!!!")
+			},
+		}
+	}
+}
+
+type EventFiles = (
+	Option<std::fs::File>,
+	Option<std::fs::File>,
+	Option<std::fs::File>,
+	Option<std::fs::File>,
+);
+
+impl DebugLogger {
+	pub fn new<T: ToString>(
+		identifier: T,
+		file: Option<std::path::PathBuf>,
+	) -> std::io::Result<Self> {
+		// use a channel for sending file I/O requests to a dedicated thread to avoid blocking the
+		// DKG workers
+
+		let checkpoints_enabled = std::env::var("CHECKPOINTS").unwrap_or_default() == "enabled";
+		if checkpoints_enabled {
+			static HAS_CHECKPOINT_TRACKER_RUN: std::sync::atomic::AtomicBool =
+				std::sync::atomic::AtomicBool::new(false);
+			if !HAS_CHECKPOINT_TRACKER_RUN.swap(true, std::sync::atomic::Ordering::Relaxed) {
+				// spawn a task to periodically print out the last checkpoint for each message
+				println!("Running checkpoint tracker");
+				tokio::task::spawn(async move {
+					loop {
+						tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
+						let lock = CHECKPOINTS.read();
+						for checkpoint in lock.values() {
+							warn!(target: "dkg", "Checkpoint for {} last at {}", checkpoint.message_hash, checkpoint.checkpoint);
+						}
+					}
+				});
+			}
+		}
+
+		let (file, events_file_keygen, events_file_signing, events_file_voting) =
+			Self::get_files(file)?;
+
+		let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
+		let file_handle = Arc::new(RwLock::new(file));
+		let fh_task = file_handle.clone();
+
+		let events_file_handle = Arc::new(RwLock::new(events_file_keygen));
+		let events_fh_task = events_file_handle.clone();
+
+		let events_file_handle_signing = Arc::new(RwLock::new(events_file_signing));
+		let events_fh_task_signing = events_file_handle_signing.clone();
+
+		let events_file_handle_voting = Arc::new(RwLock::new(events_file_voting));
+		let events_fh_task_voting = events_file_handle_voting.clone();
+
+		if tokio::runtime::Handle::try_current().is_ok() {
+			tokio::task::spawn(async move {
+				while let Some(message) = rx.recv().await {
+					match message {
+						MessageType::Default(message) =>
+							if let Some(file) = fh_task.write().as_mut() {
+								writeln!(file, "{message}").unwrap();
+							},
+						MessageType::Event(event) => match event.proto {
+							AsyncProtocolType::Keygen => {
+								if let Some(file) = events_fh_task.write().as_mut() {
+									writeln!(file, "{event:?}").unwrap();
+								}
+							},
+							AsyncProtocolType::Signing { .. } => {
+								if let Some(file) = events_fh_task_signing.write().as_mut() {
+									writeln!(file, "{event:?}").unwrap();
+								}
+							},
+							AsyncProtocolType::Voting { .. } => {
+								if let Some(file) = events_fh_task_voting.write().as_mut() {
+									writeln!(file, "{event:?}").unwrap();
+								}
+							},
+						},
+					}
+				}
+			});
+		}
+
+		Ok(Self {
+			identifier: Arc::new(identifier.to_string().into()),
+			to_file_io: tx,
+			file_handle,
+			events_file_handle_keygen: events_file_handle,
+			events_file_handle_signing,
+			events_file_handle_voting,
+			checkpoints_enabled,
+		})
+	}
+
+	fn get_files(base_output: Option<std::path::PathBuf>) -> std::io::Result<EventFiles> {
+		if let Some(file_path) = &base_output {
+			let file = std::fs::File::create(file_path)?;
+			let events_file =
+				std::fs::File::create(format!("{}.keygen.log", file_path.display()))?;
+			let events_file_signing =
+				std::fs::File::create(format!("{}.signing.log", file_path.display()))?;
+			let events_file_voting =
+				std::fs::File::create(format!("{}.voting.log", file_path.display()))?;
+			Ok((Some(file), Some(events_file), Some(events_file_signing), Some(events_file_voting)))
+		} else {
+			Ok((None, None, None, None))
+		}
+	}
+
+	pub fn set_id<T: ToString>(&self, id: T) {
+		let id = id.to_string();
+		let mut names_map = NAMES_MAP.write();
+		let len = names_map.len();
+		assert!(len < NAMES.len());
+		names_map.insert(id.clone(), NAMES[len]);
+		*self.identifier.write() = id;
+	}
+
+	pub fn set_output(&self, file: Option<std::path::PathBuf>) -> std::io::Result<()> {
+		let (file, event_file, signing_file, voting_file) = Self::get_files(file)?;
+		*self.file_handle.write() = file;
+		*self.events_file_handle_keygen.write() = event_file;
+		*self.events_file_handle_signing.write() = signing_file;
+		*self.events_file_handle_voting.write() = voting_file;
+		Ok(())
+	}
+
+	fn get_identifier(&self) -> String {
+		self.identifier.read().to_string()
+	}
+
+	pub fn trace<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget", "trace", &message);
+		trace!(target: "dkg_gadget", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn debug<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget", "debug", &message);
+		debug!(target: "dkg_gadget", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn info<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget", "info", &message);
+		info!(target: "dkg_gadget", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn warn<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget", "warn", &message);
+		warn!(target: "dkg_gadget", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn error<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget", "error", &message);
+		error!(target: "dkg_gadget", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn trace_signing<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget::async_protocol::signing", "trace", &message);
+		trace!(target: "dkg_gadget::signing", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn debug_signing<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget::async_protocol::signing", "debug", &message);
+		debug!(target: "dkg_gadget::signing", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn info_signing<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget::async_protocol::signing", "info", &message);
+		info!(target: "dkg_gadget::signing", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn warn_signing<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget::async_protocol::signing", "warn", &message);
+		warn!(target: "dkg_gadget::signing", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn error_signing<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget::async_protocol::signing", "error", &message);
+		error!(target: "dkg_gadget::signing", "[{}]: {message}", self.get_identifier());
+	}
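+	// The *_keygen methods below mirror the *_signing family above, differing only
+	// in the file-log target (`dkg_gadget::async_protocol::keygen`) and the tracing
+	// target (`dkg_gadget::keygen`).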
+	pub fn trace_keygen<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget::async_protocol::keygen", "trace", &message);
+		trace!(target: "dkg_gadget::keygen", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn debug_keygen<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget::async_protocol::keygen", "debug", &message);
+		debug!(target: "dkg_gadget::keygen", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn info_keygen<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget::async_protocol::keygen", "info", &message);
+		info!(target: "dkg_gadget::keygen", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn warn_keygen<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget::async_protocol::keygen", "warn", &message);
+		warn!(target: "dkg_gadget::keygen", "[{}]: {message}", self.get_identifier());
+	}
+
+	pub fn error_keygen<T: std::fmt::Display>(&self, message: T) {
+		self.log_to_file("dkg_gadget::async_protocol::keygen", "error", &message);
+		error!(target: "dkg_gadget::keygen", "[{}]: {message}", self.get_identifier());
+	}
+
+	fn log_to_file<T: std::fmt::Display>(&self, target: &str, level: &str, message: T) {
+		let time = INIT_TIME.elapsed();
+		let message = format!("[{target}] [{level}] [{time:?}] : {message}");
+		if let Err(err) = self.to_file_io.send(MessageType::Default(message)) {
+			error!(target: "dkg_gadget", "failed to send log message to file: {err:?}");
+		}
+	}
+
+	pub fn round_event<T: Into<AsyncProtocolType>>(&self, proto: T, event: RoundsEventType) {
+		let id = self.identifier.read().clone();
+		let proto = proto.into();
+		if let Some(sender) = event.sender() {
+			if matches!(event, RoundsEventType::SentMessage { .. }) {
+				let prev_val = PARTY_I_MAP.write().insert(sender, id.clone());
+				if let Some(prev_val) = prev_val {
+					if prev_val != id {
+						// This means our ID changed. This shouldn't happen in the harness tests
+					}
+				}
+			}
+		}
+
+		let name = if let Some(val) = NAMES_MAP.read().get(&id) { val.to_string() } else { id };
+		let event = RoundsEvent { name, event, proto };
+		if let Err(err) = self.to_file_io.send(MessageType::Event(event)) {
+			error!(target: "dkg_gadget", "failed to send event message to file: {err:?}");
+		}
+	}
+
+	pub fn checkpoint_message<T: Serialize>(&self, msg: T, checkpoint: impl Into<String>) {
+		if self.checkpoints_enabled {
+			let hash = message_to_string_hash(&msg);
+			CHECKPOINTS.write().insert(
+				hash.clone(),
+				Checkpoint { checkpoint: checkpoint.into(), message_hash: hash },
+			);
+		}
+	}
+
+	pub fn checkpoint_message_raw(&self, payload: &[u8], checkpoint: impl Into<String>) {
+		if self.checkpoints_enabled {
+			let hash = raw_message_to_hash(payload);
+			CHECKPOINTS.write().insert(
+				hash.clone(),
+				Checkpoint { checkpoint: checkpoint.into(), message_hash: hash },
+			);
+		}
+	}
+
+	pub fn clear_checkpoints(&self) {
+		if self.checkpoints_enabled {
+			CHECKPOINTS.write().clear();
+		}
+	}
+
+	pub fn clear_checkpoint_for_message<T: Serialize>(&self, msg: T) {
+		if self.checkpoints_enabled {
+			let hash = message_to_string_hash(&msg);
+			CHECKPOINTS.write().remove(&hash);
+		}
+	}
+
+	pub fn clear_checkpoint_for_message_raw(&self, payload: &[u8]) {
+		if self.checkpoints_enabled {
+			let hash = raw_message_to_hash(payload);
+			CHECKPOINTS.write().remove(&hash);
+		}
+	}
+}
+
+pub fn message_to_string_hash<T: Serialize>(msg: T) -> String {
+	let message = serde_json::to_vec(&msg).expect("message_to_string_hash");
+	let message = sha2_256(&message);
+	to_hex(&message, false)
+}
+
+pub fn raw_message_to_hash(payload: &[u8]) -> String {
+	let message = sha2_256(payload);
+	to_hex(&message, false)
+}
diff --git a/dkg-logging/src/lib.rs b/dkg-logging/src/lib.rs
index a72debe5e..f7213b876
100644 --- a/dkg-logging/src/lib.rs +++ b/dkg-logging/src/lib.rs @@ -5,6 +5,8 @@ use tracing_subscriber::{ EnvFilter, }; +pub mod debug_logger; + pub fn setup_log() { let _ = SubscriberBuilder::default() .with_line_number(true) diff --git a/dkg-mock-blockchain/Cargo.toml b/dkg-mock-blockchain/Cargo.toml index cc96ffca0..6c330678c 100644 --- a/dkg-mock-blockchain/Cargo.toml +++ b/dkg-mock-blockchain/Cargo.toml @@ -22,3 +22,5 @@ sp-consensus = { workspace = true } sc-network = { workspace = true } sc-utils = { workspace = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +dkg-runtime-primitives = { workspace = true, features = ["testing", "std"] } +dkg-logging = { workspace = true } diff --git a/dkg-mock-blockchain/src/data_types.rs b/dkg-mock-blockchain/src/data_types.rs index 1ee2af22b..cf958d161 100644 --- a/dkg-mock-blockchain/src/data_types.rs +++ b/dkg-mock-blockchain/src/data_types.rs @@ -35,9 +35,9 @@ pub enum AttachedCommand { /// about its internal state back to the MockBlockchain server for centralized /// introspection #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct MockClientResponse { - pub result: Result<(), String>, - pub trace_id: Uuid, +pub enum MockClientResponse { + Keygen { result: Result<(), String>, trace_id: Uuid, pub_key: Vec }, + Sign { result: Result<(), String>, trace_id: Uuid }, } /// For keeping track of various events sent to subscribing clients diff --git a/dkg-mock-blockchain/src/mock_blockchain_config.rs b/dkg-mock-blockchain/src/mock_blockchain_config.rs index 9ed0d5159..568581cd8 100644 --- a/dkg-mock-blockchain/src/mock_blockchain_config.rs +++ b/dkg-mock-blockchain/src/mock_blockchain_config.rs @@ -18,6 +18,8 @@ pub struct MockBlockchainConfig { pub positive_cases: usize, // a set of error cases pub error_cases: Option>, + // the number of unsigned proposals to send per session + pub unsigned_proposals_per_session: Option, } #[derive(Serialize, Deserialize, Debug, Clone)] diff --git a/dkg-mock-blockchain/src/server.rs b/dkg-mock-blockchain/src/server.rs index e17c6e8b0..998843ac3 100644 --- a/dkg-mock-blockchain/src/server.rs +++ b/dkg-mock-blockchain/src/server.rs @@ -1,10 +1,13 @@ use crate::{ mock_blockchain_config::MockBlockchainConfig, transport::*, FinalityNotification, - MockBlockchainEvent, TestBlock, TestCase, + MockBlockchainEvent, MockClientResponse, TestBlock, TestCase, }; use atomic::Atomic; +use dkg_logging::debug_logger::DebugLogger; +use dkg_runtime_primitives::UnsignedProposal; use futures::{SinkExt, StreamExt}; use sc_client_api::FinalizeSummary; +use sp_runtime::app_crypto::sp_core::hashing::sha2_256; use std::{ collections::{HashMap, VecDeque}, net::SocketAddr, @@ -20,7 +23,7 @@ use uuid::Uuid; pub type PeerId = sc_network::PeerId; #[derive(Clone)] -pub struct MockBlockchain { +pub struct MockBlockchain { listener: Arc>>, config: MockBlockchainConfig, clients: Arc>>, @@ -29,6 +32,8 @@ pub struct MockBlockchain { // the orchestrator receives updates from its client sub-tasks from this receiver orchestrator_rx: Arc>>>, orchestrator_state: Arc>, + blockchain: T, + logger: DebugLogger, } /// For communicating between the orchestrator task and each spawned client sub-task @@ -42,8 +47,9 @@ enum ClientToOrchestratorEvent { } #[derive(Debug)] -struct TestResult { - result: Result<(), String>, +enum TestResult { + Keygen { result: Result<(), String>, pub_key: Vec }, + Sign { result: Result<(), String> }, } #[derive(Debug)] @@ -69,12 +75,17 @@ enum 
OrchestratorState { struct ConnectedClientState { // a map from tracing id => test case. Once the test case passes // for the specific client, the test case will be removed from the list - outstanding_tasks: HashMap, + outstanding_tasks_keygen: HashMap, + outstanding_tasks_signing: HashMap>, orchestrator_to_client_subtask: mpsc::UnboundedSender, } -impl MockBlockchain { - pub async fn new(config: MockBlockchainConfig) -> std::io::Result { +impl MockBlockchain { + pub async fn new( + config: MockBlockchainConfig, + blockchain: T, + logger: DebugLogger, + ) -> std::io::Result { let listener = TcpListener::bind(&config.bind).await?; let clients = Arc::new(RwLock::new(HashMap::new())); let (to_orchestrator, orchestrator_rx) = mpsc::unbounded_channel(); @@ -87,6 +98,8 @@ impl MockBlockchain { orchestrator_state, to_orchestrator, orchestrator_rx: Arc::new(Mutex::new(Some(orchestrator_rx))), + blockchain, + logger, }) } @@ -127,7 +140,8 @@ impl MockBlockchain { // create a channel for allowing the orchestrator to send this sub-task commands let (orchestrator_to_this_task, mut orchestrator_rx) = mpsc::unbounded_channel(); let state = ConnectedClientState { - outstanding_tasks: Default::default(), + outstanding_tasks_keygen: Default::default(), + outstanding_tasks_signing: Default::default(), orchestrator_to_client_subtask: orchestrator_to_this_task, }; @@ -155,8 +169,13 @@ impl MockBlockchain { panic!("Received invalid packet {pkt:?} inside to_orchestrator for {peer_id:?}") }, ProtocolPacket::ClientToBlockchain { event } => { - let trace_id = event.trace_id; - let result = TestResult { result: event.result }; + let (result, trace_id) = match event { + MockClientResponse::Keygen { result, trace_id, pub_key } => + (TestResult::Keygen { result, pub_key }, trace_id), + MockClientResponse::Sign { result, trace_id } => + (TestResult::Sign { result }, trace_id), + }; + self.to_orchestrator .send(ClientToOrchestratorEvent::TestResult { peer_id: *peer_id, @@ -200,8 +219,9 @@ impl MockBlockchain { async fn orchestrate(self) -> std::io::Result<()> { let mut test_cases = self.generate_test_cases(); let mut client_to_orchestrator_rx = self.orchestrator_rx.lock().await.take().unwrap(); - let round_id = &mut 0; - let mut current_round_completed_count = 0; + let mut current_round_completed_count_keygen = 0; + let mut current_round_completed_count_signing = 0; + let intra_test_phase = &mut IntraTestPhase::new(); let cl = self.clients.clone(); let state = self.orchestrator_state.clone(); @@ -211,14 +231,19 @@ impl MockBlockchain { let mut interval = tokio::time::interval(Duration::from_secs(5)); loop { interval.tick().await; + log::info!(target: "dkg", "Orchestrator state is {state:?}", state = state.load(Ordering::SeqCst)); if state.load(Ordering::SeqCst) != OrchestratorState::AwaitingRoundCompletion { continue } let clients = cl.read().await; for (id, client) in clients.iter() { - if !client.outstanding_tasks.is_empty() { - log::warn!(target: "dkg", "Client {id:?} has {tasks:?} outstanding task(s)", tasks = client.outstanding_tasks.len()); + if !client.outstanding_tasks_keygen.is_empty() { + log::warn!(target: "dkg", "Client {id:?} has {tasks:?} outstanding KEYGEN task(s)", tasks = client.outstanding_tasks_keygen.len()); + } + + if !client.outstanding_tasks_signing.is_empty() { + log::warn!(target: "dkg", "Client {id:?} has {tasks:?} outstanding SIGNING task(s)", tasks = client.outstanding_tasks_signing.values().map(|r| r.len()).sum::()); } } } @@ -234,7 +259,8 @@ impl MockBlockchain { if clients.len() == 
self.config.n_clients { // we are ready to begin testing rounds std::mem::drop(clients); - self.orchestrator_begin_next_round(&mut test_cases, round_id).await; + self.orchestrator_begin_next_round(&mut test_cases, intra_test_phase) + .await; } }, @@ -246,29 +272,103 @@ impl MockBlockchain { ClientToOrchestratorEvent::ClientReady => log_invalid_signal(&o_state, &client_update), ClientToOrchestratorEvent::TestResult { peer_id, trace_id, result } => { + let res = match result { + TestResult::Keygen { result, pub_key } => { + // set the public key that way other nodes can verify that + // the public key was submitted + // TODO: Make sure that we set_next_public key based on input + self.blockchain.set_pub_key( + intra_test_phase.round_number(), + pub_key.clone(), + ); + + result + }, + + TestResult::Sign { result } => result, + }; + let mut clients = self.clients.write().await; let client = clients.get_mut(peer_id).unwrap(); - if let Err(err) = &result.result { - log::error!(target: "dkg", "Peer {peer_id:?} unsuccessfully completed test {trace_id:?}. Reason: {err:?}"); - // do not remove from map. At the end , any remaining tasks will - // cause the orchestrator to have a nonzero exit code (useful for - // pipeline testing) - } else { - log::info!(target: "dkg", "Peer {peer_id:?} successfully completed test {trace_id:?}"); - // remove from map - assert!(client.outstanding_tasks.remove(trace_id).is_some()); - } // regardless of success, increment completed count for the current // round - current_round_completed_count += 1; + if matches!(result, TestResult::Keygen { .. }) && + matches!(intra_test_phase, IntraTestPhase::Keygen { .. }) + { + if let Err(err) = res { + log::error!(target: "dkg", "Peer {peer_id:?} unsuccessfully completed KEYGEN test {trace_id:?}. Reason: {err:?}"); + } else { + log::info!(target: "dkg", "Peer {peer_id:?} successfully completed KEYGEN test {trace_id:?}"); + client.outstanding_tasks_keygen.remove(trace_id); + } + current_round_completed_count_keygen += 1; + } + + if matches!(result, TestResult::Sign { .. }) && + matches!(intra_test_phase, IntraTestPhase::Signing { .. }) + { + if let Err(err) = res { + log::error!(target: "dkg", "Peer {peer_id:?} unsuccessfully completed SIGNING test {trace_id:?}. Reason: {err:?}"); + } else { + log::info!(target: "dkg", "Peer {peer_id:?} successfully completed SIGNING test {trace_id:?}"); + let entry = client + .outstanding_tasks_signing + .get_mut(trace_id) + .expect("Should exist"); + assert!(entry.pop().is_some()); + if entry.is_empty() { + // remove from map + client.outstanding_tasks_signing.remove(trace_id); + } + } + current_round_completed_count_signing += 1; + log::info!(target: "dkg", "RBX {}", current_round_completed_count_signing); + } }, } // at the end, check if the round is complete - if current_round_completed_count == self.config.n_clients { - current_round_completed_count = 0; // reset to 0 for next round - self.orchestrator_begin_next_round(&mut test_cases, round_id).await + let keygen_complete = + current_round_completed_count_keygen == self.config.n_clients; + if keygen_complete && matches!(intra_test_phase, IntraTestPhase::Keygen { .. 
}) + { + // keygen is complete, and we are ready to rotate into either the next + // session, or the next test phase (i.e., signing) + if intra_test_phase.unsigned_proposals_count() > 0 { + // since there are unsigned proposals, we need to keep the current + // session and begin the signing tests + intra_test_phase.keygen_to_signing(); + // only do this to create a new block header + intra_test_phase.increment_round_number(); + self.begin_next_test_print(intra_test_phase).await; + self.send_finality_notification(intra_test_phase).await; + continue + } else { + // there are no unsigned proposals, so we can move on to the next + // session + } + } + + let signing_complete = + if self.config.unsigned_proposals_per_session.unwrap_or(0) > 0 { + let current_round_unsigned_proposals_needed = + intra_test_phase.unsigned_proposals_count(); + current_round_completed_count_signing == + (self.config.threshold + 1) * + current_round_unsigned_proposals_needed + } else { + // pretend signing is complete to move on to the next session/round + true + }; + + if keygen_complete && signing_complete { + current_round_completed_count_keygen = 0; // reset to 0 for next round + current_round_completed_count_signing = 0; + intra_test_phase.increment_round_number(); + // clear all signing + keygen tests, since t+1 are needed, not n + self.clear_tasks().await; + self.orchestrator_begin_next_round(&mut test_cases, intra_test_phase).await + } }, o_state @ OrchestratorState::Complete => @@ -279,6 +379,12 @@ impl MockBlockchain { Err(generic_error("client_to_orchestrator_tx's all dropped")) } + async fn clear_tasks(&self) { + let mut clients = self.clients.write().await; + clients.values_mut().for_each(|client| client.outstanding_tasks_signing.clear()); + clients.values_mut().for_each(|client| client.outstanding_tasks_keygen.clear()); + } + fn generate_test_cases(&self) -> VecDeque<TestCase> { let mut test_cases = VecDeque::new(); @@ -302,35 +408,42 @@ impl MockBlockchain { async fn orchestrator_begin_next_round( &self, test_cases: &mut VecDeque<TestCase>, - round_number: &mut u64, + test_phase: &mut IntraTestPhase, ) { log::info!(target: "dkg", "[Orchestrator] Running next round!"); if let Some(next_case) = test_cases.pop_front() { - for x in (1..=3).rev() { - log::info!(target: "dkg", "[Orchestrator] Beginning next test in {x}"); - tokio::time::sleep(Duration::from_millis(1000)).await - } + // the first round will not have any unsigned proposals since we're waiting for keygen + // otherwise, preload the unsigned proposals + let round_number = test_phase.round_number(); + let unsigned_proposals = if round_number >= 1 { + let unsigned_proposals = + (0..self.config.unsigned_proposals_per_session.unwrap_or(0)) + .map(|idx| { + // ensure a unique unsigned proposal per session per proposal + let mut bytes = round_number.to_be_bytes().to_vec(); + bytes.extend_from_slice(&idx.to_be_bytes()); + bytes.extend_from_slice(Uuid::new_v4().as_ref()); + let hash = sha2_256(&bytes); + UnsignedProposal::testing_dummy(hash.to_vec()) + }) + .collect::<Vec<_>>(); + if !unsigned_proposals.is_empty() { + Some(unsigned_proposals) + } else { + None + } + } else { + None + }; + // begin the next test + test_phase.session_init(unsigned_proposals, next_case); + self.begin_next_test_print(test_phase).await; self.orchestrator_set_state(OrchestratorState::AwaitingRoundCompletion); - let trace_id = Uuid::new_v4(); // phase 1: send finality notifications to each client - let mut write = self.clients.write().await;
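The bookkeeping above encodes the round-completion rule: each of the `n` clients reports keygen exactly once per session, while signing produces one report per unsigned proposal from each of the `t + 1` selected signers. A small sketch of that arithmetic using the values from `config/test_n3t2.toml` (n = 3, t = 2, 10 proposals per session); the helper name is hypothetical, not the orchestrator's:

```rust
/// Expected per-round completion counts, mirroring the orchestrator's rule:
/// one keygen report per client, and (t + 1) signing reports per proposal.
fn expected_completions(n_clients: usize, threshold: usize, proposals: usize) -> (usize, usize) {
    let keygen = n_clients;
    let signing = (threshold + 1) * proposals;
    (keygen, signing)
}

fn main() {
    // n = 3, t = 2, 10 unsigned proposals per session (test_n3t2.toml)
    let (keygen, signing) = expected_completions(3, 2, 10);
    assert_eq!(keygen, 3);
    assert_eq!(signing, 30);
    println!("round completes after {keygen} keygen and {signing} signing reports");
}
```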
- let next_finality_notification = create_mocked_finality_blockchain_event(*round_number); - for (_id, client) in write.iter_mut() { - client.outstanding_tasks.insert(trace_id, next_case.clone()); - // First, send out a MockBlockChainEvent (happens before each round occurs) - client - .orchestrator_to_client_subtask - .send(OrchestratorToClientEvent::BlockChainEvent { - trace_id, - event: next_finality_notification.clone(), - }) - .unwrap(); - } - - // increment the round number - *round_number += 1; + self.send_finality_notification(test_phase).await; } else { log::info!(target: "dkg", "Orchestrator has finished running all tests"); self.orchestrator_set_state(OrchestratorState::Complete); @@ -338,12 +451,14 @@ impl MockBlockchain { // check to see the final state let read = self.clients.read().await; for (peer_id, client_state) in &*read { - let outstanding_tasks = &client_state.outstanding_tasks; + let outstanding_tasks_keygen = &client_state.outstanding_tasks_keygen; + let outstanding_tasks_signing = &client_state.outstanding_tasks_signing; // the client should have no outstanding tasks if successful - let success = outstanding_tasks.is_empty(); + let success = + outstanding_tasks_keygen.is_empty() && outstanding_tasks_signing.is_empty(); if !success { exit_code = 1; - log::info!(target: "dkg", "Peer {peer_id:?} final state FAILURE | Failed tasks: {outstanding_tasks:?}") + log::info!(target: "dkg", "Peer {peer_id:?} final state FAILURE | Failed tasks: KEYGEN: {outstanding_tasks_keygen:?}, SIGNING: {outstanding_tasks_signing:?}") } else { log::info!(target: "dkg", "Peer {peer_id:?} SUCCESS!") } @@ -360,9 +475,63 @@ impl MockBlockchain { } } + async fn begin_next_test_print(&self, test_phase: &IntraTestPhase) { + self.logger.clear_checkpoints(); + let test_round = test_phase.round_number(); + let test_phase = match test_phase { + IntraTestPhase::Keygen { .. } => "KEYGEN", + IntraTestPhase::Signing { .. } => "SIGNING", + }; + + for x in (1..=3).rev() { + log::info!(target: "dkg", "[Orchestrator] Beginning next {test_phase} test for session {test_round} in {x}"); + tokio::time::sleep(Duration::from_millis(1000)).await + } + } + fn orchestrator_set_state(&self, state: OrchestratorState) { self.orchestrator_state.store(state, Ordering::SeqCst); } + + async fn send_finality_notification(&self, test_phase: &mut IntraTestPhase) { + // phase 1: send finality notifications to each client + let round_number = test_phase.round_number(); + let trace_id = test_phase.trace_id(); + let next_case = test_phase.test_case().unwrap().clone(); + + let mut write = self.clients.write().await; + let next_finality_notification = create_mocked_finality_blockchain_event(round_number); + for (_id, client) in write.iter_mut() { + match test_phase { + IntraTestPhase::Keygen { trace_id, .. } => { + client.outstanding_tasks_keygen.insert(*trace_id, next_case.clone()); + // always set the unsigned props to empty to ensure no signing protocol executes + self.blockchain.set_unsigned_proposals(vec![]); + }, + IntraTestPhase::Signing { trace_id, queued_unsigned_proposals, .. 
} => { + if let Some(unsigned_propos) = queued_unsigned_proposals.clone() { + for _ in 0..unsigned_propos.len() { + client + .outstanding_tasks_signing + .entry(*trace_id) + .or_default() + .push(next_case.clone()); + } + self.blockchain.set_unsigned_proposals(unsigned_propos); + } + }, + } + + // Send out a MockBlockChainEvent + client + .orchestrator_to_client_subtask + .send(OrchestratorToClientEvent::BlockChainEvent { + trace_id, + event: next_finality_notification.clone(), + }) + .unwrap(); + } + } } fn generic_error>(err: T) -> std::io::Error { @@ -379,6 +548,8 @@ fn create_mocked_finality_blockchain_event(block_number: u64) -> MockBlockchainE let header = sp_runtime::generic::Header::::new_from_number(block_number); let mut slice = [0u8; 32]; slice[..8].copy_from_slice(&block_number.to_be_bytes()); + // add random uuid to ensure uniqueness + slice[8..24].copy_from_slice(&Uuid::new_v4().to_u128_le().to_be_bytes()); let hash = sp_runtime::testing::H256::from(slice); let summary = FinalizeSummary { header, finalized: vec![hash], stale_heads: vec![] }; @@ -387,3 +558,119 @@ fn create_mocked_finality_blockchain_event(block_number: u64) -> MockBlockchainE let notification = FinalityNotification::::from_summary(summary, tx); MockBlockchainEvent::FinalityNotification { notification } } + +pub trait MutableBlockchain: Clone + Send + 'static { + fn set_unsigned_proposals( + &self, + propos: Vec<(UnsignedProposal>, u64)>, + ); + fn set_pub_key(&self, session: u64, key: Vec); +} + +enum IntraTestPhase { + Keygen { + trace_id: Uuid, + queued_unsigned_proposals: + Option>>>, + round_number: u64, + test_case: Option, + }, + Signing { + trace_id: Uuid, + queued_unsigned_proposals: + Option>, u64)>>, + round_number: u64, + test_case: TestCase, + }, +} + +impl IntraTestPhase { + fn new() -> Self { + Self::Keygen { + trace_id: Uuid::new_v4(), + queued_unsigned_proposals: None, + round_number: 0, + test_case: None, + } + } + + fn keygen_to_signing(&mut self) { + if let Self::Keygen { trace_id, queued_unsigned_proposals, round_number, test_case } = self + { + let queued_unsigned_proposals = queued_unsigned_proposals.take(); + let mut queued_unsigned_proposals_with_expiry: Vec<( + UnsignedProposal>, + u64, + )> = Default::default(); + for prop in queued_unsigned_proposals.iter() { + for p in prop.iter() { + queued_unsigned_proposals_with_expiry.push((p.clone(), 1_u64)); + } + } + let test_case = test_case.take().unwrap(); + *self = Self::Signing { + trace_id: *trace_id, + queued_unsigned_proposals: Some(queued_unsigned_proposals_with_expiry), + round_number: *round_number, + test_case, + }; + } else { + panic!("Invalid call to keygen_to_signing") + } + } + + fn session_init( + &mut self, + unsigned_proposals: Option< + Vec>>, + >, + test_case: TestCase, + ) { + // rotate signing into keygen, or keygen into keygen if there are no signing tests. + // Does not increment the round number, as this should be done manually elsewhere + let round_number = self.round_number(); + *self = Self::Keygen { + trace_id: Uuid::new_v4(), + queued_unsigned_proposals: unsigned_proposals, + round_number, + test_case: Some(test_case), + }; + } + + fn increment_round_number(&mut self) { + match self { + Self::Keygen { round_number, .. } => *round_number += 1, + Self::Signing { round_number, .. } => *round_number += 1, + } + } + + fn round_number(&self) -> u64 { + match self { + Self::Keygen { round_number, .. } => *round_number, + Self::Signing { round_number, .. 
} => *round_number, + } + } + + fn unsigned_proposals_count(&self) -> usize { + match self { + Self::Keygen { queued_unsigned_proposals, .. } => + queued_unsigned_proposals.as_ref().map(|v| v.len()).unwrap_or(0), + Self::Signing { queued_unsigned_proposals, .. } => + queued_unsigned_proposals.as_ref().map(|v| v.len()).unwrap_or(0), + } + } + + fn trace_id(&self) -> Uuid { + match self { + Self::Keygen { trace_id, .. } => *trace_id, + Self::Signing { trace_id, .. } => *trace_id, + } + } + + fn test_case(&self) -> Option<&TestCase> { + match self { + Self::Keygen { test_case, .. } => test_case.as_ref(), + Self::Signing { test_case, .. } => Some(test_case), + } + } +} diff --git a/dkg-primitives/src/types.rs b/dkg-primitives/src/types.rs index 9c0c387e8..0be9b1287 100644 --- a/dkg-primitives/src/types.rs +++ b/dkg-primitives/src/types.rs @@ -57,6 +57,8 @@ pub struct DKGMessage { pub session_id: SessionId, /// enum for active or queued pub status: DKGMsgStatus, + /// The associated block ID, encoded as bytes + pub associated_block_id: Vec<u8>, } #[derive(Debug, Clone, Decode, Encode)] @@ -110,6 +112,22 @@ pub enum DKGMsgPayload { } impl DKGMsgPayload { + pub fn payload(&self) -> &Vec<u8> { + match self { + DKGMsgPayload::Offline(msg) => &msg.offline_msg, + DKGMsgPayload::Vote(msg) => &msg.partial_signature, + DKGMsgPayload::Keygen(msg) => &msg.keygen_msg, + DKGMsgPayload::PublicKeyBroadcast(msg) => &msg.pub_key, + DKGMsgPayload::MisbehaviourBroadcast(msg) => &msg.signature, + } + } + pub fn unsigned_proposal_hash(&self) -> Option<&[u8; 32]> { + match self { + DKGMsgPayload::Offline(msg) => Some(&msg.unsigned_proposal_hash), + DKGMsgPayload::Vote(msg) => Some(&msg.unsigned_proposal_hash), + _ => None, + } + } /// NOTE: this is hacky /// TODO: Change enums for keygen, offline, vote pub fn async_proto_only_get_sender_id(&self) -> Option<u16> { @@ -130,14 +148,6 @@ impl DKGMsgPayload { DKGMsgPayload::MisbehaviourBroadcast(_) => "misbehaviour", } } - - pub fn get_async_index(&self) -> u8 { - match self { - DKGMsgPayload::Offline(m) => m.async_index, - DKGMsgPayload::Vote(m) => m.async_index, - _ => 0, - } - } } #[derive(Debug, Clone, Decode, Encode)] @@ -158,8 +168,8 @@ pub struct DKGOfflineMessage { pub signer_set_id: SignerSetId, /// Serialized offline stage msg pub offline_msg: Vec<u8>, - /// Index in async protocols - pub async_index: u8, + /// The hash of the unsigned proposal this message is associated with + pub unsigned_proposal_hash: [u8; 32], } #[derive(Debug, Clone, Decode, Encode)] @@ -171,8 +181,8 @@ pub struct DKGVoteMessage { pub round_key: Vec<u8>, /// Serialized partial signature pub partial_signature: Vec<u8>, - /// Index in async protocols - pub async_index: u8, + /// The hash of the unsigned proposal this message is associated with + pub unsigned_proposal_hash: [u8; 32], } #[derive(Debug, Clone, Decode, Encode)] diff --git a/dkg-runtime-primitives/src/lib.rs b/dkg-runtime-primitives/src/lib.rs index b31105a65..c427fca2e 100644 --- a/dkg-runtime-primitives/src/lib.rs +++ b/dkg-runtime-primitives/src/lib.rs @@ -77,17 +77,11 @@ pub type MmrRootHash = H256; /// Authority set id starts with zero at genesis pub const GENESIS_AUTHORITY_SET_ID: u64 = 0; -/// Gossip message resending limit for outbound messages -pub const GOSSIP_MESSAGE_RESENDING_LIMIT: u8 = 5; - /// The keygen timeout limit in blocks before we consider misbehaviours pub const KEYGEN_TIMEOUT: u32 = 10; -/// The offline timeout limit in blocks before we consider misbehaviours -pub const OFFLINE_TIMEOUT: u32 = 2; - /// The sign timeout limit in blocks before we consider misbehaviours
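Replacing `async_index: u8` with `unsigned_proposal_hash: [u8; 32]` in the offline and vote messages lets the signing manager route each incoming message straight to the protocol instance working on that proposal, and queue messages whose protocol has not initialized yet. A simplified sketch of that routing idea; the types and names here are illustrative, not the gadget's actual ones:

```rust
use std::collections::HashMap;

type ProposalHash = [u8; 32];

/// Toy router: unsigned-proposal hash -> messages queued for that protocol.
struct SigningRouter {
    queues: HashMap<ProposalHash, Vec<Vec<u8>>>,
}

impl SigningRouter {
    fn deliver(&mut self, hash: ProposalHash, payload: Vec<u8>) {
        // Unknown hashes get a queue too, so messages that arrive before
        // their protocol initializes are enqueued rather than dropped.
        self.queues.entry(hash).or_default().push(payload);
    }
}

fn main() {
    let mut router = SigningRouter { queues: HashMap::new() };
    router.deliver([1u8; 32], b"offline stage msg".to_vec());
    router.deliver([1u8; 32], b"vote msg".to_vec());
    assert_eq!(router.queues[&[1u8; 32]].len(), 2);
}
```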
-pub const SIGN_TIMEOUT: u32 = 2; +/// The sign timeout limit in blocks before we consider proposal as stalled +pub const SIGN_TIMEOUT: u32 = 10; // Engine ID for DKG pub const DKG_ENGINE_ID: sp_runtime::ConsensusEngineId = *b"WDKG"; @@ -245,12 +239,12 @@ pub struct UnsignedProposal + Clone> { impl + Clone> UnsignedProposal { #[cfg(feature = "testing")] #[allow(clippy::unwrap_used)] // allow unwraps in tests - pub fn testing_dummy() -> Self { - let data = BoundedVec::try_from(vec![0, 1, 2]).unwrap(); + pub fn testing_dummy(data: Vec) -> Self { + let data = BoundedVec::try_from(data).unwrap(); Self { typed_chain_id: webb_proposals::TypedChainId::None, - key: DKGPayloadKey::RefreshVote(webb_proposals::Nonce(0)), - proposal: Proposal::Unsigned { kind: ProposalKind::Refresh, data }, + key: DKGPayloadKey::AnchorCreateProposal(webb_proposals::Nonce(0)), + proposal: Proposal::Unsigned { kind: ProposalKind::AnchorCreate, data }, } } pub fn hash(&self) -> Option<[u8; 32]> { @@ -301,7 +295,7 @@ sp_api::decl_runtime_apis! { /// Fetch DKG public key for current authorities fn dkg_pub_key() -> (AuthoritySetId, Vec); /// Get list of unsigned proposals - fn get_unsigned_proposals() -> Vec>; + fn get_unsigned_proposals() -> Vec<(UnsignedProposal, N)>; /// Get maximum delay before which an offchain extrinsic should be submitted fn get_max_extrinsic_delay(block_number: N) -> N; /// Current and Queued Authority Account Ids [/current_authorities/, /next_authorities/] diff --git a/dkg-runtime-primitives/src/utils.rs b/dkg-runtime-primitives/src/utils.rs index e3b7a4d18..062ebf569 100644 --- a/dkg-runtime-primitives/src/utils.rs +++ b/dkg-runtime-primitives/src/utils.rs @@ -131,6 +131,13 @@ pub enum SignatureError { } impl SignatureError { + pub fn ty(&self) -> &str { + match self { + Self::InvalidDKGKey(_) => "InvalidDKGKey", + Self::InvalidRecovery(_) => "InvalidRecovery", + Self::InvalidECDSASignature(_) => "InvalidECDSASignature", + } + } pub fn expected_public_key(&self) -> Option> { match self { Self::InvalidRecovery(v) => Some(v.expected.clone()), diff --git a/dkg-test-orchestrator/README.md b/dkg-test-orchestrator/README.md index 60783671f..7c108b658 100644 --- a/dkg-test-orchestrator/README.md +++ b/dkg-test-orchestrator/README.md @@ -57,10 +57,7 @@ positive_cases = 10 ## Running the orchestrator ``` -# Build the dkg-standalone node -cargo build --release -p dkg-standalone-node --features=integration-tests,testing - # run the orchestrator, making sure to use the proper config -cargo run --package dkg-test-orchestrator --features=debug-tracing -- --config /path/to/orchestrator_config.toml --tmp ./tmp +cargo run --package dkg-test-orchestrator --features=debug-tracing -- --config /path/to/orchestrator_config.toml --tmp ./tmp --clean # log files for each client will be individually present inside the ./tmp folder, denoted by their peer IDs -``` \ No newline at end of file +``` diff --git a/dkg-test-orchestrator/config/test_n3t2.toml b/dkg-test-orchestrator/config/test_n3t2.toml index 48095dc25..92edf21c8 100644 --- a/dkg-test-orchestrator/config/test_n3t2.toml +++ b/dkg-test-orchestrator/config/test_n3t2.toml @@ -6,6 +6,7 @@ n_clients = 3 threshold = 2 # the minimum latency in the network. 
Each client will receive updates at # min_simulated_latency + random(0, 0.25*min_simulated_latency) -min_simulated_latency = "100ms" +min_simulated_latency = "0ms" # The number of positive cases to run -positive_cases = 10 \ No newline at end of file +positive_cases = 10 +unsigned_proposals_per_session = 10 diff --git a/dkg-test-orchestrator/src/client.rs b/dkg-test-orchestrator/src/client.rs index fe83c9ddb..fbf6dbac1 100644 --- a/dkg-test-orchestrator/src/client.rs +++ b/dkg-test-orchestrator/src/client.rs @@ -12,6 +12,7 @@ use sc_utils::mpsc::*; use sp_api::{offchain::storage::InMemOffchainStorage, BlockT, ProvideRuntimeApi}; use crate::dummy_api::*; +use dkg_gadget::worker::TestClientPayload; use sp_runtime::testing::H256; use std::{collections::HashMap, sync::Arc}; use tokio::{net::ToSocketAddrs, sync::mpsc::UnboundedReceiver}; @@ -47,7 +48,7 @@ impl TestClient { mock_bc_addr: T, peer_id: PeerId, api: DummyApi, - mut from_dkg_worker: UnboundedReceiver<(uuid::Uuid, Result<(), String>)>, + mut from_dkg_worker: UnboundedReceiver, latest_test_uuid: Arc>>, logger: DebugLogger, ) -> std::io::Result { @@ -60,8 +61,14 @@ impl TestClient { let this = TestClient { inner: TestClientState { - finality_stream: Arc::new(MultiSubscribableStream::new("finality_stream")), - import_stream: Arc::new(MultiSubscribableStream::new("import_stream")), + finality_stream: Arc::new(MultiSubscribableStream::new( + "finality_stream", + logger.clone(), + )), + import_stream: Arc::new(MultiSubscribableStream::new( + "import_stream", + logger.clone(), + )), api, offchain_storage: Default::default(), local_test_cases: Arc::new(Mutex::new(Default::default())), @@ -71,14 +78,18 @@ impl TestClient { let _this_for_dkg_listener = this.clone(); let logger0 = logger.clone(); let dkg_worker_listener = async move { - while let Some((trace_id, result)) = from_dkg_worker.recv().await { + while let Some((trace_id, result, pub_key)) = from_dkg_worker.recv().await { logger0.info(format!( "The client {peer_id:?} has finished test {trace_id:?}. Result: {result:?}" )); - let packet = ProtocolPacket::ClientToBlockchain { - event: MockClientResponse { result, trace_id }, + let event = if let Some(pub_key) = pub_key { + MockClientResponse::Keygen { result, trace_id, pub_key } + } else { + MockClientResponse::Sign { result, trace_id } }; + + let packet = ProtocolPacket::ClientToBlockchain { event }; tx0.lock().await.send(packet).await.unwrap(); } @@ -90,6 +101,7 @@ impl TestClient { logger .info(format!("Complete: orchestrator<=>DKG communications for peer {peer_id:?}")); while let Some(packet) = rx.next().await { + logger.info("Received a packet"); match packet { ProtocolPacket::InitialHandshake => { // pong back the handshake response @@ -109,6 +121,7 @@ impl TestClient { .local_test_cases .lock() .insert(trace_id, None); + logger.info("Passing finality notification to the client"); this_for_orchestrator_rx.inner.finality_stream.send(notification); }, MockBlockchainEvent::ImportNotification { notification } => { @@ -117,6 +130,7 @@ impl TestClient { .local_test_cases .lock() .insert(trace_id, None); + logger.info("Passing import notification to the client"); this_for_orchestrator_rx.inner.import_stream.send(notification); }, } @@ -147,12 +161,13 @@ impl TestClient { // to arbitrarily subscribe to the stream and receive all events. 
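The client above folds the worker's test payload into the new two-variant `MockClientResponse`: a keygen result carries the freshly generated public key while a signing result does not, so the presence of `pub_key` selects the variant. A standalone sketch of that mapping, with a plain `u128` standing in for the `Uuid` trace ID:

```rust
type TraceId = u128; // stand-in for uuid::Uuid

enum MockClientResponse {
    Keygen { result: Result<(), String>, trace_id: TraceId, pub_key: Vec<u8> },
    Sign { result: Result<(), String>, trace_id: TraceId },
}

fn to_response(
    trace_id: TraceId,
    result: Result<(), String>,
    pub_key: Option<Vec<u8>>,
) -> MockClientResponse {
    match pub_key {
        // keygen is the only phase that produces a public key
        Some(pub_key) => MockClientResponse::Keygen { result, trace_id, pub_key },
        None => MockClientResponse::Sign { result, trace_id },
    }
}

fn main() {
    let keygen = to_response(1, Ok(()), Some(vec![0, 1, 2]));
    assert!(matches!(keygen, MockClientResponse::Keygen { .. }));
    let sign = to_response(2, Ok(()), None);
    assert!(matches!(sign, MockClientResponse::Sign { .. }));
}
```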
pub struct MultiSubscribableStream<T> { inner: parking_lot::RwLock<Vec<TracingUnboundedSender<T>>>, + logger: DebugLogger, tag: &'static str, } impl<T: Clone> MultiSubscribableStream<T> { - pub fn new(tag: &'static str) -> Self { - Self { inner: parking_lot::RwLock::new(vec![]), tag } + pub fn new(tag: &'static str, logger: DebugLogger) -> Self { + Self { inner: parking_lot::RwLock::new(vec![]), tag, logger } } pub fn subscribe(&self) -> TracingUnboundedReceiver<T> { @@ -164,9 +179,17 @@ impl MultiSubscribableStream { pub fn send(&self, t: T) { let mut lock = self.inner.write(); + + if lock.is_empty() { + self.logger.error("No subscribers to send to"); + } + assert!(!lock.is_empty()); + let count_init = lock.len(); // receiver will naturally drop when no longer used. - lock.retain(|tx| tx.unbounded_send(t.clone()).is_ok()) + lock.retain(|tx| tx.unbounded_send(t.clone()).is_ok()); + let diff = count_init - lock.len(); + self.logger.info(format!("Dropped {diff} subscribers (init: {count_init})")); } } diff --git a/dkg-test-orchestrator/src/dummy_api.rs b/dkg-test-orchestrator/src/dummy_api.rs index 1bbc2c6a1..7a23e076e 100644 --- a/dkg-test-orchestrator/src/dummy_api.rs +++ b/dkg-test-orchestrator/src/dummy_api.rs @@ -1,5 +1,5 @@ use dkg_gadget::debug_logger::DebugLogger; -use dkg_mock_blockchain::TestBlock; +use dkg_mock_blockchain::{MutableBlockchain, TestBlock}; use dkg_runtime_primitives::{crypto::AuthorityId, UnsignedProposal}; use hash_db::HashDB; use parking_lot::RwLock; @@ -30,6 +30,21 @@ pub struct DummyApiInner { pub authority_sets: HashMap<u64, BoundedVec<AuthorityId, MaxAuthorities>>, pub dkg_keys: HashMap<u64, Vec<u8>>, + pub unsigned_proposals: + Vec<(UnsignedProposal<MaxProposalLength>, u64)>, +} + +impl MutableBlockchain for DummyApi { + fn set_unsigned_proposals( + &self, + propos: Vec<(UnsignedProposal<MaxProposalLength>, u64)>, + ) { + self.inner.write().unsigned_proposals = propos; + } + + fn set_pub_key(&self, session: u64, key: Vec<u8>) { + self.inner.write().dkg_keys.insert(session, key); + } } impl DummyApi { @@ -44,7 +59,7 @@ impl DummyApi { let mut dkg_keys = HashMap::new(); // add an empty key for the genesis block to drive the DKG forward dkg_keys.insert(0 as _, vec![]); - for x in 1..=n_sessions { + for x in 1..=(n_sessions + 1) { // add dummy keys for all other sessions dkg_keys.insert(x as _, vec![0, 1, 2, 3, 4, 5]); } @@ -57,6 +72,7 @@ impl DummyApi { signing_n, authority_sets: HashMap::new(), dkg_keys, + unsigned_proposals: vec![], })), logger, } @@ -633,10 +649,9 @@ impl fn get_unsigned_proposals( &self, - _: H256, - ) -> ApiResult<Vec<UnsignedProposal<MaxProposalLength>>> { - // TODO: parameter to increase number of proposals - Ok(vec![UnsignedProposal::testing_dummy()]) + _hash: H256, + ) -> ApiResult<Vec<(UnsignedProposal<MaxProposalLength>, u64)>> { + Ok(self.inner.read().unsigned_proposals.clone()) } fn get_max_extrinsic_delay( diff --git a/dkg-test-orchestrator/src/in_memory_gossip_engine.rs b/dkg-test-orchestrator/src/in_memory_gossip_engine.rs index c549d40cd..b1d654dc9 100644 --- a/dkg-test-orchestrator/src/in_memory_gossip_engine.rs +++ b/dkg-test-orchestrator/src/in_memory_gossip_engine.rs @@ -1,26 +1,21 @@ use dkg_gadget::gossip_engine::GossipEngineIface; use dkg_primitives::types::{DKGError, SignedDKGMessage}; use dkg_runtime_primitives::crypto::AuthorityId; -use futures::Stream; use parking_lot::Mutex; -use std::{ - collections::{HashMap, VecDeque}, - pin::Pin, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; -use sp_keystore::SyncCryptoStore; +use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; -use crate::{client::MultiSubscribableStream, dummy_api::DummyApi}; +use crate::dummy_api::DummyApi; use dkg_gadget::debug_logger::DebugLogger; -use
dkg_runtime_primitives::{crypto, KEY_TYPE}; +use dkg_runtime_primitives::crypto; pub type PeerId = sc_network::PeerId; #[derive(Clone)] pub struct InMemoryGossipEngine { - clients: Arc<Mutex<HashMap<PeerId, VecDeque<SignedDKGMessage<AuthorityId>>>>>, - notifier: Arc<Mutex<HashMap<PeerId, MultiSubscribableStream<()>>>>, + clients: Arc<Mutex<HashMap<PeerId, UnboundedSender<SignedDKGMessage<AuthorityId>>>>>, + message_deliver_rx: Arc<Mutex<Option<UnboundedReceiver<SignedDKGMessage<AuthorityId>>>>>, this_peer: Option<PeerId>, this_peer_public_key: Option<AuthorityId>, // Maps Peer IDs to public keys @@ -38,7 +33,7 @@ impl InMemoryGossipEngine { pub fn new() -> Self { Self { clients: Arc::new(Mutex::new(Default::default())), - notifier: Arc::new(Mutex::new(Default::default())), + message_deliver_rx: Arc::new(Mutex::new(None)), this_peer: None, this_peer_public_key: None, mapping: Arc::new(Mutex::new(Default::default())), @@ -46,48 +41,54 @@ impl InMemoryGossipEngine { } } + #[allow(dead_code)] + fn public_to_peer_id(&self, public: AuthorityId) -> Option<PeerId> { + let mapping = self.mapping.lock(); + for (peer_id, public_key) in mapping.iter() { + if public_key == &public { + return Some(*peer_id) + } + } + + None + } + // registers the supplied PeerId and adds it to the hashmap pub fn clone_for_new_peer( &self, dummy_api: &DummyApi, n_blocks: u64, - keyring: dkg_gadget::keyring::Keyring, - key_store: &dyn SyncCryptoStore, + this_peer: PeerId, + public_key: crypto::Public, + logger: &DebugLogger, ) -> Self { - let public_key: crypto::Public = - SyncCryptoStore::ecdsa_generate_new(key_store, KEY_TYPE, Some(&keyring.to_seed())) - .ok() - .unwrap() - .into(); - - let this_peer = PeerId::random(); - let stream = MultiSubscribableStream::new("stream notifier"); self.mapping.lock().insert(this_peer, public_key.clone()); - assert!(self.clients.lock().insert(this_peer, Default::default()).is_none()); - self.notifier.lock().insert(this_peer, stream); // by default, add this peer to the best authorities // TODO: make this configurable let mut lock = dummy_api.inner.write(); // add +1 to allow calls for queued_authorities at block=n_blocks to not fail - for x in 0..n_blocks + 1 { - lock.authority_sets.entry(x).or_default().force_push(public_key.clone()); + for x in 0..=(n_blocks + 1) { + let entry = lock.authority_sets.entry(x).or_default(); + if !entry.contains(&public_key) { + entry.force_push(public_key.clone()); + } } + // give a new tx/rx handle to each new peer + let (message_deliver_tx, message_deliver_rx) = tokio::sync::mpsc::unbounded_channel(); + self.clients.lock().insert(this_peer, message_deliver_tx); + Self { clients: self.clients.clone(), - notifier: self.notifier.clone(), + message_deliver_rx: Arc::new(Mutex::new(Some(message_deliver_rx))), this_peer: Some(this_peer), this_peer_public_key: Some(public_key), mapping: self.mapping.clone(), - logger: None, + logger: Some(logger.clone()), } } - pub fn set_logger(&mut self, logger: DebugLogger) { - self.logger = Some(logger); - } - pub fn peer_id(&self) -> (&PeerId, &AuthorityId) { (self.this_peer.as_ref().unwrap(), self.this_peer_public_key.as_ref().unwrap()) } @@ -96,14 +97,6 @@ impl InMemoryGossipEngine { impl GossipEngineIface for InMemoryGossipEngine { type Clock = u128; - fn logger(&self) -> &DebugLogger { - self.logger.as_ref().unwrap() - } - - fn local_peer_id(&self) -> PeerId { - *self.peer_id().0 - } - /// Send a DKG message to a specific peer.
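The rewrite above replaces the old `VecDeque` + notifier pair with one unbounded tokio channel per peer: `clone_for_new_peer` registers a sender in the shared map and hands the new peer its own receiver. A minimal self-contained sketch of that delivery scheme, shown before the trait's `send`/`gossip` implementations continue below; `PeerId` and the message type are simplified stand-ins, and it assumes the `tokio` crate with the `rt` and `macros` features:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};

type PeerId = u64;
type Msg = String;

#[derive(Clone, Default)]
struct InMemoryGossip {
    clients: Arc<Mutex<HashMap<PeerId, UnboundedSender<Msg>>>>,
}

impl InMemoryGossip {
    /// Register a peer and hand back its personal receive queue.
    fn register(&self, peer: PeerId) -> UnboundedReceiver<Msg> {
        let (tx, rx) = unbounded_channel();
        self.clients.lock().unwrap().insert(peer, tx);
        rx
    }

    /// Broadcast to every peer except the sender.
    fn gossip(&self, from: PeerId, msg: &Msg) {
        for (peer, tx) in self.clients.lock().unwrap().iter() {
            if *peer != from {
                let _ = tx.send(msg.clone());
            }
        }
    }
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let engine = InMemoryGossip::default();
    let _rx_a = engine.register(1);
    let mut rx_b = engine.register(2);
    engine.gossip(1, &"keygen round 1".to_string());
    assert_eq!(rx_b.recv().await.as_deref(), Some("keygen round 1"));
}
```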
fn send( &self, @@ -114,63 +107,36 @@ impl GossipEngineIface for InMemoryGossipEngine { let tx = clients .get_mut(&recipient) .ok_or_else(|| error(format!("Peer {recipient:?} does not exist")))?; - tx.push_back(message); + tx.send(message).map_err(|_| error("Failed to send message"))?; - // notify the receiver - self.notifier.lock().get(&recipient).unwrap().send(()); Ok(()) } /// Send a DKG message to all peers. fn gossip(&self, message: SignedDKGMessage<AuthorityId>) -> Result<(), DKGError> { let mut clients = self.clients.lock(); - let notifiers = self.notifier.lock(); let (this_peer, _) = self.peer_id(); for (peer_id, tx) in clients.iter_mut() { if peer_id != this_peer { - tx.push_back(message.clone()); - } - } - - for (peer_id, notifier) in notifiers.iter() { - if peer_id != this_peer { - notifier.send(()); + tx.send(message.clone()) + .map_err(|_| error("Failed to send broadcast message"))?; } } Ok(()) } - /// A stream that sends messages when they are ready to be polled from the message queue. - fn message_available_notification(&self) -> Pin<Box<dyn Stream<Item = ()> + Send>> { - let (this_peer, _) = self.peer_id(); - let rx = self.notifier.lock().get(this_peer).unwrap().subscribe(); - Box::pin(rx) as _ - } - /// Peek the front of the message queue. - /// - /// Note that this will not remove the message from the queue, it will only return it. For - /// removing the message from the queue, use `acknowledge_last_message`. - /// - /// Returns `None` if there are no messages in the queue. - fn peek_last_message(&self) -> Option<SignedDKGMessage<AuthorityId>> { - let (this_peer, _) = self.peer_id(); - let clients = self.clients.lock(); - clients.get(this_peer).unwrap().front().cloned() + + fn get_stream(&self) -> Option<UnboundedReceiver<SignedDKGMessage<AuthorityId>>> { + self.message_deliver_rx.lock().take() } - /// Acknowledge the last message (the front of the queue) and mark it as processed, then - /// removes it from the queue. - fn acknowledge_last_message(&self) { - let (this_peer, _) = self.peer_id(); - let mut clients = self.clients.lock(); - clients.get_mut(this_peer).unwrap().pop_front(); + + fn local_peer_id(&self) -> PeerId { + *self.peer_id().0 } - /// Clears the Message Queue. - fn clear_queue(&self) { - let (this_peer, _) = self.peer_id(); - let mut clients = self.clients.lock(); - clients.get_mut(this_peer).unwrap().clear(); + fn logger(&self) -> &DebugLogger { + self.logger.as_ref().unwrap() } } diff --git a/dkg-test-orchestrator/src/main.rs b/dkg-test-orchestrator/src/main.rs index 5b31652cc..0909a4f66 100644 --- a/dkg-test-orchestrator/src/main.rs +++ b/dkg-test-orchestrator/src/main.rs @@ -5,12 +5,17 @@ //! In summary, running this test orchestrator is an "all in one" replacement //! for needing to run multiple clients. Each individual DKG node's stdout will be
piped to the temporary directory -#![allow(clippy::unwrap_used)] // allow unwraps in tests +#![allow(clippy::unwrap_used)] +extern crate core; + +// allow unwraps in tests use crate::in_memory_gossip_engine::InMemoryGossipEngine; use dkg_gadget::worker::TestBundle; use dkg_mock_blockchain::*; +use dkg_runtime_primitives::{crypto, KEY_TYPE}; use futures::TryStreamExt; use parking_lot::RwLock; +use sp_keystore::SyncCryptoStore; use std::{path::PathBuf, sync::Arc}; use structopt::StructOpt; @@ -26,12 +31,24 @@ mod in_memory_gossip_engine; struct Args { #[structopt(short = "c", long = "config")] // path to the configuration for the mock blockchain - config_path: PathBuf, + config_path: Option, #[structopt(short = "t", long = "tmp")] tmp_path: PathBuf, + #[structopt(long)] + clean: bool, + #[structopt(long)] + threshold: Option, + #[structopt(long)] + n: Option, + #[structopt(long)] + bind: Option, + #[structopt(long)] + n_tests: Option, + #[structopt(short = "p")] + proposals_per_test: Option, } -#[tokio::main] +#[tokio::main(flavor = "multi_thread")] async fn main() -> Result<(), Box> { let args = Args::from_args(); log::info!(target: "dkg", "Orchestrator args: {args:?}"); @@ -41,35 +58,29 @@ async fn main() -> Result<(), Box> { // before launching the DKGs, make sure to run to setup the logging dkg_logging::setup_simple_log(); - let data = tokio::fs::read_to_string(&args.config_path).await?; - let config: MockBlockchainConfig = toml::from_str(&data)?; + let config = args_to_config(&args)?; let n_clients = config.n_clients; let t = config.threshold; // set the number of blocks to the sum of the number of positive and negative cases // in other words, the each block gets 1 test case - let n_blocks = + let mut n_blocks = config.positive_cases + config.error_cases.as_ref().map(|r| r.len()).unwrap_or(0); + n_blocks *= 2; // 2 test rounds per test case let bind_addr = config.bind.clone(); - // first, spawn the orchestrator/mock-blockchain - let orchestrator_task = MockBlockchain::new(config).await?.execute(); - let orchestrator_handle = tokio::task::spawn(orchestrator_task); - // give time for the orchestrator to bind - tokio::time::sleep(std::time::Duration::from_millis(1000)).await; - - let children_processes_dkg_clients = futures::stream::FuturesUnordered::new(); // the gossip engine and the dummy api share a state between ALL clients in this process // we will use the SAME gossip engine for both keygen and signing - let gossip_engine = &InMemoryGossipEngine::new(); + let keygen_gossip_engine = &InMemoryGossipEngine::new(); + let signing_gossip_engine = &InMemoryGossipEngine::new(); let keygen_t = t as u16; let keygen_n = n_clients as u16; let signing_t = t as u16; let signing_n = n_clients as u16; // logging for the dummy api only - let output = std::fs::File::create(args.tmp_path.join("dummy_api.log"))?; + let output = args.tmp_path.join("dummy_api.log"); let dummy_api_logger = - dkg_gadget::debug_logger::DebugLogger::new("dummy-api".to_string(), Some(output)); + dkg_gadget::debug_logger::DebugLogger::new("dummy-api".to_string(), Some(output))?; let api = &crate::dummy_api::DummyApi::new( keygen_t, @@ -80,31 +91,54 @@ async fn main() -> Result<(), Box> { dummy_api_logger.clone(), ); + // first, spawn the orchestrator/mock-blockchain + let orchestrator_task = MockBlockchain::new(config, api.clone(), dummy_api_logger.clone()) + .await? 
+ .execute(); + let orchestrator_handle = tokio::task::spawn(orchestrator_task); + // give time for the orchestrator to bind + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + + let children_processes_dkg_clients = futures::stream::FuturesUnordered::new(); + // setup the clients for idx in 0..n_clients { let latest_header = Arc::new(RwLock::new(None)); let current_test_id = Arc::new(RwLock::new(None)); let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); // pass the dummy api logger initially, with the intent of overwriting it later + let logger = dkg_gadget::debug_logger::DebugLogger::new("pre-init", None)?; let mut key_store: dkg_gadget::keystore::DKGKeystore = - dkg_gadget::keystore::DKGKeystore::new_default(dummy_api_logger.clone()); + dkg_gadget::keystore::DKGKeystore::new_default(logger.clone()); let keyring = dkg_gadget::keyring::Keyring::Custom(idx as _); - let mut keygen_gossip_engine = gossip_engine.clone_for_new_peer( + + let public_key: crypto::Public = SyncCryptoStore::ecdsa_generate_new( + key_store.as_dyn_crypto_store().unwrap(), + KEY_TYPE, + Some(&keyring.to_seed()), + ) + .ok() + .unwrap() + .into(); + let peer_id = PeerId::random(); + // output the logs for this specific peer to a file + let output = args.tmp_path.join(format!("{peer_id}.log")); + let logger = dkg_gadget::debug_logger::DebugLogger::new(peer_id, Some(output))?; + let keygen_gossip_engine = keygen_gossip_engine.clone_for_new_peer( api, n_blocks as _, - keyring, - key_store.as_dyn_crypto_store().unwrap(), + peer_id, + public_key.clone(), + &logger, + ); + let signing_gossip_engine = signing_gossip_engine.clone_for_new_peer( + api, + n_blocks as _, + peer_id, + public_key, + &logger, ); - let mut signing_gossip_engine = keygen_gossip_engine.clone(); - // set the loggers for the gossip engines - let (peer_id, _public_key) = keygen_gossip_engine.peer_id(); - let peer_id = *peer_id; - // output the logs for this specific peer to a file - let output = std::fs::File::create(args.tmp_path.join(format!("{peer_id}.log")))?; - let logger = dkg_gadget::debug_logger::DebugLogger::new(peer_id, Some(output)); - keygen_gossip_engine.set_logger(logger.clone()); - signing_gossip_engine.set_logger(logger.clone()); key_store.set_logger(logger.clone()); let client = Arc::new( @@ -143,8 +177,9 @@ async fn main() -> Result<(), Box> { _marker: Default::default(), }; - let worker = dkg_gadget::worker::DKGWorker::new(dkg_worker_params, logger); + let worker = dkg_gadget::worker::DKGWorker::new(dkg_worker_params, logger.clone()); worker.run().await; + logger.error("DKG Worker ended"); Err::<(), _>(std::io::Error::new( std::io::ErrorKind::Other, format!("Worker for peer {peer_id:?} ended"), @@ -166,15 +201,70 @@ async fn main() -> Result<(), Box> { } fn validate_args(args: &Args) -> Result<(), String> { - let config_path = &args.config_path; - let tmp_path = &args.tmp_path; - if !config_path.is_file() { - return Err(format!("{:?} is not a valid config path", args.config_path)) + if let Some(config_path) = &args.config_path { + if !config_path.is_file() { + return Err(format!("{:?} is not a valid config path", args.config_path)) + } } + let tmp_path = &args.tmp_path; + if !tmp_path.is_dir() { return Err(format!("{:?} is not a valid tmp path", args.tmp_path)) } + if args.proposals_per_test.is_some() || + args.n.is_some() || + args.threshold.is_some() || + args.n_tests.is_some() || + args.bind.is_some() + { + if args.proposals_per_test.is_none() || + args.n.is_none() || + args.threshold.is_none() || + 
args.n_tests.is_none() || + args.bind.is_none() + { + return Err("If any of the following arguments are specified, all of them must be specified: proposals-per-test, n, threshold, n-tests, bind".to_string()) + } + + if args.config_path.is_some() { + return Err("Either the config path or a manual set of args must be passed".to_string()) + } + } + + if args.clean { + std::fs::remove_dir_all(tmp_path) + .map_err(|err| format!("Failed to clean tmp path: {err:?}"))?; + std::fs::create_dir(tmp_path) + .map_err(|err| format!("Failed to create tmp path: {err:?}"))?; + } + Ok(()) } + +fn args_to_config(args: &Args) -> Result { + if let Some(config_path) = &args.config_path { + let config = std::fs::read_to_string(config_path) + .map_err(|err| format!("Failed to read config file: {err:?}"))?; + let config: MockBlockchainConfig = toml::from_str(&config) + .map_err(|err| format!("Failed to parse config file: {err:?}"))?; + Ok(config) + } else { + let n = args.n.unwrap() as _; + let threshold = args.threshold.unwrap() as _; + let n_tests = args.n_tests.unwrap(); + let proposals_per_test = args.proposals_per_test.unwrap(); + let bind = args.bind.clone().unwrap(); + let config = MockBlockchainConfig { + threshold, + min_simulated_latency: None, + positive_cases: n_tests, + error_cases: None, + bind, + n_clients: n, + unsigned_proposals_per_session: Some(proposals_per_test), + }; + Ok(config) + } +} diff --git a/dkg-test-suite/scripts/submitProposals.ts b/dkg-test-suite/scripts/submitProposals.ts index 41d581748..1c702c102 100755 --- a/dkg-test-suite/scripts/submitProposals.ts +++ b/dkg-test-suite/scripts/submitProposals.ts @@ -13,11 +13,11 @@ import { async function run() { const api = await ApiPromise.create({ - provider: new WsProvider('ws://127.0.0.1:9944'), + provider: new WsProvider('wss://tangle-standalone-archive.webb.tools'), }); await api.isReady; const keyring = new Keyring({ type: 'sr25519' }); - const sudoAccount = keyring.addFromUri('//Alice'); + const sudoAccount = keyring.addFromUri('//TangleStandaloneSudo'); // 000000000000d30c8839c1145609e564b986f667b273ddcb8496010000001389 const resourceId = ResourceId.newFromContractAddress( diff --git a/dkg-test-suite/tests/e2e/keygenChanges.test.ts b/dkg-test-suite/tests/e2e/keygenChanges.test.ts index 955026998..6581e1953 100644 --- a/dkg-test-suite/tests/e2e/keygenChanges.test.ts +++ b/dkg-test-suite/tests/e2e/keygenChanges.test.ts @@ -54,11 +54,12 @@ describe('Keygen Changes Flow', function () { if (fs.existsSync(tmpDir)) { fs.rmSync(tmpDir, { recursive: true }); } - aliceNode = startStandaloneNode('alice', { tmp: true, printLogs: true }); - bobNode = startStandaloneNode('bob', { tmp: true, printLogs: true }); + aliceNode = startStandaloneNode('alice', { tmp: true, printLogs: true, output_dir: tmpDir }); + bobNode = startStandaloneNode('bob', { tmp: true, printLogs: true, output_dir: tmpDir }); charlieNode = startStandaloneNode('charlie', { tmp: true, printLogs: true, + output_dir: tmpDir, }); api = await ApiPromise.create({ diff --git a/dkg-test-suite/tests/e2e/misbehaviourReporting.test.ts b/dkg-test-suite/tests/e2e/misbehaviourReporting.test.ts index 68999b250..0a17f6ae5 100644 --- a/dkg-test-suite/tests/e2e/misbehaviourReporting.test.ts +++ b/dkg-test-suite/tests/e2e/misbehaviourReporting.test.ts @@ -64,11 +64,12 @@ describe.skip('Misbehavior Flow', function () { if (fs.existsSync(tmpDir)) { fs.rmSync(tmpDir, { recursive: true }); } - aliceNode = startStandaloneNode('alice', { tmp: true, printLogs: false }); - bobNode = 
startStandaloneNode('bob', { tmp: true, printLogs: false }); + aliceNode = startStandaloneNode('alice', { tmp: true, printLogs: false, output_dir: tmpDir }); + bobNode = startStandaloneNode('bob', { tmp: true, printLogs: false, output_dir: tmpDir }); charlieNode = startStandaloneNode('charlie', { tmp: true, printLogs: false, + output_dir: tmpDir, }); api = await ApiPromise.create({ diff --git a/dkg-test-suite/tests/updateAnchorProposal.test.ts b/dkg-test-suite/tests/updateAnchorProposal.test.ts index af7cd44c1..c0590040e 100644 --- a/dkg-test-suite/tests/updateAnchorProposal.test.ts +++ b/dkg-test-suite/tests/updateAnchorProposal.test.ts @@ -100,7 +100,7 @@ it('should be able to sign anchor update proposal', async () => { newMerkleRoot1._hex, srcResourceId ); - + // register proposal resourceId. await registerResourceId(polkadotApi, anchorProposal.header.resourceId); // get alice account to send the transaction to the dkg node. diff --git a/dkg-test-suite/tests/utils/setup.ts b/dkg-test-suite/tests/utils/setup.ts index 93adb8e9b..729ee2d04 100644 --- a/dkg-test-suite/tests/utils/setup.ts +++ b/dkg-test-suite/tests/utils/setup.ts @@ -147,12 +147,14 @@ type StartOption = { tmp: boolean; printLogs: boolean; chain?: 'dev' | 'local'; + output_dir?: string | null; }; const defaultOptions: StartOption = { tmp: true, printLogs: false, chain: 'local', + output_dir: null }; export function startStandaloneNode( authority: 'alice' | 'bob' | 'charlie' | 'dave' | 'eve' | 'ferdie', @@ -186,6 +188,7 @@ export function startStandaloneNode( `--ws-port=${ports[authority].ws}`, `--rpc-port=${ports[authority].http}`, `--port=${ports[authority].p2p}`, + options.output_dir ? `--output-path=${options.output_dir}/${authority}` : ``, ...(authority == 'alice' ? [ '--node-key', @@ -276,17 +279,10 @@ export async function waitForEvent( const eventsValue = api.registry.createType("Vec", events.toU8a()); // Loop through the Vec for (var event of eventsValue) { - console.log("Checking event: ", event); const section = event.event.section; const method = event.event.method; const data = event.event.data; - console.log("Event section = ", section, ", method = ", method); - console.log("Event musteq = ", pallet, ", method = ", eventVariant); if (section === pallet && method === eventVariant) { - console.log( - `Event ($section}.${method}) =>`, - data - ); if (dataQuery) { for (const value of data) { const jsonData = value.toJSON(); @@ -416,4 +412,4 @@ export function ethAddressFromUncompressedPublicKey( const pubKeyHash = ethers.utils.keccak256(publicKey); // we hash it. const address = ethers.utils.getAddress(`0x${pubKeyHash.slice(-40)}`); // take the last 20 bytes and convert it to an address. 
return address as `0x${string}`; -} \ No newline at end of file +} diff --git a/dkg-test-suite/tests/utils/util.ts b/dkg-test-suite/tests/utils/util.ts index 44c846296..c3061dc9d 100644 --- a/dkg-test-suite/tests/utils/util.ts +++ b/dkg-test-suite/tests/utils/util.ts @@ -58,9 +58,9 @@ export const executeBefore = async () => { if (fs.existsSync(tmpDir)) { fs.rmSync(tmpDir, { recursive: true }); } - aliceNode = startStandaloneNode('alice', { tmp: true, printLogs: true }); - bobNode = startStandaloneNode('bob', { tmp: true, printLogs: true }); - charlieNode = startStandaloneNode('charlie', { tmp: true, printLogs: true }); + aliceNode = startStandaloneNode('alice', { tmp: true, printLogs: true, output_dir: tmpDir }); + bobNode = startStandaloneNode('bob', { tmp: true, printLogs: true, output_dir: tmpDir }); + charlieNode = startStandaloneNode('charlie', { tmp: true, printLogs: true, output_dir: tmpDir }); console.log('started alice, bob, charlie nodes'); diff --git a/pallets/dkg-proposal-handler/src/lib.rs b/pallets/dkg-proposal-handler/src/lib.rs index a51964888..c0567d4a9 100644 --- a/pallets/dkg-proposal-handler/src/lib.rs +++ b/pallets/dkg-proposal-handler/src/lib.rs @@ -317,6 +317,7 @@ pub mod pallet { /// This function will look for any unsigned proposals past `UnsignedProposalExpiry` /// and remove storage. fn on_idle(now: T::BlockNumber, mut remaining_weight: Weight) -> Weight { + use dkg_runtime_primitives::ProposalKind::*; // fetch all unsigned proposals let unsigned_proposals: Vec<_> = UnsignedProposalQueue::::iter().collect(); let unsigned_proposals_len = unsigned_proposals.len() as u64; @@ -325,7 +326,14 @@ pub mod pallet { // filter out proposals to delete let unsigned_proposal_past_expiry = unsigned_proposals.into_iter().filter( - |(_, _, StoredUnsignedProposal { timestamp, .. })| { + |(_, _, StoredUnsignedProposal { proposal, timestamp })| { + let kind = proposal.kind(); + + // Skip expiration for keygen related proposals + match kind { + Refresh | ProposerSetUpdate => return false, + _ => (), + }; let time_passed = now.checked_sub(timestamp).unwrap_or_default(); time_passed > T::UnsignedProposalExpiry::get() }, @@ -392,10 +400,11 @@ pub mod pallet { }); log::error!( target: "runtime::dkg_proposal_handler", - "Invalid proposal signature with kind: {:?}, data: {:?}, sig: {:?}", + "Invalid proposal signature with kind: {:?}, data: {:?}, sig: {:?} | ERR: {}", kind, data, - signature + signature, + e.ty() ); // skip it. 
continue @@ -731,14 +740,17 @@ impl Pallet { // *** API methods *** pub fn get_unsigned_proposals( - ) -> Vec> { + ) -> Vec<(dkg_runtime_primitives::UnsignedProposal, T::BlockNumber)> { UnsignedProposalQueue::::iter() .map(|(typed_chain_id, key, stored_unsigned_proposal)| { - dkg_runtime_primitives::UnsignedProposal { - typed_chain_id, - key, - proposal: stored_unsigned_proposal.proposal, - } + ( + dkg_runtime_primitives::UnsignedProposal { + typed_chain_id, + key, + proposal: stored_unsigned_proposal.proposal, + }, + stored_unsigned_proposal.timestamp, + ) }) .collect() } diff --git a/scripts/harness_stress_test.sh b/scripts/harness_stress_test.sh new file mode 100755 index 000000000..962dd8704 --- /dev/null +++ b/scripts/harness_stress_test.sh @@ -0,0 +1,5 @@ +#!/bin/sh +set -e +for i in {1..10}; do + cargo run --package dkg-test-orchestrator --features=debug-tracing -- --config ./dkg-test-orchestrator/config/test_n3t2.toml --tmp ./tmp --clean +done || exit diff --git a/scripts/run-standalone.sh b/scripts/run-standalone.sh index 8b60d53b0..515fe413a 100755 --- a/scripts/run-standalone.sh +++ b/scripts/run-standalone.sh @@ -1,5 +1,18 @@ #!/usr/bin/env bash set -e +# ensure we kill all child processes when we exit +trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT + +#define default ports +ports=(30304 30305 30308) + +#check to see process is not orphaned or already running +for port in ${ports[@]}; do + if [[ $(lsof -i -P -n | grep LISTEN | grep :$port) ]]; then + echo "Port $port has a running process. Exiting" + exit -1 + fi +done CLEAN=${CLEAN:-false} # Parse arguments for the script @@ -31,20 +44,20 @@ cd "$PROJECT_ROOT" echo "*** Start Webb DKG Node ***" # Alice -./target/release/dkg-standalone-node --tmp --chain local --validator -lerror --alice \ +./target/release/dkg-standalone-node --tmp --chain local --validator -lerror --alice --output-path=./tmp/alice/output.log \ --rpc-cors all --ws-external \ - --port 30304 \ + --port ${ports[0]} \ --ws-port 9944 & # Bob -./target/release/dkg-standalone-node --tmp --chain local --validator -lerror --bob \ +./target/release/dkg-standalone-node --tmp --chain local --validator -lerror --bob --output-path=./tmp/bob/output.log \ --rpc-cors all --ws-external \ - --port 30305 \ + --port ${ports[1]} \ --ws-port 9945 & # Charlie -./target/release/dkg-standalone-node --tmp --chain local --validator -linfo --charlie \ +./target/release/dkg-standalone-node --tmp --chain local --validator -linfo --charlie --output-path=./tmp/charlie/output.log \ --rpc-cors all --ws-external \ --ws-port 9948 \ - --port 30308 \ + --port ${ports[2]} \ -ldkg=debug \ -ldkg_gadget::worker=debug \ -lruntime::dkg_metadata=debug \ diff --git a/standalone/node/src/cli.rs b/standalone/node/src/cli.rs index df75ccfc5..5bcd8ab9e 100644 --- a/standalone/node/src/cli.rs +++ b/standalone/node/src/cli.rs @@ -19,9 +19,10 @@ use sc_cli::RunCmd; pub struct Cli { #[clap(subcommand)] pub subcommand: Option, - #[clap(flatten)] pub run: RunCmd, + #[arg(long, short = 'o')] + pub output_path: Option, } #[derive(Debug, clap::Subcommand)] diff --git a/standalone/node/src/command.rs b/standalone/node/src/command.rs index faa53e73a..6bf6b5fc9 100644 --- a/standalone/node/src/command.rs +++ b/standalone/node/src/command.rs @@ -200,8 +200,16 @@ pub fn run() -> sc_cli::Result<()> { }, None => { let runner = cli.create_runner(&cli.run)?; + if let Some(output_path) = &cli.output_path { + let mut dir = output_path.clone(); + dir.pop(); // get the dir + if !dir.exists() { + 
std::fs::create_dir_all(dir)?; + } + } + runner.run_node_until_exit(|config| async move { - service::new_full(config).map_err(sc_cli::Error::Service) + service::new_full(config, cli.output_path).map_err(sc_cli::Error::Service) }) }, } diff --git a/standalone/node/src/service.rs b/standalone/node/src/service.rs index e24b0df2f..c98b10a38 100644 --- a/standalone/node/src/service.rs +++ b/standalone/node/src/service.rs @@ -15,12 +15,14 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. +use dkg_gadget::debug_logger::DebugLogger; use dkg_standalone_runtime::{self, opaque::Block, RuntimeApi}; use sc_client_api::BlockBackend; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; pub use sc_executor::NativeElseWasmExecutor; use sc_finality_grandpa::SharedVoterState; use sc_keystore::LocalKeystore; +use sc_network_common::service::NetworkStateInfo; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; @@ -172,7 +174,10 @@ fn remote_keystore(_url: &str) -> Result, &'static str> { } /// Builds a new service for a full client. -pub fn new_full(mut config: Configuration) -> Result { +pub fn new_full( + mut config: Configuration, + debug_output: Option, +) -> Result { let sc_service::PartialComponents { client, backend, @@ -260,6 +265,10 @@ pub fn new_full(mut config: Configuration) -> Result Some(keystore_container.sync_keystore()), ); + // setup debug logging + let local_peer_id = network.local_peer_id(); + let debug_logger = DebugLogger::new(local_peer_id, debug_output)?; + let dkg_params = dkg_gadget::DKGParams { client: client.clone(), backend: backend.clone(), @@ -268,6 +277,7 @@ pub fn new_full(mut config: Configuration) -> Result prometheus_registry: prometheus_registry.clone(), local_keystore: keystore_container.local_keystore(), _block: std::marker::PhantomData::, + debug_logger, }; // Start the DKG gadget. diff --git a/standalone/runtime/src/lib.rs b/standalone/runtime/src/lib.rs index 503e6a501..3b0d239cd 100644 --- a/standalone/runtime/src/lib.rs +++ b/standalone/runtime/src/lib.rs @@ -1003,7 +1003,7 @@ impl_runtime_apis! { as EstimateNextSessionRotation>::estimate_current_session_progress(block_number).0 } - fn get_unsigned_proposals() -> Vec> { + fn get_unsigned_proposals() -> Vec<(UnsignedProposal, BlockNumber)> { DKGProposalHandler::get_unsigned_proposals() }
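With `get_unsigned_proposals` now returning `(proposal, submission block)` pairs, the handler's `on_idle` sweep can expire stale proposals while exempting the keygen-related kinds, as the hunk above shows. A compact sketch of that expiry rule with plain stand-in types; the names here are illustrative, not the pallet's actual items:

```rust
#[derive(Clone, Copy, PartialEq)]
enum Kind {
    Refresh,
    ProposerSetUpdate,
    AnchorCreate,
}

struct Unsigned {
    kind: Kind,
    timestamp: u64, // block number at which the proposal was queued
}

/// Mirrors the on_idle filter: keygen-related proposals never expire,
/// everything else expires once it has waited longer than `expiry` blocks.
fn past_expiry(p: &Unsigned, now: u64, expiry: u64) -> bool {
    match p.kind {
        Kind::Refresh | Kind::ProposerSetUpdate => false,
        _ => now.saturating_sub(p.timestamp) > expiry,
    }
}

fn main() {
    let stale = Unsigned { kind: Kind::AnchorCreate, timestamp: 1 };
    let exempt = Unsigned { kind: Kind::Refresh, timestamp: 1 };
    assert!(past_expiry(&stale, 100, 10));
    assert!(!past_expiry(&exempt, 100, 10));
}
```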