diff --git a/.github/workflows/benchmarks-networking.yml b/.github/workflows/benchmarks-networking.yml index 79494b9a015c..8f4246c79548 100644 --- a/.github/workflows/benchmarks-networking.yml +++ b/.github/workflows/benchmarks-networking.yml @@ -92,6 +92,7 @@ jobs: uses: benchmark-action/github-action-benchmark@v1 with: tool: "cargo" + name: ${{ env.BENCH }} output-file-path: ./charts/${{ env.BENCH }}.txt benchmark-data-dir-path: ./bench/${{ env.BENCH }} github-token: ${{ steps.app-token.outputs.token }} @@ -103,6 +104,7 @@ jobs: uses: benchmark-action/github-action-benchmark@v1 with: tool: "cargo" + name: ${{ env.BENCH }} output-file-path: ./charts/${{ env.BENCH }}.txt benchmark-data-dir-path: ./bench/${{ env.BENCH }} github-token: ${{ steps.app-token.outputs.token }} diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index 0222b2aa91e2..035b547603e1 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -149,7 +149,6 @@ jobs: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - SKIP_WASM_BUILD: 1 steps: - name: Checkout sources uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 14a235bcda86..878f241317a4 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -160,39 +160,6 @@ zombienet-polkadot-functional-0010-validator-disabling: --local-dir="${LOCAL_DIR}/functional" --test="0010-validator-disabling.zndsl" -.zombienet-polkadot-functional-0011-async-backing-6-seconds-rate: - extends: - - .zombienet-polkadot-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/functional" - --test="0011-async-backing-6-seconds-rate.zndsl" - -zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: - extends: - - .zombienet-polkadot-common - variables: - FORCED_INFRA_INSTANCE: "spot-iops" - before_script: - - !reference [ .zombienet-polkadot-common, before_script ] - - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/elastic_scaling" - --test="0001-basic-3cores-6s-blocks.zndsl" - -.zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: - extends: - - .zombienet-polkadot-common - before_script: - - !reference [ .zombienet-polkadot-common, before_script ] - - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/elastic_scaling" - --test="0002-elastic-scaling-doesnt-break-parachains.zndsl" - - .zombienet-polkadot-functional-0012-spam-statement-distribution-requests: extends: - .zombienet-polkadot-common @@ -236,14 +203,6 @@ zombienet-polkadot-functional-0015-coretime-shared-core: --local-dir="${LOCAL_DIR}/functional" --test="0016-approval-voting-parallel.zndsl" -.zombienet-polkadot-functional-0017-sync-backing: - extends: - - .zombienet-polkadot-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/functional" - --test="0017-sync-backing.zndsl" - 
zombienet-polkadot-functional-0018-shared-core-idle-parachain: extends: - .zombienet-polkadot-common @@ -386,6 +345,8 @@ zombienet-polkadot-malus-0001-dispute-valid: --local-dir="${LOCAL_DIR}/integrationtests" --test="0001-dispute-valid-block.zndsl" +# sdk tests + .zombienet-polkadot-coretime-revenue: extends: - .zombienet-polkadot-common @@ -411,8 +372,78 @@ zombienet-polkadot-elastic-scaling-slot-based-3cores: - !reference [ ".zombienet-polkadot-common", "before_script" ] - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export CUMULUS_IMAGE="docker.io/paritypr/test-parachain:${PIPELINE_IMAGE_TAG}" + - export X_INFRA_INSTANCE=spot # use spot by default script: # we want to use `--no-capture` in zombienet tests. - unset NEXTEST_FAILURE_OUTPUT - unset NEXTEST_SUCCESS_OUTPUT - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::slot_based_3cores::slot_based_3cores_test + +zombienet-polkadot-elastic-scaling-doesnt-break-parachains: + extends: + - .zombienet-polkadot-common + needs: + - job: build-polkadot-zombienet-tests + artifacts: true + before_script: + - !reference [ ".zombienet-polkadot-common", "before_script" ] + - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - export X_INFRA_INSTANCE=spot # use spot by default + variables: + KUBERNETES_CPU_REQUEST: "1" + script: + # we want to use `--no-capture` in zombienet tests. + - unset NEXTEST_FAILURE_OUTPUT + - unset NEXTEST_SUCCESS_OUTPUT + - RUST_LOG=info,zombienet_=trace cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::doesnt_break_parachains::doesnt_break_parachains_test + +zombienet-polkadot-elastic-scaling-basic-3cores: + extends: + - .zombienet-polkadot-common + needs: + - job: build-polkadot-zombienet-tests + artifacts: true + before_script: + - !reference [ ".zombienet-polkadot-common", "before_script" ] + - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - export CUMULUS_IMAGE="${COL_IMAGE}" + - export X_INFRA_INSTANCE=spot # use spot by default + script: + # we want to use `--no-capture` in zombienet tests. + - unset NEXTEST_FAILURE_OUTPUT + - unset NEXTEST_SUCCESS_OUTPUT + - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::basic_3cores::basic_3cores_test + +zombienet-polkadot-functional-sync-backing: + extends: + - .zombienet-polkadot-common + needs: + - job: build-polkadot-zombienet-tests + artifacts: true + before_script: + - !reference [ ".zombienet-polkadot-common", "before_script" ] + - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + # Hardcoded to an old polkadot-parachain image, pre async backing. + - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:master-99623e62" + - export X_INFRA_INSTANCE=spot # use spot by default + script: + # we want to use `--no-capture` in zombienet tests. 
+ - unset NEXTEST_FAILURE_OUTPUT + - unset NEXTEST_SUCCESS_OUTPUT + - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- functional::sync_backing::sync_backing_test + +zombienet-polkadot-functional-async-backing-6-seconds-rate: + extends: + - .zombienet-polkadot-common + needs: + - job: build-polkadot-zombienet-tests + artifacts: true + before_script: + - !reference [ ".zombienet-polkadot-common", "before_script" ] + - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - export X_INFRA_INSTANCE=spot # use spot by default + script: + # we want to use `--no-capture` in zombienet tests. + - unset NEXTEST_FAILURE_OUTPUT + - unset NEXTEST_SUCCESS_OUTPUT + - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- functional::async_backing_6_seconds_rate::async_backing_6_seconds_rate_test diff --git a/Cargo.lock b/Cargo.lock index ef0eb9f7e3dd..4e2272bdc988 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6213,7 +6213,7 @@ dependencies = [ "regex", "syn 2.0.87", "termcolor", - "toml 0.8.12", + "toml 0.8.19", "walkdir", ] @@ -9777,29 +9777,6 @@ dependencies = [ "libc", ] -[[package]] -name = "libp2p" -version = "0.52.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94495eb319a85b70a68b85e2389a95bb3555c71c49025b78c691a854a7e6464" -dependencies = [ - "bytes", - "either", - "futures", - "futures-timer", - "getrandom", - "instant", - "libp2p-allow-block-list 0.2.0", - "libp2p-connection-limits 0.2.1", - "libp2p-core 0.40.1", - "libp2p-identity", - "libp2p-swarm 0.43.7", - "multiaddr 0.18.1", - "pin-project", - "rw-stream-sink", - "thiserror", -] - [[package]] name = "libp2p" version = "0.54.1" @@ -9811,9 +9788,9 @@ dependencies = [ "futures", "futures-timer", "getrandom", - "libp2p-allow-block-list 0.4.0", - "libp2p-connection-limits 0.4.0", - "libp2p-core 0.42.0", + "libp2p-allow-block-list", + "libp2p-connection-limits", + "libp2p-core", "libp2p-dns", "libp2p-identify", "libp2p-identity", @@ -9824,7 +9801,7 @@ dependencies = [ "libp2p-ping", "libp2p-quic", "libp2p-request-response", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "libp2p-tcp", "libp2p-upnp", "libp2p-websocket", @@ -9835,39 +9812,15 @@ dependencies = [ "thiserror", ] -[[package]] -name = "libp2p-allow-block-list" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" -dependencies = [ - "libp2p-core 0.40.1", - "libp2p-identity", - "libp2p-swarm 0.43.7", - "void", -] - [[package]] name = "libp2p-allow-block-list" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" dependencies = [ - "libp2p-core 0.42.0", - "libp2p-identity", - "libp2p-swarm 0.45.1", - "void", -] - -[[package]] -name = "libp2p-connection-limits" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" -dependencies = [ - "libp2p-core 0.40.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.43.7", + "libp2p-swarm", "void", ] @@ -9877,37 +9830,9 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" dependencies = [ - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", - "void", -] - 
-[[package]] -name = "libp2p-core" -version = "0.40.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd44289ab25e4c9230d9246c475a22241e301b23e8f4061d3bdef304a1a99713" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "libp2p-identity", - "log", - "multiaddr 0.18.1", - "multihash 0.19.1", - "multistream-select", - "once_cell", - "parking_lot 0.12.3", - "pin-project", - "quick-protobuf 0.8.1", - "rand", - "rw-stream-sink", - "smallvec", - "thiserror", - "unsigned-varint 0.7.2", + "libp2p-swarm", "void", ] @@ -9948,7 +9873,7 @@ dependencies = [ "async-trait", "futures", "hickory-resolver", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "parking_lot 0.12.3", "smallvec", @@ -9966,9 +9891,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "lru 0.12.3", "quick-protobuf 0.8.1", "quick-protobuf-codec", @@ -10010,9 +9935,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "quick-protobuf 0.8.1", "quick-protobuf-codec", "rand", @@ -10035,9 +9960,9 @@ dependencies = [ "futures", "hickory-proto", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "rand", "smallvec", "socket2 0.5.7", @@ -10053,12 +9978,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identify", "libp2p-identity", "libp2p-kad", "libp2p-ping", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "pin-project", "prometheus-client", "web-time", @@ -10074,7 +9999,7 @@ dependencies = [ "bytes", "curve25519-dalek 4.1.3", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "multiaddr 0.18.1", "multihash 0.19.1", @@ -10099,9 +10024,9 @@ dependencies = [ "either", "futures", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "rand", "tracing", "void", @@ -10118,7 +10043,7 @@ dependencies = [ "futures", "futures-timer", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-tls", "parking_lot 0.12.3", @@ -10142,9 +10067,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "rand", "smallvec", "tracing", @@ -10152,27 +10077,6 @@ dependencies = [ "web-time", ] -[[package]] -name = "libp2p-swarm" -version = "0.43.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "580189e0074af847df90e75ef54f3f30059aedda37ea5a1659e8b9fca05c0141" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "libp2p-core 0.40.1", - "libp2p-identity", - "log", - "multistream-select", - "once_cell", - "rand", - "smallvec", - "void", -] - [[package]] name = "libp2p-swarm" version = "0.45.1" @@ -10183,7 +10087,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", "lru 0.12.3", @@ -10219,7 +10123,7 @@ dependencies = [ "futures-timer", "if-watch", "libc", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "socket2 0.5.7", "tokio", @@ -10234,7 +10138,7 @@ 
checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "rcgen 0.11.3", "ring 0.17.8", @@ -10254,8 +10158,8 @@ dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core 0.42.0", - "libp2p-swarm 0.45.1", + "libp2p-core", + "libp2p-swarm", "tokio", "tracing", "void", @@ -10270,7 +10174,7 @@ dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "parking_lot 0.12.3", "pin-project-lite", @@ -10290,7 +10194,7 @@ checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" dependencies = [ "either", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "thiserror", "tracing", "yamux 0.12.1", @@ -11300,17 +11204,6 @@ dependencies = [ "libc", ] -[[package]] -name = "nix" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" -dependencies = [ - "bitflags 2.6.0", - "cfg-if", - "libc", -] - [[package]] name = "nix" version = "0.29.0" @@ -13015,7 +12908,7 @@ dependencies = [ "parity-wasm", "sp-runtime 31.0.1", "tempfile", - "toml 0.8.12", + "toml 0.8.19", "twox-hash", ] @@ -14264,14 +14157,10 @@ dependencies = [ name = "pallet-node-authorization" version = "28.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", "log", "parity-scale-codec", + "polkadot-sdk-frame 0.1.0", "scale-info", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", ] [[package]] @@ -14940,7 +14829,7 @@ dependencies = [ "polkavm-linker 0.18.0", "sp-core 28.0.0", "sp-io 30.0.0", - "toml 0.8.12", + "toml 0.8.19", ] [[package]] @@ -14955,7 +14844,7 @@ dependencies = [ "polkavm-linker 0.10.0", "sp-runtime 39.0.2", "tempfile", - "toml 0.8.12", + "toml 0.8.19", ] [[package]] @@ -15175,17 +15064,11 @@ dependencies = [ name = "pallet-salary" version = "13.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", "log", "pallet-ranked-collective 28.0.0", "parity-scale-codec", + "polkadot-sdk-frame 0.1.0", "scale-info", - "sp-arithmetic 23.0.0", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", ] [[package]] @@ -19858,6 +19741,7 @@ dependencies = [ "env_logger 0.11.3", "log", "parity-scale-codec", + "polkadot-primitives 7.0.0", "serde", "serde_json", "substrate-build-script-utils", @@ -19934,12 +19818,6 @@ dependencies = [ "log", ] -[[package]] -name = "polkavm-common" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c99f7eee94e7be43ba37eef65ad0ee8cbaf89b7c00001c3f6d2be985cb1817" - [[package]] name = "polkavm-common" version = "0.9.0" @@ -19969,15 +19847,6 @@ dependencies = [ "polkavm-assembler 0.18.0", ] -[[package]] -name = "polkavm-derive" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79fa916f7962348bd1bb1a65a83401675e6fc86c51a0fdbcf92a3108e58e6125" -dependencies = [ - "polkavm-derive-impl-macro 0.8.0", -] - [[package]] name = "polkavm-derive" version = "0.9.1" @@ -20005,18 +19874,6 @@ dependencies = [ "polkavm-derive-impl-macro 0.18.0", ] -[[package]] -name = "polkavm-derive-impl" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c10b2654a8a10a83c260bfb93e97b262cf0017494ab94a65d389e0eda6de6c9c" -dependencies = [ - "polkavm-common 0.8.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - 
"syn 2.0.87", -] - [[package]] name = "polkavm-derive-impl" version = "0.9.0" @@ -20053,16 +19910,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "polkavm-derive-impl-macro" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e85319a0d5129dc9f021c62607e0804f5fb777a05cdda44d750ac0732def66" -dependencies = [ - "polkavm-derive-impl 0.8.0", - "syn 2.0.87", -] - [[package]] name = "polkavm-derive-impl-macro" version = "0.9.0" @@ -23046,7 +22893,7 @@ dependencies = [ "futures", "futures-timer", "ip_network", - "libp2p 0.54.1", + "libp2p", "linked_hash_set", "litep2p", "log", @@ -23222,7 +23069,7 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "libp2p 0.54.1", + "libp2p", "log", "parking_lot 0.12.3", "rand", @@ -23679,7 +23526,7 @@ version = "15.0.0" dependencies = [ "chrono", "futures", - "libp2p 0.54.1", + "libp2p", "log", "parking_lot 0.12.3", "pin-project", @@ -26181,53 +26028,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "sp-core" -version = "31.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d7a0fd8f16dcc3761198fc83be12872f823b37b749bc72a3a6a1f702509366" -dependencies = [ - "array-bytes", - "bitflags 1.3.2", - "blake2 0.10.6", - "bounded-collections", - "bs58", - "dyn-clonable", - "ed25519-zebra 3.1.0", - "futures", - "hash-db", - "hash256-std-hasher", - "impl-serde 0.4.0", - "itertools 0.10.5", - "k256", - "libsecp256k1", - "log", - "merlin", - "parity-bip39", - "parity-scale-codec", - "parking_lot 0.12.3", - "paste", - "primitive-types 0.12.2", - "rand", - "scale-info", - "schnorrkel 0.11.4", - "secp256k1 0.28.2", - "secrecy 0.8.0", - "serde", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-externalities 0.27.0", - "sp-runtime-interface 26.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-storage 20.0.0", - "ss58-registry", - "substrate-bip39 0.5.0", - "thiserror", - "tracing", - "w3f-bls", - "zeroize", -] - [[package]] name = "sp-core" version = "32.0.0" @@ -26568,18 +26368,6 @@ dependencies = [ "sp-storage 19.0.0", ] -[[package]] -name = "sp-externalities" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d6a4572eadd4a63cff92509a210bf425501a0c5e76574b30a366ac77653787" -dependencies = [ - "environmental", - "parity-scale-codec", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-storage 20.0.0", -] - [[package]] name = "sp-externalities" version = "0.28.0" @@ -27164,26 +26952,6 @@ dependencies = [ "trybuild", ] -[[package]] -name = "sp-runtime-interface" -version = "26.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a675ea4858333d4d755899ed5ed780174aa34fec15953428d516af5452295" -dependencies = [ - "bytes", - "impl-trait-for-tuples", - "parity-scale-codec", - "polkavm-derive 0.8.0", - "primitive-types 0.12.2", - "sp-externalities 0.27.0", - "sp-runtime-interface-proc-macro 18.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-storage 20.0.0", - "sp-tracing 16.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-wasm-interface 20.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "static_assertions", -] - [[package]] name = "sp-runtime-interface" version = "27.0.0" @@ -27541,20 +27309,6 @@ dependencies = [ "sp-debug-derive 
14.0.0", ] -[[package]] -name = "sp-storage" -version = "20.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dba5791cb3978e95daf99dad919ecb3ec35565604e88cd38d805d9d4981e8bd" -dependencies = [ - "impl-serde 0.4.0", - "parity-scale-codec", - "ref-cast", - "serde", - "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "sp-storage" version = "21.0.0" @@ -27626,19 +27380,6 @@ dependencies = [ "tracing-subscriber 0.3.18", ] -[[package]] -name = "sp-tracing" -version = "16.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0351810b9d074df71c4514c5228ed05c250607cba131c1c9d1526760ab69c05c" -dependencies = [ - "parity-scale-codec", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tracing", - "tracing-core", - "tracing-subscriber 0.2.25", -] - [[package]] name = "sp-tracing" version = "17.0.1" @@ -27895,20 +27636,6 @@ dependencies = [ "wasmtime", ] -[[package]] -name = "sp-wasm-interface" -version = "20.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ef97172c42eb4c6c26506f325f48463e9bc29b2034a587f1b9e48c751229bee" -dependencies = [ - "anyhow", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "wasmtime", -] - [[package]] name = "sp-wasm-interface" version = "21.0.1" @@ -28448,19 +28175,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "substrate-bip39" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2b564c293e6194e8b222e52436bcb99f60de72043c7f845cf6c4406db4df121" -dependencies = [ - "hmac 0.12.1", - "pbkdf2", - "schnorrkel 0.11.4", - "sha2 0.10.8", - "zeroize", -] - [[package]] name = "substrate-bip39" version = "0.6.0" @@ -28801,7 +28515,7 @@ dependencies = [ "sp-version 29.0.0", "strum 0.26.3", "tempfile", - "toml 0.8.12", + "toml 0.8.19", "walkdir", "wasm-opt", ] @@ -28822,7 +28536,7 @@ dependencies = [ "sp-maybe-compressed-blob 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "strum 0.26.3", "tempfile", - "toml 0.8.12", + "toml 0.8.19", "walkdir", "wasm-opt", ] @@ -29819,33 +29533,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.19.15", -] - -[[package]] -name = "toml" -version = "0.8.12" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.12", + "toml_edit 0.22.22", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] @@ -29857,8 +29559,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ 
"indexmap 2.7.0", - "serde", - "serde_spanned", "toml_datetime", "winnow 0.5.15", ] @@ -29876,9 +29576,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.12" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap 2.7.0", "serde", @@ -32080,9 +31780,9 @@ dependencies = [ [[package]] name = "zombienet-configuration" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d716b3ff8112d98ced15f53b0c72454f8cde533fe2b68bb04379228961efbd80" +checksum = "5ced2fca1322821431f03d06dcf2ea74d3a7369760b6c587b372de6eada3ce43" dependencies = [ "anyhow", "lazy_static", @@ -32093,23 +31793,23 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "toml 0.7.8", + "toml 0.8.19", "url", "zombienet-support", ] [[package]] name = "zombienet-orchestrator" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4098a7d33b729b59e32c41a87aa4d484bd1b8771a059bbd4edfb4d430b3b2d74" +checksum = "86ecd17133c3129547b6472591b5e58d4aee1fc63c965a3418fd56d33a8a4e82" dependencies = [ "anyhow", "async-trait", "futures", "glob-match", "hex", - "libp2p 0.52.4", + "libp2p", "libsecp256k1", "multiaddr 0.18.1", "rand", @@ -32118,7 +31818,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", - "sp-core 31.0.0", + "sp-core 34.0.0", "subxt", "subxt-signer", "thiserror", @@ -32133,9 +31833,9 @@ dependencies = [ [[package]] name = "zombienet-prom-metrics-parser" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961e30be45b34f6ebeabf29ee2f47b0cd191ea62e40c064752572207509a6f5c" +checksum = "23702db0819a050c8a0130a769b105695137020a64207b4597aa021f06924552" dependencies = [ "pest", "pest_derive", @@ -32144,9 +31844,9 @@ dependencies = [ [[package]] name = "zombienet-provider" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab0f7f01780b7c99a6c40539d195d979f234305f32808d547438b50829d44262" +checksum = "83e903843c62cd811e7730ccc618dcd14444d20e8aadfcd7d7561c7b47d8f984" dependencies = [ "anyhow", "async-trait", @@ -32155,7 +31855,7 @@ dependencies = [ "hex", "k8s-openapi", "kube", - "nix 0.27.1", + "nix 0.29.0", "regex", "reqwest 0.11.27", "serde", @@ -32175,9 +31875,9 @@ dependencies = [ [[package]] name = "zombienet-sdk" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99a3c5f2d657235b3ab7dc384677e63cde21983029e99106766ecd49e9f8d7f3" +checksum = "e457b12c8fdc7003c12dd56855da09812ac11dd232e4ec01acccb2899fe05e44" dependencies = [ "async-trait", "futures", @@ -32193,14 +31893,14 @@ dependencies = [ [[package]] name = "zombienet-support" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "296f887ea88e07edd771f8e1d0dec5297a58b422f4b884a6292a21ebe03277cb" +checksum = "43547d65b19a92cf0ee44380239d82ef345e7d26f7b04b9e0ecf48496af6346b" dependencies = [ "anyhow", "async-trait", "futures", - "nix 0.27.1", + "nix 0.29.0", "rand", "regex", "reqwest 0.11.27", diff --git a/Cargo.toml b/Cargo.toml index c917a8a8fead..c30a9949e85e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1391,7 +1391,7 @@ xcm-procedural = { path = 
"polkadot/xcm/procedural", default-features = false } xcm-runtime-apis = { path = "polkadot/xcm/xcm-runtime-apis", default-features = false } xcm-simulator = { path = "polkadot/xcm/xcm-simulator", default-features = false } zeroize = { version = "1.7.0", default-features = false } -zombienet-sdk = { version = "0.2.19" } +zombienet-sdk = { version = "0.2.20" } zstd = { version = "0.12.4", default-features = false } [profile.release] diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs index 00adcdfa186a..cb4232376c6f 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs @@ -3,5 +3,6 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod register_token; +pub mod send_native_eth; pub mod send_token; pub mod send_token_to_penpal; diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_native_eth.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_native_eth.rs new file mode 100755 index 000000000000..d3e8d76e6b39 --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_native_eth.rs @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +// Generated, do not edit! +// See ethereum client README.md for instructions to generate + +use hex_literal::hex; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; +use sp_std::vec; + +pub fn make_send_native_eth_message() -> InboundQueueFixture { + InboundQueueFixture { + message: Message { + event_log: Log { + address: hex!("87d1f7fdfee7f651fabc8bfcb6e086c278b77a7d").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa0000000000010000000000000000000000000000000000000000008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e8764817000000000000000000000000").into(), + }, + proof: Proof { + receipt_proof: (vec![ + hex!("17cd4d05dde30703008a4f213205923630cff8e6bc9d5d95a52716bfb5551fd7").to_vec(), + ], vec![ + 
hex!("f903b4822080b903ae02f903aa018301a7fcb9010000000000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000000000000000000000000000000000000000000000000000001080000000000000000000000000000000000000000080000000000020000000000000000000800010100000000000000000000000000000000000200000000000000000000000000001000000040080008000000000000000000040000000021000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000220000000000000000000000000000000000000000000000000000200000000000000f9029ff9015d9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a00000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8c000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa0000000000010000000000000000000000000000000000000000008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e8764817000000000000000000000000").to_vec(), + ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 246, + proposer_index: 7, + parent_root: hex!("4faaac5d2fa0b8884fe1175c7cac1c92aac9eba5a20b4302edb98a56428c5974").into(), + state_root: hex!("882c13f1d56df781e3444a78cae565bfa1c89822c86cdb0daea71f5351231580").into(), + body_root: hex!("c47eb72204b1ca567396dacef8b0214027eb7f0789330b55166085d1f9cb4c65").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("38e2454bc93c4cfafcea772b8531e4802bbd2561366620699096dd4e591bc488").into(), + hex!("3d7389fb144ccaeca8b8e1667ce1d1538dfceb50bf1e49c4b368a223f051fda3").into(), + hex!("0d49c9c24137ad4d86ebca2f36a159573a68b5d5d60e317776c77cc8b6093034").into(), + hex!("0fadc6735bcdc2793a5039a806fbf39984c39374ed4d272c1147e1c23df88983").into(), + hex!("3a058ad4b169eebb4c754c8488d41e56a7a0e5f8b55b5ec67452a8d326585c69").into(), + hex!("de200426caa9bc03f8e0033b4ef4df1db6501924b5c10fb7867e76db942b903c").into(), + hex!("48b578632bc40eebb517501f179ffdd06d762c03e9383df16fc651eeddd18806").into(), + hex!("98d9d6904b2a6a285db4c4ae59a07100cd38ec4d9fb7a16a10fe83ec99e6ba1d").into(), + hex!("1b2bbae6e684864b714654a60778664e63ba6c3c9bed8074ec1a0380fe5042e6").into(), + hex!("eb907a888eadf5a7e2bd0a3a5a9369e409c7aa688bd4cde758d5b608c6c82785").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: 
hex!("440615588532ce496a93d189cb0ef1df7cf67d529faee0fd03213ce26ea115e5").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("a8c89213b7d7d2ac76462d89e6a7384374db905b657ad803d3c86f88f86c39df").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("a1e8175213a6a43da17fae65109245867cbc60e3ada16b8ac28c6b208761c772").into(), + receipts_root: hex!("17cd4d05dde30703008a4f213205923630cff8e6bc9d5d95a52716bfb5551fd7").into(), + logs_bloom: hex!("00000000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000000000000000000000000000000000000000000000000000001080000000000000000000000000000000000000000080000000000020000000000000000000800010100000000000000000000000000000000000200000000000000000000000000001000000040080008000000000000000000040000000021000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000220000000000000000000000000000000000000000000000000000200000000000000").into(), + prev_randao: hex!("b9b26dc14ea8c57d069fde0c94ad31c2558365c3986a0c06558470f8c02e62ce").into(), + block_number: 246, + gas_limit: 62908420, + gas_used: 108540, + timestamp: 1734718384, + extra_data: hex!("d983010e08846765746888676f312e32322e358664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("878195e2ea83c74d475363d03d41a7fbfc4026d6e5bcffb713928253984a64a7").into(), + transactions_root: hex!("909139b3137666b4551b629ce6d9fb7e5e6f6def8a48d078448ec6600fe63c7f").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("5d78e26ea639df17c2194ff925f782b9522009d58cfc60e3d34ba79a19f8faf1").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("3d84b2809a36450186e5169995a5e3cab55d751aee90fd8456b33d871ccaa463").into(), + ], + } + }, + }, + finalized_header: BeaconHeader { + slot: 608, + proposer_index: 3, + parent_root: hex!("f10c2349530dbd339a72886270e2e304bb68155af68c918c850acd9ab341350f").into(), + state_root: hex!("6df0ef4cbb4986a84ff0763727402b88636e6b5535022cd3ad6967b8dd799402").into(), + body_root: hex!("f66fc1c022f07f91c777ad5c464625fc0b43d3e7a45650567dce60011210f574").into(), + }, + block_roots_root: hex!("1c0dbf54db070770f5e573b72afe0aac2b0e3cf312107d1cd73bf64d7a2ed90c").into(), + } +} diff --git a/bridges/snowbridge/primitives/router/src/inbound/mock.rs b/bridges/snowbridge/primitives/router/src/inbound/mock.rs new file mode 100644 index 000000000000..537853b324f6 --- /dev/null +++ b/bridges/snowbridge/primitives/router/src/inbound/mock.rs @@ -0,0 +1,48 @@ +use crate::inbound::{MessageToXcm, TokenId}; +use frame_support::parameter_types; +use sp_runtime::{ + traits::{IdentifyAccount, MaybeEquivalence, Verify}, + MultiSignature, +}; +use xcm::{latest::WESTEND_GENESIS_HASH, prelude::*}; + +pub const CHAIN_ID: u64 = 11155111; +pub const NETWORK: NetworkId = Ethereum { chain_id: CHAIN_ID }; + +parameter_types! 
{ + pub EthereumNetwork: NetworkId = NETWORK; + + pub const CreateAssetCall: [u8;2] = [53, 0]; + pub const CreateAssetExecutionFee: u128 = 2_000_000_000; + pub const CreateAssetDeposit: u128 = 100_000_000_000; + pub const SendTokenExecutionFee: u128 = 1_000_000_000; + pub const InboundQueuePalletInstance: u8 = 80; + pub UniversalLocation: InteriorLocation = + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1002)].into(); + pub AssetHubFromEthereum: Location = Location::new(1,[GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),Parachain(1000)]); +} + +type Signature = MultiSignature; +type AccountId = <::Signer as IdentifyAccount>::AccountId; +type Balance = u128; + +pub(crate) struct MockTokenIdConvert; +impl MaybeEquivalence for MockTokenIdConvert { + fn convert(_id: &TokenId) -> Option { + Some(Location::parent()) + } + fn convert_back(_loc: &Location) -> Option { + None + } +} + +pub(crate) type MessageConverter = MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + MockTokenIdConvert, + UniversalLocation, + AssetHubFromEthereum, +>; diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index bc5d401cd4f7..1c210afb1f74 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -2,6 +2,8 @@ // SPDX-FileCopyrightText: 2023 Snowfork //! Converts messages from Ethereum to XCM messages +#[cfg(test)] +mod mock; #[cfg(test)] mod tests; @@ -394,10 +396,16 @@ where // Convert ERC20 token address to a location that can be understood by Assets Hub. fn convert_token_address(network: NetworkId, token: H160) -> Location { - Location::new( - 2, - [GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }], - ) + // If the token is `0x0000000000000000000000000000000000000000` then return the location of + // native Ether. + if token == H160([0; 20]) { + Location::new(2, [GlobalConsensus(network)]) + } else { + Location::new( + 2, + [GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }], + ) + } } /// Constructs an XCM message destined for AssetHub that withdraws assets from the sovereign diff --git a/bridges/snowbridge/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/primitives/router/src/inbound/tests.rs index 786aa594f653..11d7928602c6 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/tests.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/tests.rs @@ -1,21 +1,12 @@ use super::EthereumLocationsConverterFor; -use crate::inbound::CallIndex; -use frame_support::{assert_ok, parameter_types}; +use crate::inbound::{ + mock::*, Command, ConvertMessage, Destination, MessageV1, VersionedMessage, H160, +}; +use frame_support::assert_ok; use hex_literal::hex; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; -const NETWORK: NetworkId = Ethereum { chain_id: 11155111 }; - -parameter_types! 
{ - pub EthereumNetwork: NetworkId = NETWORK; - - pub const CreateAssetCall: CallIndex = [1, 1]; - pub const CreateAssetExecutionFee: u128 = 123; - pub const CreateAssetDeposit: u128 = 891; - pub const SendTokenExecutionFee: u128 = 592; -} - #[test] fn test_ethereum_network_converts_successfully() { let expected_account: [u8; 32] = @@ -81,3 +72,74 @@ fn test_reanchor_all_assets() { assert_eq!(reanchored_asset_with_ethereum_context, asset.clone()); } } + +#[test] +fn test_convert_send_token_with_weth() { + const WETH: H160 = H160([0xff; 20]); + const AMOUNT: u128 = 1_000_000; + const FEE: u128 = 1_000; + const ACCOUNT_ID: [u8; 32] = [0xBA; 32]; + const MESSAGE: VersionedMessage = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::SendToken { + token: WETH, + destination: Destination::AccountId32 { id: ACCOUNT_ID }, + amount: AMOUNT, + fee: FEE, + }, + }); + let result = MessageConverter::convert([1; 32].into(), MESSAGE); + assert_ok!(&result); + let (xcm, fee) = result.unwrap(); + assert_eq!(FEE, fee); + + let expected_assets = ReserveAssetDeposited( + vec![Asset { + id: AssetId(Location { + parents: 2, + interior: Junctions::X2( + [GlobalConsensus(NETWORK), AccountKey20 { network: None, key: WETH.into() }] + .into(), + ), + }), + fun: Fungible(AMOUNT), + }] + .into(), + ); + let actual_assets = xcm.into_iter().find(|x| matches!(x, ReserveAssetDeposited(..))); + assert_eq!(actual_assets, Some(expected_assets)) +} + +#[test] +fn test_convert_send_token_with_eth() { + const ETH: H160 = H160([0x00; 20]); + const AMOUNT: u128 = 1_000_000; + const FEE: u128 = 1_000; + const ACCOUNT_ID: [u8; 32] = [0xBA; 32]; + const MESSAGE: VersionedMessage = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::SendToken { + token: ETH, + destination: Destination::AccountId32 { id: ACCOUNT_ID }, + amount: AMOUNT, + fee: FEE, + }, + }); + let result = MessageConverter::convert([1; 32].into(), MESSAGE); + assert_ok!(&result); + let (xcm, fee) = result.unwrap(); + assert_eq!(FEE, fee); + + let expected_assets = ReserveAssetDeposited( + vec![Asset { + id: AssetId(Location { + parents: 2, + interior: Junctions::X1([GlobalConsensus(NETWORK)].into()), + }), + fun: Fungible(AMOUNT), + }] + .into(), + ); + let actual_assets = xcm.into_iter().find(|x| matches!(x, ReserveAssetDeposited(..))); + assert_eq!(actual_assets, Some(expected_assets)) +} diff --git a/bridges/snowbridge/primitives/router/src/outbound/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/mod.rs index 3b5dbdb77c89..622c40807015 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/mod.rs @@ -289,8 +289,13 @@ where let (token, amount) = match reserve_asset { Asset { id: AssetId(inner_location), fun: Fungible(amount) } => match inner_location.unpack() { + // Get the ERC20 contract address of the token. (0, [AccountKey20 { network, key }]) if self.network_matches(network) => Some((H160(*key), *amount)), + // If there is no ERC20 contract address in the location then signal to the + // gateway that is a native Ether transfer by using + // `0x0000000000000000000000000000000000000000` as the token address. 
+ (0, []) => Some((H160([0; 20]), *amount)), _ => None, }, _ => None, diff --git a/bridges/snowbridge/primitives/router/src/outbound/tests.rs b/bridges/snowbridge/primitives/router/src/outbound/tests.rs index 44f81ce31b3a..2a60f9f3e0ea 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/tests.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/tests.rs @@ -515,6 +515,46 @@ fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() { assert_eq!(result, Ok((expected_payload, [0; 32]))); } +#[test] +fn xcm_converter_convert_with_native_eth_succeeds() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + // The asset is `{ parents: 0, interior: X1(Here) }` relative to ethereum. + let assets: Assets = vec![Asset { id: AssetId([].into()), fun: Fungible(1000) }].into(); + let filter: AssetFilter = Wild(All); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + // The token address that is expected to be sent should be + // `0x0000000000000000000000000000000000000000`. The solidity will + // interpret this as a transfer of ETH. + let expected_payload = Command::AgentExecute { + agent_id: Default::default(), + command: AgentExecuteCommand::TransferToken { + token: H160([0; 20]), + recipient: beneficiary_address.into(), + amount: 1000, + }, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); +} + #[test] fn xcm_converter_convert_with_fees_less_than_reserve_yields_success() { let network = BridgedNetwork::get(); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs index 5ef0993f70a1..43398eb8bd48 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -16,7 +16,8 @@ pub mod genesis; pub use bridge_hub_rococo_runtime::{ - xcm_config::XcmConfig as BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue, + self as bridge_hub_rococo_runtime, xcm_config::XcmConfig as BridgeHubRococoXcmConfig, + EthereumBeaconClient, EthereumInboundQueue, ExistentialDeposit as BridgeHubRococoExistentialDeposit, RuntimeOrigin as BridgeHubRococoRuntimeOrigin, }; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index 54bc395c86f0..f84d42cb29f8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -50,6 +50,7 @@ mod imports { AssetHubWestendParaPallet as AssetHubWestendPallet, }, bridge_hub_rococo_emulated_chain::{ + bridge_hub_rococo_runtime::bridge_to_ethereum_config::EthereumGatewayAddress, genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoExistentialDeposit, BridgeHubRococoParaPallet as 
BridgeHubRococoPallet, BridgeHubRococoRuntimeOrigin, BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index c72d5045ddc0..6364ff9fe959 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -20,8 +20,8 @@ use hex_literal::hex; use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender; use snowbridge_core::{inbound::InboundQueueFixture, outbound::OperatingMode}; use snowbridge_pallet_inbound_queue_fixtures::{ - register_token::make_register_token_message, send_token::make_send_token_message, - send_token_to_penpal::make_send_token_to_penpal_message, + register_token::make_register_token_message, send_native_eth::make_send_native_eth_message, + send_token::make_send_token_message, send_token_to_penpal::make_send_token_to_penpal_message, }; use snowbridge_pallet_system; use snowbridge_router_primitives::inbound::{ @@ -238,7 +238,7 @@ fn register_weth_token_from_ethereum_to_asset_hub() { /// Tests the registering of a token as an asset on AssetHub, and then subsequently sending /// a token from Ethereum to AssetHub. #[test] -fn send_token_from_ethereum_to_asset_hub() { +fn send_weth_token_from_ethereum_to_asset_hub() { BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND); // Fund ethereum sovereign on AssetHub @@ -278,7 +278,7 @@ fn send_token_from_ethereum_to_asset_hub() { /// Tests sending a token to a 3rd party parachain, called PenPal. The token reserve is /// still located on AssetHub. 
#[test] -fn send_token_from_ethereum_to_penpal() { +fn send_weth_from_ethereum_to_penpal() { let asset_hub_sovereign = BridgeHubRococo::sovereign_account_id_of(Location::new( 1, [Parachain(AssetHubRococo::para_id().into())], @@ -515,6 +515,176 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { }); } +/// Tests the full cycle of eth transfers: +/// - sending a token to AssetHub +/// - returning the token to Ethereum +#[test] +fn send_eth_asset_from_asset_hub_to_ethereum_and_back() { + let ethereum_network: NetworkId = EthereumNetwork::get().into(); + let origin_location = (Parent, Parent, ethereum_network).into(); + + use ahr_xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee; + let assethub_location = BridgeHubRococo::sibling_location_of(AssetHubRococo::para_id()); + let assethub_sovereign = BridgeHubRococo::sovereign_account_id_of(assethub_location); + let ethereum_sovereign: AccountId = + EthereumLocationsConverterFor::::convert_location(&origin_location).unwrap(); + + AssetHubRococo::force_default_xcm_version(Some(XCM_VERSION)); + BridgeHubRococo::force_default_xcm_version(Some(XCM_VERSION)); + AssetHubRococo::force_xcm_version(origin_location.clone(), XCM_VERSION); + + BridgeHubRococo::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); + AssetHubRococo::fund_accounts(vec![ + (AssetHubRococoReceiver::get(), INITIAL_FUND), + (ethereum_sovereign.clone(), INITIAL_FUND), + ]); + + // Register ETH + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + assert_ok!(::ForeignAssets::force_create( + RuntimeOrigin::root(), + origin_location.clone(), + ethereum_sovereign.into(), + true, + 1000, + )); + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::ForceCreated { .. }) => {}, + ] + ); + }); + const ETH_AMOUNT: u128 = 1_000_000_000_000_000_000; + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + // Set the gateway. This is needed because new fixtures use a different gateway address. + assert_ok!(::System::set_storage( + RuntimeOrigin::root(), + vec![( + EthereumGatewayAddress::key().to_vec(), + sp_core::H160(hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d")).encode(), + )], + )); + + // Construct SendToken message and sent to inbound queue + assert_ok!(send_inbound_message(make_send_native_eth_message())); + + // Check that the send token message was sent using xcm + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
}) => {}, + ] + ); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + let _issued_event = RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { + asset_id: origin_location.clone(), + owner: AssetHubRococoReceiver::get().into(), + amount: ETH_AMOUNT, + }); + // Check that AssetHub has issued the foreign asset + assert_expected_events!( + AssetHubRococo, + vec![ + _issued_event => {}, + ] + ); + let assets = + vec![Asset { id: AssetId(origin_location.clone()), fun: Fungible(ETH_AMOUNT) }]; + let multi_assets = VersionedAssets::from(Assets::from(assets)); + + let destination = origin_location.clone().into(); + + let beneficiary = VersionedLocation::from(Location::new( + 0, + [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], + )); + + let free_balance_before = ::Balances::free_balance( + AssetHubRococoReceiver::get(), + ); + // Send the ETH back to Ethereum + ::PolkadotXcm::limited_reserve_transfer_assets( + RuntimeOrigin::signed(AssetHubRococoReceiver::get()), + Box::new(destination), + Box::new(beneficiary), + Box::new(multi_assets), + 0, + Unlimited, + ) + .unwrap(); + + let _burned_event = RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned { + asset_id: origin_location.clone(), + owner: AssetHubRococoReceiver::get().into(), + balance: ETH_AMOUNT, + }); + // Check that AssetHub has burned the foreign asset + let _destination = origin_location.clone(); + assert_expected_events!( + AssetHubRococo, + vec![ + _burned_event => {}, + RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent { + destination: _destination, .. + }) => {}, + ] + ); + + let free_balance_after = ::Balances::free_balance( + AssetHubRococoReceiver::get(), + ); + // Assert that at least DefaultBridgeHubEthereumBaseFee was charged from the sender + let free_balance_diff = free_balance_before - free_balance_after; + assert!(free_balance_diff > DefaultBridgeHubEthereumBaseFee::get()); + }); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + // Check that the transfer token back to Ethereum message was queued in the Ethereum + // Outbound Queue + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageAccepted {..}) => {}, + RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageQueued {..}) => {}, + ] + ); + + let events = BridgeHubRococo::events(); + // Check that the local fee was credited to the Snowbridge sovereign account + assert!( + events.iter().any(|event| matches!( + event, + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) + if *who == TREASURY_ACCOUNT.into() && *amount == 16903333 + )), + "Snowbridge sovereign takes local fee." + ); + // Check that the remote fee was credited to the AssetHub sovereign account + assert!( + events.iter().any(|event| matches!( + event, + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) + if *who == assethub_sovereign && *amount == 2680000000000, + )), + "AssetHub sovereign takes remote fee."
+ ); + }); +} + #[test] fn send_token_from_ethereum_to_asset_hub_fail_for_insufficient_fund() { // Insufficient fund @@ -565,7 +735,7 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { }); } -fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) { +fn send_weth_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) { let ethereum_network_v5: NetworkId = EthereumNetwork::get().into(); let weth_asset_location: Location = Location::new(2, [ethereum_network_v5.into(), AccountKey20 { network: None, key: WETH }]); @@ -623,8 +793,8 @@ fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u12 } #[test] -fn send_token_from_ethereum_to_existent_account_on_asset_hub() { - send_token_from_ethereum_to_asset_hub_with_fee(AssetHubRococoSender::get().into(), XCM_FEE); +fn send_weth_from_ethereum_to_existent_account_on_asset_hub() { + send_weth_from_ethereum_to_asset_hub_with_fee(AssetHubRococoSender::get().into(), XCM_FEE); AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; @@ -640,8 +810,8 @@ fn send_token_from_ethereum_to_existent_account_on_asset_hub() { } #[test] -fn send_token_from_ethereum_to_non_existent_account_on_asset_hub() { - send_token_from_ethereum_to_asset_hub_with_fee([1; 32], XCM_FEE); +fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub() { + send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], XCM_FEE); AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; @@ -657,8 +827,8 @@ fn send_token_from_ethereum_to_non_existent_account_on_asset_hub() { } #[test] -fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficient_fee() { - send_token_from_ethereum_to_asset_hub_with_fee([1; 32], INSUFFICIENT_XCM_FEE); +fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficient_fee() { + send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], INSUFFICIENT_XCM_FEE); AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; @@ -675,10 +845,10 @@ fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficie } #[test] -fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed( +fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed( ) { // On AH the xcm fee is 26_789_690 and the ED is 3_300_000 - send_token_from_ethereum_to_asset_hub_with_fee([1; 32], 30_000_000); + send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], 30_000_000); AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs index a1663dc98a34..ce85d23b21cb 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs @@ -47,6 +47,7 @@ pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; pub mod rocksdb_weights; +pub mod xcm; pub use block_weights::constants::BlockExecutionWeight; pub use extrinsic_weights::constants::ExtrinsicBaseWeight; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs new file mode 100644 index 000000000000..d73ce8c440fc --- /dev/null +++ 
b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs @@ -0,0 +1,273 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod pallet_xcm_benchmarks_fungible; +mod pallet_xcm_benchmarks_generic; + +use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; +use alloc::vec::Vec; +use frame_support::weights::Weight; +use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; +use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; +use xcm::{ + latest::{prelude::*, AssetTransferFilter}, + DoubleEncoded, +}; + +trait WeighAssets { + fn weigh_assets(&self, weight: Weight) -> Weight; +} + +// Collectives only knows about WND. +const MAX_ASSETS: u64 = 1; + +impl WeighAssets for AssetFilter { + fn weigh_assets(&self, weight: Weight) -> Weight { + match self { + Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64), + Self::Wild(asset) => match asset { + All => weight.saturating_mul(MAX_ASSETS), + AllOf { fun, .. } => match fun { + WildFungibility::Fungible => weight, + // Magic number 2 has to do with the fact that we could have up to 2 times + // MaxAssetsIntoHolding in the worst-case scenario. + WildFungibility::NonFungible => + weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), + }, + AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), + AllOfCounted { count, .. 
} => weight.saturating_mul(MAX_ASSETS.min(*count as u64)),
+ },
+ }
+ }
+}
+
+impl WeighAssets for Assets {
+ fn weigh_assets(&self, weight: Weight) -> Weight {
+ weight.saturating_mul(self.inner().iter().count() as u64)
+ }
+}
+
+pub struct CollectivesWestendXcmWeight<Call>(core::marker::PhantomData<Call>);
+impl<Call> XcmWeightInfo<Call> for CollectivesWestendXcmWeight<Call> {
+ fn withdraw_asset(assets: &Assets) -> Weight {
+ assets.weigh_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
+ }
+ fn reserve_asset_deposited(assets: &Assets) -> Weight {
+ assets.weigh_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
+ }
+ fn receive_teleported_asset(assets: &Assets) -> Weight {
+ assets.weigh_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
+ }
+ fn query_response(
+ _query_id: &u64,
+ _response: &Response,
+ _max_weight: &Weight,
+ _querier: &Option<Location>,
+ ) -> Weight {
+ XcmGeneric::<Runtime>::query_response()
+ }
+ fn transfer_asset(assets: &Assets, _dest: &Location) -> Weight {
+ assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_asset())
+ }
+ fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
+ assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset())
+ }
+ fn transact(
+ _origin_type: &OriginKind,
+ _fallback_max_weight: &Option<Weight>,
+ _call: &DoubleEncoded<Call>,
+ ) -> Weight {
+ XcmGeneric::<Runtime>::transact()
+ }
+ fn hrmp_new_channel_open_request(
+ _sender: &u32,
+ _max_message_size: &u32,
+ _max_capacity: &u32,
+ ) -> Weight {
+ // XCM Executor does not currently support HRMP channel operations
+ Weight::MAX
+ }
+ fn hrmp_channel_accepted(_recipient: &u32) -> Weight {
+ // XCM Executor does not currently support HRMP channel operations
+ Weight::MAX
+ }
+ fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight {
+ // XCM Executor does not currently support HRMP channel operations
+ Weight::MAX
+ }
+ fn clear_origin() -> Weight {
+ XcmGeneric::<Runtime>::clear_origin()
+ }
+ fn descend_origin(_who: &InteriorLocation) -> Weight {
+ XcmGeneric::<Runtime>::descend_origin()
+ }
+ fn report_error(_query_response_info: &QueryResponseInfo) -> Weight {
+ XcmGeneric::<Runtime>::report_error()
+ }
+
+ fn deposit_asset(assets: &AssetFilter, _dest: &Location) -> Weight {
+ assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
+ }
+ fn deposit_reserve_asset(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
+ assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_reserve_asset())
+ }
+ fn exchange_asset(_give: &AssetFilter, _receive: &Assets, _maximal: &bool) -> Weight {
+ Weight::MAX
+ }
+ fn initiate_reserve_withdraw(
+ assets: &AssetFilter,
+ _reserve: &Location,
+ _xcm: &Xcm<()>,
+ ) -> Weight {
+ assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_reserve_withdraw())
+ }
+ fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
+ assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_teleport())
+ }
+ fn initiate_transfer(
+ _dest: &Location,
+ remote_fees: &Option<AssetTransferFilter>,
+ _preserve_origin: &bool,
+ assets: &Vec<AssetTransferFilter>,
+ _xcm: &Xcm<()>,
+ ) -> Weight {
+ let mut weight = if let Some(remote_fees) = remote_fees {
+ let fees = remote_fees.inner();
+ fees.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_transfer())
+ } else {
+ Weight::zero()
+ };
+ for asset_filter in assets {
+ let assets = asset_filter.inner();
+ let extra = assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_transfer());
+ weight = weight.saturating_add(extra);
+ }
+ weight
+ }
+ fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight {
+ XcmGeneric::<Runtime>::report_holding()
+ }
+ fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight {
+ XcmGeneric::<Runtime>::buy_execution()
+ }
+ fn pay_fees(_asset: &Asset) -> Weight {
+ XcmGeneric::<Runtime>::pay_fees()
+ }
+ fn refund_surplus() -> Weight {
+ XcmGeneric::<Runtime>::refund_surplus()
+ }
+ fn set_error_handler(_xcm: &Xcm<Call>) -> Weight {
+ XcmGeneric::<Runtime>::set_error_handler()
+ }
+ fn set_appendix(_xcm: &Xcm<Call>) -> Weight {
+ XcmGeneric::<Runtime>::set_appendix()
+ }
+ fn clear_error() -> Weight {
+ XcmGeneric::<Runtime>::clear_error()
+ }
+ fn set_hints(hints: &BoundedVec<Hint, HintNumVariants>) -> Weight {
+ let mut weight = Weight::zero();
+ for hint in hints {
+ match hint {
+ AssetClaimer { .. } => {
+ weight = weight.saturating_add(XcmGeneric::<Runtime>::asset_claimer());
+ },
+ }
+ }
+ weight
+ }
+ fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight {
+ XcmGeneric::<Runtime>::claim_asset()
+ }
+ fn trap(_code: &u64) -> Weight {
+ XcmGeneric::<Runtime>::trap()
+ }
+ fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight {
+ XcmGeneric::<Runtime>::subscribe_version()
+ }
+ fn unsubscribe_version() -> Weight {
+ XcmGeneric::<Runtime>::unsubscribe_version()
+ }
+ fn burn_asset(assets: &Assets) -> Weight {
+ assets.weigh_assets(XcmGeneric::<Runtime>::burn_asset())
+ }
+ fn expect_asset(assets: &Assets) -> Weight {
+ assets.weigh_assets(XcmGeneric::<Runtime>::expect_asset())
+ }
+ fn expect_origin(_origin: &Option<Location>) -> Weight {
+ XcmGeneric::<Runtime>::expect_origin()
+ }
+ fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight {
+ XcmGeneric::<Runtime>::expect_error()
+ }
+ fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight {
+ XcmGeneric::<Runtime>::expect_transact_status()
+ }
+ fn query_pallet(_module_name: &Vec<u8>, _response_info: &QueryResponseInfo) -> Weight {
+ XcmGeneric::<Runtime>::query_pallet()
+ }
+ fn expect_pallet(
+ _index: &u32,
+ _name: &Vec<u8>,
+ _module_name: &Vec<u8>,
+ _crate_major: &u32,
+ _min_crate_minor: &u32,
+ ) -> Weight {
+ XcmGeneric::<Runtime>::expect_pallet()
+ }
+ fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight {
+ XcmGeneric::<Runtime>::report_transact_status()
+ }
+ fn clear_transact_status() -> Weight {
+ XcmGeneric::<Runtime>::clear_transact_status()
+ }
+ fn universal_origin(_: &Junction) -> Weight {
+ Weight::MAX
+ }
+ fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight {
+ Weight::MAX
+ }
+ fn lock_asset(_: &Asset, _: &Location) -> Weight {
+ Weight::MAX
+ }
+ fn unlock_asset(_: &Asset, _: &Location) -> Weight {
+ Weight::MAX
+ }
+ fn note_unlockable(_: &Asset, _: &Location) -> Weight {
+ Weight::MAX
+ }
+ fn request_unlock(_: &Asset, _: &Location) -> Weight {
+ Weight::MAX
+ }
+ fn set_fees_mode(_: &bool) -> Weight {
+ XcmGeneric::<Runtime>::set_fees_mode()
+ }
+ fn set_topic(_topic: &[u8; 32]) -> Weight {
+ XcmGeneric::<Runtime>::set_topic()
+ }
+ fn clear_topic() -> Weight {
+ XcmGeneric::<Runtime>::clear_topic()
+ }
+ fn alias_origin(_: &Location) -> Weight {
+ XcmGeneric::<Runtime>::alias_origin()
+ }
+ fn unpaid_execution(_: &WeightLimit, _: &Option<Location>) -> Weight {
+ XcmGeneric::<Runtime>::unpaid_execution()
+ }
+ fn execute_with_origin(_: &Option<InteriorLocation>, _: &Xcm<Call>) -> Weight {
+ XcmGeneric::<Runtime>::execute_with_origin()
+ }
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
new file mode 100644
index 000000000000..00826cbb8d79
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -0,0 +1,211 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("collectives-westend-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=collectives-westend-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weights for `pallet_xcm_benchmarks::fungible`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo<T> {
+ // Storage: `System::Account` (r:1 w:1)
+ // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ pub fn withdraw_asset() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `101`
+ // Estimated: `3593`
+ // Minimum execution time: 30_401_000 picoseconds.
+ Weight::from_parts(30_813_000, 3593)
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ // Storage: `System::Account` (r:2 w:2)
+ // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ pub fn transfer_asset() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `153`
+ // Estimated: `6196`
+ // Minimum execution time: 43_150_000 picoseconds.
+ Weight::from_parts(43_919_000, 6196) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn transfer_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `223` + // Estimated: `6196` + // Minimum execution time: 67_808_000 picoseconds. + Weight::from_parts(69_114_000, 6196) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: `Benchmark::Override` (r:0 w:0) + // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn reserve_asset_deposited() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_reserve_withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 29_312_000 picoseconds. + Weight::from_parts(30_347_000, 3535) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + pub fn receive_teleported_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_283_000 picoseconds. 
+ Weight::from_parts(2_448_000, 0) + } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub fn deposit_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `52` + // Estimated: `3593` + // Minimum execution time: 23_556_000 picoseconds. + Weight::from_parts(24_419_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn deposit_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `122` + // Estimated: `3593` + // Minimum execution time: 58_342_000 picoseconds. + Weight::from_parts(59_598_000, 3593) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_teleport() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 28_285_000 picoseconds. 
+ Weight::from_parts(29_016_000, 3535) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `122` + // Estimated: `3593` + // Minimum execution time: 65_211_000 picoseconds. + Weight::from_parts(67_200_000, 3593) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs new file mode 100644 index 000000000000..ae94edc3d731 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -0,0 +1,355 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm_benchmarks::generic` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: Compiled, CHAIN: Some("collectives-westend-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::generic
+// --chain=collectives-westend-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weights for `pallet_xcm_benchmarks::generic`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo<T> {
+ // Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+ // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+ // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+ // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+ // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+ // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+ // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ pub fn report_holding() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `70`
+ // Estimated: `3535`
+ // Minimum execution time: 29_015_000 picoseconds.
+ Weight::from_parts(30_359_000, 3535)
+ .saturating_add(T::DbWeight::get().reads(6))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ pub fn buy_execution() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 572_000 picoseconds.
+ Weight::from_parts(637_000, 0)
+ }
+ pub fn pay_fees() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 1_550_000 picoseconds.
+ Weight::from_parts(1_604_000, 0)
+ }
+ // Storage: `PolkadotXcm::Queries` (r:1 w:0)
+ // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ pub fn query_response() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `32`
+ // Estimated: `3497`
+ // Minimum execution time: 7_354_000 picoseconds.
+ Weight::from_parts(7_808_000, 3497)
+ .saturating_add(T::DbWeight::get().reads(1))
+ }
+ pub fn transact() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 6_716_000 picoseconds.
+ Weight::from_parts(7_067_000, 0)
+ }
+ pub fn refund_surplus() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 1_280_000 picoseconds.
+ Weight::from_parts(1_355_000, 0) + } + pub fn set_error_handler() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 587_000 picoseconds. + Weight::from_parts(645_000, 0) + } + pub fn set_appendix() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 629_000 picoseconds. + Weight::from_parts(662_000, 0) + } + pub fn clear_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 590_000 picoseconds. + Weight::from_parts(639_000, 0) + } + pub fn descend_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 651_000 picoseconds. + Weight::from_parts(688_000, 0) + } + pub fn clear_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 601_000 picoseconds. + Weight::from_parts(630_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn report_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 25_650_000 picoseconds. + Weight::from_parts(26_440_000, 3535) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn claim_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 10_492_000 picoseconds. + Weight::from_parts(10_875_000, 3555) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn trap() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 597_000 picoseconds. 
+ Weight::from_parts(647_000, 0) + } + // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) + // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn subscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 23_732_000 picoseconds. + Weight::from_parts(24_290_000, 3503) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) + } + // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) + // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn unsubscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_446_000 picoseconds. + Weight::from_parts(2_613_000, 0) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn burn_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 960_000 picoseconds. + Weight::from_parts(1_045_000, 0) + } + pub fn expect_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 703_000 picoseconds. + Weight::from_parts(739_000, 0) + } + pub fn expect_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 616_000 picoseconds. + Weight::from_parts(651_000, 0) + } + pub fn expect_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 621_000 picoseconds. + Weight::from_parts(660_000, 0) + } + pub fn expect_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 794_000 picoseconds. 
+ Weight::from_parts(831_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn query_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 29_527_000 picoseconds. + Weight::from_parts(30_614_000, 3535) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + pub fn expect_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_189_000 picoseconds. + Weight::from_parts(3_296_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn report_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 25_965_000 picoseconds. + Weight::from_parts(26_468_000, 3535) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + pub fn clear_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 618_000 picoseconds. + Weight::from_parts(659_000, 0) + } + pub fn set_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 593_000 picoseconds. + Weight::from_parts(618_000, 0) + } + pub fn clear_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 603_000 picoseconds. 
+ Weight::from_parts(634_000, 0) + } + pub fn alias_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 0) + } + pub fn set_fees_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 568_000 picoseconds. + Weight::from_parts(629_000, 0) + } + pub fn unpaid_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 598_000 picoseconds. + Weight::from_parts(655_000, 0) + } + pub fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(749_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 713_000 picoseconds. + Weight::from_parts(776_000, 0) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index 9eb9b85a3918..c5ab21fe8f90 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -21,7 +21,6 @@ use super::{ use frame_support::{ parameter_types, traits::{tokens::imbalance::ResolveTo, ConstU32, Contains, Equals, Everything, Nothing}, - weights::Weight, }; use frame_system::EnsureRoot; use pallet_collator_selection::StakingPotAccountId; @@ -39,12 +38,12 @@ use xcm_builder::{ AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, - EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, - HashedDescription, IsConcrete, LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, - ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, + LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; @@ -125,11 +124,6 @@ pub type XcmOriginToTransactDispatchOrigin = ( ); parameter_types! { - /// The amount of weight an XCM operation takes. This is a safe overestimate. - pub const BaseXcmWeight: Weight = Weight::from_parts(1_000_000_000, 1024); - /// A temporary weight value for each XCM instruction. - /// NOTE: This should be removed after we account for PoV weights. - pub const TempFixedXcmWeight: Weight = Weight::from_parts(1_000_000_000, 0); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; // Fellows pluralistic body. 
@@ -208,7 +202,11 @@ impl xcm_executor::Config for XcmConfig {
 type IsTeleporter = TrustedTeleporters;
 type UniversalLocation = UniversalLocation;
 type Barrier = Barrier;
- type Weigher = FixedWeightBounds;
+ type Weigher = WeightInfoBounds<
+ crate::weights::xcm::CollectivesWestendXcmWeight<RuntimeCall>,
+ RuntimeCall,
+ MaxInstructions,
+ >;
 type Trader = UsingComponents<
 WeightToFee,
 WndLocation,
@@ -275,7 +273,11 @@ impl pallet_xcm::Config for Runtime {
 type XcmExecutor = XcmExecutor<XcmConfig>;
 type XcmTeleportFilter = Everything;
 type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location.
- type Weigher = FixedWeightBounds;
+ type Weigher = WeightInfoBounds<
+ crate::weights::xcm::CollectivesWestendXcmWeight<RuntimeCall>,
+ RuntimeCall,
+ MaxInstructions,
+ >;
 type UniversalLocation = UniversalLocation;
 type RuntimeOrigin = RuntimeOrigin;
 type RuntimeCall = RuntimeCall;
diff --git a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs
index 68d7d31f67f3..98192bfd2a90 100644
--- a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs
+++ b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs
@@ -96,7 +96,7 @@
 //! Two ways exist to run the benchmarks of a runtime.
 //!
 //! 1. The old school way: Most Polkadot-SDK based nodes (such as the ones integrated in
-//! [`templates`]) have an a `benchmark` subcommand integrated into themselves.
+//! [`templates`]) have a `benchmark` subcommand integrated into themselves.
 //! 2. The more [`crate::reference_docs::omni_node`] compatible way of running the benchmarks would
 //! be using [`frame-omni-bencher`] CLI, which only relies on a runtime.
 //!
diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs
index d891af01c3ab..a5d42d9fd6e6 100644
--- a/polkadot/node/core/approval-voting/src/persisted_entries.rs
+++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs
@@ -561,7 +561,7 @@ impl BlockEntry {
 self.distributed_assignments.resize(new_len, false);
 self.distributed_assignments |= bitfield;
- // If the an operation did not change our current bitfield, we return true.
+ // If an operation did not change our current bitfield, we return true.
 let distributed = total_one_bits == self.distributed_assignments.count_ones();
 distributed
diff --git a/polkadot/node/core/pvf-checker/src/interest_view.rs b/polkadot/node/core/pvf-checker/src/interest_view.rs
index 05a6f12de5d8..617d0e0b5d88 100644
--- a/polkadot/node/core/pvf-checker/src/interest_view.rs
+++ b/polkadot/node/core/pvf-checker/src/interest_view.rs
@@ -58,7 +58,7 @@ impl PvfData {
 Self { judgement: None, seen_in }
 }
- /// Mark a the `PvfData` as seen in the provided relay-chain block referenced by `relay_hash`.
+ /// Mark the `PvfData` as seen in the provided relay-chain block referenced by `relay_hash`.
pub fn seen_in(&mut self, relay_hash: Hash) { self.seen_in.insert(relay_hash); } diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 323b2cb08fec..5d79260e3ad2 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -1255,7 +1255,7 @@ fn import_approval_happy_path_v1_v2_peers() { } ); - // send the an approval from peer_b + // send an approval from peer_b let approval = IndirectSignedApprovalVoteV2 { block_hash: hash, candidate_indices: candidate_index.into(), @@ -1385,7 +1385,7 @@ fn import_approval_happy_path_v2() { } ); - // send the an approval from peer_b + // send an approval from peer_b let approval = IndirectSignedApprovalVoteV2 { block_hash: hash, candidate_indices, @@ -1893,7 +1893,7 @@ fn import_approval_bad() { .unwrap() .unwrap(); - // send the an approval from peer_b, we don't have an assignment yet + // send an approval from peer_b, we don't have an assignment yet let approval = IndirectSignedApprovalVoteV2 { block_hash: hash, candidate_indices: candidate_index.into(), @@ -4172,7 +4172,7 @@ fn import_versioned_approval() { } ); - // send the an approval from peer_a + // send an approval from peer_a let approval = IndirectSignedApprovalVote { block_hash: hash, candidate_index, diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md index 40394412d81b..7e155cdf7d58 100644 --- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -406,7 +406,7 @@ Some(core_index), response_sender)` * Construct a `IndirectSignedApprovalVote` using the information about the vote. * Dispatch `ApprovalDistributionMessage::DistributeApproval`. * ELSE - * Re-arm the timer with latest tick we have the send a the vote. + * Re-arm the timer with latest tick we have then send the vote. ### Determining Approval of Candidate diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 82a3136cc0d9..bb77ec0000e5 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -84,7 +84,7 @@ pub type LocalAssetTransactor = FungibleAdapter< LocalCheckAccount, >; -/// The means that we convert an the XCM message origin location into a local dispatch origin. +/// The means that we convert the XCM message origin location into a local dispatch origin. type LocalOriginConverter = ( // A `Signed` origin of the sovereign account that the original location controls. 
SovereignSignedViaLocation, diff --git a/polkadot/zombienet-sdk-tests/Cargo.toml b/polkadot/zombienet-sdk-tests/Cargo.toml index 120857c9a42e..ba7517ddce66 100644 --- a/polkadot/zombienet-sdk-tests/Cargo.toml +++ b/polkadot/zombienet-sdk-tests/Cargo.toml @@ -12,6 +12,7 @@ anyhow = { workspace = true } codec = { workspace = true, features = ["derive"] } env_logger = { workspace = true } log = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } serde = { workspace = true } serde_json = { workspace = true } subxt = { workspace = true, features = ["substrate-compat"] } diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs new file mode 100644 index 000000000000..42aa83d9da7a --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs @@ -0,0 +1,135 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Test that a parachain that uses a basic collator (like adder-collator) with elastic scaling +// can achieve full throughput of 3 candidates per block. + +use anyhow::anyhow; + +use crate::helpers::{ + assert_para_throughput, rococo, + rococo::runtime_types::{ + pallet_broker::coretime_interface::CoreAssignment, + polkadot_runtime_parachains::assigner_coretime::PartsOf57600, + }, +}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; +use zombienet_sdk::NetworkConfigBuilder; + +#[tokio::test(flavor = "multi_thread")] +async fn basic_3cores_test() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let images = zombienet_sdk::environment::get_images_from_env(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 2, + "max_validators_per_core": 1 + }, + "async_backing_params": { + "max_candidate_depth": 6, + "allowed_ancestry_len": 2 + } + } + } + })) + // Have to set a `with_node` outside of the loop below, so that `r` has the right + // type. 
+ .with_node(|node| node.with_name("validator-0")); + + (1..4).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + p.with_id(2000) + .with_default_command("adder-collator") + .cumulus_based(false) + .with_default_image(images.cumulus.as_str()) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_collator(|n| n.with_name("adder-2000")) + }) + .with_parachain(|p| { + p.with_id(2001) + .with_default_command("adder-collator") + .cumulus_based(false) + .with_default_image(images.cumulus.as_str()) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_collator(|n| n.with_name("adder-2001")) + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + + let relay_client: OnlineClient = relay_node.wait_client().await?; + let alice = dev::alice(); + + // Assign two extra cores to adder-2000. + relay_client + .tx() + .sign_and_submit_then_watch_default( + &rococo::tx() + .sudo() + .sudo(rococo::runtime_types::rococo_runtime::RuntimeCall::Utility( + rococo::runtime_types::pallet_utility::pallet::Call::batch { + calls: vec![ + rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( + rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { + core: 0, + begin: 0, + assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))], + end_hint: None + } + ), + rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( + rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { + core: 1, + begin: 0, + assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))], + end_hint: None + } + ), + ], + }, + )), + &alice, + ) + .await? + .wait_for_finalized_success() + .await?; + + log::info!("2 more cores assigned to adder-2000"); + + assert_para_throughput( + &relay_client, + 15, + [(ParaId::from(2000), 40..46), (ParaId::from(2001), 12..16)] + .into_iter() + .collect(), + ) + .await?; + + log::info!("Test finished successfully"); + + Ok(()) +} diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs new file mode 100644 index 000000000000..f83400d2b22a --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs @@ -0,0 +1,133 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Test that a paraid that doesn't use elastic scaling which acquired multiple cores does not brick +// itself if ElasticScalingMVP feature is enabled in genesis. 
+ +use anyhow::anyhow; + +use crate::helpers::{ + assert_finalized_block_height, assert_para_throughput, rococo, + rococo::runtime_types::{ + pallet_broker::coretime_interface::CoreAssignment, + polkadot_runtime_parachains::assigner_coretime::PartsOf57600, + }, +}; +use polkadot_primitives::{CoreIndex, Id as ParaId}; +use serde_json::json; +use std::collections::{BTreeMap, VecDeque}; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; +use zombienet_sdk::NetworkConfigBuilder; + +#[tokio::test(flavor = "multi_thread")] +async fn doesnt_break_parachains_test() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let images = zombienet_sdk::environment::get_images_from_env(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 1, + "max_validators_per_core": 2 + }, + "async_backing_params": { + "max_candidate_depth": 6, + "allowed_ancestry_len": 2 + } + } + } + })) + // Have to set a `with_node` outside of the loop below, so that `r` has the right + // type. + .with_node(|node| node.with_name("validator-0")); + + (1..4).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + // Use rococo-parachain default, which has 6 second slot time. Also, don't use + // slot-based collator. + p.with_id(2000) + .with_default_command("polkadot-parachain") + .with_default_image(images.cumulus.as_str()) + .with_default_args(vec![("-lparachain=debug,aura=debug").into()]) + .with_collator(|n| n.with_name("collator-2000")) + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let para_node = network.get_node("collator-2000")?; + + let relay_client: OnlineClient = relay_node.wait_client().await?; + let alice = dev::alice(); + + relay_client + .tx() + .sign_and_submit_then_watch_default( + &rococo::tx() + .sudo() + .sudo(rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( + rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { + core: 0, + begin: 0, + assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))], + end_hint: None + } + )), + &alice, + ) + .await? + .wait_for_finalized_success() + .await?; + + log::info!("1 more core assigned to the parachain"); + + let para_id = ParaId::from(2000); + // Expect the parachain to be making normal progress, 1 candidate backed per relay chain block. + assert_para_throughput(&relay_client, 15, [(para_id, 13..16)].into_iter().collect()).await?; + + let para_client = para_node.wait_client().await?; + // Assert the parachain finalized block height is also on par with the number of backed + // candidates. + assert_finalized_block_height(¶_client, 12..16).await?; + + // Sanity check that indeed the parachain has two assigned cores. + let cq = relay_client + .runtime_api() + .at_latest() + .await? 
+ .call_raw::>>("ParachainHost_claim_queue", None) + .await?; + + assert_eq!( + cq, + [ + (CoreIndex(0), [para_id, para_id].into_iter().collect()), + (CoreIndex(1), [para_id, para_id].into_iter().collect()), + ] + .into_iter() + .collect() + ); + + log::info!("Test finished successfully"); + + Ok(()) +} diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs index bb296a419df1..9cfd5db5a096 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs @@ -1,8 +1,6 @@ // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 -#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")] -pub mod rococo {} - -mod helpers; +mod basic_3cores; +mod doesnt_break_parachains; mod slot_based_3cores; diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs index 41ec1250ecc4..aa9f41320135 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs @@ -6,14 +6,14 @@ use anyhow::anyhow; -use super::{ - helpers::assert_para_throughput, - rococo, +use crate::helpers::{ + assert_finalized_block_height, assert_para_throughput, rococo, rococo::runtime_types::{ pallet_broker::coretime_interface::CoreAssignment, polkadot_runtime_parachains::assigner_coretime::PartsOf57600, }, }; +use polkadot_primitives::Id as ParaId; use serde_json::json; use subxt::{OnlineClient, PolkadotConfig}; use subxt_signer::sr25519::dev; @@ -63,7 +63,6 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> { .with_default_command("test-parachain") .with_default_image(images.cumulus.as_str()) .with_chain("elastic-scaling-mvp") - .with_default_args(vec![("--experimental-use-slot-based").into()]) .with_default_args(vec![ ("--experimental-use-slot-based").into(), ("-lparachain=debug,aura=debug").into(), @@ -93,6 +92,8 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> { let network = spawn_fn(config).await?; let relay_node = network.get_node("validator-0")?; + let para_node_elastic = network.get_node("collator-elastic")?; + let para_node_elastic_mvp = network.get_node("collator-elastic-mvp")?; let relay_client: OnlineClient = relay_node.wait_client().await?; let alice = dev::alice(); @@ -156,10 +157,17 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> { assert_para_throughput( &relay_client, 15, - [(2100, 39..46), (2200, 39..46)].into_iter().collect(), + [(ParaId::from(2100), 39..46), (ParaId::from(2200), 39..46)] + .into_iter() + .collect(), ) .await?; + // Assert the parachain finalized block height is also on par with the number of backed + // candidates. + assert_finalized_block_height(¶_node_elastic.wait_client().await?, 36..46).await?; + assert_finalized_block_height(¶_node_elastic_mvp.wait_client().await?, 36..46).await?; + log::info!("Test finished successfully"); Ok(()) diff --git a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs new file mode 100644 index 000000000000..14f86eb130f7 --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs @@ -0,0 +1,95 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Test we are producing 6-second parachain blocks with async backing.
+
+use anyhow::anyhow;
+
+use crate::helpers::{assert_finalized_block_height, assert_para_throughput};
+use polkadot_primitives::Id as ParaId;
+use serde_json::json;
+use subxt::{OnlineClient, PolkadotConfig};
+use zombienet_sdk::NetworkConfigBuilder;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn async_backing_6_seconds_rate_test() -> Result<(), anyhow::Error> {
+ let _ = env_logger::try_init_from_env(
+ env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
+ );
+
+ let images = zombienet_sdk::environment::get_images_from_env();
+
+ let config = NetworkConfigBuilder::new()
+ .with_relaychain(|r| {
+ let r = r
+ .with_chain("rococo-local")
+ .with_default_command("polkadot")
+ .with_default_image(images.polkadot.as_str())
+ .with_default_args(vec![("-lparachain=debug").into()])
+ .with_genesis_overrides(json!({
+ "configuration": {
+ "config": {
+ "scheduler_params": {
+ "group_rotation_frequency": 4,
+ "lookahead": 2,
+ "max_candidate_depth": 3,
+ "allowed_ancestry_len": 2,
+ },
+ }
+ }
+ }))
+ .with_node(|node| node.with_name("validator-0"));
+
+ (1..12)
+ .fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}"))))
+ })
+ .with_parachain(|p| {
+ p.with_id(2000)
+ .with_default_command("adder-collator")
+ .with_default_image(
+ std::env::var("COL_IMAGE")
+ .unwrap_or("docker.io/paritypr/colander:latest".to_string())
+ .as_str(),
+ )
+ .cumulus_based(false)
+ .with_default_args(vec![("-lparachain=debug").into()])
+ .with_collator(|n| n.with_name("collator-adder-2000"))
+ })
+ .with_parachain(|p| {
+ p.with_id(2001)
+ .with_default_command("polkadot-parachain")
+ .with_default_image(images.cumulus.as_str())
+ .with_default_args(vec![("-lparachain=debug,aura=debug").into()])
+ .with_collator(|n| n.with_name("collator-2001"))
+ })
+ .build()
+ .map_err(|e| {
+ let errs = e.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" ");
+ anyhow!("config errs: {errs}")
+ })?;
+
+ let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
+ let network = spawn_fn(config).await?;
+
+ let relay_node = network.get_node("validator-0")?;
+ let para_node_2001 = network.get_node("collator-2001")?;
+
+ let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
+
+ assert_para_throughput(
+ &relay_client,
+ 15,
+ [(ParaId::from(2000), 11..16), (ParaId::from(2001), 11..16)]
+ .into_iter()
+ .collect(),
+ )
+ .await?;
+
+ // Assert the parachain finalized block height is also on par with the number of backed
+ // candidates. We can only do this for the collator based on cumulus.
+ assert_finalized_block_height(&para_node_2001.wait_client().await?, 10..16).await?;
+
+ log::info!("Test finished successfully");
+
+ Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/functional/mod.rs b/polkadot/zombienet-sdk-tests/tests/functional/mod.rs
new file mode 100644
index 000000000000..ecdab38e1d28
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/functional/mod.rs
@@ -0,0 +1,5 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0 + +mod async_backing_6_seconds_rate; +mod sync_backing; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs new file mode 100644 index 000000000000..6da45e284491 --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs @@ -0,0 +1,74 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Test we are producing 12-second parachain blocks if using an old collator, pre async-backing. + +use anyhow::anyhow; + +use crate::helpers::{assert_finalized_block_height, assert_para_throughput}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use subxt::{OnlineClient, PolkadotConfig}; +use zombienet_sdk::NetworkConfigBuilder; + +#[tokio::test(flavor = "multi_thread")] +async fn sync_backing_test() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let images = zombienet_sdk::environment::get_images_from_env(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "group_rotation_frequency": 4, + }, + } + } + })) + .with_node(|node| node.with_name("validator-0")); + + (1..5).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + p.with_id(2000) + .with_default_command("polkadot-parachain") + // This must be a very old polkadot-parachain image, pre async backing + .with_default_image(images.cumulus.as_str()) + .with_default_args(vec![("-lparachain=debug,aura=debug").into()]) + .with_collator(|n| n.with_name("collator-2000")) + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let para_node = network.get_node("collator-2000")?; + + let relay_client: OnlineClient = relay_node.wait_client().await?; + + assert_para_throughput(&relay_client, 15, [(ParaId::from(2000), 5..9)].into_iter().collect()) + .await?; + + // Assert the parachain finalized block height is also on par with the number of backed + // candidates. + assert_finalized_block_height(¶_node.wait_client().await?, 5..9).await?; + + log::info!("Test finished successfully"); + + Ok(()) +} diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs b/polkadot/zombienet-sdk-tests/tests/helpers/mod.rs similarity index 65% rename from polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs rename to polkadot/zombienet-sdk-tests/tests/helpers/mod.rs index 7d4ad4a1dd8b..470345ca4d62 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs +++ b/polkadot/zombienet-sdk-tests/tests/helpers/mod.rs @@ -1,19 +1,22 @@ // Copyright (C) Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 -use super::rococo; +use polkadot_primitives::Id as ParaId; use std::{collections::HashMap, ops::Range}; use subxt::{OnlineClient, PolkadotConfig}; +#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")] +pub mod rococo {} + // Helper function for asserting the throughput of parachains (total number of backed candidates in // a window of relay chain blocks), after the first session change. pub async fn assert_para_throughput( relay_client: &OnlineClient, stop_at: u32, - expected_candidate_ranges: HashMap>, + expected_candidate_ranges: HashMap>, ) -> Result<(), anyhow::Error> { let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?; - let mut candidate_count: HashMap = HashMap::new(); + let mut candidate_count: HashMap = HashMap::new(); let mut current_block_count = 0; let mut had_first_session_change = false; @@ -31,7 +34,7 @@ pub async fn assert_para_throughput( current_block_count += 1; for event in events.find::() { - *(candidate_count.entry(event?.0.descriptor.para_id.0).or_default()) += 1; + *(candidate_count.entry(event?.0.descriptor.para_id.0.into()).or_default()) += 1; } } @@ -58,3 +61,21 @@ pub async fn assert_para_throughput( Ok(()) } + +// Helper function for retrieving the latest finalized block height and asserting it's within a +// range. +pub async fn assert_finalized_block_height( + client: &OnlineClient, + expected_range: Range, +) -> Result<(), anyhow::Error> { + if let Some(block) = client.blocks().subscribe_finalized().await?.next().await { + let height = block?.number(); + log::info!("Finalized block number {height}"); + + assert!( + expected_range.contains(&height), + "Finalized block number {height} not within range {expected_range:?}" + ); + } + Ok(()) +} diff --git a/polkadot/zombienet-sdk-tests/tests/lib.rs b/polkadot/zombienet-sdk-tests/tests/lib.rs index 977e0f90b1c9..9feb9775e450 100644 --- a/polkadot/zombienet-sdk-tests/tests/lib.rs +++ b/polkadot/zombienet-sdk-tests/tests/lib.rs @@ -1,7 +1,12 @@ // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 +#[cfg(feature = "zombie-metadata")] +mod helpers; + #[cfg(feature = "zombie-metadata")] mod elastic_scaling; #[cfg(feature = "zombie-metadata")] +mod functional; +#[cfg(feature = "zombie-metadata")] mod smoke; diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs index 2da2436a1111..59a71a83e01e 100644 --- a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs @@ -10,21 +10,24 @@ //! normal parachain runtime WILL mess things up. 
use anyhow::anyhow; -#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")] -pub mod rococo {} #[subxt::subxt(runtime_metadata_path = "metadata-files/coretime-rococo-local.scale")] mod coretime_rococo {} -use rococo::runtime_types::{ - staging_xcm::v4::{ - asset::{Asset, AssetId, Assets, Fungibility}, - junction::Junction, - junctions::Junctions, - location::Location, +use crate::helpers::rococo::{ + self as rococo_api, + runtime_types::{ + polkadot_parachain_primitives::primitives, + staging_xcm::v4::{ + asset::{Asset, AssetId, Assets, Fungibility}, + junction::Junction, + junctions::Junctions, + location::Location, + }, + xcm::{VersionedAssets, VersionedLocation}, }, - xcm::{VersionedAssets, VersionedLocation}, }; + use serde_json::json; use std::{fmt::Display, sync::Arc}; use subxt::{events::StaticEvent, utils::AccountId32, OnlineClient, PolkadotConfig}; @@ -41,8 +44,6 @@ use coretime_rococo::{ }, }; -use rococo::{self as rococo_api, runtime_types::polkadot_parachain_primitives::primitives}; - type CoretimeRuntimeCall = coretime_api::runtime_types::coretime_rococo_runtime::RuntimeCall; type CoretimeUtilityCall = coretime_api::runtime_types::pallet_utility::pallet::Call; type CoretimeBrokerCall = coretime_api::runtime_types::pallet_broker::pallet::Call; diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml deleted file mode 100644 index 611978a33a5f..000000000000 --- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml +++ /dev/null @@ -1,49 +0,0 @@ -[settings] -timeout = 1000 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] - max_candidate_depth = 6 - allowed_ancestry_len = 2 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] - max_validators_per_core = 1 - num_cores = 3 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] - max_approval_coalesce_count = 5 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" -default_command = "polkadot" - - [relaychain.default_resources] - limits = { memory = "4G", cpu = "3" } - requests = { memory = "4G", cpu = "3" } - - [[relaychain.node_groups]] - name = "elastic-validator" - count = 5 - args = [ "-lparachain=debug,parachain::candidate-backing=trace,parachain::provisioner=trace,parachain::prospective-parachains=trace,runtime=debug"] - -{% for id in range(2000,2002) %} -[[parachains]] -id = {{id}} -addToGenesis = true - [parachains.default_resources] - limits = { memory = "4G", cpu = "3" } - requests = { memory = "4G", cpu = "3" } - - [parachains.collator] - name = "some-parachain" - image = "{{COL_IMAGE}}" - command = "adder-collator" - args = ["-lparachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug"] - -{% endfor %} - -# This represents the layout of the adder collator block header. 
-[types.Header] -number = "u64" -parent_hash = "Hash" -post_state = "Hash" diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl deleted file mode 100644 index d47ef8f415f7..000000000000 --- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl +++ /dev/null @@ -1,28 +0,0 @@ -Description: Test with adder collator using 3 cores and async backing -Network: ./0001-basic-3cores-6s-blocks.toml -Creds: config - -# Check authority status. -elastic-validator-0: reports node_roles is 4 -elastic-validator-1: reports node_roles is 4 -elastic-validator-2: reports node_roles is 4 -elastic-validator-3: reports node_roles is 4 -elastic-validator-4: reports node_roles is 4 - - -# Register 2 extra cores to this some-parachain. -elastic-validator-0: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds -elastic-validator-0: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds - -# Wait for 20 relay chain blocks -elastic-validator-0: reports substrate_block_height{status="best"} is at least 20 within 600 seconds - -# Non elastic parachain should progress normally -some-parachain-1: count of log lines containing "Parachain velocity: 1" is at least 5 within 20 seconds -# Sanity -some-parachain-1: count of log lines containing "Parachain velocity: 2" is 0 - -# Parachain should progress 3 blocks per relay chain block ideally, however CI might not be -# the most performant environment so we'd just use a lower bound of 2 blocks per RCB -elastic-validator-0: parachain 2000 block height is at least 20 within 200 seconds - diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml deleted file mode 100644 index 046d707cc1e8..000000000000 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml +++ /dev/null @@ -1,40 +0,0 @@ -[settings] -timeout = 1000 -bootnode = true - -[relaychain.genesis.runtimeGenesis.patch.configuration.config] - needed_approvals = 4 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] - max_validators_per_core = 2 - num_cores = 2 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" -default_command = "polkadot" - -[relaychain.default_resources] -limits = { memory = "4G", cpu = "2" } -requests = { memory = "2G", cpu = "1" } - - [[relaychain.nodes]] - name = "alice" - validator = "true" - - [[relaychain.node_groups]] - name = "validator" - count = 3 - args = [ "-lparachain=debug,runtime=debug"] - -[[parachains]] -id = 2000 -default_command = "polkadot-parachain" -add_to_genesis = false -register_para = true -onboard_as_parachain = false - - [parachains.collator] - name = "collator2000" - command = "polkadot-parachain" - args = [ "-lparachain=debug", "--experimental-use-slot-based" ] diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl deleted file mode 100644 index 0cfc29f532d1..000000000000 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl +++ /dev/null @@ -1,20 +0,0 @@ -Description: Test that a paraid acquiring multiple cores does not brick itself if ElasticScalingMVP feature is 
enabled in genesis -Network: ./0002-elastic-scaling-doesnt-break-parachains.toml -Creds: config - -# Check authority status. -validator: reports node_roles is 4 - -validator: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds - -# Ensure parachain was able to make progress. -validator: parachain 2000 block height is at least 10 within 200 seconds - -# Register the second core assigned to this parachain. -alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds -alice: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds - -validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds - -# Ensure parachain is now making progress. -validator: parachain 2000 block height is at least 30 within 200 seconds diff --git a/polkadot/zombienet_tests/elastic_scaling/assign-core.js b/polkadot/zombienet_tests/elastic_scaling/assign-core.js deleted file mode 120000 index eeb6402c06f5..000000000000 --- a/polkadot/zombienet_tests/elastic_scaling/assign-core.js +++ /dev/null @@ -1 +0,0 @@ -../assign-core.js \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml deleted file mode 100644 index b776622fdce3..000000000000 --- a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml +++ /dev/null @@ -1,54 +0,0 @@ -[settings] -timeout = 1000 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" - -[relaychain.genesis.runtimeGenesis.patch.configuration.config] - needed_approvals = 4 - relay_vrf_modulo_samples = 6 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] - max_candidate_depth = 3 - allowed_ancestry_len = 2 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] - lookahead = 2 - group_rotation_frequency = 4 - - -[relaychain.default_resources] -limits = { memory = "4G", cpu = "2" } -requests = { memory = "2G", cpu = "1" } - - [[relaychain.node_groups]] - name = "alice" - args = [ "-lparachain=debug" ] - count = 12 - -[[parachains]] -id = 2000 -addToGenesis = true -genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=1" - - [parachains.collator] - name = "collator01" - image = "{{COL_IMAGE}}" - command = "undying-collator" - args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=1", "--parachain-id=2000"] - -[[parachains]] -id = 2001 -cumulus_based = true - - [parachains.collator] - name = "collator02" - image = "{{CUMULUS_IMAGE}}" - command = "polkadot-parachain" - args = ["-lparachain=debug"] - -[types.Header] -number = "u64" -parent_hash = "Hash" -post_state = "Hash" \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.zndsl b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.zndsl deleted file mode 100644 index 0d01af82833e..000000000000 --- a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.zndsl +++ /dev/null @@ -1,20 +0,0 @@ -Description: Test we are producing blocks at 6 seconds clip -Network: ./0011-async-backing-6-seconds-rate.toml -Creds: config - -# Check authority status. -alice: reports node_roles is 4 - -# Ensure parachains are registered. 
-alice: parachain 2000 is registered within 60 seconds -alice: parachain 2001 is registered within 60 seconds - -# Ensure parachains made progress. -alice: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds - -# This parachains should produce blocks at 6s clip, let's assume an 8s rate, allowing for -# some slots to be missed on slower machines -alice: parachain 2000 block height is at least 30 within 240 seconds -# This should already have produced the needed blocks -alice: parachain 2001 block height is at least 30 within 6 seconds - diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.toml b/polkadot/zombienet_tests/functional/0017-sync-backing.toml deleted file mode 100644 index 2550054c8dad..000000000000 --- a/polkadot/zombienet_tests/functional/0017-sync-backing.toml +++ /dev/null @@ -1,48 +0,0 @@ -[settings] -timeout = 1000 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] - max_candidate_depth = 0 - allowed_ancestry_len = 0 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] - lookahead = 2 - group_rotation_frequency = 4 - -[relaychain.default_resources] -limits = { memory = "4G", cpu = "2" } -requests = { memory = "2G", cpu = "1" } - - [[relaychain.node_groups]] - name = "alice" - args = [ "-lparachain=debug" ] - count = 10 - -[[parachains]] -id = 2000 -addToGenesis = true - - [parachains.collator] - name = "collator01" - image = "{{COL_IMAGE}}" - command = "adder-collator" - args = ["-lparachain=debug"] - -[[parachains]] -id = 2001 -cumulus_based = true - - [parachains.collator] - name = "collator02" - image = "{{CUMULUS_IMAGE}}" - command = "polkadot-parachain" - args = ["-lparachain=debug"] - -[types.Header] -number = "u64" -parent_hash = "Hash" -post_state = "Hash" \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl b/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl deleted file mode 100644 index a53de784b2d1..000000000000 --- a/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl +++ /dev/null @@ -1,22 +0,0 @@ -Description: Test we are producing 12-second parachain blocks if sync backing is configured -Network: ./0017-sync-backing.toml -Creds: config - -# Check authority status. -alice: reports node_roles is 4 - -# Ensure parachains are registered. -alice: parachain 2000 is registered within 60 seconds -alice: parachain 2001 is registered within 60 seconds - -# Ensure parachains made progress. -alice: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds - -# This parachains should produce blocks at 12s clip, let's assume an 14s rate, allowing for -# some slots to be missed on slower machines -alice: parachain 2000 block height is at least 21 within 300 seconds -alice: parachain 2000 block height is lower than 25 within 2 seconds - -# This should already have produced the needed blocks -alice: parachain 2001 block height is at least 21 within 10 seconds -alice: parachain 2001 block height is lower than 25 within 2 seconds diff --git a/prdoc/pr_6820.prdoc b/prdoc/pr_6820.prdoc new file mode 100644 index 000000000000..85249a33341d --- /dev/null +++ b/prdoc/pr_6820.prdoc @@ -0,0 +1,8 @@ +title: Add XCM benchmarks to collectives-westend +doc: +- audience: Runtime Dev + description: Collectives-westend was using `FixedWeightBounds`, meaning the same + weight per instruction. 
Added proper benchmarks. +crates: +- name: collectives-westend-runtime + bump: patch diff --git a/prdoc/pr_6855.prdoc b/prdoc/pr_6855.prdoc new file mode 100644 index 000000000000..a665115ce6c7 --- /dev/null +++ b/prdoc/pr_6855.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Snowbridge - Support bridging native ETH + +doc: + - audience: Runtime User + description: + Support Native ETH as an asset type instead of only supporting WETH. WETH is still supported, but adds + support for ETH in the inbound and outbound routers. + +crates: + - name: snowbridge-router-primitives + bump: minor + - name: snowbridge-pallet-inbound-queue-fixtures + bump: minor diff --git a/prdoc/pr_7040.prdoc b/prdoc/pr_7040.prdoc new file mode 100644 index 000000000000..f88e96a70371 --- /dev/null +++ b/prdoc/pr_7040.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: '[pallet-node-authorization] Migrate to using frame umbrella crate' + +doc: + - audience: Runtime Dev + description: This PR migrates the pallet-node-authorization to use the frame umbrella crate. This + is part of the ongoing effort to migrate all pallets to use the frame umbrella crate. + The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504). + +crates: + - name: pallet-node-authorization + bump: minor + - name: polkadot-sdk-frame + bump: minor diff --git a/prdoc/pr_7046.prdoc b/prdoc/pr_7046.prdoc new file mode 100644 index 000000000000..113cc9c7aac5 --- /dev/null +++ b/prdoc/pr_7046.prdoc @@ -0,0 +1,7 @@ +title: adding warning when using default substrateWeight in production +doc: +- audience: Runtime Dev + description: |- + PR for #3581 + Added a cfg to show a deprecated warning message when using std +crates: [] diff --git a/prdoc/pr_7048.prdoc b/prdoc/pr_7048.prdoc new file mode 100644 index 000000000000..0f3856bc1287 --- /dev/null +++ b/prdoc/pr_7048.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: '[pallet-salary] Migrate to using frame umbrella crate' + +doc: + - audience: Runtime Dev + description: > + This PR migrates the `pallet-salary` to use the FRAME umbrella crate. + This is part of the ongoing effort to migrate all pallets to use the FRAME umbrella crate. + The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504). + +crates: + - name: pallet-salary + bump: minor + - name: polkadot-sdk-frame + bump: minor diff --git a/prdoc/pr_7069.prdoc b/prdoc/pr_7069.prdoc new file mode 100644 index 000000000000..a0fc5cafb020 --- /dev/null +++ b/prdoc/pr_7069.prdoc @@ -0,0 +1,10 @@ +title: Fix defensive! 
macro to be used in umbrella crates +doc: +- audience: Runtime Dev + description: |- + PR for #7054 + + Replaced frame_support with $crate from @gui1117 's suggestion to fix the dependency issue +crates: +- name: frame-support + bump: patch diff --git a/prdoc/pr_7073.prdoc b/prdoc/pr_7073.prdoc new file mode 100644 index 000000000000..3bcd129d0317 --- /dev/null +++ b/prdoc/pr_7073.prdoc @@ -0,0 +1,16 @@ +title: Implement NetworkRequest for litep2p +doc: +- audience: Node Dev + description: |- + # Description + + Implements NetworkRequest::request for litep2p that we need for networking benchmarks + + + ## Review Notes + + Duplicates implementation for NetworkService + https://github.com/paritytech/polkadot-sdk/blob/5bf9dd2aa9bf944434203128783925bdc2ad8c01/substrate/client/network/src/service.rs#L1186-L1205 +crates: +- name: sc-network + bump: patch diff --git a/prdoc/pr_7074.prdoc b/prdoc/pr_7074.prdoc new file mode 100644 index 000000000000..d49e5f8d831f --- /dev/null +++ b/prdoc/pr_7074.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Unset SKIP_WASM_BUILD=1 for aarch64 binaries release + +doc: + - audience: [ Node Dev, Runtime Dev] + description: + Fix the release pipeline environment by unsetting SKIP_WASM_BUILD=1 + so that aarch64 binaries are built so that they contain runtimes + accordingly. + +crates: [ ] diff --git a/substrate/client/allocator/src/freeing_bump.rs b/substrate/client/allocator/src/freeing_bump.rs index 144c0764540d..405916adc3c3 100644 --- a/substrate/client/allocator/src/freeing_bump.rs +++ b/substrate/client/allocator/src/freeing_bump.rs @@ -182,7 +182,7 @@ const NIL_MARKER: u32 = u32::MAX; enum Link { /// Nil, denotes that there is no next element. Nil, - /// Link to the next element represented as a pointer to the a header. + /// Link to the next element represented as a pointer to the header. Ptr(u32), } diff --git a/substrate/client/api/src/proof_provider.rs b/substrate/client/api/src/proof_provider.rs index 7f60f856ae80..9043d3482723 100644 --- a/substrate/client/api/src/proof_provider.rs +++ b/substrate/client/api/src/proof_provider.rs @@ -82,7 +82,7 @@ pub trait ProofProvider { ) -> sp_blockchain::Result>; /// Verify read storage proof for a set of keys. - /// Returns collected key-value pairs and a the nested state + /// Returns collected key-value pairs and the nested state /// depth of current iteration or 0 if completed. 
fn verify_range_proof( &self, diff --git a/substrate/client/network/benches/notifications_protocol.rs b/substrate/client/network/benches/notifications_protocol.rs index 40a810d616b5..a406e328d5a6 100644 --- a/substrate/client/network/benches/notifications_protocol.rs +++ b/substrate/client/network/benches/notifications_protocol.rs @@ -36,19 +36,16 @@ use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::runtime; use tokio::{sync::Mutex, task::JoinHandle}; -const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ - // (Exponent of size, number of notifications, label) - (6, 100, "64B"), - (9, 100, "512B"), - (12, 100, "4KB"), - (15, 100, "64KB"), -]; -const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ - // (Exponent of size, number of notifications, label) - (18, 10, "256KB"), - (21, 10, "2MB"), - (24, 10, "16MB"), - (27, 10, "128MB"), +const NUMBER_OF_NOTIFICATIONS: usize = 100; +const PAYLOAD: &[(u32, &'static str)] = &[ + // (Exponent of size, label) + (6, "64B"), + (9, "512B"), + (12, "4KB"), + (15, "64KB"), + (18, "256KB"), + (21, "2MB"), + (24, "16MB"), ]; const MAX_SIZE: u64 = 2u64.pow(30); @@ -156,12 +153,19 @@ where tokio::select! { Some(event) = notification_service1.next_event() => { if let NotificationEvent::NotificationStreamOpened { .. } = event { - break; + // Send a 32MB notification to preheat the network + notification_service1.send_async_notification(&peer_id2, vec![0; 2usize.pow(25)]).await.unwrap(); } }, Some(event) = notification_service2.next_event() => { - if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event { - result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); + match event { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); + }, + NotificationEvent::NotificationReceived { .. 
} => { + break; + } + _ => {} } }, } @@ -255,64 +259,53 @@ async fn run_with_backpressure(setup: Arc, size: usize, limit: usize let _ = tokio::join!(network1, network2); } -fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { +fn run_benchmark(c: &mut Criterion) { let rt = tokio::runtime::Runtime::new().unwrap(); let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group(group); + let mut group = c.benchmark_group("notifications_protocol"); group.plot_config(plot_config); + group.sample_size(10); let libp2p_setup = setup_workers::>(&rt); - for &(exponent, limit, label) in payload.iter() { + for &(exponent, label) in PAYLOAD.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(limit as u64 * size as u64)); - group.bench_with_input( - BenchmarkId::new("libp2p/serially", label), - &(size, limit), - |b, &(size, limit)| { - b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); - }, - ); + group.throughput(Throughput::Bytes(NUMBER_OF_NOTIFICATIONS as u64 * size as u64)); + group.bench_with_input(BenchmarkId::new("libp2p/serially", label), &size, |b, &size| { + b.to_async(&rt) + .iter(|| run_serially(Arc::clone(&libp2p_setup), size, NUMBER_OF_NOTIFICATIONS)); + }); group.bench_with_input( BenchmarkId::new("libp2p/with_backpressure", label), - &(size, limit), - |b, &(size, limit)| { - b.to_async(&rt) - .iter(|| run_with_backpressure(Arc::clone(&libp2p_setup), size, limit)); + &size, + |b, &size| { + b.to_async(&rt).iter(|| { + run_with_backpressure(Arc::clone(&libp2p_setup), size, NUMBER_OF_NOTIFICATIONS) + }); }, ); } drop(libp2p_setup); let litep2p_setup = setup_workers::(&rt); - for &(exponent, limit, label) in payload.iter() { + for &(exponent, label) in PAYLOAD.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(limit as u64 * size as u64)); - group.bench_with_input( - BenchmarkId::new("litep2p/serially", label), - &(size, limit), - |b, &(size, limit)| { - b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); - }, - ); + group.throughput(Throughput::Bytes(NUMBER_OF_NOTIFICATIONS as u64 * size as u64)); + group.bench_with_input(BenchmarkId::new("litep2p/serially", label), &size, |b, &size| { + b.to_async(&rt) + .iter(|| run_serially(Arc::clone(&litep2p_setup), size, NUMBER_OF_NOTIFICATIONS)); + }); group.bench_with_input( BenchmarkId::new("litep2p/with_backpressure", label), - &(size, limit), - |b, &(size, limit)| { - b.to_async(&rt) - .iter(|| run_with_backpressure(Arc::clone(&litep2p_setup), size, limit)); + &size, + |b, &size| { + b.to_async(&rt).iter(|| { + run_with_backpressure(Arc::clone(&litep2p_setup), size, NUMBER_OF_NOTIFICATIONS) + }); }, ); } drop(litep2p_setup); } -fn run_benchmark_with_small_payload(c: &mut Criterion) { - run_benchmark(c, SMALL_PAYLOAD, "notifications_protocol/small_payload"); -} - -fn run_benchmark_with_large_payload(c: &mut Criterion) { - run_benchmark(c, LARGE_PAYLOAD, "notifications_protocol/large_payload"); -} - -criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); +criterion_group!(benches, run_benchmark); criterion_main!(benches); diff --git a/substrate/client/network/benches/request_response_protocol.rs b/substrate/client/network/benches/request_response_protocol.rs index 85381112b753..97c6d72ddf1e 100644 --- a/substrate/client/network/benches/request_response_protocol.rs +++ 
b/substrate/client/network/benches/request_response_protocol.rs @@ -37,19 +37,16 @@ use substrate_test_runtime_client::runtime; use tokio::{sync::Mutex, task::JoinHandle}; const MAX_SIZE: u64 = 2u64.pow(30); -const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ - // (Exponent of size, number of requests, label) - (6, 100, "64B"), - (9, 100, "512B"), - (12, 100, "4KB"), - (15, 100, "64KB"), -]; -const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ - // (Exponent of size, number of requests, label) - (18, 10, "256KB"), - (21, 10, "2MB"), - (24, 10, "16MB"), - (27, 10, "128MB"), +const NUMBER_OF_REQUESTS: usize = 100; +const PAYLOAD: &[(u32, &'static str)] = &[ + // (Exponent of size, label) + (6, "64B"), + (9, "512B"), + (12, "4KB"), + (15, "64KB"), + (18, "256KB"), + (21, "2MB"), + (24, "16MB"), ]; pub fn create_network_worker() -> ( @@ -154,6 +151,21 @@ where let handle1 = tokio::spawn(worker1.run()); let handle2 = tokio::spawn(worker2.run()); + let _ = tokio::spawn({ + let rx2 = rx2.clone(); + + async move { + let req = rx2.recv().await.unwrap(); + req.pending_response + .send(OutgoingResponse { + result: Ok(vec![0; 2usize.pow(25)]), + reputation_changes: vec![], + sent_feedback: None, + }) + .unwrap(); + } + }); + let ready = tokio::spawn({ let network_service1 = Arc::clone(&network_service1); @@ -165,6 +177,16 @@ where network_service2.listen_addresses()[0].clone() }; network_service1.add_known_address(peer_id2, listen_address2.into()); + let _ = network_service1 + .request( + peer_id2.into(), + "/request-response/1".into(), + vec![0; 2], + None, + IfDisconnected::TryConnect, + ) + .await + .unwrap(); } }); @@ -210,8 +232,8 @@ async fn run_serially(setup: Arc, size: usize, limit: usize) { async move { loop { tokio::select! { - res = rx2.recv() => { - let IncomingRequest { pending_response, .. } = res.unwrap(); + req = rx2.recv() => { + let IncomingRequest { pending_response, .. 
} = req.unwrap(); pending_response.send(OutgoingResponse { result: Ok(vec![0; size]), reputation_changes: vec![], @@ -269,49 +291,35 @@ async fn run_with_backpressure(setup: Arc, size: usize, limit: usize let _ = tokio::join!(network1, network2); } -fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { +fn run_benchmark(c: &mut Criterion) { let rt = tokio::runtime::Runtime::new().unwrap(); let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group(group); + let mut group = c.benchmark_group("request_response_protocol"); group.plot_config(plot_config); + group.sample_size(10); let libp2p_setup = setup_workers::>(&rt); - for &(exponent, limit, label) in payload.iter() { + for &(exponent, label) in PAYLOAD.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(limit as u64 * size as u64)); - group.bench_with_input( - BenchmarkId::new("libp2p/serially", label), - &(size, limit), - |b, &(size, limit)| { - b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); - }, - ); + group.throughput(Throughput::Bytes(NUMBER_OF_REQUESTS as u64 * size as u64)); + group.bench_with_input(BenchmarkId::new("libp2p/serially", label), &size, |b, &size| { + b.to_async(&rt) + .iter(|| run_serially(Arc::clone(&libp2p_setup), size, NUMBER_OF_REQUESTS)); + }); } drop(libp2p_setup); - // TODO: NetworkRequest::request should be implemented for Litep2pNetworkService let litep2p_setup = setup_workers::(&rt); - // for &(exponent, limit, label) in payload.iter() { - // let size = 2usize.pow(exponent); - // group.throughput(Throughput::Bytes(limit as u64 * size as u64)); - // group.bench_with_input( - // BenchmarkId::new("litep2p/serially", label), - // &(size, limit), - // |b, &(size, limit)| { - // b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); - // }, - // ); - // } + for &(exponent, label) in PAYLOAD.iter() { + let size = 2usize.pow(exponent); + group.throughput(Throughput::Bytes(NUMBER_OF_REQUESTS as u64 * size as u64)); + group.bench_with_input(BenchmarkId::new("litep2p/serially", label), &size, |b, &size| { + b.to_async(&rt) + .iter(|| run_serially(Arc::clone(&litep2p_setup), size, NUMBER_OF_REQUESTS)); + }); + } drop(litep2p_setup); } -fn run_benchmark_with_small_payload(c: &mut Criterion) { - run_benchmark(c, SMALL_PAYLOAD, "request_response_benchmark/small_payload"); -} - -fn run_benchmark_with_large_payload(c: &mut Criterion) { - run_benchmark(c, LARGE_PAYLOAD, "request_response_benchmark/large_payload"); -} - -criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); +criterion_group!(benches, run_benchmark); criterion_main!(benches); diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs index d270e90efdf5..2d4a117d1563 100644 --- a/substrate/client/network/src/litep2p/service.rs +++ b/substrate/client/network/src/litep2p/service.rs @@ -28,8 +28,8 @@ use crate::{ peer_store::PeerStoreProvider, service::out_events, Event, IfDisconnected, NetworkDHTProvider, NetworkEventStream, NetworkPeers, NetworkRequest, - NetworkSigner, NetworkStateInfo, NetworkStatus, NetworkStatusProvider, ProtocolName, - RequestFailure, Signature, + NetworkSigner, NetworkStateInfo, NetworkStatus, NetworkStatusProvider, OutboundFailure, + ProtocolName, RequestFailure, Signature, }; use codec::DecodeAll; @@ -526,13 +526,23 @@ impl NetworkStateInfo for Litep2pNetworkService { 
impl NetworkRequest for Litep2pNetworkService { async fn request( &self, - _target: PeerId, - _protocol: ProtocolName, - _request: Vec, - _fallback_request: Option<(Vec, ProtocolName)>, - _connect: IfDisconnected, + target: PeerId, + protocol: ProtocolName, + request: Vec, + fallback_request: Option<(Vec, ProtocolName)>, + connect: IfDisconnected, ) -> Result<(Vec, ProtocolName), RequestFailure> { - unimplemented!(); + let (tx, rx) = oneshot::channel(); + + self.start_request(target, protocol, request, fallback_request, tx, connect); + + match rx.await { + Ok(v) => v, + // The channel can only be closed if the network worker no longer exists. If the + // network worker no longer exists, then all connections to `target` are necessarily + // closed, and we legitimately report this situation as a "ConnectionClosed". + Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)), + } } fn start_request( diff --git a/substrate/frame/balances/src/impl_currency.rs b/substrate/frame/balances/src/impl_currency.rs index 23feb46b72ca..bc7e77c191db 100644 --- a/substrate/frame/balances/src/impl_currency.rs +++ b/substrate/frame/balances/src/impl_currency.rs @@ -632,7 +632,7 @@ where /// /// This is `Polite` and thus will not repatriate any funds which would lead the total balance /// to be less than the frozen amount. Returns `Ok` with the actual amount of funds moved, - /// which may be less than `value` since the operation is done an a `BestEffort` basis. + /// which may be less than `value` since the operation is done on a `BestEffort` basis. fn repatriate_reserved( slashed: &T::AccountId, beneficiary: &T::AccountId, diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs index 64f93b22cf1b..99aad0301c12 100644 --- a/substrate/frame/benchmarking/src/v1.rs +++ b/substrate/frame/benchmarking/src/v1.rs @@ -1894,7 +1894,7 @@ macro_rules! add_benchmark { /// This macro allows users to easily generate a list of benchmarks for the pallets configured /// in the runtime. /// -/// To use this macro, first create a an object to store the list: +/// To use this macro, first create an object to store the list: /// /// ```ignore /// let mut list = Vec::::new(); diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index effbb6e786c0..fa1c48ee65ed 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -616,7 +616,7 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A new term with new_members. This indicates that enough candidates existed to run - /// the election, not that enough have has been elected. The inner value must be examined + /// the election, not that enough have been elected. The inner value must be examined /// for this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond /// slashed and none were elected, whilst `EmptyTerm` means that no candidates existed to /// begin with. 
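Note on the `Litep2pNetworkService` change above: with `NetworkRequest::request` now wired through `start_request` and a oneshot channel, backend-agnostic code can issue request-response calls through the trait no matter which network backend is configured. The sketch below is illustrative only and not part of this diff; the protocol name and the helper function are assumptions.

// Hypothetical helper, assuming a "/request-response/1" protocol is registered:
// send a request and return the raw response payload. It relies only on the
// NetworkRequest trait, so it works with both the libp2p and litep2p services.
use sc_network::{IfDisconnected, NetworkRequest, PeerId, ProtocolName, RequestFailure};

async fn ping_peer<N: NetworkRequest>(
	network: &N,
	peer: PeerId,
) -> Result<Vec<u8>, RequestFailure> {
	let (payload, _protocol): (Vec<u8>, ProtocolName) = network
		.request(
			peer,
			"/request-response/1".into(),
			b"ping".to_vec(),
			None, // no fallback protocol
			IfDisconnected::TryConnect,
		)
		.await?;
	Ok(payload)
}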
diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index 174736493934..7e55ad178091 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -16,28 +16,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } [features] default = ["std"] std = [ "codec/std", - "frame-support/std", - "frame-system/std", + "frame/std", "log/std", "scale-info/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", ] try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "sp-runtime/try-runtime", + "frame/try-runtime", ] diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs index 7682b54ea0f2..3cec0d3bcb63 100644 --- a/substrate/frame/node-authorization/src/lib.rs +++ b/substrate/frame/node-authorization/src/lib.rs @@ -47,18 +47,18 @@ pub mod weights; extern crate alloc; use alloc::{collections::btree_set::BTreeSet, vec::Vec}; +use frame::{ + deps::{sp_core::OpaquePeerId as PeerId, sp_io}, + prelude::*, +}; pub use pallet::*; -use sp_core::OpaquePeerId as PeerId; -use sp_runtime::traits::StaticLookup; pub use weights::WeightInfo; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -#[frame_support::pallet] +#[frame::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; #[pallet::pallet] #[pallet::without_storage_info] @@ -111,7 +111,7 @@ pub mod pallet { StorageMap<_, Blake2_128Concat, PeerId, BTreeSet, ValueQuery>; #[pallet::genesis_config] - #[derive(frame_support::DefaultNoBound)] + #[derive(DefaultNoBound)] pub struct GenesisConfig { pub nodes: Vec<(PeerId, T::AccountId)>, } @@ -171,7 +171,7 @@ pub mod pallet { impl Hooks> for Pallet { /// Set reserved node every block. It may not be enabled depends on the offchain /// worker settings when starting the node. 
- fn offchain_worker(now: frame_system::pallet_prelude::BlockNumberFor) { + fn offchain_worker(now: BlockNumberFor) { let network_state = sp_io::offchain::network_state(); match network_state { Err(_) => log::error!( diff --git a/substrate/frame/node-authorization/src/mock.rs b/substrate/frame/node-authorization/src/mock.rs index 656d2bfa39ad..c6665a479e11 100644 --- a/substrate/frame/node-authorization/src/mock.rs +++ b/substrate/frame/node-authorization/src/mock.rs @@ -20,13 +20,11 @@ use super::*; use crate as pallet_node_authorization; -use frame_support::{derive_impl, ord_parameter_types, traits::ConstU32}; -use frame_system::EnsureSignedBy; -use sp_runtime::BuildStorage; +use frame::testing_prelude::*; type Block = frame_system::mocking::MockBlock; -frame_support::construct_runtime!( +construct_runtime!( pub enum Test { System: frame_system, @@ -61,7 +59,7 @@ pub fn test_node(id: u8) -> PeerId { PeerId(vec![id]) } -pub fn new_test_ext() -> sp_io::TestExternalities { +pub fn new_test_ext() -> TestState { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_node_authorization::GenesisConfig:: { nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)], diff --git a/substrate/frame/node-authorization/src/tests.rs b/substrate/frame/node-authorization/src/tests.rs index 4704b5adf269..cf60ab6efbd8 100644 --- a/substrate/frame/node-authorization/src/tests.rs +++ b/substrate/frame/node-authorization/src/tests.rs @@ -19,8 +19,7 @@ use super::*; use crate::mock::*; -use frame_support::{assert_noop, assert_ok}; -use sp_runtime::traits::BadOrigin; +use frame::testing_prelude::*; #[test] fn add_well_known_node_works() { diff --git a/substrate/frame/node-authorization/src/weights.rs b/substrate/frame/node-authorization/src/weights.rs index 881eeaf7a4c0..cd2935458b9d 100644 --- a/substrate/frame/node-authorization/src/weights.rs +++ b/substrate/frame/node-authorization/src/weights.rs @@ -21,8 +21,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; +use frame::weights_prelude::*; pub trait WeightInfo { fn add_well_known_node() -> Weight; diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index 658e7fec5348..849ffddf4fb3 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -236,7 +236,7 @@ pub mod pallet { Self::do_unrequest_preimage(&hash) } - /// Ensure that the a bulk of pre-images is upgraded. + /// Ensure that the bulk of pre-images is upgraded. /// /// The caller pays no fee if at least 90% of pre-images were successfully updated. #[pallet::call_index(4)] diff --git a/substrate/frame/recovery/README.md b/substrate/frame/recovery/README.md index fdaef5784fdb..39f691407046 100644 --- a/substrate/frame/recovery/README.md +++ b/substrate/frame/recovery/README.md @@ -62,7 +62,7 @@ The intended life cycle of a successful recovery takes the following steps: ### Malicious Recovery Attempts -Initializing a the recovery process for a recoverable account is open and +Initializing the recovery process for a recoverable account is open and permissionless. However, the recovery deposit is an economic deterrent that should disincentivize would-be attackers from trying to maliciously recover accounts. 
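For reference, the umbrella-crate pattern applied to pallet-node-authorization above (and to pallet-salary below) reduces the pallet's dependencies to a single `frame` crate with the `experimental` and `runtime` features, as in the Cargo.toml change. A minimal sketch of that shape follows; the pallet contents are placeholders, not taken from the diff.

// Single umbrella import replaces the frame-support/frame-system/sp-* dependencies.
#![cfg_attr(not(feature = "std"), no_std)]

use frame::prelude::*;

#[frame::pallet]
pub mod pallet {
	use super::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	// Placeholder storage item, showing that the pallet prelude types
	// (StorageValue, ValueQuery, ...) are available via `frame::prelude`.
	#[pallet::storage]
	pub type Value<T> = StorageValue<_, u32, ValueQuery>;
}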
diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs index 4de1919cdc33..42fb641983f6 100644 --- a/substrate/frame/recovery/src/lib.rs +++ b/substrate/frame/recovery/src/lib.rs @@ -75,7 +75,7 @@ //! //! ### Malicious Recovery Attempts //! -//! Initializing a the recovery process for a recoverable account is open and +//! Initializing the recovery process for a recoverable account is open and //! permissionless. However, the recovery deposit is an economic deterrent that //! should disincentivize would-be attackers from trying to maliciously recover //! accounts. @@ -403,7 +403,7 @@ pub mod pallet { .map_err(|e| e.error) } - /// Allow ROOT to bypass the recovery process and set an a rescuer account + /// Allow ROOT to bypass the recovery process and set a rescuer account /// for a lost account directly. /// /// The dispatch origin for this call must be _ROOT_. diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index b3ed95bf1de5..626993a0547b 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -17,43 +17,25 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } log = { workspace = true } pallet-ranked-collective = { optional = true, workspace = true } scale-info = { features = ["derive"], workspace = true } -sp-arithmetic = { workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } [features] default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", - "frame-support/experimental", - "frame-support/std", - "frame-system/std", + "frame/std", "log/std", "pallet-ranked-collective/std", "scale-info/std", - "sp-arithmetic/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", ] runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", + "frame/runtime-benchmarks", "pallet-ranked-collective/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", ] try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", + "frame/try-runtime", "pallet-ranked-collective?/try-runtime", - "sp-runtime/try-runtime", ] diff --git a/substrate/frame/salary/src/benchmarking.rs b/substrate/frame/salary/src/benchmarking.rs index aeae8d2d67f8..6dfd6f6dd488 100644 --- a/substrate/frame/salary/src/benchmarking.rs +++ b/substrate/frame/salary/src/benchmarking.rs @@ -22,10 +22,7 @@ use super::*; use crate::Pallet as Salary; -use frame_benchmarking::v2::*; -use frame_system::{Pallet as System, RawOrigin}; -use sp_core::Get; - +use frame::benchmarking::prelude::*; const SEED: u32 = 0; fn ensure_member_with_salary, I: 'static>(who: &T::AccountId) { @@ -37,7 +34,7 @@ fn ensure_member_with_salary, I: 'static>(who: &T::AccountId) { for _ in 0..255 { let r = T::Members::rank_of(who).expect("prior guard ensures `who` is a member; qed"); if !T::Salary::get_salary(r, &who).is_zero() { - break + break; } T::Members::promote(who).unwrap(); } diff --git a/substrate/frame/salary/src/lib.rs b/substrate/frame/salary/src/lib.rs index efb4f5d3c542..6a843625f4a7 100644 --- a/substrate/frame/salary/src/lib.rs +++ b/substrate/frame/salary/src/lib.rs @@ -19,20 +19,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -use 
codec::{Decode, Encode, MaxEncodedLen}; use core::marker::PhantomData; -use scale_info::TypeInfo; -use sp_arithmetic::traits::{Saturating, Zero}; -use sp_runtime::{Perbill, RuntimeDebug}; - -use frame_support::{ - defensive, - dispatch::DispatchResultWithPostInfo, - ensure, - traits::{ - tokens::{GetSalary, Pay, PaymentStatus}, - RankedMembers, RankedMembersSwapHandler, - }, +use frame::{ + prelude::*, + traits::tokens::{GetSalary, Pay, PaymentStatus}, }; #[cfg(test)] @@ -85,12 +75,9 @@ pub struct ClaimantStatus { status: ClaimState, } -#[frame_support::pallet] +#[frame::pallet] pub mod pallet { use super::*; - use frame_support::{dispatch::Pays, pallet_prelude::*}; - use frame_system::pallet_prelude::*; - #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -460,15 +447,15 @@ impl, I: 'static> ) { if who == new_who { defensive!("Should not try to swap with self"); - return + return; } if Claimant::::contains_key(new_who) { defensive!("Should not try to overwrite existing claimant"); - return + return; } let Some(claimant) = Claimant::::take(who) else { - frame_support::defensive!("Claimant should exist when swapping"); + defensive!("Claimant should exist when swapping"); return; }; diff --git a/substrate/frame/salary/src/tests/integration.rs b/substrate/frame/salary/src/tests/integration.rs index 0c1fb8bbdcba..e4e9c8f6a31b 100644 --- a/substrate/frame/salary/src/tests/integration.rs +++ b/substrate/frame/salary/src/tests/integration.rs @@ -19,25 +19,14 @@ use crate as pallet_salary; use crate::*; -use frame_support::{ - assert_noop, assert_ok, derive_impl, hypothetically, - pallet_prelude::Weight, - parameter_types, - traits::{ConstU64, EitherOf, MapSuccess, NoOpPoll}, -}; +use frame::{deps::sp_io, testing_prelude::*}; use pallet_ranked_collective::{EnsureRanked, Geometric}; -use sp_core::{ConstU16, Get}; -use sp_runtime::{ - traits::{Convert, ReduceBy, ReplaceWithDefault}, - BuildStorage, -}; type Rank = u16; type Block = frame_system::mocking::MockBlock; -frame_support::construct_runtime!( - pub enum Test - { +construct_runtime!( + pub struct Test { System: frame_system, Salary: pallet_salary, Club: pallet_ranked_collective, @@ -145,9 +134,9 @@ impl pallet_ranked_collective::Config for Test { type BenchmarkSetup = Salary; } -pub fn new_test_ext() -> sp_io::TestExternalities { +pub fn new_test_ext() -> TestState { let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); + let mut ext = TestState::new(t); ext.execute_with(|| System::set_block_number(1)); ext } @@ -194,7 +183,7 @@ fn swap_exhaustive_works() { // The events mess up the storage root: System::reset_events(); - sp_io::storage::root(sp_runtime::StateVersion::V1) + sp_io::storage::root(StateVersion::V1) }); let root_swap = hypothetically!({ @@ -207,7 +196,7 @@ fn swap_exhaustive_works() { // The events mess up the storage root: System::reset_events(); - sp_io::storage::root(sp_runtime::StateVersion::V1) + sp_io::storage::root(StateVersion::V1) }); assert_eq!(root_add, root_swap); diff --git a/substrate/frame/salary/src/tests/unit.rs b/substrate/frame/salary/src/tests/unit.rs index db1c8b947ef5..3bb7bc4adf1e 100644 --- a/substrate/frame/salary/src/tests/unit.rs +++ b/substrate/frame/salary/src/tests/unit.rs @@ -17,23 +17,15 @@ //! The crate's tests. 
-use std::collections::BTreeMap; - -use core::cell::RefCell; -use frame_support::{ - assert_noop, assert_ok, derive_impl, - pallet_prelude::Weight, - parameter_types, - traits::{tokens::ConvertRank, ConstU64}, -}; -use sp_runtime::{traits::Identity, BuildStorage, DispatchResult}; - use crate as pallet_salary; use crate::*; +use core::cell::RefCell; +use frame::{deps::sp_runtime::traits::Identity, testing_prelude::*, traits::tokens::ConvertRank}; +use std::collections::BTreeMap; -type Block = frame_system::mocking::MockBlock; +type Block = MockBlock; -frame_support::construct_runtime!( +construct_runtime!( pub enum Test { System: frame_system, @@ -124,7 +116,7 @@ impl RankedMembers for TestClub { } fn demote(who: &Self::AccountId) -> DispatchResult { CLUB.with(|club| match club.borrow().get(who) { - None => Err(sp_runtime::DispatchError::Unavailable), + None => Err(DispatchError::Unavailable), Some(&0) => { club.borrow_mut().remove(&who); Ok(()) @@ -156,9 +148,9 @@ impl Config for Test { type Budget = Budget; } -pub fn new_test_ext() -> sp_io::TestExternalities { +pub fn new_test_ext() -> TestState { let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); + let mut ext = TestState::new(t); ext.execute_with(|| System::set_block_number(1)); ext } diff --git a/substrate/frame/salary/src/weights.rs b/substrate/frame/salary/src/weights.rs index f1cdaaa225a4..43c001b30d33 100644 --- a/substrate/frame/salary/src/weights.rs +++ b/substrate/frame/salary/src/weights.rs @@ -46,8 +46,8 @@ #![allow(unused_imports)] #![allow(missing_docs)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; +use frame::weights_prelude::*; /// Weight functions needed for `pallet_salary`. pub trait WeightInfo { diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index d63f90eb973f..f79a52bc6c5b 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -203,8 +203,12 @@ pub mod prelude { /// Dispatch types from `frame-support`, other fundamental traits #[doc(no_inline)] pub use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; - pub use frame_support::traits::{ - Contains, EstimateNextSessionRotation, IsSubType, OnRuntimeUpgrade, OneSessionHandler, + pub use frame_support::{ + defensive, defensive_assert, + traits::{ + Contains, EitherOf, EstimateNextSessionRotation, IsSubType, MapSuccess, NoOpPoll, + OnRuntimeUpgrade, OneSessionHandler, RankedMembers, RankedMembersSwapHandler, + }, }; /// Pallet prelude of `frame-system`. @@ -228,11 +232,10 @@ pub mod prelude { /// Runtime traits #[doc(no_inline)] pub use sp_runtime::traits::{ - BlockNumberProvider, Bounded, DispatchInfoOf, Dispatchable, SaturatedConversion, - Saturating, StaticLookup, TrailingZeroInput, + BlockNumberProvider, Bounded, Convert, DispatchInfoOf, Dispatchable, ReduceBy, + ReplaceWithDefault, SaturatedConversion, Saturating, StaticLookup, TrailingZeroInput, }; - - /// Other runtime types and traits + /// Other error/result types for runtime #[doc(no_inline)] pub use sp_runtime::{ BoundToRuntimeAppPublic, DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError, @@ -262,7 +265,7 @@ pub mod benchmarking { pub use frame_benchmarking::benchmarking::*; // The system origin, which is very often needed in benchmarking code. Might be tricky only // if the pallet defines its own `#[pallet::origin]` and call it `RawOrigin`. 
- pub use frame_system::RawOrigin; + pub use frame_system::{Pallet as System, RawOrigin}; } #[deprecated( @@ -319,7 +322,7 @@ pub mod testing_prelude { /// Other helper macros from `frame_support` that help with asserting in tests. pub use frame_support::{ assert_err, assert_err_ignore_postinfo, assert_error_encoded_size, assert_noop, assert_ok, - assert_storage_noop, storage_alias, + assert_storage_noop, hypothetically, storage_alias, }; pub use frame_system::{self, mocking::*}; @@ -328,6 +331,9 @@ pub mod testing_prelude { pub use sp_io::TestExternalities; pub use sp_io::TestExternalities as TestState; + + /// Commonly used runtime traits for testing. + pub use sp_runtime::{traits::BadOrigin, StateVersion}; } /// All of the types and tools needed to build FRAME-based runtimes. @@ -505,7 +511,7 @@ pub mod runtime { #[cfg(feature = "std")] pub mod testing_prelude { pub use sp_core::storage::Storage; - pub use sp_runtime::BuildStorage; + pub use sp_runtime::{BuildStorage, DispatchError}; } } diff --git a/substrate/frame/support/src/dispatch_context.rs b/substrate/frame/support/src/dispatch_context.rs index b34c6bdada3d..42776e71cb88 100644 --- a/substrate/frame/support/src/dispatch_context.rs +++ b/substrate/frame/support/src/dispatch_context.rs @@ -140,7 +140,7 @@ impl Value<'_, T> { /// Runs the given `callback` in the dispatch context and gives access to some user defined value. /// -/// Passes the a mutable reference of [`Value`] to the callback. The value will be of type `T` and +/// Passes a mutable reference of [`Value`] to the callback. The value will be of type `T` and /// is identified using the [`TypeId`] of `T`. This means that `T` should be some unique type to /// make the value unique. If no value is set yet [`Value::get()`] and [`Value::get_mut()`] will /// return `None`. It is totally valid to have some `T` that is shared between different callers to diff --git a/substrate/frame/support/src/storage/child.rs b/substrate/frame/support/src/storage/child.rs index 5ebba2693658..7109e9213b0f 100644 --- a/substrate/frame/support/src/storage/child.rs +++ b/substrate/frame/support/src/storage/child.rs @@ -163,7 +163,7 @@ pub fn kill_storage(child_info: &ChildInfo, limit: Option) -> KillStorageRe /// operating on the same prefix should pass `Some` and this value should be equal to the /// previous call result's `maybe_cursor` field. The only exception to this is when you can /// guarantee that the subsequent call is in a new block; in this case the previous call's result -/// cursor need not be passed in an a `None` may be passed instead. This exception may be useful +/// cursor need not be passed in and a `None` may be passed instead. This exception may be useful /// then making this call solely from a block-hook such as `on_initialize`. /// Returns [`MultiRemovalResults`] to inform about the result. Once the resultant `maybe_cursor` diff --git a/substrate/frame/support/src/storage/unhashed.rs b/substrate/frame/support/src/storage/unhashed.rs index 7f9bc93d7d81..495c50caa2d6 100644 --- a/substrate/frame/support/src/storage/unhashed.rs +++ b/substrate/frame/support/src/storage/unhashed.rs @@ -124,7 +124,7 @@ pub fn kill_prefix(prefix: &[u8], limit: Option) -> sp_io::KillStorageResul /// operating on the same prefix should pass `Some` and this value should be equal to the /// previous call result's `maybe_cursor` field. 
 /// guarantee that the subsequent call is in a new block; in this case the previous call's result
-/// cursor need not be passed in an a `None` may be passed instead. This exception may be useful
+/// cursor need not be passed in and a `None` may be passed instead. This exception may be useful
 /// then making this call solely from a block-hook such as `on_initialize`.
 ///
 /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once the
diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs
index 0dc3abdce956..9fef4383ad67 100644
--- a/substrate/frame/support/src/traits/misc.rs
+++ b/substrate/frame/support/src/traits/misc.rs
@@ -66,7 +66,7 @@ impl<T: VariantCount> Get<u32> for VariantCountOf<T> {
 #[macro_export]
 macro_rules! defensive {
 	() => {
-		frame_support::__private::log::error!(
+		$crate::__private::log::error!(
 			target: "runtime::defensive",
 			"{}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR
@@ -74,7 +74,7 @@ macro_rules! defensive {
 		debug_assert!(false, "{}", $crate::traits::DEFENSIVE_OP_INTERNAL_ERROR);
 	};
 	($error:expr $(,)?) => {
-		frame_support::__private::log::error!(
+		$crate::__private::log::error!(
 			target: "runtime::defensive",
 			"{}: {:?}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR,
@@ -83,7 +83,7 @@ macro_rules! defensive {
 		debug_assert!(false, "{}: {:?}", $crate::traits::DEFENSIVE_OP_INTERNAL_ERROR, $error);
 	};
 	($error:expr, $proof:expr $(,)?) => {
-		frame_support::__private::log::error!(
+		$crate::__private::log::error!(
 			target: "runtime::defensive",
 			"{}: {:?}: {:?}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR,
diff --git a/substrate/frame/support/src/traits/preimages.rs b/substrate/frame/support/src/traits/preimages.rs
index 80020d8d0080..6e46a7489654 100644
--- a/substrate/frame/support/src/traits/preimages.rs
+++ b/substrate/frame/support/src/traits/preimages.rs
@@ -38,7 +38,7 @@ pub enum Bounded<T, H: Hash> {
 	/// for transitioning from legacy state. In the future we will make this a pure
 	/// `Dummy` item storing only the final `dummy` field.
 	Legacy { hash: H::Output, dummy: core::marker::PhantomData<T> },
-	/// A an bounded `Call`. Its encoding must be at most 128 bytes.
+	/// A bounded `Call`. Its encoding must be at most 128 bytes.
 	Inline(BoundedInline),
 	/// A hash of the call together with an upper limit for its size.`
 	Lookup { hash: H::Output, len: u32 },
diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs
index b412d4b52fed..8909d2b2e486 100644
--- a/substrate/primitives/api/src/lib.rs
+++ b/substrate/primitives/api/src/lib.rs
@@ -666,7 +666,7 @@ pub struct CallApiAtParams<'a, Block: BlockT> {
 	pub extensions: &'a RefCell<Extensions>,
 }

-/// Something that can call into the an api at a given block.
+/// Something that can call into an api at a given block.
 #[cfg(feature = "std")]
 pub trait CallApiAt<Block: BlockT> {
 	/// The state backend that is used to store the block states.
diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs
index 5b6cacc7e008..8f5b484e4e3f 100644
--- a/substrate/primitives/runtime/src/traits/mod.rs
+++ b/substrate/primitives/runtime/src/traits/mod.rs
@@ -1963,7 +1963,7 @@ pub trait AccountIdConversion<AccountId>: Sized {
 		Self::try_from_sub_account::<()>(a).map(|x| x.0)
 	}

-	/// Convert this value amalgamated with the a secondary "sub" value into an account ID,
+	/// Convert this value amalgamated with a secondary "sub" value into an account ID,
 	/// truncating any unused bytes. This is infallible.
 	///
 	/// NOTE: The account IDs from this and from `into_account` are *not* guaranteed to be distinct
diff --git a/templates/parachain/pallets/template/src/weights.rs b/templates/parachain/pallets/template/src/weights.rs
index 9295492bc20b..4d6dd5642a1e 100644
--- a/templates/parachain/pallets/template/src/weights.rs
+++ b/templates/parachain/pallets/template/src/weights.rs
@@ -39,6 +39,12 @@ pub trait WeightInfo {
 }

 /// Weights for pallet_template using the Substrate node and recommended hardware.
+#[cfg_attr(
+	not(feature = "std"),
+	deprecated(
+		note = "SubstrateWeight is auto-generated and should not be used in production. Replace it with runtime benchmarked weights."
+	)
+)]
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Storage: Template Something (r:0 w:1)
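As a usage illustration (not part of the patch): the `deprecated` attribute added above nudges downstream runtimes to point the pallet's `WeightInfo` at weights benchmarked on their own hardware rather than the template defaults. A minimal sketch of that wiring, assuming a hypothetical downstream runtime named `Runtime` that includes `pallet_template`; the names and `Config` items below are placeholders, not defined by this patch.

// Sketch only: `Runtime`, `RuntimeEvent` and `pallet_template` are assumed to exist
// in the downstream runtime; none of them are introduced by this patch.
impl pallet_template::Config for Runtime {
	type RuntimeEvent = RuntimeEvent;
	// Point the pallet at weights generated by `frame-benchmarking` for this runtime,
	// rather than the auto-generated template defaults deprecated above.
	type WeightInfo = pallet_template::weights::SubstrateWeight<Runtime>;
}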