Commit
Merge remote-tracking branch 'upstream/master' into libp2p-0.53.x
# Conflicts:
#	substrate/client/network/src/discovery.rs
nazar-pc committed Dec 13, 2024
2 parents 41fed42 + 482bf08 commit 4e39867
Showing 153 changed files with 5,946 additions and 2,259 deletions.
11 changes: 11 additions & 0 deletions .gitlab/pipeline/zombienet/polkadot.yml
@@ -252,6 +252,17 @@ zombienet-polkadot-functional-0018-shared-core-idle-parachain:
--local-dir="${LOCAL_DIR}/functional"
--test="0018-shared-core-idle-parachain.zndsl"

zombienet-polkadot-functional-0019-coretime-collation-fetching-fairness:
extends:
- .zombienet-polkadot-common
before_script:
- !reference [ .zombienet-polkadot-common, before_script ]
- cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
--local-dir="${LOCAL_DIR}/functional"
--test="0019-coretime-collation-fetching-fairness.zndsl"

zombienet-polkadot-smoke-0001-parachains-smoke-test:
extends:
- .zombienet-polkadot-common
10 changes: 7 additions & 3 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -854,7 +854,7 @@ macro_magic = { version = "0.5.1" }
maplit = { version = "1.0.2" }
memmap2 = { version = "0.9.3" }
memory-db = { version = "0.32.0", default-features = false }
merkleized-metadata = { version = "0.1.2" }
merkleized-metadata = { version = "0.2.0" }
merlin = { version = "3.0", default-features = false }
messages-relay = { path = "bridges/relays/messages" }
metered = { version = "0.6.1", default-features = false, package = "prioritized-metered-channel" }
1 change: 1 addition & 0 deletions bridges/bin/runtime-common/Cargo.toml
@@ -99,6 +99,7 @@ runtime-benchmarks = [
"pallet-utility/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"sp-trie",
"xcm/runtime-benchmarks",
]
integrity-test = ["static_assertions"]
test-helpers = ["bp-runtime/test-helpers", "sp-trie"]
1 change: 1 addition & 0 deletions bridges/modules/xcm-bridge-hub-router/Cargo.toml
@@ -56,6 +56,7 @@ runtime-benchmarks = [
"frame-system/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"xcm-builder/runtime-benchmarks",
"xcm/runtime-benchmarks",
]
try-runtime = [
"frame-support/try-runtime",
1 change: 1 addition & 0 deletions bridges/modules/xcm-bridge-hub/Cargo.toml
@@ -77,6 +77,7 @@ runtime-benchmarks = [
"sp-runtime/runtime-benchmarks",
"xcm-builder/runtime-benchmarks",
"xcm-executor/runtime-benchmarks",
"xcm/runtime-benchmarks",
]
try-runtime = [
"frame-support/try-runtime",
1 change: 1 addition & 0 deletions bridges/snowbridge/pallets/inbound-queue/Cargo.toml
@@ -81,6 +81,7 @@ runtime-benchmarks = [
"snowbridge-router-primitives/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"xcm-executor/runtime-benchmarks",
"xcm/runtime-benchmarks",
]
try-runtime = [
"frame-support/try-runtime",
1 change: 1 addition & 0 deletions bridges/snowbridge/pallets/system/Cargo.toml
@@ -71,6 +71,7 @@ runtime-benchmarks = [
"snowbridge-pallet-outbound-queue/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"xcm-executor/runtime-benchmarks",
"xcm/runtime-benchmarks",
]
try-runtime = [
"frame-support/try-runtime",
1 change: 1 addition & 0 deletions bridges/snowbridge/primitives/core/Cargo.toml
@@ -64,4 +64,5 @@ runtime-benchmarks = [
"sp-runtime/runtime-benchmarks",
"xcm-builder/runtime-benchmarks",
"xcm-executor/runtime-benchmarks",
"xcm/runtime-benchmarks",
]
1 change: 1 addition & 0 deletions bridges/snowbridge/primitives/router/Cargo.toml
@@ -51,4 +51,5 @@ runtime-benchmarks = [
"snowbridge-core/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"xcm-executor/runtime-benchmarks",
"xcm/runtime-benchmarks",
]
1 change: 1 addition & 0 deletions bridges/snowbridge/runtime/runtime-common/Cargo.toml
@@ -43,4 +43,5 @@ runtime-benchmarks = [
"snowbridge-core/runtime-benchmarks",
"xcm-builder/runtime-benchmarks",
"xcm-executor/runtime-benchmarks",
"xcm/runtime-benchmarks",
]
1 change: 1 addition & 0 deletions bridges/snowbridge/runtime/test-common/Cargo.toml
@@ -92,5 +92,6 @@ runtime-benchmarks = [
"snowbridge-pallet-system/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"xcm-executor/runtime-benchmarks",
"xcm/runtime-benchmarks",
]
fast-runtime = []
1 change: 1 addition & 0 deletions cumulus/client/consensus/aura/Cargo.toml
@@ -35,6 +35,7 @@ sp-blockchain = { workspace = true, default-features = true }
sp-consensus = { workspace = true, default-features = true }
sp-consensus-aura = { workspace = true, default-features = true }
sp-core = { workspace = true, default-features = true }
sp-trie = { workspace = true, default-features = true }
sp-inherents = { workspace = true, default-features = true }
sp-keystore = { workspace = true, default-features = true }
sp-runtime = { workspace = true, default-features = true }
144 changes: 144 additions & 0 deletions cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs
@@ -0,0 +1,144 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Cumulus.

// Cumulus is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Cumulus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.

use futures::{stream::FusedStream, StreamExt};
use sc_consensus::{BlockImport, StateAction};
use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
use sp_api::{ApiExt, CallApiAt, CallContext, Core, ProvideRuntimeApi, StorageProof};
use sp_runtime::traits::{Block as BlockT, Header as _};
use sp_trie::proof_size_extension::ProofSizeExt;
use std::sync::Arc;

/// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`].
///
/// This handle should be passed to [`Params`](super::Params), or it can simply be dropped if the
/// node is not running as a collator.
pub struct SlotBasedBlockImportHandle<Block> {
receiver: TracingUnboundedReceiver<(Block, StorageProof)>,
}

impl<Block> SlotBasedBlockImportHandle<Block> {
/// Returns the next item.
///
/// The future will never return when the internal channel is closed.
pub async fn next(&mut self) -> (Block, StorageProof) {
loop {
if self.receiver.is_terminated() {
futures::pending!()
} else if let Some(res) = self.receiver.next().await {
return res
}
}
}
}

/// Special block import for the slot based collator.
pub struct SlotBasedBlockImport<Block, BI, Client> {
inner: BI,
client: Arc<Client>,
sender: TracingUnboundedSender<(Block, StorageProof)>,
}

impl<Block, BI, Client> SlotBasedBlockImport<Block, BI, Client> {
/// Create a new instance.
///
/// The returned [`SlotBasedBlockImportHandle`] needs to be passed to the
/// [`Params`](super::Params), so that this block import instance can communicate with the
/// collation task. If the node is not running as a collator, just dropping the handle is fine.
pub fn new(inner: BI, client: Arc<Client>) -> (Self, SlotBasedBlockImportHandle<Block>) {
let (sender, receiver) = tracing_unbounded("SlotBasedBlockImportChannel", 1000);

(Self { sender, client, inner }, SlotBasedBlockImportHandle { receiver })
}
}

impl<Block, BI: Clone, Client> Clone for SlotBasedBlockImport<Block, BI, Client> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone(), client: self.client.clone(), sender: self.sender.clone() }
}
}

#[async_trait::async_trait]
impl<Block, BI, Client> BlockImport<Block> for SlotBasedBlockImport<Block, BI, Client>
where
Block: BlockT,
BI: BlockImport<Block> + Send + Sync,
BI::Error: Into<sp_consensus::Error>,
Client: ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync,
Client::StateBackend: Send,
Client::Api: Core<Block>,
{
type Error = sp_consensus::Error;

async fn check_block(
&self,
block: sc_consensus::BlockCheckParams<Block>,
) -> Result<sc_consensus::ImportResult, Self::Error> {
self.inner.check_block(block).await.map_err(Into::into)
}

async fn import_block(
&self,
mut params: sc_consensus::BlockImportParams<Block>,
) -> Result<sc_consensus::ImportResult, Self::Error> {
// If the channel is still open and the block needs to be executed, we execute the block here.
// This lets us collect the storage proof and, to prevent re-execution further down the import
// pipeline, we push the resulting state changes downwards. Blocks arriving with
// `StateAction::ApplyChanges` are skipped, because that state action means the node either
// produced the block itself or imported the block via state sync.
if !self.sender.is_closed() && !matches!(params.state_action, StateAction::ApplyChanges(_))
{
let mut runtime_api = self.client.runtime_api();

runtime_api.set_call_context(CallContext::Onchain);

runtime_api.record_proof();
let recorder = runtime_api
.proof_recorder()
.expect("Proof recording is enabled in the line above; qed.");
runtime_api.register_extension(ProofSizeExt::new(recorder));

let parent_hash = *params.header.parent_hash();

let block = Block::new(params.header.clone(), params.body.clone().unwrap_or_default());

runtime_api
.execute_block(parent_hash, block.clone())
.map_err(|e| Box::new(e) as Box<_>)?;

let storage_proof =
runtime_api.extract_proof().expect("Proof recording was enabled above; qed");

let state = self.client.state_at(parent_hash).map_err(|e| Box::new(e) as Box<_>)?;
let gen_storage_changes = runtime_api
.into_storage_changes(&state, parent_hash)
.map_err(sp_consensus::Error::ChainLookup)?;

if params.header.state_root() != &gen_storage_changes.transaction_storage_root {
return Err(sp_consensus::Error::Other(Box::new(
sp_blockchain::Error::InvalidStateRoot,
)))
}

params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(
gen_storage_changes,
));

let _ = self.sender.unbounded_send((block, storage_proof));
}

self.inner.import_block(params).await.map_err(Into::into)
}
}
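
A minimal usage sketch of the handle returned by `SlotBasedBlockImport::new`: only `SlotBasedBlockImportHandle` and its `next` method come from the file above; the helper name and the placeholder logging are assumptions for illustration.

use sp_runtime::traits::Block as BlockT;

/// Hypothetical consumer of a [`SlotBasedBlockImportHandle`] (illustrative only).
///
/// `next()` resolves each time a block has been imported and its storage proof recorded; it
/// never resolves once the sending side of the channel has been dropped.
async fn drain_block_import_handle<Block: BlockT>(mut handle: SlotBasedBlockImportHandle<Block>) {
    loop {
        let (block, storage_proof) = handle.next().await;
        // Placeholder: a real consumer (e.g. the collation task) would forward the block and
        // proof; here they are only logged.
        tracing::debug!(block_hash = ?block.hash(), ?storage_proof, "Imported block received");
    }
}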
@@ -47,6 +47,8 @@ pub struct Params<Block: BlockT, RClient, CS> {
pub collator_service: CS,
/// Receiver channel for communication with the block builder task.
pub collator_receiver: TracingUnboundedReceiver<CollatorMessage<Block>>,
/// The handle from the special slot based block import.
pub block_import_handle: super::SlotBasedBlockImportHandle<Block>,
}

/// Asynchronously executes the collation task for a parachain.
@@ -55,28 +57,49 @@ pub struct Params<Block: BlockT, RClient, CS> {
/// collations to the relay chain. It listens for new best relay chain block notifications and
/// handles collator messages. If our parachain is scheduled on a core and we have a candidate,
/// the task will build a collation and send it to the relay chain.
pub async fn run_collation_task<Block, RClient, CS>(mut params: Params<Block, RClient, CS>)
where
pub async fn run_collation_task<Block, RClient, CS>(
Params {
relay_client,
collator_key,
para_id,
reinitialize,
collator_service,
mut collator_receiver,
mut block_import_handle,
}: Params<Block, RClient, CS>,
) where
Block: BlockT,
CS: CollatorServiceInterface<Block> + Send + Sync + 'static,
RClient: RelayChainInterface + Clone + 'static,
{
let Ok(mut overseer_handle) = params.relay_client.overseer_handle() else {
let Ok(mut overseer_handle) = relay_client.overseer_handle() else {
tracing::error!(target: LOG_TARGET, "Failed to get overseer handle.");
return
};

cumulus_client_collator::initialize_collator_subsystems(
&mut overseer_handle,
params.collator_key,
params.para_id,
params.reinitialize,
collator_key,
para_id,
reinitialize,
)
.await;

let collator_service = params.collator_service;
while let Some(collator_message) = params.collator_receiver.next().await {
handle_collation_message(collator_message, &collator_service, &mut overseer_handle).await;
loop {
futures::select! {
collator_message = collator_receiver.next() => {
let Some(message) = collator_message else {
return;
};

handle_collation_message(message, &collator_service, &mut overseer_handle).await;
},
block_import_msg = block_import_handle.next().fuse() => {
// TODO: Implement me.
// Issue: https://github.com/paritytech/polkadot-sdk/issues/6495
let _ = block_import_msg;
}
}
}
}
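
For orientation, a minimal wiring sketch under stated assumptions: the `sc_service::TaskManager` plumbing and the helper itself are illustrative and not part of this commit (in the crate the task may instead be spawned by the slot-based collator entry point); only `Params` and `run_collation_task` come from the file above.

/// Hypothetical wiring helper (illustrative only): spawn the collation task with the channel
/// receiver coming from the block-builder task and the handle returned by
/// `SlotBasedBlockImport::new`, both carried inside `params`.
fn spawn_collation_task<Block, RClient, CS>(
    task_manager: &sc_service::TaskManager,
    params: Params<Block, RClient, CS>,
) where
    Block: BlockT,
    CS: CollatorServiceInterface<Block> + Send + Sync + 'static,
    RClient: RelayChainInterface + Clone + 'static,
{
    task_manager.spawn_essential_handle().spawn(
        "slot-based-collation-task",
        Some("parachain-block-authoring"),
        run_collation_task(params),
    );
}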

