Switch to full wasm execution #1881

Closed · wants to merge 1 commit
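The gist of the change: the node stops compiling the runtimes to native code and dispatching through `NativeElseWasmExecutor`; every runtime call is instead executed from the Wasm blob by `sc_executor::WasmExecutor`, parameterised by the host functions the runtime may call. The executor construction itself happens inside `evm::new_partial`, which this diff does not show, so the sketch below only illustrates the pattern using the stock Polkadot SDK helper `sc_service::new_wasm_executor` (the helper and its `&Configuration` argument match the SDK vintage this PR appears to target; treat the exact call as an assumption):

use sc_executor::WasmExecutor;
use sc_service::Configuration;

// Host functions exposed to the Wasm runtime. Without native dispatch there
// is no `ExtendHostFunctions` hook any more, so the benchmarking host
// functions must be listed on the executor's type parameter directly.
#[cfg(feature = "runtime-benchmarks")]
type HostFunctions = (
	sp_io::SubstrateHostFunctions,
	frame_benchmarking::benchmarking::HostFunctions,
);
#[cfg(not(feature = "runtime-benchmarks"))]
type HostFunctions = sp_io::SubstrateHostFunctions;

// Illustrative helper (name is ours, not from this PR): build the Wasm-only
// executor from the node configuration (execution method, heap pages,
// runtime cache size, ...).
fn wasm_executor(config: &Configuration) -> WasmExecutor<HostFunctions> {
	sc_service::new_wasm_executor::<HostFunctions>(config)
}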
node/src/command.rs (49 additions, 52 deletions)
@@ -27,7 +27,7 @@ use sp_runtime::traits::AccountIdConversion;
 use crate::{
 	chain_spec,
 	cli::{Cli, RelayChainCli, Subcommand},
-	service::{evm, AltairRuntimeExecutor, CentrifugeRuntimeExecutor, DevelopmentRuntimeExecutor},
+	service::evm,
 };
 
 pub const LOCAL_PARA_ID: ParaId = ParaId::new(2000u32);
@@ -165,28 +165,28 @@ macro_rules! construct_async_run {
 
 		match runner.config().chain_spec.identify() {
 			ChainIdentity::Altair => runner.async_run(|$config| {
-				let $components = evm::new_partial::<altair_runtime::RuntimeApi, _, AltairRuntimeExecutor>(
+				let $components = evm::new_partial::<altair_runtime::RuntimeApi, _>(
 					&$config,
 					first_evm_block,
-					crate::service::build_import_queue::<altair_runtime::RuntimeApi, AltairRuntimeExecutor>
+					crate::service::build_import_queue::<altair_runtime::RuntimeApi>
 				)?;
 				let task_manager = $components.task_manager;
 				{ $( $code )* }.map(|v| (v, task_manager))
 			}),
 			ChainIdentity::Centrifuge => runner.async_run(|$config| {
-				let $components = evm::new_partial::<centrifuge_runtime::RuntimeApi, _, CentrifugeRuntimeExecutor>(
+				let $components = evm::new_partial::<centrifuge_runtime::RuntimeApi, _>(
 					&$config,
 					first_evm_block,
-					crate::service::build_import_queue::<centrifuge_runtime::RuntimeApi, CentrifugeRuntimeExecutor>,
+					crate::service::build_import_queue::<centrifuge_runtime::RuntimeApi>,
 				)?;
 				let task_manager = $components.task_manager;
 				{ $( $code )* }.map(|v| (v, task_manager))
 			}),
 			ChainIdentity::Development => runner.async_run(|$config| {
-				let $components = evm::new_partial::<development_runtime::RuntimeApi, _, DevelopmentRuntimeExecutor>(
+				let $components = evm::new_partial::<development_runtime::RuntimeApi, _>(
 					&$config,
 					first_evm_block,
-					crate::service::build_import_queue::<development_runtime::RuntimeApi, DevelopmentRuntimeExecutor>,
+					crate::service::build_import_queue::<development_runtime::RuntimeApi>,
 				)?;
 				let task_manager = $components.task_manager;
 				{ $( $code )* }.map(|v| (v, task_manager))
@@ -351,51 +351,48 @@ pub fn run() -> Result<()> {
 			);
 
 			match config.chain_spec.identify() {
-				ChainIdentity::Altair => crate::service::start_node::<
-					altair_runtime::RuntimeApi,
-					AltairRuntimeExecutor,
-				>(
-					config,
-					polkadot_config,
-					cli.eth,
-					collator_options,
-					id,
-					hwbench,
-					first_evm_block,
-				)
-				.await
-				.map(|r| r.0)
-				.map_err(Into::into),
-				ChainIdentity::Centrifuge => crate::service::start_node::<
-					centrifuge_runtime::RuntimeApi,
-					CentrifugeRuntimeExecutor,
-				>(
-					config,
-					polkadot_config,
-					cli.eth,
-					collator_options,
-					id,
-					hwbench,
-					first_evm_block,
-				)
-				.await
-				.map(|r| r.0)
-				.map_err(Into::into),
-				ChainIdentity::Development => crate::service::start_node::<
-					development_runtime::RuntimeApi,
-					DevelopmentRuntimeExecutor,
-				>(
-					config,
-					polkadot_config,
-					cli.eth,
-					collator_options,
-					id,
-					hwbench,
-					first_evm_block,
-				)
-				.await
-				.map(|r| r.0)
-				.map_err(Into::into),
+				ChainIdentity::Altair => {
+					crate::service::start_node::<altair_runtime::RuntimeApi>(
+						config,
+						polkadot_config,
+						cli.eth,
+						collator_options,
+						id,
+						hwbench,
+						first_evm_block,
+					)
+					.await
+					.map(|r| r.0)
+					.map_err(Into::into)
+				}
+				ChainIdentity::Centrifuge => {
+					crate::service::start_node::<centrifuge_runtime::RuntimeApi>(
+						config,
+						polkadot_config,
+						cli.eth,
+						collator_options,
+						id,
+						hwbench,
+						first_evm_block,
+					)
+					.await
+					.map(|r| r.0)
+					.map_err(Into::into)
+				}
+				ChainIdentity::Development => {
+					crate::service::start_node::<development_runtime::RuntimeApi>(
+						config,
+						polkadot_config,
+						cli.eth,
+						collator_options,
+						id,
+						hwbench,
+						first_evm_block,
+					)
+					.await
+					.map(|r| r.0)
+					.map_err(Into::into)
+				}
 			}
 		})
 	}
node/src/service.rs (25 additions, 85 deletions)
@@ -26,7 +26,7 @@ use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
 use fc_db::Backend as FrontierBackend;
 use fc_rpc::pending::{AuraConsensusDataProvider, ConsensusDataProvider};
 use polkadot_primitives::CollatorPair;
-use sc_executor::NativeElseWasmExecutor;
+use sc_executor::WasmExecutor;
 use sc_network_sync::SyncingService;
 use sc_service::{Configuration, TFullBackend, TFullClient, TaskManager};
 use sc_telemetry::TelemetryHandle;
@@ -40,13 +40,19 @@ use crate::rpc::{self};
 pub(crate) mod evm;
 use evm::EthConfiguration;
 
-type FullClient<RuntimeApi, Executor> =
-	TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>;
+#[cfg(feature = "runtime-benchmarks")]
+type HostFunctions = (sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions);
+
+#[cfg(not(feature = "runtime-benchmarks"))]
+type HostFunctions = sp_io::SubstrateHostFunctions;
+
+type FullClient<RuntimeApi> =
+	TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;
 
 type FullBackend = TFullBackend<Block>;
 
-type ParachainBlockImport<RuntimeApi, Executor> =
-	TParachainBlockImport<Block, Arc<FullClient<RuntimeApi, Executor>>, FullBackend>;
+type ParachainBlockImport<RuntimeApi> =
+	TParachainBlockImport<Block, Arc<FullClient<RuntimeApi>>, FullBackend>;
 
 pub trait RuntimeApiCollection:
 	sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
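For orientation, `FullClient` above is what `evm::new_partial` instantiates. Its body is outside this diff, so the following is only a sketch of the usual Polkadot SDK client construction on top of the Wasm-only executor (`telemetry` stands in for a local variable the real function would hold):

// Inside a `new_partial`-style function (illustrative, not from this PR):
let executor = sc_service::new_wasm_executor::<HostFunctions>(config);

let (client, backend, keystore_container, task_manager) =
	sc_service::new_full_parts::<Block, RuntimeApi, _>(
		config,
		telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
		executor,
	)?;
let client = Arc::new(client);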
@@ -82,88 +88,24 @@ impl<Api> RuntimeApiCollection for Api where
 {
 }
 
-// Native Altair executor instance.
-pub struct AltairRuntimeExecutor;
-
-impl sc_executor::NativeExecutionDispatch for AltairRuntimeExecutor {
-	/// Only enable the benchmarking host functions when we actually want to
-	/// benchmark.
-	#[cfg(feature = "runtime-benchmarks")]
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-	/// Otherwise we only use the default Substrate host functions.
-	#[cfg(not(feature = "runtime-benchmarks"))]
-	type ExtendHostFunctions = ();
-
-	fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
-		altair_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		altair_runtime::native_version()
-	}
-}
-
-// Native Centrifuge executor instance.
-pub struct CentrifugeRuntimeExecutor;
-
-impl sc_executor::NativeExecutionDispatch for CentrifugeRuntimeExecutor {
-	/// Only enable the benchmarking host functions when we actually want to
-	/// benchmark.
-	#[cfg(feature = "runtime-benchmarks")]
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-	/// Otherwise we only use the default Substrate host functions.
-	#[cfg(not(feature = "runtime-benchmarks"))]
-	type ExtendHostFunctions = ();
-
-	fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
-		centrifuge_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		centrifuge_runtime::native_version()
-	}
-}
-
-// Native Development executor instance.
-pub struct DevelopmentRuntimeExecutor;
-
-impl sc_executor::NativeExecutionDispatch for DevelopmentRuntimeExecutor {
-	/// Only enable the benchmarking host functions when we actually want to
-	/// benchmark.
-	#[cfg(feature = "runtime-benchmarks")]
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-	/// Otherwise we only use the default Substrate host functions.
-	#[cfg(not(feature = "runtime-benchmarks"))]
-	type ExtendHostFunctions = ();
-
-	fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
-		development_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		development_runtime::native_version()
-	}
-}
-
 /// Start a generic parachain node.
-pub async fn start_node<RuntimeApi, Executor>(
+pub async fn start_node<RuntimeApi>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	eth_config: EthConfiguration,
 	collator_options: CollatorOptions,
 	id: ParaId,
 	hwbench: Option<sc_sysinfo::HwBench>,
 	first_evm_block: BlockNumber,
-) -> sc_service::error::Result<(TaskManager, Arc<FullClient<RuntimeApi, Executor>>)>
+) -> sc_service::error::Result<(TaskManager, Arc<FullClient<RuntimeApi>>)>
 where
 	RuntimeApi:
-		ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
+		ConstructRuntimeApi<Block, FullClient<RuntimeApi>> + Send + Sync + 'static,
 	RuntimeApi::RuntimeApi: RuntimeApiCollection,
-	Executor: sc_executor::NativeExecutionDispatch + 'static,
 {
 	let is_authority = parachain_config.role.is_authority();
 
-	evm::start_node_impl::<RuntimeApi, Executor, _, _>(
+	evm::start_node_impl::<RuntimeApi, _, _>(
 		parachain_config,
 		polkadot_config,
 		eth_config,
@@ -234,7 +176,7 @@ where
 			)?;
 			Ok(module)
 		},
-		build_import_queue::<RuntimeApi, Executor>,
+		build_import_queue::<RuntimeApi>,
 	)
 	.await
 }
@@ -243,9 +185,9 @@ where
 ///
 /// NOTE: Almost entirely taken from Polkadot SDK.
 #[allow(clippy::type_complexity)]
-pub fn build_import_queue<RuntimeApi, Executor>(
-	client: Arc<FullClient<RuntimeApi, Executor>>,
-	block_import: ParachainBlockImport<RuntimeApi, Executor>,
+pub fn build_import_queue<RuntimeApi>(
+	client: Arc<FullClient<RuntimeApi>>,
+	block_import: ParachainBlockImport<RuntimeApi>,
 	config: &Configuration,
 	telemetry: Option<TelemetryHandle>,
 	task_manager: &TaskManager,
@@ -254,9 +196,8 @@
 ) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error>
 where
 	RuntimeApi:
-		ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
+		ConstructRuntimeApi<Block, FullClient<RuntimeApi>> + Send + Sync + 'static,
 	RuntimeApi::RuntimeApi: RuntimeApiCollection,
-	Executor: sc_executor::NativeExecutionDispatch + 'static,
 {
 	let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
 	let block_import = evm::BlockImport::new(
@@ -293,14 +234,14 @@ where
 /// NOTE: Taken from Polkadot SDK because Moonbeam uses their custom Nimbus
 /// consensus
 #[allow(clippy::too_many_arguments)]
-fn start_consensus<RuntimeApi, Executor>(
-	client: Arc<FullClient<RuntimeApi, Executor>>,
-	block_import: ParachainBlockImport<RuntimeApi, Executor>,
+fn start_consensus<RuntimeApi>(
+	client: Arc<FullClient<RuntimeApi>>,
+	block_import: ParachainBlockImport<RuntimeApi>,
 	prometheus_registry: Option<&Registry>,
 	telemetry: Option<TelemetryHandle>,
 	task_manager: &TaskManager,
 	relay_chain_interface: Arc<dyn RelayChainInterface>,
-	transaction_pool: Arc<sc_transaction_pool::FullPool<Block, FullClient<RuntimeApi, Executor>>>,
+	transaction_pool: Arc<sc_transaction_pool::FullPool<Block, FullClient<RuntimeApi>>>,
 	sync_oracle: Arc<SyncingService<Block>>,
 	keystore: KeystorePtr,
 	relay_chain_slot_duration: Duration,
@@ -311,9 +252,8 @@
 ) -> Result<(), sc_service::Error>
 where
 	RuntimeApi:
-		ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
+		ConstructRuntimeApi<Block, FullClient<RuntimeApi>> + Send + Sync + 'static,
 	RuntimeApi::RuntimeApi: RuntimeApiCollection,
-	Executor: sc_executor::NativeExecutionDispatch + 'static,
 {
 	use cumulus_client_consensus_aura::collators::basic::{
 		self as basic_aura, Params as BasicAuraParams,
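A closing note on the executor itself: everything the removed native path special-cased now hangs off the executor configuration. Where the `new_wasm_executor` shortcut shown earlier is too opaque, the SDK's builder spells the same construction out; a sketch of that standard Polkadot SDK pattern (the `Configuration` field names such as `default_heap_pages` and `wasm_method` match the SDK vintage this PR targets and are assumptions, not code from this diff):

use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
use sc_service::Configuration;

// Illustrative expansion of `new_wasm_executor`: wire the CLI-driven
// settings (--wasm-execution, --default-heap-pages, ...) into the builder.
fn wasm_executor_explicit(config: &Configuration) -> WasmExecutor<HostFunctions> {
	let heap_pages = config
		.default_heap_pages
		.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |extra_pages| {
			HeapAllocStrategy::Static {
				extra_pages: extra_pages as u32,
			}
		});

	WasmExecutor::builder()
		.with_execution_method(config.wasm_method)
		.with_onchain_heap_alloc_strategy(heap_pages)
		.with_offchain_heap_alloc_strategy(heap_pages)
		.with_max_runtime_instances(config.max_runtime_instances)
		.with_runtime_cache_size(config.runtime_cache_size)
		.build()
}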