From c4d7b591834055bdf3afed96b696e0a2cef0f383 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 15 Oct 2024 22:04:20 +0900 Subject: [PATCH 01/51] perf(rpc): add optional block argument to `trace_block_until_with_inspector` (#11631) --- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 22 +++++++++++++++++---- crates/rpc/rpc/src/otterscan.rs | 1 + crates/rpc/rpc/src/trace.rs | 17 ++++++++++++---- 3 files changed, 32 insertions(+), 8 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 457cbb4811f3..981de8fa6c45 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -1,12 +1,14 @@ //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods. +use std::sync::Arc; + use crate::FromEvmError; use alloy_primitives::B256; use alloy_rpc_types::{BlockId, TransactionInfo}; use futures::Future; use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::Header; +use reth_primitives::{Header, SealedBlockWithSenders}; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ -247,6 +249,7 @@ pub trait Trace: LoadState { fn trace_block_until( &self, block_id: BlockId, + block: Option>, highest_index: Option, config: TracingInspectorConfig, f: F, @@ -266,6 +269,7 @@ pub trait Trace: LoadState { { self.trace_block_until_with_inspector( block_id, + block, highest_index, move || TracingInspector::new(config), f, @@ -285,6 +289,7 @@ pub trait Trace: LoadState { fn trace_block_until_with_inspector( &self, block_id: BlockId, + block: Option>, highest_index: Option, mut inspector_setup: Setup, f: F, @@ -305,8 +310,15 @@ pub trait Trace: LoadState { R: Send + 'static, { async move { + let block = async { + if block.is_some() { + return Ok(block) + } + self.block_with_senders(block_id).await + }; + let ((cfg, block_env, _), block) = - futures::try_join!(self.evm_env_at(block_id), self.block_with_senders(block_id))?; + futures::try_join!(self.evm_env_at(block_id), block)?; let Some(block) = block else { return Ok(None) }; @@ -409,6 +421,7 @@ pub trait Trace: LoadState { fn trace_block_with( &self, block_id: BlockId, + block: Option>, config: TracingInspectorConfig, f: F, ) -> impl Future>, Self::Error>> + Send @@ -427,7 +440,7 @@ pub trait Trace: LoadState { + 'static, R: Send + 'static, { - self.trace_block_until(block_id, None, config, f) + self.trace_block_until(block_id, block, None, config, f) } /// Executes all transactions of a block and returns a list of callback results invoked for each @@ -447,6 +460,7 @@ pub trait Trace: LoadState { fn trace_block_inspector( &self, block_id: BlockId, + block: Option>, insp_setup: Setup, f: F, ) -> impl Future>, Self::Error>> + Send @@ -467,6 +481,6 @@ pub trait Trace: LoadState { Insp: for<'a, 'b> Inspector> + Send + 'static, R: Send + 'static, { - self.trace_block_until_with_inspector(block_id, None, insp_setup, f) + self.trace_block_until_with_inspector(block_id, block, None, insp_setup, f) } } diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 31db343a104f..45722978f9f5 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -334,6 +334,7 @@ where .eth .trace_block_with( num.into(), + None, TracingInspectorConfig::default_parity(), 
|tx_info, inspector, _, _, _| { Ok(inspector.into_parity_builder().into_localized_transaction_traces(tx_info)) diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 0cd94ef15b89..8ac532ff341d 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use alloy_primitives::{map::HashSet, Bytes, B256, U256}; use alloy_rpc_types::{ state::{EvmOverrides, StateOverride}, @@ -37,6 +35,7 @@ use revm_inspectors::{ opcode::OpcodeGasInspector, tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig}, }; +use std::sync::Arc; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `trace` API implementation. @@ -278,14 +277,21 @@ where } // fetch all blocks in that range - let blocks = self.provider().block_range(start..=end).map_err(Eth::Error::from_eth_err)?; + let blocks = self + .provider() + .sealed_block_with_senders_range(start..=end) + .map_err(Eth::Error::from_eth_err)? + .into_iter() + .map(Arc::new) + .collect::>(); // trace all blocks let mut block_traces = Vec::with_capacity(blocks.len()); for block in &blocks { let matcher = matcher.clone(); let traces = self.eth_api().trace_block_until( - block.number.into(), + block.hash().into(), + Some(block.clone()), None, TracingInspectorConfig::default_parity(), move |tx_info, inspector, _, _, _| { @@ -369,6 +375,7 @@ where ) -> Result>, Eth::Error> { let traces = self.eth_api().trace_block_with( block_id, + None, TracingInspectorConfig::default_parity(), |tx_info, inspector, _, _, _| { let traces = @@ -405,6 +412,7 @@ where self.eth_api() .trace_block_with( block_id, + None, TracingInspectorConfig::from_parity_config(&trace_types), move |tx_info, inspector, res, state, db| { let mut full_trace = @@ -460,6 +468,7 @@ where .eth_api() .trace_block_inspector( block_id, + None, OpcodeGasInspector::default, move |tx_info, inspector, _res, _, _| { let trace = TransactionOpcodeGas { From a235f7214c588ab79af7996a07524cfbf5d2628b Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 15 Oct 2024 15:53:43 +0200 Subject: [PATCH 02/51] feat(trie): sparse trie (#11741) --- Cargo.lock | 15 + crates/trie/sparse/Cargo.toml | 32 ++ crates/trie/sparse/benches/root.rs | 191 +++++++++ crates/trie/sparse/src/errors.rs | 49 +++ crates/trie/sparse/src/lib.rs | 9 + crates/trie/sparse/src/state.rs | 131 ++++++ crates/trie/sparse/src/trie.rs | 627 +++++++++++++++++++++++++++++ crates/trie/trie/src/prefix_set.rs | 2 +- 8 files changed, 1055 insertions(+), 1 deletion(-) create mode 100644 crates/trie/sparse/benches/root.rs create mode 100644 crates/trie/sparse/src/errors.rs create mode 100644 crates/trie/sparse/src/state.rs create mode 100644 crates/trie/sparse/src/trie.rs diff --git a/Cargo.lock b/Cargo.lock index 828444080432..7ee37e080aed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9221,6 +9221,21 @@ dependencies = [ [[package]] name = "reth-trie-sparse" version = "1.1.0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "assert_matches", + "criterion", + "itertools 0.13.0", + "proptest", + "rayon", + "reth-primitives", + "reth-trie", + "reth-trie-common", + "smallvec", + "thiserror", + "tracing", +] [[package]] name = "revm" diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 4ebb56145e1f..c31bbe2df2fe 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -10,3 +10,35 @@ description = "Sparse MPT implementation" [lints] workspace = true + + +[dependencies] +# reth +reth-primitives.workspace = true 
+reth-trie-common.workspace = true +reth-trie.workspace = true + +# alloy +alloy-primitives.workspace = true +alloy-rlp.workspace = true + +# tracing +tracing.workspace = true + +# misc +thiserror.workspace = true +rayon.workspace = true +smallvec = { workspace = true, features = ["const_new"] } + +[dev-dependencies] +reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-trie = { workspace = true, features = ["test-utils"] } +assert_matches.workspace = true +itertools.workspace = true +proptest.workspace = true +criterion.workspace = true + +[[bench]] +name = "root" +harness = false diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs new file mode 100644 index 000000000000..4078eb7af314 --- /dev/null +++ b/crates/trie/sparse/benches/root.rs @@ -0,0 +1,191 @@ +#![allow(missing_docs, unreachable_pub)] +use alloy_primitives::{map::HashMap, B256, U256}; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use itertools::Itertools; +use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; +use reth_trie::{ + hashed_cursor::{noop::NoopHashedStorageCursor, HashedPostStateStorageCursor}, + node_iter::{TrieElement, TrieNodeIter}, + trie_cursor::{noop::NoopStorageTrieCursor, InMemoryStorageTrieCursor}, + updates::StorageTrieUpdates, + walker::TrieWalker, + HashedStorage, +}; +use reth_trie_common::{HashBuilder, Nibbles}; +use reth_trie_sparse::SparseTrie; + +pub fn calculate_root_from_leaves(c: &mut Criterion) { + let mut group = c.benchmark_group("calculate root from leaves"); + group.sample_size(20); + + for size in [1_000, 5_000, 10_000, 100_000] { + let state = generate_test_data(size); + + // hash builder + group.bench_function(BenchmarkId::new("hash builder", size), |b| { + b.iter_with_setup(HashBuilder::default, |mut hb| { + for (key, value) in state.iter().sorted_by_key(|(key, _)| *key) { + hb.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(value)); + } + hb.root(); + }) + }); + + // sparse trie + group.bench_function(BenchmarkId::new("sparse trie", size), |b| { + b.iter_with_setup(SparseTrie::revealed_empty, |mut sparse| { + for (key, value) in &state { + sparse + .update_leaf( + Nibbles::unpack(key), + alloy_rlp::encode_fixed_size(value).to_vec(), + ) + .unwrap(); + } + sparse.root().unwrap(); + }) + }); + } +} + +pub fn calculate_root_from_leaves_repeated(c: &mut Criterion) { + let mut group = c.benchmark_group("calculate root from leaves repeated"); + group.sample_size(20); + + for init_size in [1_000, 10_000, 100_000] { + let init_state = generate_test_data(init_size); + + for update_size in [100, 1_000, 5_000, 10_000] { + for num_updates in [1, 3, 5, 10] { + let updates = + (0..num_updates).map(|_| generate_test_data(update_size)).collect::>(); + + // hash builder + let benchmark_id = BenchmarkId::new( + "hash builder", + format!("init size {init_size} | update size {update_size} | num updates {num_updates}"), + ); + group.bench_function(benchmark_id, |b| { + b.iter_with_setup( + || { + let init_storage = HashedStorage::from_iter(false, init_state.clone()); + let storage_updates = updates + .clone() + .into_iter() + .map(|update| HashedStorage::from_iter(false, update)) + .collect::>(); + + let mut hb = HashBuilder::default().with_updates(true); + for (key, value) in init_state.iter().sorted_by_key(|(key, _)| *key) { + hb.add_leaf( + Nibbles::unpack(key), + 
&alloy_rlp::encode_fixed_size(value), + ); + } + hb.root(); + + let (_, updates) = hb.split(); + let trie_updates = StorageTrieUpdates::new(updates); + (init_storage, storage_updates, trie_updates) + }, + |(init_storage, storage_updates, mut trie_updates)| { + let mut storage = init_storage; + for update in storage_updates { + storage.extend(&update); + + let prefix_set = update.construct_prefix_set().freeze(); + let storage_sorted = storage.clone().into_sorted(); + let trie_updates_sorted = trie_updates.clone().into_sorted(); + + let walker = TrieWalker::new( + InMemoryStorageTrieCursor::new( + B256::ZERO, + NoopStorageTrieCursor::default(), + Some(&trie_updates_sorted), + ), + prefix_set, + ); + let mut node_iter = TrieNodeIter::new( + walker, + HashedPostStateStorageCursor::new( + NoopHashedStorageCursor::default(), + Some(&storage_sorted), + ), + ); + + let mut hb = HashBuilder::default().with_updates(true); + while let Some(node) = node_iter.try_next().unwrap() { + match node { + TrieElement::Branch(node) => { + hb.add_branch( + node.key, + node.value, + node.children_are_in_trie, + ); + } + TrieElement::Leaf(hashed_slot, value) => { + hb.add_leaf( + Nibbles::unpack(hashed_slot), + alloy_rlp::encode_fixed_size(&value).as_ref(), + ); + } + } + } + hb.root(); + + trie_updates.finalize(node_iter.walker, hb); + } + }, + ) + }); + + // sparse trie + let benchmark_id = BenchmarkId::new( + "sparse trie", + format!("init size {init_size} | update size {update_size} | num updates {num_updates}"), + ); + group.bench_function(benchmark_id, |b| { + b.iter_with_setup( + || { + let mut sparse = SparseTrie::revealed_empty(); + for (key, value) in &init_state { + sparse + .update_leaf( + Nibbles::unpack(key), + alloy_rlp::encode_fixed_size(value).to_vec(), + ) + .unwrap(); + } + sparse.root().unwrap(); + sparse + }, + |mut sparse| { + for update in &updates { + for (key, value) in update { + sparse + .update_leaf( + Nibbles::unpack(key), + alloy_rlp::encode_fixed_size(value).to_vec(), + ) + .unwrap(); + } + sparse.root().unwrap(); + } + }, + ) + }); + } + } + } +} + +fn generate_test_data(size: usize) -> HashMap { + let mut runner = TestRunner::new(ProptestConfig::default()); + proptest::collection::hash_map(any::(), any::(), size) + .new_tree(&mut runner) + .unwrap() + .current() +} + +criterion_group!(root, calculate_root_from_leaves, calculate_root_from_leaves_repeated); +criterion_main!(root); diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs new file mode 100644 index 000000000000..f60d1736c06f --- /dev/null +++ b/crates/trie/sparse/src/errors.rs @@ -0,0 +1,49 @@ +//! Errors for sparse trie. + +use alloy_primitives::{Bytes, B256}; +use reth_trie::Nibbles; +use thiserror::Error; + +/// Result type with [`SparseStateTrieError`] as error. +pub type SparseStateTrieResult = Result; + +/// Error encountered in [`crate::SparseStateTrie`]. +#[derive(Error, Debug)] +pub enum SparseStateTrieError { + /// Encountered invalid root node. + #[error("invalid root node at {path:?}: {node:?}")] + InvalidRootNode { + /// Path to first proof node. + path: Nibbles, + /// Encoded first proof node. + node: Bytes, + }, + /// Sparse trie error. + #[error(transparent)] + Sparse(#[from] SparseTrieError), + /// RLP error. + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), +} + +/// Result type with [`SparseTrieError`] as error. +pub type SparseTrieResult = Result; + +/// Error encountered in [`crate::SparseTrie`]. 
+#[derive(Error, Debug)] +pub enum SparseTrieError { + /// Sparse trie is still blind. Thrown on attempt to update it. + #[error("sparse trie is blind")] + Blind, + /// Encountered blinded node on update. + #[error("attempted to update blind node at {path:?}: {hash}")] + BlindedNode { + /// Blind node path. + path: Nibbles, + /// Node hash + hash: B256, + }, + /// RLP error. + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), +} diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index 5d3d4a5b6f8d..b3cb2c5fdffa 100644 --- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -1 +1,10 @@ //! The implementation of sparse MPT. + +mod state; +pub use state::*; + +mod trie; +pub use trie::*; + +mod errors; +pub use errors::*; diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs new file mode 100644 index 000000000000..cfb17ef36ff1 --- /dev/null +++ b/crates/trie/sparse/src/state.rs @@ -0,0 +1,131 @@ +use crate::{SparseStateTrieError, SparseStateTrieResult, SparseTrie}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Bytes, B256, +}; +use alloy_rlp::Decodable; +use reth_trie::{Nibbles, TrieNode}; + +/// Sparse state trie representing lazy-loaded Ethereum state trie. +#[derive(Default, Debug)] +pub struct SparseStateTrie { + /// Sparse account trie. + pub(crate) state: SparseTrie, + /// Sparse storage tries. + #[allow(dead_code)] + pub(crate) storages: HashMap, + /// Collection of revealed account and storage keys. + #[allow(dead_code)] + pub(crate) revealed: HashMap>, +} + +impl SparseStateTrie { + /// Create state trie from state trie. + pub fn from_state(state: SparseTrie) -> Self { + Self { state, ..Default::default() } + } + + /// Returns `true` if account was already revealed. + pub fn is_account_revealed(&self, account: &B256) -> bool { + self.revealed.contains_key(account) + } + + /// Returns `true` if storage slot for account was already revealed. + pub fn is_storage_slot_revealed(&self, account: &B256, slot: &B256) -> bool { + self.revealed.get(account).map_or(false, |slots| slots.contains(slot)) + } + + /// Reveal unknown trie paths from provided leaf path and its proof. + /// NOTE: This method does not extensively validate the proof. + pub fn reveal_account( + &mut self, + account: B256, + proof: impl IntoIterator, + ) -> SparseStateTrieResult<()> { + let mut proof = proof.into_iter().peekable(); + + // reveal root and initialize the trie if not already + let Some((path, node)) = proof.next() else { return Ok(()) }; + if !path.is_empty() { + return Err(SparseStateTrieError::InvalidRootNode { path, node }) + } + + // Decode root node and perform sanity check. + let root_node = TrieNode::decode(&mut &node[..])?; + if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() { + return Err(SparseStateTrieError::InvalidRootNode { path, node }) + } + + // Reveal root node if it wasn't already. + let trie = self.state.reveal_root(root_node)?; + + // add the remaining proof nodes + for (path, bytes) in proof { + let node = TrieNode::decode(&mut &bytes[..])?; + trie.reveal_node(path, node)?; + } + + // Mark leaf path as revealed. + self.revealed.entry(account).or_default(); + + Ok(()) + } + + /// Update the leaf node. + pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseStateTrieResult<()> { + self.state.update_leaf(path, value)?; + Ok(()) + } + + /// Returns sparse trie root if the trie has been revealed. 
+ pub fn root(&mut self) -> Option { + self.state.root() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::Bytes; + use alloy_rlp::EMPTY_STRING_CODE; + use assert_matches::assert_matches; + use reth_trie::HashBuilder; + use reth_trie_common::proof::ProofRetainer; + + #[test] + fn sparse_trie_reveal_empty() { + let retainer = ProofRetainer::from_iter([Nibbles::default()]); + let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + hash_builder.root(); + let proofs = hash_builder.take_proof_nodes(); + assert_eq!(proofs.len(), 1); + + let mut sparse = SparseStateTrie::default(); + assert_eq!(sparse.state, SparseTrie::Blind); + sparse.reveal_account(Default::default(), proofs.into_inner()).unwrap(); + assert_eq!(sparse.state, SparseTrie::revealed_empty()); + } + + #[test] + fn reveal_first_node_not_root() { + let mut sparse = SparseStateTrie::default(); + let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; + assert_matches!( + sparse.reveal_account(Default::default(), proof), + Err(SparseStateTrieError::InvalidRootNode { .. }) + ); + } + + #[test] + fn reveal_invalid_proof_with_empty_root() { + let mut sparse = SparseStateTrie::default(); + let proof = [ + (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), + (Nibbles::from_nibbles([0x1]), Bytes::new()), + ]; + assert_matches!( + sparse.reveal_account(Default::default(), proof), + Err(SparseStateTrieError::InvalidRootNode { .. }) + ); + } +} diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs new file mode 100644 index 000000000000..1b83e07e48d6 --- /dev/null +++ b/crates/trie/sparse/src/trie.rs @@ -0,0 +1,627 @@ +use crate::{SparseTrieError, SparseTrieResult}; +use alloy_primitives::{hex, keccak256, map::HashMap, B256}; +use alloy_rlp::Decodable; +use reth_trie::{ + prefix_set::{PrefixSet, PrefixSetMut}, + RlpNode, +}; +use reth_trie_common::{ + BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, TrieMask, TrieNode, CHILD_INDEX_RANGE, + EMPTY_ROOT_HASH, +}; +use smallvec::SmallVec; +use std::{collections::HashSet, fmt}; + +/// Inner representation of the sparse trie. +/// Sparse trie is blind by default until nodes are revealed. +#[derive(PartialEq, Eq, Default, Debug)] +pub enum SparseTrie { + /// None of the trie nodes are known. + #[default] + Blind, + /// The trie nodes have been revealed. + Revealed(RevealedSparseTrie), +} + +impl SparseTrie { + /// Creates new revealed empty trie. + pub fn revealed_empty() -> Self { + Self::Revealed(RevealedSparseTrie::default()) + } + + /// Returns `true` if the sparse trie has no revealed nodes. + pub const fn is_blind(&self) -> bool { + matches!(self, Self::Blind) + } + + /// Returns mutable reference to revealed sparse trie if the trie is not blind. + pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie> { + if let Self::Revealed(revealed) = self { + Some(revealed) + } else { + None + } + } + + /// Reveals the root node if the trie is blinded. + /// + /// # Returns + /// + /// Mutable reference to [`RevealedSparseTrie`]. + pub fn reveal_root(&mut self, root: TrieNode) -> SparseTrieResult<&mut RevealedSparseTrie> { + if self.is_blind() { + *self = Self::Revealed(RevealedSparseTrie::from_root(root)?) + } + Ok(self.as_revealed_mut().unwrap()) + } + + /// Update the leaf node. 
+ pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.update_leaf(path, value)?; + Ok(()) + } + + /// Calculates and returns the trie root if the trie has been revealed. + pub fn root(&mut self) -> Option { + Some(self.as_revealed_mut()?.root()) + } +} + +/// The representation of revealed sparse trie. +#[derive(PartialEq, Eq)] +pub struct RevealedSparseTrie { + /// All trie nodes. + nodes: HashMap, + /// All leaf values. + values: HashMap>, + /// Prefix set. + prefix_set: PrefixSetMut, + /// Reusable buffer for RLP encoding of nodes. + rlp_buf: Vec, +} + +impl fmt::Debug for RevealedSparseTrie { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RevealedSparseTrie") + .field("nodes", &self.nodes) + .field("values", &self.values) + .field("prefix_set", &self.prefix_set) + .field("rlp_buf", &hex::encode(&self.rlp_buf)) + .finish() + } +} + +impl Default for RevealedSparseTrie { + fn default() -> Self { + Self { + nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), + values: HashMap::default(), + prefix_set: PrefixSetMut::default(), + rlp_buf: Vec::new(), + } + } +} + +impl RevealedSparseTrie { + /// Create new revealed sparse trie from the given root node. + pub fn from_root(node: TrieNode) -> SparseTrieResult { + let mut this = Self { + nodes: HashMap::default(), + values: HashMap::default(), + prefix_set: PrefixSetMut::default(), + rlp_buf: Vec::new(), + }; + this.reveal_node(Nibbles::default(), node)?; + Ok(this) + } + + /// Reveal the trie node only if it was not known already. + pub fn reveal_node(&mut self, path: Nibbles, node: TrieNode) -> SparseTrieResult<()> { + // TODO: revise all inserts to not overwrite existing entries + match node { + TrieNode::EmptyRoot => { + debug_assert!(path.is_empty()); + self.nodes.insert(path, SparseNode::Empty); + } + TrieNode::Branch(branch) => { + let mut stack_ptr = branch.as_ref().first_child_index(); + for idx in CHILD_INDEX_RANGE { + if branch.state_mask.is_bit_set(idx) { + let mut child_path = path.clone(); + child_path.push_unchecked(idx); + self.reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; + stack_ptr += 1; + } + } + self.nodes + .insert(path, SparseNode::Branch { state_mask: branch.state_mask, hash: None }); + } + TrieNode::Extension(ext) => { + let mut child_path = path.clone(); + child_path.extend_from_slice_unchecked(&ext.key); + self.reveal_node_or_hash(child_path, &ext.child)?; + self.nodes.insert(path, SparseNode::Extension { key: ext.key, hash: None }); + } + TrieNode::Leaf(leaf) => { + let mut full = path.clone(); + full.extend_from_slice_unchecked(&leaf.key); + self.values.insert(full, leaf.value); + self.nodes.insert(path, SparseNode::new_leaf(leaf.key)); + } + } + + Ok(()) + } + + fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { + if child.len() == B256::len_bytes() + 1 { + // TODO: revise insert to not overwrite existing entries + self.nodes.insert(path, SparseNode::Hash(B256::from_slice(&child[1..]))); + return Ok(()) + } + + self.reveal_node(path, TrieNode::decode(&mut &child[..])?) + } + + /// Update the leaf node with provided value. 
+ pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + self.prefix_set.insert(path.clone()); + let existing = self.values.insert(path.clone(), value); + if existing.is_some() { + // trie structure unchanged, return immediately + return Ok(()) + } + + let mut current = Nibbles::default(); + while let Some(node) = self.nodes.get_mut(¤t) { + match node { + SparseNode::Empty => { + *node = SparseNode::new_leaf(path); + break + } + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { path: current, hash: *hash }) + } + SparseNode::Leaf { key: current_key, .. } => { + current.extend_from_slice_unchecked(current_key); + + // this leaf is being updated + if current == path { + unreachable!("we already checked leaf presence in the beginning"); + } + + // find the common prefix + let common = current.common_prefix_length(&path); + + // update existing node + let new_ext_key = current.slice(current.len() - current_key.len()..common); + *node = SparseNode::new_ext(new_ext_key); + + // create a branch node and corresponding leaves + self.nodes.insert( + current.slice(..common), + SparseNode::new_split_branch(current[common], path[common]), + ); + self.nodes.insert( + path.slice(..=common), + SparseNode::new_leaf(path.slice(common + 1..)), + ); + self.nodes.insert( + current.slice(..=common), + SparseNode::new_leaf(current.slice(common + 1..)), + ); + + break; + } + SparseNode::Extension { key, .. } => { + current.extend_from_slice(key); + if !path.starts_with(¤t) { + // find the common prefix + let common = current.common_prefix_length(&path); + + *key = current.slice(current.len() - key.len()..common); + + // create state mask for new branch node + // NOTE: this might overwrite the current extension node + let branch = SparseNode::new_split_branch(current[common], path[common]); + self.nodes.insert(current.slice(..common), branch); + + // create new leaf + let new_leaf = SparseNode::new_leaf(path.slice(common + 1..)); + self.nodes.insert(path.slice(..=common), new_leaf); + + // recreate extension to previous child if needed + let key = current.slice(common + 1..); + if !key.is_empty() { + self.nodes.insert(current.slice(..=common), SparseNode::new_ext(key)); + } + + break; + } + } + SparseNode::Branch { state_mask, .. } => { + let nibble = path[current.len()]; + current.push_unchecked(nibble); + if !state_mask.is_bit_set(nibble) { + state_mask.set_bit(nibble); + let new_leaf = SparseNode::new_leaf(path.slice(current.len()..)); + self.nodes.insert(current, new_leaf); + break; + } + } + }; + } + + Ok(()) + } + + /// Remove leaf node from the trie. + pub fn remove_leaf(&mut self, _path: Nibbles) { + unimplemented!() + } + + /// Return the root of the sparse trie. + /// Updates all remaining dirty nodes before calculating the root. + pub fn root(&mut self) -> B256 { + // take the current prefix set. + let mut prefix_set = std::mem::take(&mut self.prefix_set).freeze(); + let root_rlp = self.rlp_node(Nibbles::default(), &mut prefix_set); + if root_rlp.len() == B256::len_bytes() + 1 { + B256::from_slice(&root_rlp[1..]) + } else { + keccak256(root_rlp) + } + } + + /// Update node hashes only if their path exceeds the provided level. + pub fn update_rlp_node_level(&mut self, min_len: usize) { + let mut paths = Vec::from([Nibbles::default()]); + let mut targets = HashSet::::default(); + + while let Some(mut path) = paths.pop() { + match self.nodes.get(&path).unwrap() { + SparseNode::Empty | SparseNode::Hash(_) => {} + SparseNode::Leaf { .. 
} => { + targets.insert(path); + } + SparseNode::Extension { key, .. } => { + if path.len() >= min_len { + targets.insert(path); + } else { + path.extend_from_slice_unchecked(key); + paths.push(path); + } + } + SparseNode::Branch { state_mask, .. } => { + if path.len() >= min_len { + targets.insert(path); + } else { + for bit in CHILD_INDEX_RANGE { + if state_mask.is_bit_set(bit) { + let mut child_path = path.clone(); + child_path.push_unchecked(bit); + paths.push(child_path); + } + } + } + } + } + } + + let mut prefix_set = self.prefix_set.clone().freeze(); + for target in targets { + self.rlp_node(target, &mut prefix_set); + } + } + + fn rlp_node(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { + // stack of paths we need rlp nodes for + let mut path_stack = Vec::from([path]); + // stack of rlp nodes + let mut rlp_node_stack = Vec::<(Nibbles, RlpNode)>::new(); + // reusable branch child path + let mut branch_child_buf = SmallVec::<[Nibbles; 16]>::new_const(); + // reusable branch value stack + let mut branch_value_stack_buf = SmallVec::<[RlpNode; 16]>::new_const(); + + 'main: while let Some(path) = path_stack.pop() { + let rlp_node = match self.nodes.get_mut(&path).unwrap() { + SparseNode::Empty => RlpNode::word_rlp(&EMPTY_ROOT_HASH), + SparseNode::Hash(hash) => RlpNode::word_rlp(hash), + SparseNode::Leaf { key, hash } => { + self.rlp_buf.clear(); + let mut path = path.clone(); + path.extend_from_slice_unchecked(key); + if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { + RlpNode::word_rlp(&hash) + } else { + let value = self.values.get(&path).unwrap(); + let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); + if rlp_node.len() == B256::len_bytes() + 1 { + *hash = Some(B256::from_slice(&rlp_node[1..])); + } + rlp_node + } + } + SparseNode::Extension { key, hash } => { + let mut child_path = path.clone(); + child_path.extend_from_slice_unchecked(key); + if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { + RlpNode::word_rlp(&hash) + } else if rlp_node_stack.last().map_or(false, |e| e.0 == child_path) { + let (_, child) = rlp_node_stack.pop().unwrap(); + self.rlp_buf.clear(); + let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); + if rlp_node.len() == B256::len_bytes() + 1 { + *hash = Some(B256::from_slice(&rlp_node[1..])); + } + rlp_node + } else { + path_stack.extend([path, child_path]); // need to get rlp node for child first + continue + } + } + SparseNode::Branch { state_mask, hash } => { + if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { + rlp_node_stack.push((path, RlpNode::word_rlp(&hash))); + continue + } + + branch_child_buf.clear(); + for bit in CHILD_INDEX_RANGE { + if state_mask.is_bit_set(bit) { + let mut child = path.clone(); + child.push_unchecked(bit); + branch_child_buf.push(child); + } + } + + branch_value_stack_buf.clear(); + for child_path in &branch_child_buf { + if rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) { + let (_, child) = rlp_node_stack.pop().unwrap(); + branch_value_stack_buf.push(child); + } else { + debug_assert!(branch_value_stack_buf.is_empty()); + path_stack.push(path); + path_stack.extend(branch_child_buf.drain(..)); + continue 'main + } + } + + self.rlp_buf.clear(); + let rlp_node = BranchNodeRef::new(&branch_value_stack_buf, *state_mask) + .rlp(&mut self.rlp_buf); + if rlp_node.len() == B256::len_bytes() + 1 { + *hash = Some(B256::from_slice(&rlp_node[1..])); + } + rlp_node + } + }; + rlp_node_stack.push((path, rlp_node)); + } + + 
rlp_node_stack.pop().unwrap().1 + } +} + +/// Enum representing trie nodes in sparse trie. +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum SparseNode { + /// Empty trie node. + Empty, + /// The hash of the node that was not revealed. + Hash(B256), + /// Sparse leaf node with remaining key suffix. + Leaf { + /// Remaining key suffix for the leaf node. + key: Nibbles, + /// Pre-computed hash of the sparse node. + /// Can be reused unless this trie path has been updated. + hash: Option, + }, + /// Sparse extension node with key. + Extension { + /// The key slice stored by this extension node. + key: Nibbles, + /// Pre-computed hash of the sparse node. + /// Can be reused unless this trie path has been updated. + hash: Option, + }, + /// Sparse branch node with state mask. + Branch { + /// The bitmask representing children present in the branch node. + state_mask: TrieMask, + /// Pre-computed hash of the sparse node. + /// Can be reused unless this trie path has been updated. + hash: Option, + }, +} + +impl SparseNode { + /// Create new sparse node from [`TrieNode`]. + pub fn from_node(node: TrieNode) -> Self { + match node { + TrieNode::EmptyRoot => Self::Empty, + TrieNode::Leaf(leaf) => Self::new_leaf(leaf.key), + TrieNode::Extension(ext) => Self::new_ext(ext.key), + TrieNode::Branch(branch) => Self::new_branch(branch.state_mask), + } + } + + /// Create new [`SparseNode::Branch`] from state mask. + pub const fn new_branch(state_mask: TrieMask) -> Self { + Self::Branch { state_mask, hash: None } + } + + /// Create new [`SparseNode::Branch`] with two bits set. + pub const fn new_split_branch(bit_a: u8, bit_b: u8) -> Self { + let state_mask = TrieMask::new( + // set bits for both children + (1u16 << bit_a) | (1u16 << bit_b), + ); + Self::Branch { state_mask, hash: None } + } + + /// Create new [`SparseNode::Extension`] from the key slice. + pub const fn new_ext(key: Nibbles) -> Self { + Self::Extension { key, hash: None } + } + + /// Create new [`SparseNode::Leaf`] from leaf key and value. 
+ pub const fn new_leaf(key: Nibbles) -> Self { + Self::Leaf { key, hash: None } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::U256; + use itertools::Itertools; + use proptest::prelude::*; + use reth_trie_common::HashBuilder; + + #[test] + fn sparse_trie_is_blind() { + assert!(SparseTrie::default().is_blind()); + assert!(!SparseTrie::revealed_empty().is_blind()); + } + + #[test] + fn sparse_trie_empty_update_one() { + let path = Nibbles::unpack(B256::with_last_byte(42)); + let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + + let mut hash_builder = HashBuilder::default(); + hash_builder.add_leaf(path.clone(), &value); + let expected = hash_builder.root(); + + let mut sparse = RevealedSparseTrie::default(); + sparse.update_leaf(path, value.to_vec()).unwrap(); + let root = sparse.root(); + assert_eq!(root, expected); + } + + #[test] + fn sparse_trie_empty_update_multiple_lower_nibbles() { + let paths = (0..=16).map(|b| Nibbles::unpack(B256::with_last_byte(b))).collect::>(); + let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + + let mut hash_builder = HashBuilder::default(); + for path in &paths { + hash_builder.add_leaf(path.clone(), &value); + } + let expected = hash_builder.root(); + + let mut sparse = RevealedSparseTrie::default(); + for path in &paths { + sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + } + let root = sparse.root(); + assert_eq!(root, expected); + } + + #[test] + fn sparse_trie_empty_update_multiple_upper_nibbles() { + let paths = (239..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::>(); + let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + + let mut hash_builder = HashBuilder::default(); + for path in &paths { + hash_builder.add_leaf(path.clone(), &value); + } + let expected = hash_builder.root(); + + let mut sparse = RevealedSparseTrie::default(); + for path in &paths { + sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + } + let root = sparse.root(); + assert_eq!(root, expected); + } + + #[test] + fn sparse_trie_empty_update_multiple() { + let paths = (0..=255) + .map(|b| { + Nibbles::unpack(if b % 2 == 0 { + B256::repeat_byte(b) + } else { + B256::with_last_byte(b) + }) + }) + .collect::>(); + let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + + let mut hash_builder = HashBuilder::default(); + for path in paths.iter().sorted_unstable_by_key(|key| *key) { + hash_builder.add_leaf(path.clone(), &value); + } + let expected = hash_builder.root(); + + let mut sparse = RevealedSparseTrie::default(); + for path in &paths { + sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + } + let root = sparse.root(); + assert_eq!(root, expected); + } + + #[test] + fn sparse_trie_empty_update_repeated() { + let paths = (0..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::>(); + let old_value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let new_value = alloy_rlp::encode_fixed_size(&U256::from(2)); + + let mut hash_builder = HashBuilder::default(); + for path in paths.iter().sorted_unstable_by_key(|key| *key) { + hash_builder.add_leaf(path.clone(), &old_value); + } + let expected = hash_builder.root(); + + let mut sparse = RevealedSparseTrie::default(); + for path in &paths { + sparse.update_leaf(path.clone(), old_value.to_vec()).unwrap(); + } + let root = sparse.root(); + assert_eq!(root, expected); + + let mut hash_builder = HashBuilder::default(); + for path in paths.iter().sorted_unstable_by_key(|key| *key) { + hash_builder.add_leaf(path.clone(), &new_value); 
+ } + let expected = hash_builder.root(); + + for path in &paths { + sparse.update_leaf(path.clone(), new_value.to_vec()).unwrap(); + } + let root = sparse.root(); + assert_eq!(root, expected); + } + + #[test] + fn sparse_trie_empty_update_fuzz() { + proptest!(ProptestConfig::with_cases(10), |(updates: Vec>)| { + let mut state = std::collections::BTreeMap::default(); + let mut sparse = RevealedSparseTrie::default(); + + for update in updates { + for (key, value) in &update { + sparse.update_leaf(Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec()).unwrap(); + } + let root = sparse.root(); + + state.extend(update); + let mut hash_builder = HashBuilder::default(); + for (key, value) in &state { + hash_builder.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(value)); + } + let expected = hash_builder.root(); + + assert_eq!(root, expected); + } + }); + } +} diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/trie/src/prefix_set.rs index af0fb173d98a..da912fbbdad9 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/trie/src/prefix_set.rs @@ -82,7 +82,7 @@ pub struct TriePrefixSets { /// assert!(prefix_set.contains(&[0xa, 0xb])); /// assert!(prefix_set.contains(&[0xa, 0xb, 0xc])); /// ``` -#[derive(Clone, Default, Debug)] +#[derive(PartialEq, Eq, Clone, Default, Debug)] pub struct PrefixSetMut { /// Flag indicating that any entry should be considered changed. /// If set, the keys will be discarded. From 6fb271036dbaf0ab9c9215959baec5833ac1cc88 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 15 Oct 2024 18:51:40 +0400 Subject: [PATCH 03/51] feat: move RPC launch to add-ons (#11532) --- Cargo.lock | 21 +- crates/e2e-test-utils/src/lib.rs | 14 +- crates/e2e-test-utils/src/node.rs | 8 +- crates/ethereum/node/src/node.rs | 27 +- crates/ethereum/node/tests/e2e/dev.rs | 8 +- crates/exex/test-utils/src/lib.rs | 4 +- crates/node/api/Cargo.toml | 7 + crates/node/api/src/node.rs | 75 +++-- crates/node/builder/src/builder/add_ons.rs | 16 +- crates/node/builder/src/builder/mod.rs | 30 +- crates/node/builder/src/builder/states.rs | 109 +++---- crates/node/builder/src/handle.rs | 10 +- crates/node/builder/src/launch/engine.rs | 62 +--- crates/node/builder/src/launch/mod.rs | 65 +--- crates/node/builder/src/lib.rs | 5 +- crates/node/builder/src/node.rs | 48 ++- crates/node/builder/src/rpc.rs | 318 ++++++++++++++------ crates/optimism/bin/src/main.rs | 29 +- crates/optimism/node/src/node.rs | 45 ++- crates/optimism/node/tests/e2e/utils.rs | 6 +- crates/optimism/rpc/src/eth/mod.rs | 22 +- crates/optimism/rpc/src/eth/transaction.rs | 10 +- crates/rpc/rpc-builder/src/eth.rs | 8 +- crates/rpc/rpc-builder/src/lib.rs | 1 + crates/rpc/rpc-eth-types/src/builder/ctx.rs | 62 +--- crates/rpc/rpc/src/eth/core.rs | 24 +- examples/custom-engine-types/src/main.rs | 7 +- 27 files changed, 535 insertions(+), 506 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ee37e080aed..59e8f2b27fba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4464,7 +4464,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -7787,9 +7787,14 @@ dependencies = [ name = "reth-node-api" version = "1.1.0" dependencies = [ + "alloy-rpc-types-engine", + "eyre", + "reth-beacon-consensus", + "reth-consensus", "reth-engine-primitives", "reth-evm", "reth-network-api", + "reth-node-core", "reth-node-types", 
"reth-payload-builder", "reth-payload-primitives", @@ -9577,9 +9582,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-platform-verifier" @@ -9621,9 +9626,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "rusty-fork" @@ -9660,9 +9665,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "553f8299af7450cda9a52d3a370199904e7a46b5ffd1bef187c4a6af3bb6db69" +checksum = "f2c1f7fc6deb21665a9060dfc7d271be784669295a31babdcd4dd2c79ae8cbfb" dependencies = [ "sdd", ] @@ -11367,7 +11372,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 998b48e70431..48e56910e6c0 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -7,15 +7,13 @@ use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, builder::{NodeBuilder, NodeConfig, NodeHandle}, network::PeersHandleProvider, - rpc::api::eth::{helpers::AddDevSigners, FullEthApiServer}, tasks::TaskManager, }; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_node_builder::{ - components::NodeComponentsBuilder, rpc::EthApiBuilderProvider, FullNodeTypesAdapter, Node, - NodeAdapter, NodeAddOns, NodeComponents, NodeTypesWithDBAdapter, NodeTypesWithEngine, - RethFullAdapter, + components::NodeComponentsBuilder, rpc::RethRpcAddOns, FullNodeTypesAdapter, Node, NodeAdapter, + NodeComponents, NodeTypesWithDBAdapter, NodeTypesWithEngine, RethFullAdapter, }; use reth_provider::providers::BlockchainProvider; use tracing::{span, Level}; @@ -56,10 +54,7 @@ where TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, >, - N::AddOns: NodeAddOns< - Adapter, - EthApi: FullEthApiServer + AddDevSigners + EthApiBuilderProvider>, - >, + N::AddOns: RethRpcAddOns>, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -115,7 +110,8 @@ type TmpNodeAdapter = FullNodeTypesAdapter< BlockchainProvider>, >; -type Adapter = NodeAdapter< +/// Type alias for a `NodeAdapter` +pub type Adapter = NodeAdapter< RethFullAdapter, <>>::ComponentsBuilder as NodeComponentsBuilder< RethFullAdapter, diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 2ea39348f5de..c22913ba2363 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -18,7 +18,7 @@ use reth::{ }, }; use reth_chainspec::EthereumHardforks; -use reth_node_builder::{NodeAddOns, NodeTypesWithEngine}; +use reth_node_builder::{rpc::RethRpcAddOns, NodeTypesWithEngine}; use reth_stages_types::StageId; use tokio_stream::StreamExt; @@ -32,7 +32,7 @@ use crate::{ pub struct NodeTestContext 
where Node: FullNodeComponents, - AddOns: NodeAddOns, + AddOns: RethRpcAddOns, { /// The core structure representing the full node. pub inner: FullNode, @@ -52,7 +52,7 @@ where Node: FullNodeComponents, Node::Types: NodeTypesWithEngine, Node::Network: PeersHandleProvider, - AddOns: NodeAddOns, + AddOns: RethRpcAddOns, { /// Creates a new test node pub async fn new(node: FullNode) -> eyre::Result { @@ -67,7 +67,7 @@ where canonical_stream: node.provider.canonical_state_stream(), _marker: PhantomData::, }, - rpc: RpcTestContext { inner: node.rpc_registry }, + rpc: RpcTestContext { inner: node.add_ons_handle.rpc_registry }, }) } diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index f17658bb32da..82f313fbb0b2 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -11,14 +11,15 @@ use reth_ethereum_engine_primitives::{ }; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_network::NetworkHandle; -use reth_node_api::{ConfigureEvm, EngineValidator, FullNodeComponents, NodeAddOns}; +use reth_node_api::{ConfigureEvm, EngineValidator, FullNodeComponents, NodeTypesWithDB}; use reth_node_builder::{ components::{ ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - BuilderContext, Node, PayloadBuilderConfig, PayloadTypes, + rpc::RpcAddOns, + BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::Header; @@ -77,17 +78,19 @@ impl NodeTypesWithEngine for EthereumNode { } /// Add-ons w.r.t. l1 ethereum. -#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct EthereumAddOns; - -impl NodeAddOns for EthereumAddOns { - type EthApi = EthApi; -} +pub type EthereumAddOns = RpcAddOns< + N, + EthApi< + ::Provider, + ::Pool, + NetworkHandle, + ::Evm, + >, +>; impl Node for EthereumNode where - Types: NodeTypesWithEngine, + Types: NodeTypesWithDB + NodeTypesWithEngine, N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< @@ -100,7 +103,9 @@ where EthereumEngineValidatorBuilder, >; - type AddOns = EthereumAddOns; + type AddOns = EthereumAddOns< + NodeAdapter>::Components>, + >; fn components_builder(&self) -> Self::ComponentsBuilder { Self::components() diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index 6b4733b6f9b4..cad2fb34e5de 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -6,8 +6,10 @@ use futures::StreamExt; use reth::{args::DevArgs, core::rpc::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; use reth_e2e_test_utils::setup; -use reth_node_api::{FullNodeComponents, NodeAddOns}; -use reth_node_builder::{EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle}; +use reth_node_api::FullNodeComponents; +use reth_node_builder::{ + rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, +}; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; use reth_provider::{providers::BlockchainProvider2, CanonStateSubscriptions}; use reth_tasks::TaskManager; @@ -53,7 +55,7 @@ async fn can_run_dev_node_new_engine() -> eyre::Result<()> { async fn assert_chain_advances(node: FullNode) where N: FullNodeComponents, - AddOns: NodeAddOns, + AddOns: RethRpcAddOns, { let mut notifications = 
node.provider.canonical_state_stream(); diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index e219f55031db..9e17013c4a5e 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -142,7 +142,9 @@ where TestConsensusBuilder, EthereumEngineValidatorBuilder, >; - type AddOns = EthereumAddOns; + type AddOns = EthereumAddOns< + NodeAdapter>::Components>, + >; fn components_builder(&self) -> Self::ComponentsBuilder { ComponentsBuilder::default() diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index e7685acc84fd..c2c3eb46326b 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -12,6 +12,8 @@ workspace = true [dependencies] # reth +reth-beacon-consensus.workspace = true +reth-consensus.workspace = true reth-evm.workspace = true reth-provider.workspace = true reth-engine-primitives.workspace = true @@ -23,3 +25,8 @@ reth-rpc-eth-api.workspace = true reth-network-api.workspace = true reth-node-types.workspace = true reth-primitives.workspace = true +reth-node-core.workspace = true + +alloy-rpc-types-engine.workspace = true + +eyre.workspace = true \ No newline at end of file diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index ce6b16c8ffcd..40c2a3a60b08 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,14 +1,18 @@ //! Traits for configuring a node. -use std::marker::PhantomData; +use std::{future::Future, marker::PhantomData}; +use alloy_rpc_types_engine::JwtSecret; +use reth_beacon_consensus::BeaconConsensusEngineHandle; +use reth_consensus::Consensus; +use reth_engine_primitives::EngineValidator; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; -use reth_node_types::{NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_core::node_config::NodeConfig; +use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::Header; use reth_provider::FullProvider; -use reth_rpc_eth_api::EthApiTypes; use reth_tasks::TaskExecutor; use reth_transaction_pool::TransactionPool; @@ -54,9 +58,15 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider; + /// The consensus type of the node. + type Consensus: Consensus + Clone + Unpin + 'static; + /// Network API. type Network: FullNetwork; + /// Validator for the engine API. + type EngineValidator: EngineValidator<::Engine>; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -66,8 +76,8 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// Returns the node's executor type. fn block_executor(&self) -> &Self::Executor; - /// Returns the provider of the node. - fn provider(&self) -> &Self::Provider; + /// Returns the node's consensus type. + fn consensus(&self) -> &Self::Consensus; /// Returns the handle to the network fn network(&self) -> &Self::Network; @@ -77,37 +87,46 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { &self, ) -> &PayloadBuilderHandle<::Engine>; + /// Returns the engine validator. + fn engine_validator(&self) -> &Self::EngineValidator; + + /// Returns the provider of the node. + fn provider(&self) -> &Self::Provider; + /// Returns handle to runtime. fn task_executor(&self) -> &TaskExecutor; } -/// Customizable node add-on types. 
-pub trait NodeAddOns: Send + Sync + Unpin + Clone + 'static { - /// The core `eth` namespace API type to install on the RPC server (see - /// `reth_rpc_eth_api::EthApiServer`). - type EthApi: EthApiTypes + Send + Clone; -} - -impl NodeAddOns for () { - type EthApi = (); +/// Context passed to [`NodeAddOns::launch_add_ons`], +#[derive(Debug)] +pub struct AddOnsContext<'a, N: FullNodeComponents> { + /// Node with all configured components. + pub node: &'a N, + /// Node configuration. + pub config: &'a NodeConfig<::ChainSpec>, + /// Handle to the beacon consensus engine. + pub beacon_engine_handle: + &'a BeaconConsensusEngineHandle<::Engine>, + /// JWT secret for the node. + pub jwt_secret: &'a JwtSecret, } -/// Returns the builder for type. -pub trait BuilderProvider: Send { - /// Context required to build type. - type Ctx<'a>; - - /// Returns builder for type. - #[allow(clippy::type_complexity)] - fn builder() -> Box Fn(Self::Ctx<'a>) -> Self + Send>; +/// Customizable node add-on types. +pub trait NodeAddOns: Send { + /// Handle to add-ons. + type Handle: Send + Sync + Clone; + + /// Configures and launches the add-ons. + fn launch_add_ons( + self, + ctx: AddOnsContext<'_, N>, + ) -> impl Future> + Send; } -impl BuilderProvider for () { - type Ctx<'a> = (); +impl NodeAddOns for () { + type Handle = (); - fn builder() -> Box Fn(Self::Ctx<'a>) -> Self + Send> { - Box::new(noop_builder) + async fn launch_add_ons(self, _components: AddOnsContext<'_, N>) -> eyre::Result { + Ok(()) } } - -const fn noop_builder(_: ()) {} diff --git a/crates/node/builder/src/builder/add_ons.rs b/crates/node/builder/src/builder/add_ons.rs index 26d7553bb86d..7be0411b2f01 100644 --- a/crates/node/builder/src/builder/add_ons.rs +++ b/crates/node/builder/src/builder/add_ons.rs @@ -1,8 +1,8 @@ //! Node add-ons. Depend on core [`NodeComponents`](crate::NodeComponents). -use reth_node_api::{EthApiTypes, FullNodeComponents, NodeAddOns}; +use reth_node_api::{FullNodeComponents, NodeAddOns}; -use crate::{exex::BoxedLaunchExEx, hooks::NodeHooks, rpc::RpcHooks}; +use crate::{exex::BoxedLaunchExEx, hooks::NodeHooks}; /// Additional node extensions. /// @@ -12,16 +12,6 @@ pub struct AddOns> { pub hooks: NodeHooks, /// The `ExExs` (execution extensions) of the node. pub exexs: Vec<(String, Box>)>, - /// Additional RPC add-ons. - pub rpc: RpcAddOns, /// Additional captured addons. - pub addons: AddOns, -} - -/// Captures node specific addons that can be installed on top of the type configured node and are -/// required for launching the node, such as RPC. -#[derive(Default)] -pub struct RpcAddOns { - /// Additional RPC hooks. 
- pub hooks: RpcHooks, + pub add_ons: AddOns, } diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index fb94413fe756..82d8d96f6f55 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -13,7 +13,7 @@ use crate::{ common::WithConfigs, components::NodeComponentsBuilder, node::FullNode, - rpc::{EthApiBuilderProvider, RethRpcServerHandles, RpcContext}, + rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, DefaultNodeLauncher, LaunchNode, Node, NodeHandle, }; use futures::Future; @@ -37,7 +37,6 @@ use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, primitives::Head, - rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, }; use reth_primitives::revm_primitives::EnvKzgSettings; use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, FullProvider}; @@ -358,19 +357,11 @@ where > where N: Node, ChainSpec = ChainSpec>, - N::AddOns: NodeAddOns< + N::AddOns: RethRpcAddOns< NodeAdapter< RethFullAdapter, >>::Components, >, - EthApi: EthApiBuilderProvider< - NodeAdapter< - RethFullAdapter, - >>::Components, - > - > - + FullEthApiServer - + AddDevSigners >, { self.node(node).launch().await @@ -418,7 +409,7 @@ impl WithLaunchContext> where T: FullNodeTypes, CB: NodeComponentsBuilder, - AO: NodeAddOns, EthApi: FullEthApiServer + AddDevSigners>, + AO: RethRpcAddOns>, { /// Returns a reference to the node builder's config. pub const fn config(&self) -> &NodeConfig<::ChainSpec> { @@ -466,6 +457,14 @@ where Self { builder: self.builder.on_node_started(hook), task_executor: self.task_executor } } + /// Modifies the addons with the given closure. + pub fn map_add_ons(self, f: F) -> Self + where + F: FnOnce(AO) -> AO, + { + Self { builder: self.builder.map_add_ons(f), task_executor: self.task_executor } + } + /// Sets the hook that is run once the rpc server is started. pub fn on_rpc_started(self, hook: F) -> Self where @@ -553,12 +552,7 @@ where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, T: NodeTypesWithEngine, CB: NodeComponentsBuilder>, - AO: NodeAddOns< - NodeAdapter, CB::Components>, - EthApi: EthApiBuilderProvider, CB::Components>> - + FullEthApiServer - + AddDevSigners, - >, + AO: RethRpcAddOns, CB::Components>>, { /// Launches the node with the [`DefaultNodeLauncher`] that sets up engine API consensus and rpc pub async fn launch( diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 80930ef743cd..c4da466f23e9 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -11,10 +11,7 @@ use reth_exex::ExExContext; use reth_node_api::{ FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, }; -use reth_node_core::{ - node_config::NodeConfig, - rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, -}; +use reth_node_core::node_config::NodeConfig; use reth_payload_builder::PayloadBuilderHandle; use reth_tasks::TaskExecutor; @@ -22,8 +19,8 @@ use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, launch::LaunchNode, - rpc::{EthApiBuilderProvider, RethRpcServerHandles, RpcContext, RpcHooks}, - AddOns, FullNode, RpcAddOns, + rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, + AddOns, FullNode, }; /// A node builder that also has the configured types. 
@@ -54,12 +51,7 @@ impl NodeBuilderWithTypes { config, adapter, components_builder, - add_ons: AddOns { - hooks: NodeHooks::default(), - rpc: RpcAddOns { hooks: RpcHooks::default() }, - exexs: Vec::new(), - addons: (), - }, + add_ons: AddOns { hooks: NodeHooks::default(), exexs: Vec::new(), add_ons: () }, } } } @@ -83,8 +75,8 @@ impl fmt::Debug for NodeTypesAdapter { } } -/// Container for the node's types and the components and other internals that can be used by addons -/// of the node. +/// Container for the node's types and the components and other internals that can be used by +/// addons of the node. pub struct NodeAdapter> { /// The components of the node. pub components: C, @@ -104,6 +96,8 @@ impl> FullNodeComponents for NodeAdapter< type Evm = C::Evm; type Executor = C::Executor; type Network = C::Network; + type Consensus = C::Consensus; + type EngineValidator = C::EngineValidator; fn pool(&self) -> &Self::Pool { self.components.pool() @@ -132,6 +126,14 @@ impl> FullNodeComponents for NodeAdapter< fn task_executor(&self) -> &TaskExecutor { &self.task_executor } + + fn consensus(&self) -> &Self::Consensus { + self.components.consensus() + } + + fn engine_validator(&self) -> &Self::EngineValidator { + self.components.engine_validator() + } } impl> Clone for NodeAdapter { @@ -169,7 +171,7 @@ where { /// Advances the state of the node builder to the next state where all customizable /// [`NodeAddOns`] types are configured. - pub fn with_add_ons(self, addons: AO) -> NodeBuilderWithComponents + pub fn with_add_ons(self, add_ons: AO) -> NodeBuilderWithComponents where AO: NodeAddOns>, { @@ -179,12 +181,7 @@ where config, adapter, components_builder, - add_ons: AddOns { - hooks: NodeHooks::default(), - rpc: RpcAddOns { hooks: RpcHooks::default() }, - exexs: Vec::new(), - addons, - }, + add_ons: AddOns { hooks: NodeHooks::default(), exexs: Vec::new(), add_ons }, } } } @@ -215,31 +212,6 @@ where self } - /// Sets the hook that is run once the rpc server is started. - pub fn on_rpc_started(mut self, hook: F) -> Self - where - F: FnOnce( - RpcContext<'_, NodeAdapter, AO::EthApi>, - RethRpcServerHandles, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.add_ons.rpc.hooks.set_on_rpc_started(hook); - self - } - - /// Sets the hook that is run to configure the rpc modules. - pub fn extend_rpc_modules(mut self, hook: F) -> Self - where - F: FnOnce(RpcContext<'_, NodeAdapter, AO::EthApi>) -> eyre::Result<()> - + Send - + 'static, - { - self.add_ons.rpc.hooks.set_extend_rpc_modules(hook); - self - } - /// Installs an `ExEx` (Execution Extension) in the node. /// /// # Note @@ -269,18 +241,22 @@ where pub const fn check_launch(self) -> Self { self } + + /// Modifies the addons with the given closure. + pub fn map_add_ons(mut self, f: F) -> Self + where + F: FnOnce(AO) -> AO, + { + self.add_ons.add_ons = f(self.add_ons.add_ons); + self + } } impl NodeBuilderWithComponents where T: FullNodeTypes, CB: NodeComponentsBuilder, - AO: NodeAddOns< - NodeAdapter, - EthApi: EthApiBuilderProvider> - + FullEthApiServer - + AddDevSigners, - >, + AO: RethRpcAddOns>, { /// Launches the node with the given launcher. pub async fn launch_with(self, launcher: L) -> eyre::Result @@ -289,4 +265,33 @@ where { launcher.launch_node(self).await } + + /// Sets the hook that is run once the rpc server is started. 
+ pub fn on_rpc_started(self, hook: F) -> Self + where + F: FnOnce( + RpcContext<'_, NodeAdapter, AO::EthApi>, + RethRpcServerHandles, + ) -> eyre::Result<()> + + Send + + 'static, + { + self.map_add_ons(|mut add_ons| { + add_ons.hooks_mut().set_on_rpc_started(hook); + add_ons + }) + } + + /// Sets the hook that is run to configure the rpc modules. + pub fn extend_rpc_modules(self, hook: F) -> Self + where + F: FnOnce(RpcContext<'_, NodeAdapter, AO::EthApi>) -> eyre::Result<()> + + Send + + 'static, + { + self.map_add_ons(|mut add_ons| { + add_ons.hooks_mut().set_extend_rpc_modules(hook); + add_ons + }) + } } diff --git a/crates/node/builder/src/handle.rs b/crates/node/builder/src/handle.rs index c81aa9420dde..2997a8687afd 100644 --- a/crates/node/builder/src/handle.rs +++ b/crates/node/builder/src/handle.rs @@ -1,13 +1,13 @@ use std::fmt; -use reth_node_api::{FullNodeComponents, NodeAddOns}; +use reth_node_api::FullNodeComponents; use reth_node_core::exit::NodeExitFuture; -use crate::node::FullNode; +use crate::{node::FullNode, rpc::RethRpcAddOns}; /// A Handle to the launched node. #[must_use = "Needs to await the node exit future"] -pub struct NodeHandle> { +pub struct NodeHandle> { /// All node components. pub node: FullNode, /// The exit future of the node. @@ -17,7 +17,7 @@ pub struct NodeHandle> { impl NodeHandle where Node: FullNodeComponents, - AddOns: NodeAddOns, + AddOns: RethRpcAddOns, { /// Waits for the node to exit, if it was configured to exit. pub async fn wait_for_node_exit(self) -> eyre::Result<()> { @@ -28,7 +28,7 @@ where impl fmt::Debug for NodeHandle where Node: FullNodeComponents, - AddOns: NodeAddOns, + AddOns: RethRpcAddOns, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("NodeHandle") diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index f9e26f202fc4..3de651cdcd0b 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -1,6 +1,5 @@ //! Engine node related functionality. 
-use alloy_rpc_types::engine::ClientVersionV1; use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, StaticFileHook}, @@ -20,21 +19,17 @@ use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; use reth_network_api::{BlockDownloaderProvider, NetworkEventListenerProvider}; use reth_node_api::{ - BuiltPayload, FullNodeTypes, NodeAddOns, NodeTypesWithEngine, PayloadAttributesBuilder, - PayloadTypes, + BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, primitives::Head, - rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, - version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_payload_primitives::PayloadBuilder; use reth_primitives::EthereumHardforks; use reth_provider::providers::{BlockchainProvider2, ProviderNodeTypes}; -use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; use reth_tracing::tracing::{debug, error, info}; @@ -45,9 +40,9 @@ use tokio_stream::wrappers::UnboundedReceiverStream; use crate::{ common::{Attached, LaunchContextWith, WithConfigs}, hooks::NodeHooks, - rpc::{launch_rpc_servers, EthApiBuilderProvider}, + rpc::{RethRpcAddOns, RpcHandle}, setup::build_networked_pipeline, - AddOns, ExExLauncher, FullNode, LaunchContext, LaunchNode, NodeAdapter, + AddOns, AddOnsContext, ExExLauncher, FullNode, LaunchContext, LaunchNode, NodeAdapter, NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter, }; @@ -78,12 +73,7 @@ where Types: ProviderNodeTypes + NodeTypesWithEngine, T: FullNodeTypes>, CB: NodeComponentsBuilder, - AO: NodeAddOns< - NodeAdapter, - EthApi: EthApiBuilderProvider> - + FullEthApiServer - + AddDevSigners, - >, + AO: RethRpcAddOns>, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, @@ -98,7 +88,7 @@ where let NodeBuilderWithComponents { adapter: NodeTypesAdapter { database }, components_builder, - add_ons: AddOns { hooks, rpc, exexs: installed_exex, .. }, + add_ons: AddOns { hooks, exexs: installed_exex, add_ons }, config, } = target; let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; @@ -292,37 +282,18 @@ where ), ); - let client = ClientVersionV1 { - code: CLIENT_CODE, - name: NAME_CLIENT.to_string(), - version: CARGO_PKG_VERSION.to_string(), - commit: VERGEN_GIT_SHA.to_string(), - }; - let engine_api = EngineApi::new( - ctx.blockchain_db().clone(), - ctx.chain_spec(), - beacon_engine_handle, - ctx.components().payload_builder().clone().into(), - ctx.components().pool().clone(), - Box::new(ctx.task_executor().clone()), - client, - EngineCapabilities::default(), - ctx.components().engine_validator().clone(), - ); - info!(target: "reth::cli", "Engine API handler initialized"); - // extract the jwt secret from the args if possible let jwt_secret = ctx.auth_jwt_secret()?; - // Start RPC servers - let (rpc_server_handles, rpc_registry) = launch_rpc_servers( - ctx.node_adapter().clone(), - engine_api, - ctx.node_config(), - jwt_secret, - rpc, - ) - .await?; + let add_ons_ctx = AddOnsContext { + node: ctx.node_adapter(), + config: ctx.node_config(), + beacon_engine_handle: &beacon_engine_handle, + jwt_secret: &jwt_secret, + }; + + let RpcHandle { rpc_server_handles, rpc_registry } = + add_ons.launch_add_ons(add_ons_ctx).await?; // TODO: migrate to devmode with https://github.com/paradigmxyz/reth/issues/10104 if let Some(maybe_custom_etherscan_url) = ctx.node_config().debug.etherscan.clone() { @@ -442,13 +413,12 @@ where provider: ctx.node_adapter().provider.clone(), payload_builder: ctx.components().payload_builder().clone(), task_executor: ctx.task_executor().clone(), - rpc_server_handles, - rpc_registry, config: ctx.node_config().clone(), data_dir: ctx.data_dir().clone(), + add_ons_handle: RpcHandle { rpc_server_handles, rpc_registry }, }; // Notify on node started - on_node_started.on_event(full_node.clone())?; + on_node_started.on_event(FullNode::clone(&full_node))?; let handle = NodeHandle { node_exit_future: NodeExitFuture::new( diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 3188cde4b157..36aa55541e00 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -12,7 +12,6 @@ pub use exex::ExExLauncher; use std::{future::Future, sync::Arc}; use alloy_primitives::utils::format_ether; -use alloy_rpc_types::engine::ClientVersionV1; use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, @@ -25,17 +24,14 @@ use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider}; use reth_node_api::{ - FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypesWithDB, NodeTypesWithEngine, + AddOnsContext, FullNodeComponents, FullNodeTypes, NodeTypesWithDB, NodeTypesWithEngine, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, - rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, - version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_provider::providers::BlockchainProvider; -use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; @@ -47,19 +43,18 @@ use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, node::FullNode, - rpc::EthApiBuilderProvider, + rpc::{RethRpcAddOns, RpcHandle}, AddOns, NodeBuilderWithComponents, NodeHandle, }; /// 
Alias for [`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`FullNodeComponents`]. -pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< +pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< ::Provider, ::Pool, ::Evm, ::Network, TaskExecutor, ::Provider, - Eth, >; /// A general purpose trait that launches a new node of any kind. @@ -109,12 +104,7 @@ where Types: NodeTypesWithDB + NodeTypesWithEngine, T: FullNodeTypes, Types = Types>, CB: NodeComponentsBuilder, - AO: NodeAddOns< - NodeAdapter, - EthApi: EthApiBuilderProvider> - + FullEthApiServer - + AddDevSigners, - >, + AO: RethRpcAddOns>, { type Node = NodeHandle, AO>; @@ -126,7 +116,7 @@ where let NodeBuilderWithComponents { adapter: NodeTypesAdapter { database }, components_builder, - add_ons: AddOns { hooks, rpc, exexs: installed_exex, .. }, + add_ons: AddOns { hooks, exexs: installed_exex, add_ons }, config, } = target; let NodeHooks { on_component_initialized, on_node_started, .. } = hooks; @@ -336,42 +326,18 @@ where ), ); - let client = ClientVersionV1 { - code: CLIENT_CODE, - name: NAME_CLIENT.to_string(), - version: CARGO_PKG_VERSION.to_string(), - commit: VERGEN_GIT_SHA.to_string(), - }; - let engine_api = EngineApi::new( - ctx.blockchain_db().clone(), - ctx.chain_spec(), - beacon_engine_handle, - ctx.components().payload_builder().clone().into(), - ctx.components().pool().clone(), - Box::new(ctx.task_executor().clone()), - client, - EngineCapabilities::default(), - ctx.components().engine_validator().clone(), - ); - info!(target: "reth::cli", "Engine API handler initialized"); - // extract the jwt secret from the args if possible let jwt_secret = ctx.auth_jwt_secret()?; - // Start RPC servers - let (rpc_server_handles, rpc_registry) = crate::rpc::launch_rpc_servers( - ctx.node_adapter().clone(), - engine_api, - ctx.node_config(), - jwt_secret, - rpc, - ) - .await?; + let add_ons_ctx = AddOnsContext { + node: ctx.node_adapter(), + config: ctx.node_config(), + beacon_engine_handle: &beacon_engine_handle, + jwt_secret: &jwt_secret, + }; - // in dev mode we generate 20 random dev-signer accounts - if ctx.is_dev() { - rpc_registry.eth_api().with_dev_accounts(); - } + let RpcHandle { rpc_server_handles, rpc_registry } = + add_ons.launch_add_ons(add_ons_ctx).await?; // Run consensus engine to completion let (tx, rx) = oneshot::channel(); @@ -431,13 +397,12 @@ where provider: ctx.node_adapter().provider.clone(), payload_builder: ctx.components().payload_builder().clone(), task_executor: ctx.task_executor().clone(), - rpc_server_handles, - rpc_registry, config: ctx.node_config().clone(), data_dir: ctx.data_dir().clone(), + add_ons_handle: RpcHandle { rpc_server_handles, rpc_registry }, }; // Notify on node started - on_node_started.on_event(full_node.clone())?; + on_node_started.on_event(FullNode::clone(&full_node))?; let handle = NodeHandle { node_exit_future: NodeExitFuture::new( diff --git a/crates/node/builder/src/lib.rs b/crates/node/builder/src/lib.rs index cfe16074a59d..899317f158c6 100644 --- a/crates/node/builder/src/lib.rs +++ b/crates/node/builder/src/lib.rs @@ -20,10 +20,7 @@ pub mod components; pub use components::{NodeComponents, NodeComponentsBuilder}; mod builder; -pub use builder::{ - add_ons::{AddOns, RpcAddOns}, - *, -}; +pub use builder::{add_ons::AddOns, *}; mod launch; pub use launch::{engine::EngineNodeLauncher, *}; diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 3a70c08c1031..3e3d5b696c39 100644 --- a/crates/node/builder/src/node.rs +++ 
b/crates/node/builder/src/node.rs @@ -1,7 +1,11 @@ // re-export the node api types pub use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}; -use std::{marker::PhantomData, sync::Arc}; +use std::{ + marker::PhantomData, + ops::{Deref, DerefMut}, + sync::Arc, +}; use reth_node_api::{EngineTypes, FullNodeComponents}; use reth_node_core::{ @@ -14,11 +18,7 @@ use reth_provider::ChainSpecProvider; use reth_rpc_builder::{auth::AuthServerHandle, RpcServerHandle}; use reth_tasks::TaskExecutor; -use crate::{ - components::NodeComponentsBuilder, - rpc::{RethRpcServerHandles, RpcRegistry}, - NodeAdapter, NodeAddOns, -}; +use crate::{components::NodeComponentsBuilder, rpc::RethRpcAddOns, NodeAdapter, NodeAddOns}; /// A [`crate::Node`] is a [`NodeTypesWithEngine`] that comes with preconfigured components. /// @@ -84,7 +84,7 @@ impl Node for AnyNode where N: FullNodeTypes + Clone, C: NodeComponentsBuilder + Clone + Sync + Unpin + 'static, - AO: NodeAddOns>, + AO: NodeAddOns> + Clone + Sync + Unpin + 'static, { type ComponentsBuilder = C; type AddOns = AO; @@ -117,14 +117,12 @@ pub struct FullNode> { pub payload_builder: PayloadBuilderHandle<::Engine>, /// Task executor for the node. pub task_executor: TaskExecutor, - /// Handles to the node's rpc servers - pub rpc_server_handles: RethRpcServerHandles, - /// The configured rpc namespaces - pub rpc_registry: RpcRegistry, /// The initial node config. pub config: NodeConfig<::ChainSpec>, /// The data dir of the node. pub data_dir: ChainPath, + /// The handle to launched add-ons + pub add_ons_handle: AddOns::Handle, } impl> Clone for FullNode { @@ -137,10 +135,9 @@ impl> Clone for FullNode Arc<::ChainSpec> { self.provider.chain_spec() } +} +impl FullNode +where + Engine: EngineTypes, + Node: FullNodeComponents>, + AddOns: RethRpcAddOns, +{ /// Returns the [`RpcServerHandle`] to the started rpc server. pub const fn rpc_server_handle(&self) -> &RpcServerHandle { - &self.rpc_server_handles.rpc + &self.add_ons_handle.rpc_server_handles.rpc } /// Returns the [`AuthServerHandle`] to the started authenticated engine API server. pub const fn auth_server_handle(&self) -> &AuthServerHandle { - &self.rpc_server_handles.auth + &self.add_ons_handle.rpc_server_handles.auth } /// Returns the [`EngineApiClient`] interface for the authenticated engine API. @@ -188,3 +192,17 @@ where self.auth_server_handle().ipc_client().await } } + +impl> Deref for FullNode { + type Target = AddOns::Handle; + + fn deref(&self) -> &Self::Target { + &self.add_ons_handle + } +} + +impl> DerefMut for FullNode { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.add_ons_handle + } +} diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index cb8d8f355dac..d8cce9217efc 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -1,31 +1,35 @@ //! Builder support for rpc components. 
use std::{ - fmt, + fmt::{self, Debug}, + marker::PhantomData, ops::{Deref, DerefMut}, }; +use alloy_rpc_types::engine::ClientVersionV1; use futures::TryFutureExt; -use reth_node_api::{BuilderProvider, FullNodeComponents, NodeTypes, NodeTypesWithEngine}; +use reth_node_api::{ + AddOnsContext, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, +}; use reth_node_core::{ node_config::NodeConfig, - rpc::{ - api::EngineApiServer, - eth::{EthApiTypes, FullEthApiServer}, - }, + rpc::eth::{EthApiTypes, FullEthApiServer}, + version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::providers::ProviderNodeTypes; +use reth_rpc::EthApi; +use reth_rpc_api::eth::helpers::AddDevSigners; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, config::RethRpcServerConfig, RpcModuleBuilder, RpcRegistryInner, RpcServerHandle, TransportRpcModules, }; -use reth_rpc_layer::JwtSecret; +use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; -use crate::{EthApiBuilderCtx, RpcAddOns}; +use crate::EthApiBuilderCtx; /// Contains the handles to the spawned RPC servers. /// @@ -292,102 +296,232 @@ where } } -/// Launch the rpc servers. -pub async fn launch_rpc_servers( - node: Node, - engine_api: Engine, - config: &NodeConfig<::ChainSpec>, - jwt_secret: JwtSecret, - add_ons: RpcAddOns, -) -> eyre::Result<(RethRpcServerHandles, RpcRegistry)> +/// Handle to the launched RPC servers. +#[derive(Clone)] +pub struct RpcHandle { + /// Handles to launched servers. + pub rpc_server_handles: RethRpcServerHandles, + /// Configured RPC modules. + pub rpc_registry: RpcRegistry, +} + +impl Deref for RpcHandle { + type Target = RpcRegistry; + + fn deref(&self) -> &Self::Target { + &self.rpc_registry + } +} + +impl Debug for RpcHandle where - Node: FullNodeComponents + Clone, - Engine: EngineApiServer<::Engine>, - EthApi: EthApiBuilderProvider + FullEthApiServer, + RpcRegistry: Debug, { - let auth_config = config.rpc.auth_server_config(jwt_secret)?; - let module_config = config.rpc.transport_rpc_module_config(); - debug!(target: "reth::cli", http=?module_config.http(), ws=?module_config.ws(), "Using RPC module config"); - - let (mut modules, mut auth_module, registry) = RpcModuleBuilder::default() - .with_provider(node.provider().clone()) - .with_pool(node.pool().clone()) - .with_network(node.network().clone()) - .with_events(node.provider().clone()) - .with_executor(node.task_executor().clone()) - .with_evm_config(node.evm_config().clone()) - .with_block_executor(node.block_executor().clone()) - .build_with_auth_server(module_config, engine_api, EthApi::eth_api_builder()); - - let mut registry = RpcRegistry { registry }; - let ctx = RpcContext { - node: node.clone(), - config, - registry: &mut registry, - modules: &mut modules, - auth_module: &mut auth_module, - }; - - let RpcAddOns { hooks, .. 
} = add_ons; - let RpcHooks { on_rpc_started, extend_rpc_modules } = hooks; - - extend_rpc_modules.extend_rpc_modules(ctx)?; - - let server_config = config.rpc.rpc_server_config(); - let cloned_modules = modules.clone(); - let launch_rpc = server_config.start(&cloned_modules).map_ok(|handle| { - if let Some(path) = handle.ipc_endpoint() { - info!(target: "reth::cli", %path, "RPC IPC server started"); - } - if let Some(addr) = handle.http_local_addr() { - info!(target: "reth::cli", url=%addr, "RPC HTTP server started"); - } - if let Some(addr) = handle.ws_local_addr() { - info!(target: "reth::cli", url=%addr, "RPC WS server started"); - } - handle - }); - - let launch_auth = auth_module.clone().start_server(auth_config).map_ok(|handle| { - let addr = handle.local_addr(); - if let Some(ipc_endpoint) = handle.ipc_endpoint() { - info!(target: "reth::cli", url=%addr, ipc_endpoint=%ipc_endpoint,"RPC auth server started"); - } else { - info!(target: "reth::cli", url=%addr, "RPC auth server started"); + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RpcHandle") + .field("rpc_server_handles", &self.rpc_server_handles) + .field("rpc_registry", &self.rpc_registry) + .finish() + } +} + +/// Node add-ons containing RPC server configuration, with customizable eth API handler. +#[allow(clippy::type_complexity)] +pub struct RpcAddOns { + /// Additional RPC add-ons. + pub hooks: RpcHooks, + /// Builder for `EthApi` + eth_api_builder: Box) -> EthApi + Send + Sync>, + _pd: PhantomData<(Node, EthApi)>, +} + +impl Debug for RpcAddOns { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RpcAddOns") + .field("hooks", &self.hooks) + .field("eth_api_builder", &"...") + .finish() + } +} + +impl RpcAddOns { + /// Creates a new instance of the RPC add-ons. + pub fn new( + eth_api_builder: impl FnOnce(&EthApiBuilderCtx) -> EthApi + Send + Sync + 'static, + ) -> Self { + Self { + hooks: RpcHooks::default(), + eth_api_builder: Box::new(eth_api_builder), + _pd: PhantomData, } - handle - }); + } - // launch servers concurrently - let (rpc, auth) = futures::future::try_join(launch_rpc, launch_auth).await?; - let handles = RethRpcServerHandles { rpc, auth }; + /// Sets the hook that is run once the rpc server is started. + pub fn on_rpc_started(mut self, hook: F) -> Self + where + F: FnOnce(RpcContext<'_, Node, EthApi>, RethRpcServerHandles) -> eyre::Result<()> + + Send + + 'static, + { + self.hooks.set_on_rpc_started(hook); + self + } - let ctx = RpcContext { - node, - config, - registry: &mut registry, - modules: &mut modules, - auth_module: &mut auth_module, - }; + /// Sets the hook that is run to configure the rpc modules. 
+ pub fn extend_rpc_modules(mut self, hook: F) -> Self + where + F: FnOnce(RpcContext<'_, Node, EthApi>) -> eyre::Result<()> + Send + 'static, + { + self.hooks.set_extend_rpc_modules(hook); + self + } +} + +impl> Default + for RpcAddOns +{ + fn default() -> Self { + Self::new(EthApi::build) + } +} - on_rpc_started.on_rpc_started(ctx, handles.clone())?; +impl NodeAddOns for RpcAddOns +where + N: FullNodeComponents, + EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, +{ + type Handle = RpcHandle; + + async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { + let AddOnsContext { node, config, beacon_engine_handle, jwt_secret } = ctx; + + let client = ClientVersionV1 { + code: CLIENT_CODE, + name: NAME_CLIENT.to_string(), + version: CARGO_PKG_VERSION.to_string(), + commit: VERGEN_GIT_SHA.to_string(), + }; + + let engine_api = EngineApi::new( + node.provider().clone(), + config.chain.clone(), + beacon_engine_handle.clone(), + node.payload_builder().clone().into(), + node.pool().clone(), + Box::new(node.task_executor().clone()), + client, + EngineCapabilities::default(), + node.engine_validator().clone(), + ); + info!(target: "reth::cli", "Engine API handler initialized"); + + let auth_config = config.rpc.auth_server_config(*jwt_secret)?; + let module_config = config.rpc.transport_rpc_module_config(); + debug!(target: "reth::cli", http=?module_config.http(), ws=?module_config.ws(), "Using RPC module config"); + + let (mut modules, mut auth_module, registry) = RpcModuleBuilder::default() + .with_provider(node.provider().clone()) + .with_pool(node.pool().clone()) + .with_network(node.network().clone()) + .with_events(node.provider().clone()) + .with_executor(node.task_executor().clone()) + .with_evm_config(node.evm_config().clone()) + .with_block_executor(node.block_executor().clone()) + .build_with_auth_server(module_config, engine_api, self.eth_api_builder); + + // in dev mode we generate 20 random dev-signer accounts + if config.dev.dev { + registry.eth_api().with_dev_accounts(); + } - Ok((handles, registry)) + let mut registry = RpcRegistry { registry }; + let ctx = RpcContext { + node: node.clone(), + config, + registry: &mut registry, + modules: &mut modules, + auth_module: &mut auth_module, + }; + + let RpcHooks { on_rpc_started, extend_rpc_modules } = self.hooks; + + extend_rpc_modules.extend_rpc_modules(ctx)?; + + let server_config = config.rpc.rpc_server_config(); + let cloned_modules = modules.clone(); + let launch_rpc = server_config.start(&cloned_modules).map_ok(|handle| { + if let Some(path) = handle.ipc_endpoint() { + info!(target: "reth::cli", %path, "RPC IPC server started"); + } + if let Some(addr) = handle.http_local_addr() { + info!(target: "reth::cli", url=%addr, "RPC HTTP server started"); + } + if let Some(addr) = handle.ws_local_addr() { + info!(target: "reth::cli", url=%addr, "RPC WS server started"); + } + handle + }); + + let launch_auth = auth_module.clone().start_server(auth_config).map_ok(|handle| { + let addr = handle.local_addr(); + if let Some(ipc_endpoint) = handle.ipc_endpoint() { + info!(target: "reth::cli", url=%addr, ipc_endpoint=%ipc_endpoint,"RPC auth server started"); + } else { + info!(target: "reth::cli", url=%addr, "RPC auth server started"); + } + handle + }); + + // launch servers concurrently + let (rpc, auth) = futures::future::try_join(launch_rpc, launch_auth).await?; + + let handles = RethRpcServerHandles { rpc, auth }; + + let ctx = RpcContext { + node: node.clone(), + config, + registry: &mut registry, + 
modules: &mut modules, + auth_module: &mut auth_module, + }; + + on_rpc_started.on_rpc_started(ctx, handles.clone())?; + + Ok(RpcHandle { rpc_server_handles: handles, rpc_registry: registry }) + } } -/// Provides builder for the core `eth` API type. -pub trait EthApiBuilderProvider: BuilderProvider + EthApiTypes { - /// Returns the eth api builder. - #[allow(clippy::type_complexity)] - fn eth_api_builder() -> Box) -> Self + Send>; +/// Helper trait implemented for add-ons producing [`RpcHandle`]. Used by common node launcher +/// implementations. +pub trait RethRpcAddOns: + NodeAddOns> +{ + /// eth API implementation. + type EthApi: EthApiTypes; + + /// Returns a mutable reference to RPC hooks. + fn hooks_mut(&mut self) -> &mut RpcHooks; } -impl EthApiBuilderProvider for F +impl RethRpcAddOns for RpcAddOns where - N: FullNodeComponents, - for<'a> F: BuilderProvider = &'a EthApiBuilderCtx> + EthApiTypes, + Self: NodeAddOns>, { - fn eth_api_builder() -> Box) -> Self + Send> { - F::builder() + type EthApi = EthApi; + + fn hooks_mut(&mut self) -> &mut RpcHooks { + &mut self.hooks + } +} + +/// A `EthApi` that knows how to build itself from [`EthApiBuilderCtx`]. +pub trait EthApiBuilder: 'static { + /// Builds the `EthApi` from the given context. + fn build(ctx: &EthApiBuilderCtx) -> Self; +} + +impl EthApiBuilder for EthApi { + fn build(ctx: &EthApiBuilderCtx) -> Self { + Self::with_spawner(ctx) } } diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 6822a6a50ec4..c6d3e32b7cf1 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -6,7 +6,6 @@ use clap::Parser; use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher}; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; use reth_optimism_node::{args::RollupArgs, node::OptimismAddOns, OptimismNode}; -use reth_optimism_rpc::SequencerClient; use reth_provider::providers::BlockchainProvider2; use tracing as _; @@ -34,17 +33,7 @@ fn main() { let handle = builder .with_types_and_provider::>() .with_components(OptimismNode::components(rollup_args)) - .with_add_ons(OptimismAddOns::new(sequencer_http_arg.clone())) - .extend_rpc_modules(move |ctx| { - // register sequencer tx forwarder - if let Some(sequencer_http) = sequencer_http_arg { - ctx.registry - .eth_api() - .set_sequencer_client(SequencerClient::new(sequencer_http))?; - } - - Ok(()) - }) + .with_add_ons(OptimismAddOns::new(sequencer_http_arg)) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( builder.task_executor().clone(), @@ -58,20 +47,8 @@ fn main() { handle.node_exit_future.await } false => { - let handle = builder - .node(OptimismNode::new(rollup_args.clone())) - .extend_rpc_modules(move |ctx| { - // register sequencer tx forwarder - if let Some(sequencer_http) = sequencer_http_arg { - ctx.registry - .eth_api() - .set_sequencer_client(SequencerClient::new(sequencer_http))?; - } - - Ok(()) - }) - .launch() - .await?; + let handle = + builder.node(OptimismNode::new(rollup_args.clone())).launch().await?; handle.node_exit_future.await } diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index b6e64f7e0e43..648da85d0bb4 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -13,7 +13,8 @@ use reth_node_builder::{ NetworkBuilder, PayloadServiceBuilder, PoolBuilder, PoolBuilderConfigOverrides, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - BuilderContext, Node, PayloadBuilderConfig, + rpc::{RethRpcAddOns, 
RpcAddOns, RpcHandle}, + BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OptimismBeaconConsensus; @@ -97,7 +98,9 @@ where OptimismEngineValidatorBuilder, >; - type AddOns = OptimismAddOns; + type AddOns = OptimismAddOns< + NodeAdapter>::Components>, + >; fn components_builder(&self) -> Self::ComponentsBuilder { let Self { args } = self; @@ -119,25 +122,43 @@ impl NodeTypesWithEngine for OptimismNode { } /// Add-ons w.r.t. optimism. -#[derive(Debug, Clone)] -pub struct OptimismAddOns { - sequencer_http: Option, +#[derive(Debug)] +pub struct OptimismAddOns(pub RpcAddOns>); + +impl Default for OptimismAddOns { + fn default() -> Self { + Self::new(None) + } } -impl OptimismAddOns { +impl OptimismAddOns { /// Create a new instance with the given `sequencer_http` URL. - pub const fn new(sequencer_http: Option) -> Self { - Self { sequencer_http } + pub fn new(sequencer_http: Option) -> Self { + Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http))) } +} + +impl>> NodeAddOns + for OptimismAddOns +{ + type Handle = RpcHandle>; - /// Returns the sequencer HTTP URL. - pub fn sequencer_http(&self) -> Option<&str> { - self.sequencer_http.as_deref() + async fn launch_add_ons( + self, + ctx: reth_node_api::AddOnsContext<'_, N>, + ) -> eyre::Result { + self.0.launch_add_ons(ctx).await } } -impl NodeAddOns for OptimismAddOns { +impl>> RethRpcAddOns + for OptimismAddOns +{ type EthApi = OpEthApi; + + fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks { + self.0.hooks_mut() + } } /// A regular optimism evm and executor builder. diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 863bf254e494..8ea8df380b0b 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,7 +1,9 @@ use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; -use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; +use reth_e2e_test_utils::{ + transaction::TransactionTestContext, wallet::Wallet, Adapter, NodeHelperType, +}; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_optimism_node::{ node::OptimismAddOns, OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes, @@ -11,7 +13,7 @@ use std::sync::Arc; use tokio::sync::Mutex; /// Optimism Node Helper type -pub(crate) type OpNode = NodeHelperType; +pub(crate) type OpNode = NodeHelperType>>; pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 57ce44100f26..d65dd8edd1d5 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -17,7 +17,7 @@ use op_alloy_network::Optimism; use reth_chainspec::EthereumHardforks; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_node_api::{BuilderProvider, FullNodeComponents, FullNodeTypes, NodeTypes}; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; use reth_node_builder::EthApiBuilderCtx; use reth_primitives::Header; use reth_provider::{ @@ -38,7 +38,6 @@ use reth_tasks::{ TaskSpawner, }; use reth_transaction_pool::TransactionPool; -use tokio::sync::OnceCell; use crate::{OpEthApiError, OpTxBuilder, 
SequencerClient}; @@ -67,13 +66,12 @@ pub struct OpEthApi { inner: Arc>, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. - sequencer_client: Arc>, + sequencer_client: Option, } impl OpEthApi { /// Creates a new instance for given context. - #[allow(clippy::type_complexity)] - pub fn with_spawner(ctx: &EthApiBuilderCtx) -> Self { + pub fn new(ctx: &EthApiBuilderCtx, sequencer_http: Option) -> Self { let blocking_task_pool = BlockingTaskPool::build().expect("failed to build blocking task pool"); @@ -93,7 +91,7 @@ impl OpEthApi { ctx.config.proof_permits, ); - Self { inner: Arc::new(inner), sequencer_client: Arc::new(OnceCell::new()) } + Self { inner: Arc::new(inner), sequencer_client: sequencer_http.map(SequencerClient::new) } } } @@ -246,18 +244,6 @@ where } } -impl BuilderProvider for OpEthApi -where - Self: Send, - N: FullNodeComponents, -{ - type Ctx<'a> = &'a EthApiBuilderCtx; - - fn builder() -> Box Fn(Self::Ctx<'a>) -> Self + Send> { - Box::new(Self::with_spawner) - } -} - impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index ab7525016a1a..36556905e8ac 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -81,17 +81,9 @@ impl OpEthApi where N: FullNodeComponents, { - /// Sets a [`SequencerClient`] for `eth_sendRawTransaction` to forward transactions to. - pub fn set_sequencer_client( - &self, - sequencer_client: SequencerClient, - ) -> Result<(), tokio::sync::SetError> { - self.sequencer_client.set(sequencer_client) - } - /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { - self.sequencer_client.get().cloned() + self.sequencer_client.clone() } } diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 920268a98549..613652678a20 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,5 +1,3 @@ -use std::marker::PhantomData; - use reth_evm::ConfigureEvm; use reth_primitives::Header; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; @@ -11,9 +9,8 @@ use reth_rpc_eth_types::{ use reth_tasks::TaskSpawner; /// Alias for `eth` namespace API builder. -pub type DynEthApiBuilder = Box< - dyn Fn(&EthApiBuilderCtx) -> EthApi, ->; +pub type DynEthApiBuilder = + Box) -> EthApi>; /// Handlers for core, filter and pubsub `eth` namespace APIs. #[derive(Debug, Clone)] @@ -87,7 +84,6 @@ where executor, events, cache, - _rpc_ty_builders: PhantomData, }; let api = eth_api_builder(&ctx); diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index e49105ab7d7d..6a1240b64f8e 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -152,6 +152,7 @@ use std::{ collections::HashMap, + fmt::Debug, net::{Ipv4Addr, SocketAddr, SocketAddrV4}, time::{Duration, SystemTime, UNIX_EPOCH}, }; diff --git a/crates/rpc/rpc-eth-types/src/builder/ctx.rs b/crates/rpc/rpc-eth-types/src/builder/ctx.rs index cb2750b6e54f..2132dd0e22c4 100644 --- a/crates/rpc/rpc-eth-types/src/builder/ctx.rs +++ b/crates/rpc/rpc-eth-types/src/builder/ctx.rs @@ -1,7 +1,5 @@ //! Context required for building `eth` namespace APIs. 
-use std::marker::PhantomData; - use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; use reth_storage_api::BlockReaderIdExt; @@ -14,7 +12,7 @@ use crate::{ /// Context for building the `eth` namespace API. #[derive(Debug, Clone)] -pub struct EthApiBuilderCtx { +pub struct EthApiBuilderCtx { /// Database handle. pub provider: Provider, /// Mempool handle. @@ -31,12 +29,10 @@ pub struct EthApiBuilderCtx, } -impl - EthApiBuilderCtx +impl + EthApiBuilderCtx where Provider: BlockReaderIdExt + Clone, { @@ -46,53 +42,14 @@ where Provider: ChainSpecProvider + 'static, Tasks: TaskSpawner, Events: CanonStateSubscriptions, - { - FeeHistoryCacheBuilder::build(self) - } - - /// Returns a new [`GasPriceOracle`] for the context. - pub fn new_gas_price_oracle(&self) -> GasPriceOracle { - GasPriceOracleBuilder::build(self) - } -} - -/// Builds `eth_` core api component [`GasPriceOracle`], for given context. -#[derive(Debug)] -pub struct GasPriceOracleBuilder; - -impl GasPriceOracleBuilder { - /// Builds a [`GasPriceOracle`], for given context. - pub fn build( - ctx: &EthApiBuilderCtx, - ) -> GasPriceOracle - where - Provider: BlockReaderIdExt + Clone, - { - GasPriceOracle::new(ctx.provider.clone(), ctx.config.gas_oracle, ctx.cache.clone()) - } -} - -/// Builds `eth_` core api component [`FeeHistoryCache`], for given context. -#[derive(Debug)] -pub struct FeeHistoryCacheBuilder; - -impl FeeHistoryCacheBuilder { - /// Builds a [`FeeHistoryCache`], for given context. - pub fn build( - ctx: &EthApiBuilderCtx, - ) -> FeeHistoryCache - where - Provider: ChainSpecProvider + BlockReaderIdExt + Clone + 'static, - Tasks: TaskSpawner, - Events: CanonStateSubscriptions, { let fee_history_cache = - FeeHistoryCache::new(ctx.cache.clone(), ctx.config.fee_history_cache); + FeeHistoryCache::new(self.cache.clone(), self.config.fee_history_cache); - let new_canonical_blocks = ctx.events.canonical_state_stream(); + let new_canonical_blocks = self.events.canonical_state_stream(); let fhc = fee_history_cache.clone(); - let provider = ctx.provider.clone(); - ctx.executor.spawn_critical( + let provider = self.provider.clone(); + self.executor.spawn_critical( "cache canonical blocks for fee history task", Box::pin(async move { fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider).await; @@ -101,4 +58,9 @@ impl FeeHistoryCacheBuilder { fee_history_cache } + + /// Returns a new [`GasPriceOracle`] for the context. + pub fn new_gas_price_oracle(&self) -> GasPriceOracle { + GasPriceOracle::new(self.provider.clone(), self.config.gas_oracle, self.cache.clone()) + } } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 304266f6a8b3..6da46804005c 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -6,7 +6,6 @@ use std::sync::Arc; use alloy_network::AnyNetwork; use alloy_primitives::U256; use derive_more::Deref; -use reth_node_api::{BuilderProvider, FullNodeComponents}; use reth_primitives::BlockNumberOrTag; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ @@ -19,7 +18,7 @@ use reth_rpc_eth_types::{ }; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, - TaskExecutor, TaskSpawner, TokioTaskExecutor, + TaskSpawner, TokioTaskExecutor, }; use tokio::sync::Mutex; @@ -95,7 +94,7 @@ where { /// Creates a new, shareable instance. 
pub fn with_spawner( - ctx: &EthApiBuilderCtx, + ctx: &EthApiBuilderCtx, ) -> Self where Tasks: TaskSpawner + Clone + 'static, @@ -163,25 +162,6 @@ where } } -impl BuilderProvider for EthApi -where - N: FullNodeComponents, -{ - type Ctx<'a> = &'a EthApiBuilderCtx< - N::Provider, - N::Pool, - N::Evm, - N::Network, - TaskExecutor, - N::Provider, - Self, - >; - - fn builder() -> Box Fn(Self::Ctx<'a>) -> Self + Send> { - Box::new(Self::with_spawner) - } -} - /// Container type `EthApi` #[allow(missing_debug_implementations)] pub struct EthApiInner { diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 34f8186be7f8..f833da86236e 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -36,7 +36,8 @@ use reth::{ builder::{ components::{ComponentsBuilder, EngineValidatorBuilder, PayloadServiceBuilder}, node::{NodeTypes, NodeTypesWithEngine}, - BuilderContext, FullNodeTypes, Node, NodeBuilder, PayloadBuilderConfig, + BuilderContext, FullNodeTypes, Node, NodeAdapter, NodeBuilder, NodeComponentsBuilder, + PayloadBuilderConfig, }, providers::{CanonStateSubscriptions, StateProviderFactory}, tasks::TaskManager, @@ -241,7 +242,9 @@ where EthereumConsensusBuilder, CustomEngineValidatorBuilder, >; - type AddOns = EthereumAddOns; + type AddOns = EthereumAddOns< + NodeAdapter>::Components>, + >; fn components_builder(&self) -> Self::ComponentsBuilder { ComponentsBuilder::default() From 5aceb3e11ed97be89a2cea338f904fc0afbc6dd4 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 15 Oct 2024 17:27:26 +0200 Subject: [PATCH 04/51] primitives: rm redundant `chain_id` function for Transaction (#11751) --- Cargo.lock | 1 + crates/primitives/src/alloy_compat.rs | 2 +- crates/primitives/src/transaction/mod.rs | 21 ++++++------------- crates/rpc/rpc-types-compat/Cargo.toml | 1 + .../rpc-types-compat/src/transaction/mod.rs | 1 + crates/rpc/rpc/src/eth/helpers/types.rs | 1 + crates/transaction-pool/Cargo.toml | 5 ++--- crates/transaction-pool/src/traits.rs | 1 + testing/testing-utils/src/generators.rs | 2 +- 9 files changed, 15 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 59e8f2b27fba..b3cd1538507c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8835,6 +8835,7 @@ dependencies = [ name = "reth-rpc-types-compat" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 65099967e6e9..c9bdfad89f57 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -5,7 +5,7 @@ use crate::{ Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, }; use alloc::{string::ToString, vec::Vec}; -use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}; +use alloy_consensus::{Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxLegacy}; use alloy_primitives::{Parity, TxKind}; use alloy_rlp::Error as RlpError; use alloy_serde::WithOtherFields; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 0cb860ff6b86..0463cd9ea7e1 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -4,7 +4,10 @@ use crate::BlockHashOrNumber; use alloy_eips::eip7702::SignedAuthorization; use alloy_primitives::{keccak256, Address, ChainId, TxKind, B256, U256}; -use 
alloy_consensus::{SignableTransaction, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; +use alloy_consensus::{ + SignableTransaction, Transaction as AlloyTransaction, TxEip1559, TxEip2930, TxEip4844, + TxEip7702, TxLegacy, +}; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, eip2930::AccessList, @@ -197,19 +200,6 @@ impl Transaction { } } - /// Get `chain_id`. - pub const fn chain_id(&self) -> Option { - match self { - Self::Legacy(TxLegacy { chain_id, .. }) => *chain_id, - Self::Eip2930(TxEip2930 { chain_id, .. }) | - Self::Eip1559(TxEip1559 { chain_id, .. }) | - Self::Eip4844(TxEip4844 { chain_id, .. }) | - Self::Eip7702(TxEip7702 { chain_id, .. }) => Some(*chain_id), - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - /// Sets the transaction's chain id to the provided value. pub fn set_chain_id(&mut self, chain_id: u64) { match self { @@ -824,7 +814,7 @@ impl Encodable for Transaction { } } -impl alloy_consensus::Transaction for Transaction { +impl AlloyTransaction for Transaction { fn chain_id(&self) -> Option { match self { Self::Legacy(tx) => tx.chain_id(), @@ -1974,6 +1964,7 @@ mod tests { transaction::{signature::Signature, TxEip1559, TxKind, TxLegacy}, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, }; + use alloy_consensus::Transaction as _; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{address, b256, bytes, hex, Address, Bytes, Parity, B256, U256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index 81b4def204f9..8e436f0d3934 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -24,6 +24,7 @@ alloy-rpc-types.workspace = true alloy-rpc-types-eth = { workspace = true, default-features = false, features = ["serde"] } alloy-serde.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true [dev-dependencies] serde_json.workspace = true \ No newline at end of file diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 9bb2a8b5d92a..a489a588617a 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -4,6 +4,7 @@ mod signature; pub use signature::*; use std::fmt; +use alloy_consensus::Transaction as _; use alloy_rpc_types::{ request::{TransactionInput, TransactionRequest}, Transaction, TransactionInfo, diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index d2b9c268e240..982afdcac0ad 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -1,5 +1,6 @@ //! L1 `eth` API types. 
+use alloy_consensus::Transaction as _; use alloy_network::{AnyNetwork, Network}; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types::{Transaction, TransactionInfo}; diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 41abbb4b6b7e..887543b521aa 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -27,6 +27,7 @@ revm.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true +alloy-consensus.workspace = true # async/futures futures-util.workspace = true @@ -54,7 +55,6 @@ rand = { workspace = true, optional = true } paste = { workspace = true, optional = true } proptest = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true } -alloy-consensus = { workspace = true, optional = true } [dev-dependencies] reth-primitives = { workspace = true, features = ["arbitrary"] } @@ -69,12 +69,11 @@ pprof = { workspace = true, features = ["criterion", "flamegraph"] } assert_matches.workspace = true tempfile.workspace = true serde_json.workspace = true -alloy-consensus.workspace = true [features] default = ["serde"] serde = ["dep:serde"] -test-utils = ["rand", "paste", "serde", "alloy-consensus"] +test-utils = ["rand", "paste", "serde"] arbitrary = ["proptest", "reth-primitives/arbitrary", "proptest-arbitrary-interop"] [[bench]] diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 07fe9b9c206e..9a6eda03d9da 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -7,6 +7,7 @@ use crate::{ validate::ValidPoolTransaction, AllTransactionsEvents, }; +use alloy_consensus::Transaction as _; use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList, eip4844::BlobAndProofV1}; use alloy_primitives::{Address, TxHash, TxKind, B256, U256}; use futures_util::{ready, Stream}; diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 70d6cc02b655..d07af00ce4c2 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,6 +1,6 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. -use alloy_consensus::TxLegacy; +use alloy_consensus::{Transaction as _, TxLegacy}; use alloy_eips::{ eip6110::DepositRequest, eip7002::WithdrawalRequest, eip7251::ConsolidationRequest, }; From 04f5b53462cd9f981db9348efe23e4d81cec0012 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Oct 2024 17:47:04 +0200 Subject: [PATCH 05/51] chore: touchups PayloadOrAttributes (#11749) --- crates/payload/primitives/src/payload.rs | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/crates/payload/primitives/src/payload.rs b/crates/payload/primitives/src/payload.rs index 41c2ef1efc23..fc685559e087 100644 --- a/crates/payload/primitives/src/payload.rs +++ b/crates/payload/primitives/src/payload.rs @@ -3,8 +3,10 @@ use alloy_primitives::B256; use alloy_rpc_types::engine::ExecutionPayload; /// Either an [`ExecutionPayload`] or a types that implements the [`PayloadAttributes`] trait. +/// +/// This is a helper type to unify pre-validation of version specific fields of the engine API. #[derive(Debug)] -pub enum PayloadOrAttributes<'a, AttributesType> { +pub enum PayloadOrAttributes<'a, Attributes> { /// An [`ExecutionPayload`] and optional parent beacon block root. 
ExecutionPayload { /// The inner execution payload @@ -13,13 +15,10 @@ pub enum PayloadOrAttributes<'a, AttributesType> { parent_beacon_block_root: Option, }, /// A payload attributes type. - PayloadAttributes(&'a AttributesType), + PayloadAttributes(&'a Attributes), } -impl<'a, AttributesType> PayloadOrAttributes<'a, AttributesType> -where - AttributesType: PayloadAttributes, -{ +impl<'a, Attributes> PayloadOrAttributes<'a, Attributes> { /// Construct a [`PayloadOrAttributes`] from an [`ExecutionPayload`] and optional parent beacon /// block root. pub const fn from_execution_payload( @@ -29,6 +28,16 @@ where Self::ExecutionPayload { payload, parent_beacon_block_root } } + /// Construct a [`PayloadOrAttributes::PayloadAttributes`] variant + pub const fn from_attributes(attributes: &'a Attributes) -> Self { + Self::PayloadAttributes(attributes) + } +} + +impl PayloadOrAttributes<'_, Attributes> +where + Attributes: PayloadAttributes, +{ /// Return the withdrawals for the payload or attributes. pub fn withdrawals(&self) -> Option<&Vec> { match self { From 77a382f59b2437e1b0dcefe66bddfeccc4020f06 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Oct 2024 18:21:08 +0200 Subject: [PATCH 06/51] chore: allow missing const (#11750) --- crates/net/peers/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/net/peers/src/lib.rs b/crates/net/peers/src/lib.rs index e80331f90468..1d60994d8e1b 100644 --- a/crates/net/peers/src/lib.rs +++ b/crates/net/peers/src/lib.rs @@ -117,6 +117,7 @@ pub enum AnyNode { impl AnyNode { /// Returns the peer id of the node. + #[allow(clippy::missing_const_for_fn)] pub fn peer_id(&self) -> PeerId { match self { Self::NodeRecord(record) => record.id, @@ -127,6 +128,7 @@ impl AnyNode { } /// Returns the full node record if available. + #[allow(clippy::missing_const_for_fn)] pub fn node_record(&self) -> Option { match self { Self::NodeRecord(record) => Some(*record), From 7b1b1fcb3b632d8c877a3e761a77ae4d513c0c2a Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Wed, 16 Oct 2024 00:24:25 +0800 Subject: [PATCH 07/51] chore(stage test): use with_capacity (#11759) --- crates/stages/stages/src/stages/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 17ffcf2e90eb..4b9f9295103e 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -263,7 +263,7 @@ mod tests { ); db.insert_blocks(blocks.iter(), StorageKind::Static)?; - let mut receipts = Vec::new(); + let mut receipts = Vec::with_capacity(blocks.len()); let mut tx_num = 0u64; for block in &blocks { let mut block_receipts = Vec::with_capacity(block.body.transactions.len()); From 7f92760655e887f7282a4027465f4cdb4f66ef9a Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 15 Oct 2024 22:43:56 +0400 Subject: [PATCH 08/51] fix: `estimateGas` edge case (#11764) --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 6 +++++- crates/rpc/rpc-eth-api/src/helpers/error.rs | 10 ++++++++++ crates/rpc/rpc-eth-types/src/error.rs | 5 +++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index cb5c4e79a732..6784f9327f09 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -888,7 +888,11 @@ pub trait Call: LoadState + SpawnBlocking { // Execute transaction and handle potential gas errors, adjusting limits accordingly. 
match self.transact(&mut db, env.clone()) { Err(err) if err.is_gas_too_high() => { - // Increase the lowest gas limit if gas is too high + // Decrease the highest gas limit if gas is too high + highest_gas_limit = mid_gas_limit; + } + Err(err) if err.is_gas_too_low() => { + // Increase the lowest gas limit if gas is too low lowest_gas_limit = mid_gas_limit; } // Handle other cases, including successful transactions. diff --git a/crates/rpc/rpc-eth-api/src/helpers/error.rs b/crates/rpc/rpc-eth-api/src/helpers/error.rs index 041a019052bd..1d991b8e65b6 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/error.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/error.rs @@ -59,6 +59,16 @@ pub trait AsEthApiError { false } + + /// Returns `true` if error is + /// [`RpcInvalidTransactionError::GasTooLow`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooLow). + fn is_gas_too_low(&self) -> bool { + if let Some(err) = self.as_err() { + return err.is_gas_too_low() + } + + false + } } impl AsEthApiError for EthApiError { diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error.rs index 212fca36d9c9..b38b3122708b 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error.rs @@ -151,6 +151,11 @@ impl EthApiError { pub const fn is_gas_too_high(&self) -> bool { matches!(self, Self::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)) } + + /// Returns `true` if error is [`RpcInvalidTransactionError::GasTooLow`] + pub const fn is_gas_too_low(&self) -> bool { + matches!(self, Self::InvalidTransaction(RpcInvalidTransactionError::GasTooLow)) + } } impl From for jsonrpsee_types::error::ErrorObject<'static> { From 4144d6ea24e1ff528cde86982bce269b3dba179a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Oct 2024 21:17:31 +0200 Subject: [PATCH 09/51] chore: add get_database_args (#11766) --- crates/node/core/src/args/database.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 09e9de82f52d..da96deb70c1b 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -6,6 +6,7 @@ use clap::{ error::ErrorKind, Arg, Args, Command, Error, }; +use reth_db::ClientVersion; use reth_storage_errors::db::LogLevel; /// Parameters for database configuration @@ -24,7 +25,15 @@ pub struct DatabaseArgs { impl DatabaseArgs { /// Returns default database arguments with configured log level and client version. pub fn database_args(&self) -> reth_db::mdbx::DatabaseArguments { - reth_db::mdbx::DatabaseArguments::new(default_client_version()) + self.get_database_args(default_client_version()) + } + + /// Returns the database arguments with configured log level and given client version. 
+ pub const fn get_database_args( + &self, + client_version: ClientVersion, + ) -> reth_db::mdbx::DatabaseArguments { + reth_db::mdbx::DatabaseArguments::new(client_version) .with_log_level(self.log_level) .with_exclusive(self.exclusive) } From 78415ff7c586eb11097d5181153e7b7d2c092aac Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Oct 2024 21:18:21 +0200 Subject: [PATCH 10/51] chore: include hash in trace (#11762) --- crates/optimism/rpc/src/eth/transaction.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 36556905e8ac..e161504f8405 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -40,7 +40,7 @@ where // On optimism, transactions are forwarded directly to the sequencer to be included in // blocks that it builds. if let Some(client) = self.raw_tx_forwarder().as_ref() { - tracing::debug!( target: "rpc::eth", "forwarding raw transaction to"); + tracing::debug!(target: "rpc::eth", hash = %pool_transaction.hash(), "forwarding raw transaction to sequencer"); let _ = client.forward_raw_transaction(&tx).await.inspect_err(|err| { tracing::debug!(target: "rpc::eth", %err, hash=% *pool_transaction.hash(), "failed to forward raw transaction"); }); From d4be773f5f71b49e7e40de3dd747a80af18daf74 Mon Sep 17 00:00:00 2001 From: "0xriazaka.eth" <168359025+0xriazaka@users.noreply.github.com> Date: Tue, 15 Oct 2024 21:47:23 +0100 Subject: [PATCH 11/51] chore: move tests in reth_execution_types::chain to reth-evm-optimism (#11115) Co-authored-by: Emilia Hane Co-authored-by: Matthias Seitz --- crates/evm/execution-types/src/chain.rs | 12 +--- crates/optimism/evm/Cargo.toml | 1 + crates/optimism/evm/src/lib.rs | 82 ++++++++++++++++++++++++- 3 files changed, 83 insertions(+), 12 deletions(-) diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 30f1f4cd2fc3..d3ed2913ea3b 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -656,7 +656,6 @@ pub(super) mod serde_bincode_compat { mod tests { use super::*; use alloy_primitives::B256; - use reth_primitives::{Receipt, Receipts, TxType}; use revm::primitives::{AccountInfo, HashMap}; #[test] @@ -789,7 +788,10 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn receipts_by_block_hash() { + use reth_primitives::{Receipt, Receipts, TxType}; + // Create a default SealedBlockWithSenders object let block = SealedBlockWithSenders::default(); @@ -811,10 +813,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create another random receipt object, receipt2 @@ -823,10 +821,6 @@ mod tests { cumulative_gas_used: 1325345, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object with a vector of receipt vectors diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 76fa3ce69e4d..72231716ff92 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -42,6 +42,7 @@ tracing.workspace = true alloy-eips.workspace = true reth-revm = { workspace = true, features = ["test-utils"] } +reth-primitives = { workspace = true, features = ["test-utils"] } 
reth-optimism-chainspec.workspace = true alloy-genesis.workspace = true alloy-consensus.workspace = true diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 63d0ee6b4f4e..f3de053f780b 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -204,12 +204,13 @@ mod tests { use super::*; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; - use reth_chainspec::{Chain, ChainSpec}; + use reth_chainspec::ChainSpec; use reth_evm::execute::ProviderError; + use reth_execution_types::{Chain, ExecutionOutcome}; use reth_optimism_chainspec::BASE_MAINNET; use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Header, KECCAK_EMPTY, + Header, Receipt, Receipts, SealedBlockWithSenders, TxType, KECCAK_EMPTY, }; use reth_revm::{ db::{CacheDB, EmptyDBTyped}, @@ -237,7 +238,7 @@ mod tests { // Build the ChainSpec for Ethereum mainnet, activating London, Paris, and Shanghai // hardforks let chain_spec = ChainSpec::builder() - .chain(Chain::mainnet()) + .chain(0.into()) .genesis(Genesis::default()) .london_activated() .paris_activated() @@ -540,4 +541,79 @@ mod tests { // Optimism in handler assert_eq!(evm.handler.cfg, HandlerCfg { spec_id: SpecId::ECOTONE, is_optimism: true }); } + + #[test] + fn receipts_by_block_hash() { + // Create a default SealedBlockWithSenders object + let block = SealedBlockWithSenders::default(); + + // Define block hashes for block1 and block2 + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + // Clone the default block into block1 and block2 + let mut block1 = block.clone(); + let mut block2 = block; + + // Set the hashes of block1 and block2 + block1.block.header.set_block_number(10); + block1.block.header.set_hash(block1_hash); + + block2.block.header.set_block_number(11); + block2.block.header.set_hash(block2_hash); + + // Create a random receipt object, receipt1 + let receipt1 = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create another random receipt object, receipt2 + let receipt2 = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 1325345, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = + Receipts { receipt_vec: vec![vec![Some(receipt1.clone())], vec![Some(receipt2)]] }; + + // Create an ExecutionOutcome object with the created bundle, receipts, an empty requests + // vector, and first_block set to 10 + let execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests: vec![], + first_block: 10, + }; + + // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, + // including block1_hash and block2_hash, and the execution_outcome + let chain = Chain::new([block1, block2], execution_outcome.clone(), None); + + // Assert that the proper receipt vector is returned for block1_hash + assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1])); + + // Create an ExecutionOutcome object with a single receipt vector containing receipt1 + let execution_outcome1 = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { receipt_vec: vec![vec![Some(receipt1)]] }, + requests: vec![], + first_block: 10, + }; + + // Assert that the execution outcome at the first block contains only the first receipt + 
assert_eq!(chain.execution_outcome_at_block(10), Some(execution_outcome1)); + + // Assert that the execution outcome at the tip block contains the whole execution outcome + assert_eq!(chain.execution_outcome_at_block(11), Some(execution_outcome)); + } } From 3f3a7ef0237282862157e33776563b6a371ac3cb Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Wed, 16 Oct 2024 15:57:28 +0800 Subject: [PATCH 12/51] unify &Option to Option<&T> (#11755) --- crates/cli/commands/src/db/get.rs | 7 +++---- crates/rpc/rpc-builder/src/lib.rs | 4 ++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 5b794feeada2..4006d1660aa3 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -132,9 +132,8 @@ pub(crate) fn table_key(key: &str) -> Result { } /// Get an instance of subkey for given dupsort table -fn table_subkey(subkey: &Option) -> Result { - serde_json::from_str::(&subkey.clone().unwrap_or_default()) - .map_err(|e| eyre::eyre!(e)) +fn table_subkey(subkey: Option<&str>) -> Result { + serde_json::from_str::(subkey.unwrap_or_default()).map_err(|e| eyre::eyre!(e)) } struct GetValueViewer<'a, N: NodeTypesWithDB> { @@ -175,7 +174,7 @@ impl TableViewer<()> for GetValueViewer<'_, N> { let key = table_key::(&self.key)?; // process dupsort table - let subkey = table_subkey::(&self.subkey)?; + let subkey = table_subkey::(self.subkey.as_deref())?; match self.tool.get_dup::(key, subkey)? { Some(content) => { diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 6a1240b64f8e..cd93aeb620e3 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -676,8 +676,8 @@ impl RpcModuleConfigBuilder { } /// Get a reference to the eth namespace config, if any - pub const fn get_eth(&self) -> &Option { - &self.eth + pub const fn get_eth(&self) -> Option<&EthConfig> { + self.eth.as_ref() } /// Get a mutable reference to the eth namespace config, if any From b8147708ad6ad997c14c2a11b4ed5196bdd38e4b Mon Sep 17 00:00:00 2001 From: Kien Trinh <51135161+kien6034@users.noreply.github.com> Date: Wed, 16 Oct 2024 15:05:56 +0700 Subject: [PATCH 13/51] feat(txpool): function to return the next free nonce (#11744) Co-authored-by: Matthias Seitz --- crates/transaction-pool/src/identifier.rs | 5 +++ crates/transaction-pool/src/lib.rs | 8 ++++ crates/transaction-pool/src/noop.rs | 8 ++++ crates/transaction-pool/src/pool/mod.rs | 11 +++++ crates/transaction-pool/src/pool/txpool.rs | 51 ++++++++++++++++++++++ crates/transaction-pool/src/traits.rs | 15 +++++++ 6 files changed, 98 insertions(+) diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index 97d4bda8d03b..c50d39ae495a 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -59,6 +59,11 @@ impl SenderId { pub const fn start_bound(self) -> std::ops::Bound { std::ops::Bound::Included(TransactionId::new(self, 0)) } + + /// Converts the sender to a [`TransactionId`] with the given nonce. 
+ pub const fn into_id(self, nonce: u64) -> TransactionId { + TransactionId::new(self, nonce) + } } impl From for SenderId { diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 75465afac1a1..2cffcd33fa8b 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -510,6 +510,14 @@ where self.pool.get_highest_transaction_by_sender(sender) } + fn get_highest_consecutive_transaction_by_sender( + &self, + sender: Address, + on_chain_nonce: u64, + ) -> Option>> { + self.pool.get_highest_consecutive_transaction_by_sender(sender, on_chain_nonce) + } + fn get_transaction_by_sender_and_nonce( &self, sender: Address, diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 681aa9896abb..4464ae1fc8a5 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -227,6 +227,14 @@ impl TransactionPool for NoopTransactionPool { None } + fn get_highest_consecutive_transaction_by_sender( + &self, + _sender: Address, + _on_chain_nonce: u64, + ) -> Option>> { + None + } + fn get_transaction_by_sender_and_nonce( &self, _sender: Address, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 44dd5a8d525f..64e6dad6793f 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -785,6 +785,17 @@ where self.get_pool_data().get_highest_transaction_by_sender(sender_id) } + /// Returns the transaction with the highest nonce that is executable given the on chain nonce. + pub(crate) fn get_highest_consecutive_transaction_by_sender( + &self, + sender: Address, + on_chain_nonce: u64, + ) -> Option>> { + let sender_id = self.get_sender_id(sender); + self.get_pool_data() + .get_highest_consecutive_transaction_by_sender(sender_id.into_id(on_chain_nonce)) + } + /// Returns all transactions that where submitted with the given [`TransactionOrigin`] pub(crate) fn get_transactions_by_origin( &self, diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 7aaa77df49f4..c470faf3a1bf 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -108,6 +108,27 @@ impl TxPool { self.all().txs_iter(sender).last().map(|(_, tx)| Arc::clone(&tx.transaction)) } + /// Returns the transaction with the highest nonce that is executable given the on chain nonce. + /// + /// Note: The next pending pooled transaction must have the on chain nonce. + pub(crate) fn get_highest_consecutive_transaction_by_sender( + &self, + on_chain: TransactionId, + ) -> Option>> { + let mut last_consecutive_tx = None; + + let mut next_expected_nonce = on_chain.nonce; + for (id, tx) in self.all().descendant_txs_inclusive(&on_chain) { + if next_expected_nonce != id.nonce { + break + } + next_expected_nonce = id.next_nonce(); + last_consecutive_tx = Some(tx); + } + + last_consecutive_tx.map(|tx| Arc::clone(&tx.transaction)) + } + /// Returns access to the [`AllTransactions`] container. pub(crate) const fn all(&self) -> &AllTransactions { &self.all_transactions @@ -2755,6 +2776,36 @@ mod tests { assert_eq!(highest_tx.as_ref().transaction, tx1); } + #[test] + fn get_highest_consecutive_transaction_by_sender() { + // Set up a mock transaction factory and a new transaction pool. 
+ let mut pool = TxPool::new(MockOrdering::default(), PoolConfig::default()); + let mut f = MockTransactionFactory::default(); + + // Create transactions with nonces 0, 1, 2, 4, 5. + let sender = Address::random(); + let txs: Vec<_> = vec![0, 1, 2, 4, 5]; + for nonce in txs { + let mut mock_tx = MockTransaction::eip1559(); + mock_tx.set_sender(sender); + mock_tx.set_nonce(nonce); + + let validated_tx = f.validated(mock_tx); + pool.add_transaction(validated_tx, U256::from(1000), 0).unwrap(); + } + + // Get last consecutive transaction + let sender_id = f.ids.sender_id(&sender).unwrap(); + let next_tx = pool.get_highest_consecutive_transaction_by_sender(sender_id.into_id(0)); + assert_eq!(next_tx.map(|tx| tx.nonce()), Some(2), "Expected nonce 2 for on-chain nonce 0"); + + let next_tx = pool.get_highest_consecutive_transaction_by_sender(sender_id.into_id(4)); + assert_eq!(next_tx.map(|tx| tx.nonce()), Some(5), "Expected nonce 5 for on-chain nonce 4"); + + let next_tx = pool.get_highest_consecutive_transaction_by_sender(sender_id.into_id(5)); + assert_eq!(next_tx.map(|tx| tx.nonce()), Some(5), "Expected nonce 5 for on-chain nonce 5"); + } + #[test] fn discard_nonce_too_low() { let mut f = MockTransactionFactory::default(); diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 9a6eda03d9da..d19381935ec4 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -357,6 +357,21 @@ pub trait TransactionPool: Send + Sync + Clone { sender: Address, ) -> Option>>; + /// Returns the transaction with the highest nonce that is executable given the on chain nonce. + /// In other words the highest non nonce gapped transaction. + /// + /// Note: The next pending pooled transaction must have the on chain nonce. + /// + /// For example, for a given on chain nonce of `5`, the next transaction must have that nonce. + /// If the pool contains txs `[5,6,7]` this returns tx `7`. + /// If the pool contains txs `[6,7]` this returns `None` because the next valid nonce (5) is + /// missing, which means txs `[6,7]` are nonce gapped. + fn get_highest_consecutive_transaction_by_sender( + &self, + sender: Address, + on_chain_nonce: u64, + ) -> Option>>; + /// Returns a transaction sent by a given user and a nonce fn get_transaction_by_sender_and_nonce( &self, From 183cea4577714b451d9e3e1ef4a89016a79cf832 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 16 Oct 2024 17:13:30 +0900 Subject: [PATCH 14/51] chore(provider): move `state_provider_from_state` to `BlockState` impl (#11777) --- crates/chain-state/src/in_memory.rs | 24 ++++++++----------- .../src/providers/blockchain_provider.rs | 2 +- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index fb67608ebda7..f157da5ff450 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -504,20 +504,6 @@ impl CanonicalInMemoryState { self.inner.canon_state_notification_sender.send(event).ok(); } - /// Return state provider with reference to in-memory blocks that overlay database state. - /// - /// This merges the state of all blocks that are part of the chain that the requested block is - /// the head of. This includes all blocks that connect back to the canonical block on disk. 
- pub fn state_provider_from_state( - &self, - state: &BlockState, - historical: StateProviderBox, - ) -> MemoryOverlayStateProvider { - let in_memory = state.chain().into_iter().map(|block_state| block_state.block()).collect(); - - MemoryOverlayStateProvider::new(historical, in_memory) - } - /// Return state provider with reference to in-memory blocks that overlay database state. /// /// This merges the state of all blocks that are part of the chain that the requested block is @@ -723,6 +709,16 @@ impl BlockState { pub fn iter(self: Arc) -> impl Iterator> { std::iter::successors(Some(self), |state| state.parent.clone()) } + + /// Return state provider with reference to in-memory blocks that overlay database state. + /// + /// This merges the state of all blocks that are part of the chain that the this block is + /// the head of. This includes all blocks that connect back to the canonical block on disk. + pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { + let in_memory = self.chain().into_iter().map(|block_state| block_state.block()).collect(); + + MemoryOverlayStateProvider::new(historical, in_memory) + } } /// Represents an executed block stored in-memory. diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 9b88cab136f5..3013be0603c8 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -402,7 +402,7 @@ impl BlockchainProvider2 { ) -> ProviderResult { let anchor_hash = state.anchor().hash; let latest_historical = self.database.history_by_block_hash(anchor_hash)?; - Ok(self.canonical_in_memory_state.state_provider_from_state(state, latest_historical)) + Ok(state.state_provider(latest_historical)) } /// Fetches data from either in-memory state or persistent storage for a range of transactions. 
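Note on the relocation in the patch above: building the overlay is now a method on `BlockState` itself, so callers no longer need a handle to `CanonicalInMemoryState`. A minimal sketch of the call-site change, assuming a `BlockState` value and a `historical: StateProviderBox` for the anchor block are already in hand (the local binding names are illustrative only, not from the patch):

    // before: the canonical in-memory state built the overlay for a given block state
    // let provider = canonical_in_memory_state.state_provider_from_state(&state, historical);

    // after: the block state builds its own overlay on top of the historical provider
    let provider: MemoryOverlayStateProvider = state.state_provider(historical);

Both forms layer the executed in-memory blocks reachable via `state.chain()` over the historical database provider; only the entry point moves.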
From a6358d2e6fd90ecb7c225733fcca9a7587a60d58 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 16 Oct 2024 17:52:56 +0900 Subject: [PATCH 15/51] feat(provider): add `*StateProviderRef` creation methods to `DatabaseProvider` (#11776) --- .../src/providers/database/provider.rs | 74 +++++++++++++++++-- .../src/providers/state/historical.rs | 18 +++++ 2 files changed, 84 insertions(+), 8 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 3b074a5afffa..8140700fabac 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -9,12 +9,12 @@ use crate::{ AccountReader, BlockExecutionReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, - HeaderSyncGapProvider, HistoricalStateProvider, HistoryWriter, LatestStateProvider, - OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, - RequestsProvider, RevertsInit, StageCheckpointReader, StateChangeWriter, StateProviderBox, - StateReader, StateWriter, StaticFileProviderFactory, StatsReader, StorageReader, - StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, - TrieWriter, WithdrawalsProvider, + HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, + LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, + PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, RevertsInit, + StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, + StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, + TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; @@ -47,7 +47,7 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StorageChangeSetReader, TryIntoHistoricalStateProvider}; +use reth_storage_api::{StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, @@ -68,7 +68,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::watch; -use tracing::{debug, error, warn}; +use tracing::{debug, error, trace, warn}; /// A [`DatabaseProvider`] that holds a read-only database transaction. 
pub type DatabaseProviderRO = DatabaseProvider<::TX, Spec>; @@ -145,6 +145,64 @@ impl DatabaseProvider { } } +impl DatabaseProvider { + /// State provider for latest block + pub fn latest<'a>(&'a self) -> ProviderResult> { + trace!(target: "providers::db", "Returning latest state provider"); + Ok(Box::new(LatestStateProviderRef::new(&self.tx, self.static_file_provider.clone()))) + } + + /// Storage provider for state at that given block hash + pub fn history_by_block_hash<'a>( + &'a self, + block_hash: BlockHash, + ) -> ProviderResult> { + let mut block_number = + self.block_number(block_hash)?.ok_or(ProviderError::BlockHashNotFound(block_hash))?; + if block_number == self.best_block_number().unwrap_or_default() && + block_number == self.last_block_number().unwrap_or_default() + { + return Ok(Box::new(LatestStateProviderRef::new( + &self.tx, + self.static_file_provider.clone(), + ))) + } + + // +1 as the changeset that we want is the one that was applied after this block. + block_number += 1; + + let account_history_prune_checkpoint = + self.get_prune_checkpoint(PruneSegment::AccountHistory)?; + let storage_history_prune_checkpoint = + self.get_prune_checkpoint(PruneSegment::StorageHistory)?; + + let mut state_provider = HistoricalStateProviderRef::new( + &self.tx, + block_number, + self.static_file_provider.clone(), + ); + + // If we pruned account or storage history, we can't return state on every historical block. + // Instead, we should cap it at the latest prune checkpoint for corresponding prune segment. + if let Some(prune_checkpoint_block_number) = + account_history_prune_checkpoint.and_then(|checkpoint| checkpoint.block_number) + { + state_provider = state_provider.with_lowest_available_account_history_block_number( + prune_checkpoint_block_number + 1, + ); + } + if let Some(prune_checkpoint_block_number) = + storage_history_prune_checkpoint.and_then(|checkpoint| checkpoint.block_number) + { + state_provider = state_provider.with_lowest_available_storage_history_block_number( + prune_checkpoint_block_number + 1, + ); + } + + Ok(Box::new(state_provider)) + } +} + impl StaticFileProviderFactory for DatabaseProvider { /// Returns a static file provider fn static_file_provider(&self) -> StaticFileProvider { diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 781a11f6deca..640041e0801f 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -227,6 +227,24 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { Ok(HistoryInfo::NotYetWritten) } } + + /// Set the lowest block number at which the account history is available. + pub const fn with_lowest_available_account_history_block_number( + mut self, + block_number: BlockNumber, + ) -> Self { + self.lowest_available_blocks.account_history_block_number = Some(block_number); + self + } + + /// Set the lowest block number at which the storage history is available. 
+ pub const fn with_lowest_available_storage_history_block_number( + mut self, + block_number: BlockNumber, + ) -> Self { + self.lowest_available_blocks.storage_history_block_number = Some(block_number); + self + } } impl AccountReader for HistoricalStateProviderRef<'_, TX> { From a14a9fd8b037ba839c94946fb7fd091bb1202eb5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Oct 2024 10:59:20 +0200 Subject: [PATCH 16/51] chore: add chain_id shortcut (#11782) --- crates/chainspec/src/api.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 39a6716eedde..f7061ff18fe7 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -14,9 +14,14 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { // todo: make chain spec type generic over hardfork //type Hardfork: Clone + Copy + 'static; - /// Chain id. + /// Returns the [`Chain`] object this spec targets. fn chain(&self) -> Chain; + /// Returns the chain id number + fn chain_id(&self) -> u64 { + self.chain().id() + } + /// Get the [`BaseFeeParams`] for the chain at the given block. fn base_fee_params_at_block(&self, block_number: u64) -> BaseFeeParams; From 323d8edfb924af27ce0644419371850d7ca96027 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 16 Oct 2024 11:02:23 +0200 Subject: [PATCH 17/51] feat: implement batch executor (#11753) --- Cargo.lock | 2 + crates/evm/Cargo.toml | 8 +- crates/evm/src/execute.rs | 171 +++++++++++++++++++++++++++-------- crates/evm/src/test_utils.rs | 45 ++++++++- 4 files changed, 184 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b3cd1538507c..59f7ad35380b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7359,12 +7359,14 @@ dependencies = [ "metrics", "parking_lot 0.12.3", "reth-chainspec", + "reth-consensus", "reth-execution-errors", "reth-execution-types", "reth-metrics", "reth-primitives", "reth-primitives-traits", "reth-prune-types", + "reth-revm", "reth-storage-errors", "revm", "revm-primitives", diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 20070d421e97..6081eae420cc 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -13,16 +13,18 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-consensus.workspace = true reth-execution-errors.workspace = true +reth-execution-types.workspace = true +reth-metrics = { workspace = true, optional = true } reth-primitives.workspace = true reth-primitives-traits.workspace = true -revm-primitives.workspace = true reth-prune-types.workspace = true -reth-metrics = { workspace = true, optional = true } +reth-revm.workspace = true reth-storage-errors.workspace = true -reth-execution-types.workspace = true revm.workspace = true +revm-primitives.workspace = true # alloy alloy-primitives.workspace = true diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index eb77d054bdcd..9413c709d1e7 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -7,15 +7,17 @@ pub use reth_execution_errors::{ pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; pub use reth_storage_errors::provider::ProviderError; -use alloc::{boxed::Box, vec::Vec}; +use crate::system_calls::OnStateHook; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_primitives::BlockNumber; use core::{fmt::Display, marker::PhantomData}; +use reth_chainspec::ChainSpec; +use reth_consensus::ConsensusError; use reth_primitives::{BlockWithSenders, Receipt, 
Request}; use reth_prune_types::PruneModes; +use reth_revm::batch::BlockBatchRecord; use revm::{db::BundleState, State}; -use revm_primitives::db::Database; - -use crate::system_calls::OnStateHook; +use revm_primitives::{db::Database, U256}; /// A general purpose executor trait that executes an input (e.g. block) and produces an output /// (e.g. state changes and receipts). @@ -170,25 +172,49 @@ pub trait BlockExecutionStrategy { type Error: From + core::error::Error; /// Applies any necessary changes before executing the block's transactions. - fn apply_pre_execution_changes(&mut self) -> Result<(), Self::Error>; + fn apply_pre_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(), Self::Error>; /// Executes all transactions in the block. fn execute_transactions( &mut self, block: &BlockWithSenders, + total_difficulty: U256, ) -> Result<(Vec, u64), Self::Error>; /// Applies any necessary changes after executing the block's transactions. - fn apply_post_execution_changes(&mut self) -> Result, Self::Error>; + fn apply_post_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + receipts: &[Receipt], + ) -> Result, Self::Error>; /// Returns a reference to the current state. fn state_ref(&self) -> &State; + /// Returns a mutable reference to the current state. + fn state_mut(&mut self) -> &mut State; + /// Sets a hook to be called after each state change during execution. fn with_state_hook(&mut self, hook: Option>); /// Returns the final bundle state. - fn finish(&self) -> BundleState; + fn finish(&mut self) -> BundleState; + + /// Returns the strategy chain spec. + fn chain_spec(&self) -> Arc; + + /// Validate a block with regard to execution results. + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + requests: &[Request], + ) -> Result<(), ConsensusError>; } /// A strategy factory that can create block execution strategies. @@ -250,7 +276,8 @@ where DB: Database + Display>, { let strategy = self.strategy_factory.create_strategy(db); - GenericBatchExecutor::new(strategy) + let batch_record = BlockBatchRecord::default(); + GenericBatchExecutor::new(strategy, batch_record) } } @@ -261,7 +288,8 @@ pub struct GenericBlockExecutor where S: BlockExecutionStrategy, { - strategy: S, + /// Block execution strategy. 
+ pub(crate) strategy: S, _phantom: PhantomData, } @@ -285,11 +313,12 @@ where type Error = S::Error; fn execute(mut self, input: Self::Input<'_>) -> Result { - let BlockExecutionInput { block, total_difficulty: _ } = input; + let BlockExecutionInput { block, total_difficulty } = input; - self.strategy.apply_pre_execution_changes()?; - let (receipts, gas_used) = self.strategy.execute_transactions(block)?; - let requests = self.strategy.apply_post_execution_changes()?; + self.strategy.apply_pre_execution_changes(block, total_difficulty)?; + let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let requests = + self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; let state = self.strategy.finish(); Ok(BlockExecutionOutput { state, receipts, requests, gas_used }) @@ -303,11 +332,12 @@ where where F: FnMut(&State), { - let BlockExecutionInput { block, total_difficulty: _ } = input; + let BlockExecutionInput { block, total_difficulty } = input; - self.strategy.apply_pre_execution_changes()?; - let (receipts, gas_used) = self.strategy.execute_transactions(block)?; - let requests = self.strategy.apply_post_execution_changes()?; + self.strategy.apply_pre_execution_changes(block, total_difficulty)?; + let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let requests = + self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; state(self.strategy.state_ref()); @@ -324,13 +354,14 @@ where where H: OnStateHook + 'static, { - let BlockExecutionInput { block, total_difficulty: _ } = input; + let BlockExecutionInput { block, total_difficulty } = input; self.strategy.with_state_hook(Some(Box::new(state_hook))); - self.strategy.apply_pre_execution_changes()?; - let (receipts, gas_used) = self.strategy.execute_transactions(block)?; - let requests = self.strategy.apply_post_execution_changes()?; + self.strategy.apply_pre_execution_changes(block, total_difficulty)?; + let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let requests = + self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; let state = self.strategy.finish(); @@ -341,15 +372,24 @@ where /// A generic batch executor that uses a [`BlockExecutionStrategy`] to /// execute batches. #[allow(missing_debug_implementations)] -pub struct GenericBatchExecutor { - _strategy: S, +pub struct GenericBatchExecutor +where + S: BlockExecutionStrategy, +{ + /// Batch execution strategy. + pub(crate) strategy: S, + /// Keeps track of batch execution receipts and requests. + batch_record: BlockBatchRecord, _phantom: PhantomData, } -impl GenericBatchExecutor { +impl GenericBatchExecutor +where + S: BlockExecutionStrategy, +{ /// Creates a new `GenericBatchExecutor` with the given strategy. 
- pub const fn new(_strategy: S) -> Self { - Self { _strategy, _phantom: PhantomData } + pub const fn new(strategy: S, batch_record: BlockBatchRecord) -> Self { + Self { strategy, batch_record, _phantom: PhantomData } } } @@ -362,24 +402,52 @@ where type Output = ExecutionOutcome; type Error = BlockExecutionError; - fn execute_and_verify_one(&mut self, _input: Self::Input<'_>) -> Result<(), Self::Error> { - todo!() + fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + let BlockExecutionInput { block, total_difficulty } = input; + + if self.batch_record.first_block().is_none() { + self.batch_record.set_first_block(block.number); + } + + self.strategy.apply_pre_execution_changes(block, total_difficulty)?; + let (receipts, _gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let requests = + self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; + + self.strategy.validate_block_post_execution(block, &receipts, &requests)?; + + // prepare the state according to the prune mode + let retention = self.batch_record.bundle_retention(block.number); + self.strategy.state_mut().merge_transitions(retention); + + // store receipts in the set + self.batch_record.save_receipts(receipts)?; + + // store requests in the set + self.batch_record.save_requests(requests); + + Ok(()) } - fn finalize(self) -> Self::Output { - todo!() + fn finalize(mut self) -> Self::Output { + ExecutionOutcome::new( + self.strategy.state_mut().take_bundle(), + self.batch_record.take_receipts(), + self.batch_record.first_block().unwrap_or_default(), + self.batch_record.take_requests(), + ) } - fn set_tip(&mut self, _tip: BlockNumber) { - todo!() + fn set_tip(&mut self, tip: BlockNumber) { + self.batch_record.set_tip(tip); } - fn set_prune_modes(&mut self, _prune_modes: PruneModes) { - todo!() + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.batch_record.set_prune_modes(prune_modes); } fn size_hint(&self) -> Option { - None + Some(self.strategy.state_ref().bundle_state.size_hint()) } } @@ -522,18 +590,28 @@ mod tests { impl BlockExecutionStrategy for TestExecutorStrategy { type Error = BlockExecutionError; - fn apply_pre_execution_changes(&mut self) -> Result<(), Self::Error> { + fn apply_pre_execution_changes( + &mut self, + _block: &BlockWithSenders, + _total_difficulty: U256, + ) -> Result<(), Self::Error> { Ok(()) } fn execute_transactions( &mut self, _block: &BlockWithSenders, + _total_difficulty: U256, ) -> Result<(Vec, u64), Self::Error> { Ok(self.execute_transactions_result.clone()) } - fn apply_post_execution_changes(&mut self) -> Result, Self::Error> { + fn apply_post_execution_changes( + &mut self, + _block: &BlockWithSenders, + _total_difficulty: U256, + _receipts: &[Receipt], + ) -> Result, Self::Error> { Ok(self.apply_post_execution_changes_result.clone()) } @@ -541,11 +619,28 @@ mod tests { &self.state } + fn state_mut(&mut self) -> &mut State { + &mut self.state + } + fn with_state_hook(&mut self, _hook: Option>) {} - fn finish(&self) -> BundleState { + fn finish(&mut self) -> BundleState { self.finish_result.clone() } + + fn chain_spec(&self) -> Arc { + MAINNET.clone() + } + + fn validate_block_post_execution( + &self, + _block: &BlockWithSenders, + _receipts: &[Receipt], + _requests: &[Request], + ) -> Result<(), ConsensusError> { + Ok(()) + } } #[derive(Clone)] diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index fc620bb42c33..a033c8023a96 100644 --- a/crates/evm/src/test_utils.rs 
+++ b/crates/evm/src/test_utils.rs @@ -2,7 +2,8 @@ use crate::{ execute::{ - BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, + BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutionStrategy, + BlockExecutorProvider, Executor, GenericBatchExecutor, GenericBlockExecutor, }, system_calls::OnStateHook, }; @@ -110,3 +111,45 @@ impl BatchExecutor for MockExecutorProvider { None } } + +impl GenericBlockExecutor +where + S: BlockExecutionStrategy, +{ + /// Provides safe read access to the state + pub fn with_state(&self, f: F) -> R + where + F: FnOnce(&State) -> R, + { + f(self.strategy.state_ref()) + } + + /// Provides safe write access to the state + pub fn with_state_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut State) -> R, + { + f(self.strategy.state_mut()) + } +} + +impl GenericBatchExecutor +where + S: BlockExecutionStrategy, +{ + /// Provides safe read access to the state + pub fn with_state(&self, f: F) -> R + where + F: FnOnce(&State) -> R, + { + f(self.strategy.state_ref()) + } + + /// Provides safe write access to the state + pub fn with_state_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut State) -> R, + { + f(self.strategy.state_mut()) + } +} From 0f903b1e204ead35616bba24447a09b316763793 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 16 Oct 2024 11:40:33 +0200 Subject: [PATCH 18/51] feat: add EthExecutionStrategy (#11584) --- Cargo.lock | 1 + crates/ethereum/evm/Cargo.toml | 2 + crates/ethereum/evm/src/lib.rs | 1 + crates/ethereum/evm/src/strategy.rs | 1179 +++++++++++++++++++++++++++ crates/evm/src/execute.rs | 10 +- 5 files changed, 1184 insertions(+), 9 deletions(-) create mode 100644 crates/ethereum/evm/src/strategy.rs diff --git a/Cargo.lock b/Cargo.lock index 59f7ad35380b..eb0bb2a3d023 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7382,6 +7382,7 @@ dependencies = [ "alloy-primitives", "alloy-sol-types", "reth-chainspec", + "reth-consensus", "reth-ethereum-consensus", "reth-ethereum-forks", "reth-evm", diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 61ce0a23b904..a19cbc018c72 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -20,6 +20,7 @@ reth-revm.workspace = true reth-ethereum-consensus.workspace = true reth-prune-types.workspace = true reth-execution-types.workspace = true +reth-consensus.workspace = true # Ethereum revm-primitives.workspace = true @@ -31,6 +32,7 @@ alloy-sol-types.workspace = true [dev-dependencies] reth-testing-utils.workspace = true +reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["secp256k1"] } secp256k1.workspace = true diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index cede8008b3cf..ed18a24fb191 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -33,6 +33,7 @@ use reth_ethereum_forks::EthereumHardfork; use reth_primitives::constants::EIP1559_INITIAL_BASE_FEE; pub mod execute; +pub mod strategy; /// Ethereum DAO hardfork state change data. pub mod dao_fork; diff --git a/crates/ethereum/evm/src/strategy.rs b/crates/ethereum/evm/src/strategy.rs new file mode 100644 index 000000000000..52f58a8b0a80 --- /dev/null +++ b/crates/ethereum/evm/src/strategy.rs @@ -0,0 +1,1179 @@ +//! 
Ethereum block execution strategy, + +use crate::{ + dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, + EthEvmConfig, +}; +use alloc::sync::Arc; +use core::fmt::Display; +use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; +use reth_consensus::ConsensusError; +use reth_ethereum_consensus::validate_block_post_execution; +use reth_evm::{ + execute::{ + BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, + BlockValidationError, ProviderError, + }, + system_calls::{OnStateHook, SystemCaller}, + ConfigureEvm, ConfigureEvmEnv, +}; +use reth_primitives::{BlockWithSenders, Header, Receipt, Request}; +use reth_revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + state_change::post_block_balance_increments, + Database, DatabaseCommit, State, +}; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256}; + +/// Factory for [`EthExecutionStrategy`]. +#[derive(Debug, Clone)] +pub struct EthExecutionStrategyFactory { + /// The chainspec + chain_spec: Arc, + /// How to create an EVM. + evm_config: EvmConfig, +} + +impl EthExecutionStrategyFactory { + /// Creates a new default ethereum executor strategy factory. + pub fn ethereum(chain_spec: Arc) -> Self { + Self::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)) + } + + /// Returns a new factory for the mainnet. + pub fn mainnet() -> Self { + Self::ethereum(MAINNET.clone()) + } +} + +impl EthExecutionStrategyFactory { + /// Creates a new executor strategy factory. + pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { + Self { chain_spec, evm_config } + } +} + +impl BlockExecutionStrategyFactory for EthExecutionStrategyFactory { + type Strategy + Display>> = EthExecutionStrategy; + + fn create_strategy(&self, db: DB) -> Self::Strategy + where + DB: Database + Display>, + { + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + EthExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) + } +} + +/// Block execution strategy for Ethereum. +#[allow(missing_debug_implementations)] +pub struct EthExecutionStrategy { + /// The chainspec + chain_spec: Arc, + /// How to create an EVM. + evm_config: EvmConfig, + /// Current state for block execution. + state: State, + /// Utility to call system smart contracts. + system_caller: SystemCaller, +} + +impl EthExecutionStrategy { + /// Creates a new [`EthExecutionStrategy`] + pub fn new(state: State, chain_spec: Arc, evm_config: EthEvmConfig) -> Self { + let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); + Self { state, chain_spec, evm_config, system_caller } + } +} + +impl EthExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + /// Configures a new evm configuration and block environment for the given block. + /// + /// # Caution + /// + /// This does not initialize the tx environment. + fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl BlockExecutionStrategy for EthExecutionStrategy +where + DB: Database + Display>, +{ + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + self.system_caller.apply_pre_execution_changes(block, &mut evm)?; + + Ok(()) + } + + fn execute_transactions( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(Vec, u64), Self::Error> { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + let mut cumulative_gas_used = 0; + let mut receipts = Vec::with_capacity(block.body.transactions.len()); + for (sender, transaction) in block.transactions_with_sender() { + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = block.header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas { + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()) + } + + self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); + + // Execute transaction. + let result_and_state = evm.transact().map_err(move |err| { + let new_err = err.map_db_err(|e| e.into()); + // Ensure hash is calculated for error log, if not already done + BlockValidationError::EVM { + hash: transaction.recalculate_hash(), + error: Box::new(new_err), + } + })?; + self.system_caller.on_state(&result_and_state); + let ResultAndState { result, state } = result_and_state; + evm.db_mut().commit(state); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. + receipts.push( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: transaction.tx_type(), + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. 
+ success: result.is_success(), + cumulative_gas_used, + // convert to reth log + logs: result.into_logs(), + ..Default::default() + }, + ); + } + Ok((receipts, cumulative_gas_used)) + } + + fn apply_post_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + receipts: &[Receipt], + ) -> Result, Self::Error> { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + let requests = if self.chain_spec.is_prague_active_at_timestamp(block.timestamp) { + // Collect all EIP-6110 deposits + let deposit_requests = + crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, receipts)?; + + let post_execution_requests = + self.system_caller.apply_post_execution_changes(&mut evm)?; + + [deposit_requests, post_execution_requests].concat() + } else { + vec![] + }; + drop(evm); + + let mut balance_increments = + post_block_balance_increments(&self.chain_spec, block, total_difficulty); + + // Irregular state change at Ethereum DAO hardfork + if self.chain_spec.fork(EthereumHardfork::Dao).transitions_at_block(block.number) { + // drain balances from hardcoded addresses. + let drained_balance: u128 = self + .state + .drain_balances(DAO_HARDKFORK_ACCOUNTS) + .map_err(|_| BlockValidationError::IncrementBalanceFailed)? + .into_iter() + .sum(); + + // return balance to DAO beneficiary. + *balance_increments.entry(DAO_HARDFORK_BENEFICIARY).or_default() += drained_balance; + } + // increment balances + self.state + .increment_balances(balance_increments) + .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + + Ok(requests) + } + + fn state_ref(&self) -> &State { + &self.state + } + + fn state_mut(&mut self) -> &mut State { + &mut self.state + } + + fn with_state_hook(&mut self, hook: Option>) { + self.system_caller.with_state_hook(hook); + } + + fn finish(&mut self) -> BundleState { + self.state.merge_transitions(BundleRetention::Reverts); + self.state.take_bundle() + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + requests: &[Request], + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec.clone(), receipts, requests) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::{TxLegacy, EMPTY_ROOT_HASH}; + use alloy_eips::{ + eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, + eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, + eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, + }; + use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256}; + use reth_chainspec::{ChainSpecBuilder, ForkCondition}; + use reth_evm::execute::{ + BatchExecutor, BlockExecutorProvider, Executor, GenericBlockExecutorProvider, + }; + use reth_execution_types::BlockExecutionOutput; + use reth_primitives::{ + constants::ETH_TO_WEI, public_key_to_address, Account, Block, BlockBody, Transaction, + }; + use reth_revm::{ + database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, + }; + use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; + use revm_primitives::BLOCKHASH_SERVE_WINDOW; + use secp256k1::{Keypair, Secp256k1}; + use std::collections::HashMap; + + fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { + let mut db = StateProviderTest::default(); + + let beacon_root_contract_account = Account { + balance: U256::ZERO, + bytecode_hash: 
Some(keccak256(BEACON_ROOTS_CODE.clone())), + nonce: 1, + }; + + db.insert_account( + BEACON_ROOTS_ADDRESS, + beacon_root_contract_account, + Some(BEACON_ROOTS_CODE.clone()), + HashMap::default(), + ); + + db + } + + fn create_state_provider_with_withdrawal_requests_contract() -> StateProviderTest { + let mut db = StateProviderTest::default(); + + let withdrawal_requests_contract_account = Account { + nonce: 1, + balance: U256::ZERO, + bytecode_hash: Some(keccak256(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone())), + }; + + db.insert_account( + WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, + withdrawal_requests_contract_account, + Some(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone()), + HashMap::default(), + ); + + db + } + + fn executor_provider( + chain_spec: Arc, + ) -> GenericBlockExecutorProvider { + let strategy_factory = + EthExecutionStrategyFactory::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)); + + GenericBlockExecutorProvider::new(strategy_factory) + } + + #[test] + fn eip_4788_non_genesis_call() { + let mut header = + Header { timestamp: 1, number: 1, excess_blob_gas: Some(0), ..Header::default() }; + + let db = create_state_provider_with_beacon_root_contract(); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = executor_provider(chain_spec); + + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // attempt to execute a block without parent beacon block root, expect err + let err = executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { + header: header.clone(), + body: BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: None, + requests: None, + }, + }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect_err( + "Executing cancun block without parent beacon block root field should fail", + ); + + assert_eq!( + err.as_validation().unwrap().clone(), + BlockValidationError::MissingParentBeaconBlockRoot + ); + + // fix header, set a gas limit + header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); + + // Now execute a block with the fixed header, ensure that it does not fail + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { + header: header.clone(), + body: BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: None, + requests: None, + }, + }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .unwrap(); + + // check the actual storage of the contract - it should be: + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be + // header.timestamp + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH + // // should be parent_beacon_block_root + let history_buffer_length = 8191u64; + let timestamp_index = header.timestamp % history_buffer_length; + let parent_beacon_block_root_index = + timestamp_index % history_buffer_length + history_buffer_length; + + let timestamp_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() + }); + assert_eq!(timestamp_storage, U256::from(header.timestamp)); + + // get parent beacon block root storage and compare + let parent_beacon_block_root_storage = executor.with_state_mut(|state| { + state + .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) + .expect("storage value should exist") + }); + 
assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); + } + + #[test] + fn eip_4788_no_code_cancun() { + // This test ensures that we "silently fail" when cancun is active and there is no code at + // // BEACON_ROOTS_ADDRESS + let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(B256::with_last_byte(0x69)), + excess_blob_gas: Some(0), + ..Header::default() + }; + + let db = StateProviderTest::default(); + + // DON'T deploy the contract at genesis + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = executor_provider(chain_spec); + + // attempt to execute an empty block with parent beacon block root, this should not fail + provider + .batch_executor(StateProviderDatabase::new(&db)) + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { + header, + body: BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: None, + requests: None, + }, + }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect( + "Executing a block with no transactions while cancun is active should not fail", + ); + } + + #[test] + fn eip_4788_empty_account_call() { + // This test ensures that we do not increment the nonce of an empty SYSTEM_ADDRESS account + // // during the pre-block call + + let mut db = create_state_provider_with_beacon_root_contract(); + + // insert an empty SYSTEM_ADDRESS + db.insert_account(SYSTEM_ADDRESS, Account::default(), None, HashMap::default()); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = executor_provider(chain_spec); + + // construct the header for block one + let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(B256::with_last_byte(0x69)), + excess_blob_gas: Some(0), + ..Header::default() + }; + + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // attempt to execute an empty block with parent beacon block root, this should not fail + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { + header, + body: BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: None, + requests: None, + }, + }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect( + "Executing a block with no transactions while cancun is active should not fail", + ); + + // ensure that the nonce of the system address account has not changed + let nonce = + executor.with_state_mut(|state| state.basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce); + assert_eq!(nonce, 0); + } + + #[test] + fn eip_4788_genesis_call() { + let db = create_state_provider_with_beacon_root_contract(); + + // activate cancun at genesis + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)) + .build(), + ); + + let mut header = chain_spec.genesis_header().clone(); + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // attempt to execute the genesis block with non-zero parent beacon block root, expect err + header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); + let _err = executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { header: header.clone(), body: 
Default::default() }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect_err( + "Executing genesis cancun block with non-zero parent beacon block root field + should fail", + ); + + // fix header + header.parent_beacon_block_root = Some(B256::ZERO); + + // now try to process the genesis block again, this time ensuring that a system contract + // call does not occur + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { header, body: Default::default() }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .unwrap(); + + // there is no system contract call so there should be NO STORAGE CHANGES + // this means we'll check the transition state + let transition_state = executor.with_state_mut(|state| { + state + .transition_state + .take() + .expect("the evm should be initialized with bundle updates") + }); + + // assert that it is the default (empty) transition state + assert_eq!(transition_state, TransitionState::default()); + } + + #[test] + fn eip_4788_high_base_fee() { + // This test ensures that if we have a base fee, then we don't return an error when the + // system contract is called, due to the gas price being less than the base fee. + let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(B256::with_last_byte(0x69)), + base_fee_per_gas: Some(u64::MAX), + excess_blob_gas: Some(0), + ..Header::default() + }; + + let db = create_state_provider_with_beacon_root_contract(); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = executor_provider(chain_spec); + + // execute header + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // Now execute a block with the fixed header, ensure that it does not fail + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { header: header.clone(), body: Default::default() }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .unwrap(); + + // check the actual storage of the contract - it should be: + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be + // header.timestamp + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH + // // should be parent_beacon_block_root + let history_buffer_length = 8191u64; + let timestamp_index = header.timestamp % history_buffer_length; + let parent_beacon_block_root_index = + timestamp_index % history_buffer_length + history_buffer_length; + + // get timestamp storage and compare + let timestamp_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() + }); + assert_eq!(timestamp_storage, U256::from(header.timestamp)); + + // get parent beacon block root storage and compare + let parent_beacon_block_root_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)).unwrap() + }); + assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); + } + + /// Create a state provider with blockhashes and the EIP-2935 system contract. 
+ fn create_state_provider_with_block_hashes(latest_block: u64) -> StateProviderTest { + let mut db = StateProviderTest::default(); + for block_number in 0..=latest_block { + db.insert_block_hash(block_number, keccak256(block_number.to_string())); + } + + let blockhashes_contract_account = Account { + balance: U256::ZERO, + bytecode_hash: Some(keccak256(HISTORY_STORAGE_CODE.clone())), + nonce: 1, + }; + + db.insert_account( + HISTORY_STORAGE_ADDRESS, + blockhashes_contract_account, + Some(HISTORY_STORAGE_CODE.clone()), + HashMap::default(), + ); + + db + } + #[test] + fn eip_2935_pre_fork() { + let db = create_state_provider_with_block_hashes(1); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Never) + .build(), + ); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // construct the header for block one + let header = Header { timestamp: 1, number: 1, ..Header::default() }; + + // attempt to execute an empty block, this should not fail + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { header, body: Default::default() }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect( + "Executing a block with no transactions while Prague is active should not fail", + ); + + // ensure that the block hash was *not* written to storage, since this is before the fork + // was activated + // + // we load the account first, because revm expects it to be + // loaded + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap() + .is_zero())); + } + + #[test] + fn eip_2935_fork_activation_genesis() { + let db = create_state_provider_with_block_hashes(0); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) + .build(), + ); + + let header = chain_spec.genesis_header().clone(); + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // attempt to execute genesis block, this should not fail + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { header, body: Default::default() }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect( + "Executing a block with no transactions while Prague is active should not fail", + ); + + // ensure that the block hash was *not* written to storage, since there are no blocks + // preceding genesis + // + // we load the account first, because revm expects it to be + // loaded + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap() + .is_zero())); + } + + #[test] + fn eip_2935_fork_activation_within_window_bounds() { + let fork_activation_block = (BLOCKHASH_SERVE_WINDOW - 10) as u64; + let db = create_state_provider_with_block_hashes(fork_activation_block); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) + .build(), + ); + + let header = Header { + parent_hash: B256::random(), + timestamp: 1, + number: fork_activation_block, + requests_root: Some(EMPTY_ROOT_HASH), + 
..Header::default() + }; + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // attempt to execute the fork activation block, this should not fail + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { header, body: Default::default() }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect( + "Executing a block with no transactions while Prague is active should not fail", + ); + + // the hash for the ancestor of the fork activation block should be present + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); + assert_ne!( + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block - 1)) + .unwrap()), + U256::ZERO + ); + + // the hash of the block itself should not be in storage + assert!(executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block)) + .unwrap() + .is_zero())); + } + + #[test] + fn eip_2935_fork_activation_outside_window_bounds() { + let fork_activation_block = (BLOCKHASH_SERVE_WINDOW + 256) as u64; + let db = create_state_provider_with_block_hashes(fork_activation_block); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + let header = Header { + parent_hash: B256::random(), + timestamp: 1, + number: fork_activation_block, + requests_root: Some(EMPTY_ROOT_HASH), + ..Header::default() + }; + + // attempt to execute the fork activation block, this should not fail + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { header, body: Default::default() }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect( + "Executing a block with no transactions while Prague is active should not fail", + ); + + // the hash for the ancestor of the fork activation block should be present + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); + assert_ne!( + executor.with_state_mut(|state| state + .storage( + HISTORY_STORAGE_ADDRESS, + U256::from(fork_activation_block % BLOCKHASH_SERVE_WINDOW as u64 - 1) + ) + .unwrap()), + U256::ZERO + ); + } + + #[test] + fn eip_2935_state_transition_inside_fork() { + let db = create_state_provider_with_block_hashes(2); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) + .build(), + ); + + let mut header = chain_spec.genesis_header().clone(); + header.requests_root = Some(EMPTY_ROOT_HASH); + let header_hash = header.hash_slow(); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // attempt to execute the genesis block, this should not fail + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { header, body: Default::default() }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect( + "Executing a block with no transactions while Prague is active should not fail", + ); + + // nothing should be written as the genesis has no ancestors + // + // we load the account first, because revm expects it to be + // loaded + 
executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap() + .is_zero())); + + // attempt to execute block 1, this should not fail + let header = Header { + parent_hash: header_hash, + timestamp: 1, + number: 1, + requests_root: Some(EMPTY_ROOT_HASH), + ..Header::default() + }; + let header_hash = header.hash_slow(); + + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { header, body: Default::default() }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect( + "Executing a block with no transactions while Prague is active should not fail", + ); + + // the block hash of genesis should now be in storage, but not block 1 + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); + assert_ne!( + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap()), + U256::ZERO + ); + assert!(executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) + .unwrap() + .is_zero())); + + // attempt to execute block 2, this should not fail + let header = Header { + parent_hash: header_hash, + timestamp: 1, + number: 2, + requests_root: Some(EMPTY_ROOT_HASH), + ..Header::default() + }; + + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { header, body: Default::default() }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect( + "Executing a block with no transactions while Prague is active should not fail", + ); + + // the block hash of genesis and block 1 should now be in storage, but not block 2 + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); + assert_ne!( + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap()), + U256::ZERO + ); + assert_ne!( + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) + .unwrap()), + U256::ZERO + ); + assert!(executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::from(2)) + .unwrap() + .is_zero())); + } + + #[test] + fn eip_7002() { + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) + .build(), + ); + + let mut db = create_state_provider_with_withdrawal_requests_contract(); + + let secp = Secp256k1::new(); + let sender_key_pair = Keypair::new(&secp, &mut generators::rng()); + let sender_address = public_key_to_address(sender_key_pair.public_key()); + + db.insert_account( + sender_address, + Account { nonce: 1, balance: U256::from(ETH_TO_WEI), bytecode_hash: None }, + None, + HashMap::default(), + ); + + // https://github.com/lightclient/7002asm/blob/e0d68e04d15f25057af7b6d180423d94b6b3bdb3/test/Contract.t.sol.in#L49-L64 + let validator_public_key = fixed_bytes!("111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); + let withdrawal_amount = fixed_bytes!("2222222222222222"); + let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); + assert_eq!(input.len(), 56); + + let mut header = chain_spec.genesis_header().clone(); + header.gas_limit = 1_500_000; + header.gas_used = 134_807; + header.receipts_root = + b256!("b31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); + + let tx = sign_tx_with_key_pair( + 
sender_key_pair, + Transaction::Legacy(TxLegacy { + chain_id: Some(chain_spec.chain.id()), + nonce: 1, + gas_price: header.base_fee_per_gas.unwrap().into(), + gas_limit: 134_807, + to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), + // `MIN_WITHDRAWAL_REQUEST_FEE` + value: U256::from(1), + input, + }), + ); + + let provider = executor_provider(chain_spec); + + let executor = provider.executor(StateProviderDatabase::new(&db)); + + let BlockExecutionOutput { receipts, requests, .. } = executor + .execute( + ( + &Block { + header, + body: BlockBody { transactions: vec![tx], ..Default::default() }, + } + .with_recovered_senders() + .unwrap(), + U256::ZERO, + ) + .into(), + ) + .unwrap(); + + let receipt = receipts.first().unwrap(); + assert!(receipt.success); + + let request = requests.first().unwrap(); + let withdrawal_request = request.as_withdrawal_request().unwrap(); + assert_eq!(withdrawal_request.source_address, sender_address); + assert_eq!(withdrawal_request.validator_pubkey, validator_public_key); + assert_eq!(withdrawal_request.amount, u64::from_be_bytes(withdrawal_amount.into())); + } + + #[test] + fn block_gas_limit_error() { + // Create a chain specification with fork conditions set for Prague + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) + .build(), + ); + + // Create a state provider with the withdrawal requests contract pre-deployed + let mut db = create_state_provider_with_withdrawal_requests_contract(); + + // Initialize Secp256k1 for key pair generation + let secp = Secp256k1::new(); + // Generate a new key pair for the sender + let sender_key_pair = Keypair::new(&secp, &mut generators::rng()); + // Get the sender's address from the public key + let sender_address = public_key_to_address(sender_key_pair.public_key()); + + // Insert the sender account into the state with a nonce of 1 and a balance of 1 ETH in Wei + db.insert_account( + sender_address, + Account { nonce: 1, balance: U256::from(ETH_TO_WEI), bytecode_hash: None }, + None, + HashMap::default(), + ); + + // Define the validator public key and withdrawal amount as fixed bytes + let validator_public_key = fixed_bytes!("111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); + let withdrawal_amount = fixed_bytes!("2222222222222222"); + // Concatenate the validator public key and withdrawal amount into a single byte array + let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); + // Ensure the input length is 56 bytes + assert_eq!(input.len(), 56); + + // Create a genesis block header with a specified gas limit and gas used + let mut header = chain_spec.genesis_header().clone(); + header.gas_limit = 1_500_000; + header.gas_used = 134_807; + header.receipts_root = + b256!("b31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); + + // Create a transaction with a gas limit higher than the block gas limit + let tx = sign_tx_with_key_pair( + sender_key_pair, + Transaction::Legacy(TxLegacy { + chain_id: Some(chain_spec.chain.id()), + nonce: 1, + gas_price: header.base_fee_per_gas.unwrap().into(), + gas_limit: 2_500_000, // higher than block gas limit + to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), + value: U256::from(1), + input, + }), + ); + + // Create an executor from the state provider + let executor = executor_provider(chain_spec).executor(StateProviderDatabase::new(&db)); + + // Execute the block and 
capture the result + let exec_result = executor.execute( + ( + &Block { header, body: BlockBody { transactions: vec![tx], ..Default::default() } } + .with_recovered_senders() + .unwrap(), + U256::ZERO, + ) + .into(), + ); + + // Check if the execution result is an error and assert the specific error type + match exec_result { + Ok(_) => panic!("Expected block gas limit error"), + Err(err) => assert_eq!( + *err.as_validation().unwrap(), + BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: 2_500_000, + block_available_gas: 1_500_000, + } + ), + } + } +} diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 9413c709d1e7..859dacd8a67f 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -8,10 +8,9 @@ pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, Execut pub use reth_storage_errors::provider::ProviderError; use crate::system_calls::OnStateHook; -use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use alloc::{boxed::Box, vec::Vec}; use alloy_primitives::BlockNumber; use core::{fmt::Display, marker::PhantomData}; -use reth_chainspec::ChainSpec; use reth_consensus::ConsensusError; use reth_primitives::{BlockWithSenders, Receipt, Request}; use reth_prune_types::PruneModes; @@ -205,9 +204,6 @@ pub trait BlockExecutionStrategy { /// Returns the final bundle state. fn finish(&mut self) -> BundleState; - /// Returns the strategy chain spec. - fn chain_spec(&self) -> Arc; - /// Validate a block with regard to execution results. fn validate_block_post_execution( &self, @@ -629,10 +625,6 @@ mod tests { self.finish_result.clone() } - fn chain_spec(&self) -> Arc { - MAINNET.clone() - } - fn validate_block_post_execution( &self, _block: &BlockWithSenders, From e454b2402b0f9fe936620d9302edc94f9f8d5782 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Oct 2024 11:44:15 +0200 Subject: [PATCH 19/51] chore: use highest known nonce (#11784) --- crates/transaction-pool/src/pool/txpool.rs | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index c470faf3a1bf..c89aca830f0a 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -110,13 +110,21 @@ impl TxPool { /// Returns the transaction with the highest nonce that is executable given the on chain nonce. /// + /// If the pool already tracks a higher nonce for the given sender, then this nonce is used + /// instead. + /// /// Note: The next pending pooled transaction must have the on chain nonce. pub(crate) fn get_highest_consecutive_transaction_by_sender( &self, - on_chain: TransactionId, + mut on_chain: TransactionId, ) -> Option>> { let mut last_consecutive_tx = None; + // ensure this operates on the most recent + if let Some(current) = self.sender_info.get(&on_chain.sender) { + on_chain.nonce = on_chain.nonce.max(current.state_nonce); + } + let mut next_expected_nonce = on_chain.nonce; for (id, tx) in self.all().descendant_txs_inclusive(&on_chain) { if next_expected_nonce != id.nonce { @@ -2784,7 +2792,7 @@ mod tests { // Create transactions with nonces 0, 1, 2, 4, 5. 
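        // (nonces 8 and 9 are added so the test also covers the tracked-sender-nonce lookup introduced above)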
let sender = Address::random(); - let txs: Vec<_> = vec![0, 1, 2, 4, 5]; + let txs: Vec<_> = vec![0, 1, 2, 4, 5, 8, 9]; for nonce in txs { let mut mock_tx = MockTransaction::eip1559(); mock_tx.set_sender(sender); @@ -2804,6 +2812,13 @@ mod tests { let next_tx = pool.get_highest_consecutive_transaction_by_sender(sender_id.into_id(5)); assert_eq!(next_tx.map(|tx| tx.nonce()), Some(5), "Expected nonce 5 for on-chain nonce 5"); + + // update the tracked nonce + let mut info = SenderInfo::default(); + info.update(8, U256::ZERO); + pool.sender_info.insert(sender_id, info); + let next_tx = pool.get_highest_consecutive_transaction_by_sender(sender_id.into_id(5)); + assert_eq!(next_tx.map(|tx| tx.nonce()), Some(9), "Expected nonce 9 for on-chain nonce 8"); } #[test] From f49a4ae185d0f02e548af4434b9cfc14df79af85 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 16 Oct 2024 12:13:46 +0200 Subject: [PATCH 20/51] feat: add OpExecutionStrategy (#11761) --- Cargo.lock | 1 + crates/evm/src/execute.rs | 2 +- crates/evm/src/test_utils.rs | 7 +- crates/optimism/evm/Cargo.toml | 2 + crates/optimism/evm/src/lib.rs | 2 + crates/optimism/evm/src/strategy.rs | 491 ++++++++++++++++++++++++++++ 6 files changed, 503 insertions(+), 2 deletions(-) create mode 100644 crates/optimism/evm/src/strategy.rs diff --git a/Cargo.lock b/Cargo.lock index eb0bb2a3d023..4d23c5b42bf9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8105,6 +8105,7 @@ dependencies = [ "alloy-primitives", "op-alloy-consensus", "reth-chainspec", + "reth-consensus", "reth-ethereum-forks", "reth-evm", "reth-execution-errors", diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 859dacd8a67f..704b6a23ad5a 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -375,7 +375,7 @@ where /// Batch execution strategy. pub(crate) strategy: S, /// Keeps track of batch execution receipts and requests. - batch_record: BlockBatchRecord, + pub(crate) batch_record: BlockBatchRecord, _phantom: PhantomData, } diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index a033c8023a96..5ae7ed45b712 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -11,7 +11,7 @@ use alloy_primitives::BlockNumber; use parking_lot::Mutex; use reth_execution_errors::BlockExecutionError; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, Receipt, Receipts}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -152,4 +152,9 @@ where { f(self.strategy.state_mut()) } + + /// Accessor for batch executor receipts. 
+ pub const fn receipts(&self) -> &Receipts { + self.batch_record.receipts() + } } diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 72231716ff92..afaca32b63ee 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -20,6 +20,7 @@ reth-revm.workspace = true reth-execution-errors.workspace = true reth-execution-types.workspace = true reth-prune-types.workspace = true +reth-consensus.workspace = true # ethereum alloy-primitives.workspace = true @@ -41,6 +42,7 @@ tracing.workspace = true [dev-dependencies] alloy-eips.workspace = true +reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec.workspace = true diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index f3de053f780b..4d0f9d89ff41 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -33,6 +33,8 @@ use revm_primitives::{ BlobExcessGasAndPrice, BlockEnv, Bytes, CfgEnv, Env, HandlerCfg, OptimismFields, SpecId, TxKind, }; +pub mod strategy; + /// Optimism-related EVM configuration. #[derive(Debug, Clone)] pub struct OptimismEvmConfig { diff --git a/crates/optimism/evm/src/strategy.rs b/crates/optimism/evm/src/strategy.rs new file mode 100644 index 000000000000..4fc06c39686f --- /dev/null +++ b/crates/optimism/evm/src/strategy.rs @@ -0,0 +1,491 @@ +//! Optimism block execution strategy, + +use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; +use reth_chainspec::EthereumHardforks; +use reth_consensus::ConsensusError; +use reth_evm::{ + execute::{ + BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, + BlockValidationError, ProviderError, + }, + system_calls::{OnStateHook, SystemCaller}, + ConfigureEvm, ConfigureEvmEnv, +}; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_consensus::validate_block_post_execution; +use reth_optimism_forks::OptimismHardfork; +use reth_primitives::{BlockWithSenders, Header, Receipt, Request, TxType}; +use reth_revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + state_change::post_block_balance_increments, + Database, State, +}; +use revm_primitives::{ + db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, +}; +use std::{fmt::Display, sync::Arc}; +use tracing::trace; + +/// Factory for [`OpExecutionStrategy`]. +#[derive(Debug, Clone)] +pub struct OpExecutionStrategyFactory { + /// The chainspec + chain_spec: Arc, + /// How to create an EVM. + evm_config: EvmConfig, +} + +impl OpExecutionStrategyFactory { + /// Creates a new default optimism executor strategy factory. + pub fn optimism(chain_spec: Arc) -> Self { + Self::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)) + } +} + +impl OpExecutionStrategyFactory { + /// Creates a new executor strategy factory. 
+ pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { + Self { chain_spec, evm_config } + } +} + +impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory { + type Strategy + Display>> = OpExecutionStrategy; + + fn create_strategy(&self, db: DB) -> Self::Strategy + where + DB: Database + Display>, + { + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + OpExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) + } +} + +/// Block execution strategy for Optimism. +#[allow(missing_debug_implementations)] +pub struct OpExecutionStrategy { + /// The chainspec + chain_spec: Arc, + /// How to create an EVM. + evm_config: EvmConfig, + /// Current state for block execution. + state: State, + /// Utility to call system smart contracts. + system_caller: SystemCaller, +} + +impl OpExecutionStrategy { + /// Creates a new [`OpExecutionStrategy`] + pub fn new( + state: State, + chain_spec: Arc, + evm_config: OptimismEvmConfig, + ) -> Self { + let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); + Self { state, chain_spec, evm_config, system_caller } + } +} + +impl OpExecutionStrategy { + /// Configures a new evm configuration and block environment for the given block. + /// + /// Caution: this does not initialize the tx environment. + fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl BlockExecutionStrategy for OpExecutionStrategy +where + DB: Database + Display>, +{ + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + self.system_caller.apply_beacon_root_contract_call( + block.timestamp, + block.number, + block.parent_beacon_block_root, + &mut evm, + )?; + + // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism + // blocks will always have at least a single transaction in them (the L1 info transaction), + // so we can safely assume that this will always be triggered upon the transition and that + // the above check for empty blocks will never be hit on OP chains. 
+ ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) + .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; + + Ok(()) + } + + fn execute_transactions( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(Vec, u64), Self::Error> { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + let is_regolith = + self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); + + let mut cumulative_gas_used = 0; + let mut receipts = Vec::with_capacity(block.body.transactions.len()); + for (sender, transaction) in block.transactions_with_sender() { + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = block.header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas && + (is_regolith || !transaction.is_system_transaction()) + { + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()) + } + + // An optimism block should never contain blob transactions. + if matches!(transaction.tx_type(), TxType::Eip4844) { + return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()) + } + + // Cache the depositor account prior to the state transition for the deposit nonce. + // + // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces + // were not introduced in Bedrock. In addition, regular transactions don't have deposit + // nonces, so we don't need to touch the DB for those. + let depositor = (is_regolith && transaction.is_deposit()) + .then(|| { + evm.db_mut() + .load_cache_account(*sender) + .map(|acc| acc.account_info().unwrap_or_default()) + }) + .transpose() + .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; + + self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); + + // Execute transaction. + let result_and_state = evm.transact().map_err(move |err| { + let new_err = err.map_db_err(|e| e.into()); + // Ensure hash is calculated for error log, if not already done + BlockValidationError::EVM { + hash: transaction.recalculate_hash(), + error: Box::new(new_err), + } + })?; + + trace!( + target: "evm", + ?transaction, + "Executed transaction" + ); + self.system_caller.on_state(&result_and_state); + let ResultAndState { result, state } = result_and_state; + evm.db_mut().commit(state); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. + receipts.push(Receipt { + tx_type: transaction.tx_type(), + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs(), + deposit_nonce: depositor.map(|account| account.nonce), + // The deposit receipt version was introduced in Canyon to indicate an update to how + // receipt hashes should be computed when set. The state transition process ensures + // this is only set for post-Canyon deposit transactions. 
+ deposit_receipt_version: (transaction.is_deposit() && + self.chain_spec + .is_fork_active_at_timestamp(OptimismHardfork::Canyon, block.timestamp)) + .then_some(1), + }); + } + + Ok((receipts, cumulative_gas_used)) + } + + fn apply_post_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + _receipts: &[Receipt], + ) -> Result, Self::Error> { + let balance_increments = + post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); + // increment balances + self.state + .increment_balances(balance_increments) + .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + + Ok(vec![]) + } + + fn state_ref(&self) -> &State { + &self.state + } + + fn state_mut(&mut self) -> &mut State { + &mut self.state + } + + fn with_state_hook(&mut self, hook: Option>) { + self.system_caller.with_state_hook(hook); + } + + fn finish(&mut self) -> BundleState { + self.state.merge_transitions(BundleRetention::Reverts); + self.state.take_bundle() + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + _requests: &[Request], + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec.clone(), receipts) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::OpChainSpec; + use alloy_consensus::TxEip1559; + use alloy_primitives::{b256, Address, StorageKey, StorageValue}; + use reth_chainspec::MIN_TRANSACTION_GAS; + use reth_evm::execute::{BatchExecutor, BlockExecutorProvider, GenericBlockExecutorProvider}; + use reth_optimism_chainspec::{optimism_deposit_tx_signature, OpChainSpecBuilder}; + use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; + use reth_revm::{ + database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, + }; + use std::{collections::HashMap, str::FromStr}; + + fn create_op_state_provider() -> StateProviderTest { + let mut db = StateProviderTest::default(); + + let l1_block_contract_account = + Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; + + let mut l1_block_storage = HashMap::default(); + // base fee + l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); + // l1 fee overhead + l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); + // l1 fee scalar + l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); + // l1 free scalars post ecotone + l1_block_storage.insert( + StorageKey::with_last_byte(3), + StorageValue::from_str( + "0x0000000000000000000000000000000000001db0000d27300000000000000005", + ) + .unwrap(), + ); + + db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); + + db + } + + fn executor_provider( + chain_spec: Arc, + ) -> GenericBlockExecutorProvider { + let strategy_factory = + OpExecutionStrategyFactory::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)); + + GenericBlockExecutorProvider::new(strategy_factory) + } + + #[test] + fn op_deposit_fields_pre_canyon() { + let header = Header { + timestamp: 1, + number: 1, + gas_limit: 1_000_000, + gas_used: 42_000, + receipts_root: b256!( + "83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" + ), + ..Default::default() + }; + + let mut db = create_op_state_provider(); + + let addr = Address::ZERO; + let account = Account { balance: U256::MAX, ..Account::default() }; + db.insert_account(addr, account, None, HashMap::default()); + + let chain_spec = 
Arc::new(OpChainSpecBuilder::base_mainnet().regolith_activated().build()); + + let tx = TransactionSigned::from_transaction_and_signature( + Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: MIN_TRANSACTION_GAS, + to: addr.into(), + ..Default::default() + }), + Signature::test_signature(), + ); + + let tx_deposit = TransactionSigned::from_transaction_and_signature( + Transaction::Deposit(op_alloy_consensus::TxDeposit { + from: addr, + to: addr.into(), + gas_limit: MIN_TRANSACTION_GAS, + ..Default::default() + }), + Signature::test_signature(), + ); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // make sure the L1 block contract state is preloaded. + executor.with_state_mut(|state| { + state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + }); + + // Attempt to execute a block with one deposit and one non-deposit transaction + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { + header, + body: BlockBody { + transactions: vec![tx, tx_deposit], + ..Default::default() + }, + }, + senders: vec![addr, addr], + }, + U256::ZERO, + ) + .into(), + ) + .unwrap(); + + let receipts = executor.receipts(); + let tx_receipt = receipts[0][0].as_ref().unwrap(); + let deposit_receipt = receipts[0][1].as_ref().unwrap(); + + // deposit_receipt_version is not present in pre canyon transactions + assert!(deposit_receipt.deposit_receipt_version.is_none()); + assert!(tx_receipt.deposit_receipt_version.is_none()); + + // deposit_nonce is present only in deposit transactions + assert!(deposit_receipt.deposit_nonce.is_some()); + assert!(tx_receipt.deposit_nonce.is_none()); + } + + #[test] + fn op_deposit_fields_post_canyon() { + // ensure_create2_deployer will fail if timestamp is set to less then 2 + let header = Header { + timestamp: 2, + number: 1, + gas_limit: 1_000_000, + gas_used: 42_000, + receipts_root: b256!( + "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" + ), + ..Default::default() + }; + + let mut db = create_op_state_provider(); + let addr = Address::ZERO; + let account = Account { balance: U256::MAX, ..Account::default() }; + + db.insert_account(addr, account, None, HashMap::default()); + + let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().canyon_activated().build()); + + let tx = TransactionSigned::from_transaction_and_signature( + Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: MIN_TRANSACTION_GAS, + to: addr.into(), + ..Default::default() + }), + Signature::test_signature(), + ); + + let tx_deposit = TransactionSigned::from_transaction_and_signature( + Transaction::Deposit(op_alloy_consensus::TxDeposit { + from: addr, + to: addr.into(), + gas_limit: MIN_TRANSACTION_GAS, + ..Default::default() + }), + optimism_deposit_tx_signature(), + ); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // make sure the L1 block contract state is preloaded. 
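+        // (OP fee handling reads the L1 fee parameters seeded in `create_op_state_provider` from this contract)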
+ executor.with_state_mut(|state| { + state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + }); + + // attempt to execute an empty block with parent beacon block root, this should not fail + executor + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { + header, + body: BlockBody { + transactions: vec![tx, tx_deposit], + ..Default::default() + }, + }, + senders: vec![addr, addr], + }, + U256::ZERO, + ) + .into(), + ) + .expect("Executing a block while canyon is active should not fail"); + + let receipts = executor.receipts(); + let tx_receipt = receipts[0][0].as_ref().unwrap(); + let deposit_receipt = receipts[0][1].as_ref().unwrap(); + + // deposit_receipt_version is set to 1 for post canyon deposit transactions + assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); + assert!(tx_receipt.deposit_receipt_version.is_none()); + + // deposit_nonce is present only in deposit transactions + assert!(deposit_receipt.deposit_nonce.is_some()); + assert!(tx_receipt.deposit_nonce.is_none()); + } +} From d421931b7ec86759a0bf1aa2cb7e9b6263f14c9a Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 16 Oct 2024 12:31:23 +0200 Subject: [PATCH 21/51] trie: simplify usage of `HashedStorage` with default (#11662) --- crates/storage/provider/src/providers/bundle_state_provider.rs | 2 +- crates/trie/parallel/src/parallel_root.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 296dae8c6ab7..be6549033cde 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -43,7 +43,7 @@ impl BundleStateProvider account.storage.iter().map(|(slot, value)| (slot, &value.present_value)), ) }) - .unwrap_or_else(|| HashedStorage::new(false)) + .unwrap_or_default() } } diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/parallel_root.rs index a64b8351446e..e432b91062ca 100644 --- a/crates/trie/parallel/src/parallel_root.rs +++ b/crates/trie/parallel/src/parallel_root.rs @@ -320,7 +320,7 @@ mod tests { hashed_state .storages .entry(hashed_address) - .or_insert_with(|| HashedStorage::new(false)) + .or_insert_with(HashedStorage::default) .storage .insert(hashed_slot, *value); } From 248b6b5905e9c44b5e0c2af6abae450898801beb Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Wed, 16 Oct 2024 12:39:25 +0200 Subject: [PATCH 22/51] fix: task executor metrics (#11738) --- bin/reth/src/cli/mod.rs | 5 +++++ crates/cli/commands/src/node.rs | 5 ----- crates/tasks/src/lib.rs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index cca801da36b2..01f8f73e7b1b 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -18,6 +18,7 @@ use reth_db::DatabaseEnv; use reth_ethereum_cli::chainspec::EthereumChainSpecParser; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_ethereum::{EthExecutorProvider, EthereumNode}; +use reth_node_metrics::recorder::install_prometheus_recorder; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; use tracing::info; @@ -145,6 +146,10 @@ impl, Ext: clap::Args + fmt::Debug> Cl let _guard = self.init_tracing()?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", 
self.logs.log_file_directory); + // Install the prometheus recorder to be sure to record task + // executor's metrics + let _ = install_prometheus_recorder(); + let runner = CliRunner::default(); match self.command { Commands::Node(command) => { diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index 5b1a87e068b3..b099a2c05222 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -16,7 +16,6 @@ use reth_node_core::{ node_config::NodeConfig, version, }; -use reth_node_metrics::recorder::install_prometheus_recorder; use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sync::Arc}; /// Start the node @@ -180,10 +179,6 @@ impl< pruning, }; - // Register the prometheus recorder before creating the database, - // because database init needs it to register metrics. - let _ = install_prometheus_recorder(); - let data_dir = node_config.datadir(); let db_path = data_dir.db(); diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index a0070698fcff..28b5eaba9ffb 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -288,7 +288,7 @@ pub struct TaskExecutor { on_shutdown: Shutdown, /// Sender half for sending panic signals to this type panicked_tasks_tx: UnboundedSender, - // Task Executor Metrics + /// Task Executor Metrics metrics: TaskExecutorMetrics, /// How many [`GracefulShutdown`] tasks are currently active graceful_tasks: Arc, From 87399ae2c17d83c9d874eaf3f0b902310414359e Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 16 Oct 2024 13:20:42 +0200 Subject: [PATCH 23/51] chore: rename executor and provider Generic -> Basic (#11788) --- crates/ethereum/evm/src/strategy.rs | 6 ++--- crates/evm/src/execute.rs | 36 ++++++++++++++--------------- crates/evm/src/test_utils.rs | 8 +++---- crates/optimism/evm/src/strategy.rs | 6 ++--- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/crates/ethereum/evm/src/strategy.rs b/crates/ethereum/evm/src/strategy.rs index 52f58a8b0a80..1c284e068d03 100644 --- a/crates/ethereum/evm/src/strategy.rs +++ b/crates/ethereum/evm/src/strategy.rs @@ -274,7 +274,7 @@ mod tests { use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256}; use reth_chainspec::{ChainSpecBuilder, ForkCondition}; use reth_evm::execute::{ - BatchExecutor, BlockExecutorProvider, Executor, GenericBlockExecutorProvider, + BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, }; use reth_execution_types::BlockExecutionOutput; use reth_primitives::{ @@ -328,11 +328,11 @@ mod tests { fn executor_provider( chain_spec: Arc, - ) -> GenericBlockExecutorProvider { + ) -> BasicBlockExecutorProvider { let strategy_factory = EthExecutionStrategyFactory::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)); - GenericBlockExecutorProvider::new(strategy_factory) + BasicBlockExecutorProvider::new(strategy_factory) } #[test] diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 704b6a23ad5a..f52325b43e8f 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -227,7 +227,7 @@ pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { DB: Database + Display>; } -impl Clone for GenericBlockExecutorProvider +impl Clone for BasicBlockExecutorProvider where F: Clone, { @@ -238,33 +238,33 @@ where /// A generic block executor provider that can create executors using a strategy factory. 
#[allow(missing_debug_implementations)] -pub struct GenericBlockExecutorProvider { +pub struct BasicBlockExecutorProvider { strategy_factory: F, } -impl GenericBlockExecutorProvider { - /// Creates a new `GenericBlockExecutorProvider` with the given strategy factory. +impl BasicBlockExecutorProvider { + /// Creates a new `BasicBlockExecutorProvider` with the given strategy factory. pub const fn new(strategy_factory: F) -> Self { Self { strategy_factory } } } -impl BlockExecutorProvider for GenericBlockExecutorProvider +impl BlockExecutorProvider for BasicBlockExecutorProvider where F: BlockExecutionStrategyFactory, { type Executor + Display>> = - GenericBlockExecutor, DB>; + BasicBlockExecutor, DB>; type BatchExecutor + Display>> = - GenericBatchExecutor, DB>; + BasicBatchExecutor, DB>; fn executor(&self, db: DB) -> Self::Executor where DB: Database + Display>, { let strategy = self.strategy_factory.create_strategy(db); - GenericBlockExecutor::new(strategy) + BasicBlockExecutor::new(strategy) } fn batch_executor(&self, db: DB) -> Self::BatchExecutor @@ -273,14 +273,14 @@ where { let strategy = self.strategy_factory.create_strategy(db); let batch_record = BlockBatchRecord::default(); - GenericBatchExecutor::new(strategy, batch_record) + BasicBatchExecutor::new(strategy, batch_record) } } /// A generic block executor that uses a [`BlockExecutionStrategy`] to /// execute blocks. #[allow(missing_debug_implementations, dead_code)] -pub struct GenericBlockExecutor +pub struct BasicBlockExecutor where S: BlockExecutionStrategy, { @@ -289,17 +289,17 @@ where _phantom: PhantomData, } -impl GenericBlockExecutor +impl BasicBlockExecutor where S: BlockExecutionStrategy, { - /// Creates a new `GenericBlockExecutor` with the given strategy. + /// Creates a new `BasicBlockExecutor` with the given strategy. pub const fn new(strategy: S) -> Self { Self { strategy, _phantom: PhantomData } } } -impl Executor for GenericBlockExecutor +impl Executor for BasicBlockExecutor where S: BlockExecutionStrategy, DB: Database + Display>, @@ -368,7 +368,7 @@ where /// A generic batch executor that uses a [`BlockExecutionStrategy`] to /// execute batches. #[allow(missing_debug_implementations)] -pub struct GenericBatchExecutor +pub struct BasicBatchExecutor where S: BlockExecutionStrategy, { @@ -379,17 +379,17 @@ where _phantom: PhantomData, } -impl GenericBatchExecutor +impl BasicBatchExecutor where S: BlockExecutionStrategy, { - /// Creates a new `GenericBatchExecutor` with the given strategy. + /// Creates a new `BasicBatchExecutor` with the given strategy. 
pub const fn new(strategy: S, batch_record: BlockBatchRecord) -> Self { Self { strategy, batch_record, _phantom: PhantomData } } } -impl BatchExecutor for GenericBatchExecutor +impl BatchExecutor for BasicBatchExecutor where S: BlockExecutionStrategy, DB: Database + Display>, @@ -661,7 +661,7 @@ mod tests { .clone(), finish_result: expected_finish_result.clone(), }; - let provider = GenericBlockExecutorProvider::new(strategy_factory); + let provider = BasicBlockExecutorProvider::new(strategy_factory); let db = CacheDB::>::default(); let executor = provider.executor(db); let result = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index 5ae7ed45b712..261b36420b4b 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -2,8 +2,8 @@ use crate::{ execute::{ - BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutionStrategy, - BlockExecutorProvider, Executor, GenericBatchExecutor, GenericBlockExecutor, + BasicBatchExecutor, BasicBlockExecutor, BatchExecutor, BlockExecutionInput, + BlockExecutionOutput, BlockExecutionStrategy, BlockExecutorProvider, Executor, }, system_calls::OnStateHook, }; @@ -112,7 +112,7 @@ impl BatchExecutor for MockExecutorProvider { } } -impl GenericBlockExecutor +impl BasicBlockExecutor where S: BlockExecutionStrategy, { @@ -133,7 +133,7 @@ where } } -impl GenericBatchExecutor +impl BasicBatchExecutor where S: BlockExecutionStrategy, { diff --git a/crates/optimism/evm/src/strategy.rs b/crates/optimism/evm/src/strategy.rs index 4fc06c39686f..33770714c4cb 100644 --- a/crates/optimism/evm/src/strategy.rs +++ b/crates/optimism/evm/src/strategy.rs @@ -278,7 +278,7 @@ mod tests { use alloy_consensus::TxEip1559; use alloy_primitives::{b256, Address, StorageKey, StorageValue}; use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_evm::execute::{BatchExecutor, BlockExecutorProvider, GenericBlockExecutorProvider}; + use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; use reth_optimism_chainspec::{optimism_deposit_tx_signature, OpChainSpecBuilder}; use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; use reth_revm::{ @@ -315,11 +315,11 @@ mod tests { fn executor_provider( chain_spec: Arc, - ) -> GenericBlockExecutorProvider { + ) -> BasicBlockExecutorProvider { let strategy_factory = OpExecutionStrategyFactory::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)); - GenericBlockExecutorProvider::new(strategy_factory) + BasicBlockExecutorProvider::new(strategy_factory) } #[test] From eec861fe9fc787900c18fc2d080f73c89620f5e3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Oct 2024 13:59:41 +0200 Subject: [PATCH 24/51] chore: rm unused optimism feature (#11794) --- crates/rpc/rpc/Cargo.toml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 5399e50ce28a..fe150e36eed5 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -98,8 +98,3 @@ jsonrpsee = { workspace = true, features = ["client"] } [features] js-tracer = ["revm-inspectors/js-tracer", "reth-rpc-eth-types/js-tracer"] -optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-revm/optimism", -] From 6f041108767a76d4ee964411bb8e346364444d47 Mon Sep 17 00:00:00 2001 From: Evan Chipman <42247026+evchip@users.noreply.github.com> Date: Wed, 16 Oct 2024 19:25:27 +0700 Subject: [PATCH 25/51] chore: 
rename SenderId::into_id to SenderId::into_transaction_id (#11793) Co-authored-by: Matthias Seitz --- crates/transaction-pool/src/identifier.rs | 2 +- crates/transaction-pool/src/pool/mod.rs | 5 +++-- crates/transaction-pool/src/pool/txpool.rs | 12 ++++++++---- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index c50d39ae495a..2e5312a210fe 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -61,7 +61,7 @@ impl SenderId { } /// Converts the sender to a [`TransactionId`] with the given nonce. - pub const fn into_id(self, nonce: u64) -> TransactionId { + pub const fn into_transaction_id(self, nonce: u64) -> TransactionId { TransactionId::new(self, nonce) } } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 64e6dad6793f..600a8da934ec 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -792,8 +792,9 @@ where on_chain_nonce: u64, ) -> Option>> { let sender_id = self.get_sender_id(sender); - self.get_pool_data() - .get_highest_consecutive_transaction_by_sender(sender_id.into_id(on_chain_nonce)) + self.get_pool_data().get_highest_consecutive_transaction_by_sender( + sender_id.into_transaction_id(on_chain_nonce), + ) } /// Returns all transactions that where submitted with the given [`TransactionOrigin`] diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index c89aca830f0a..9d284392db55 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -2804,20 +2804,24 @@ mod tests { // Get last consecutive transaction let sender_id = f.ids.sender_id(&sender).unwrap(); - let next_tx = pool.get_highest_consecutive_transaction_by_sender(sender_id.into_id(0)); + let next_tx = + pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(0)); assert_eq!(next_tx.map(|tx| tx.nonce()), Some(2), "Expected nonce 2 for on-chain nonce 0"); - let next_tx = pool.get_highest_consecutive_transaction_by_sender(sender_id.into_id(4)); + let next_tx = + pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(4)); assert_eq!(next_tx.map(|tx| tx.nonce()), Some(5), "Expected nonce 5 for on-chain nonce 4"); - let next_tx = pool.get_highest_consecutive_transaction_by_sender(sender_id.into_id(5)); + let next_tx = + pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(5)); assert_eq!(next_tx.map(|tx| tx.nonce()), Some(5), "Expected nonce 5 for on-chain nonce 5"); // update the tracked nonce let mut info = SenderInfo::default(); info.update(8, U256::ZERO); pool.sender_info.insert(sender_id, info); - let next_tx = pool.get_highest_consecutive_transaction_by_sender(sender_id.into_id(5)); + let next_tx = + pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(5)); assert_eq!(next_tx.map(|tx| tx.nonce()), Some(9), "Expected nonce 9 for on-chain nonce 8"); } From c76d3194446b59581f11072e9cb66307e4aa278e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Oct 2024 14:44:19 +0200 Subject: [PATCH 26/51] chore: rm optimism feature from reth-revm (#11797) --- crates/optimism/evm/Cargo.toml | 2 +- crates/optimism/node/Cargo.toml | 1 - crates/optimism/payload/Cargo.toml | 2 +- crates/revm/Cargo.toml | 1 - 4 files changed, 2 insertions(+), 4 deletions(-) diff --git 
a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index afaca32b63ee..53f9ae033dd3 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -54,5 +54,5 @@ optimism = [ "reth-primitives/optimism", "reth-execution-types/optimism", "reth-optimism-consensus/optimism", - "reth-revm/optimism", + "revm/optimism", ] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index f9e038a3d9e0..8e359e602657 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -87,7 +87,6 @@ optimism = [ "reth-optimism-evm/optimism", "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", - "reth-revm/optimism", "revm/optimism", "reth-auto-seal-consensus/optimism", "reth-optimism-rpc/optimism", diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 414b2c358118..e1d6fe47d291 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -52,5 +52,5 @@ optimism = [ "reth-primitives/optimism", "reth-provider/optimism", "reth-optimism-evm/optimism", - "reth-revm/optimism", + "revm/optimism", ] diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 9e4501f62770..7ffb06ce960c 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -36,5 +36,4 @@ default = ["std", "c-kzg"] std = [] c-kzg = ["revm/c-kzg"] test-utils = ["dep:reth-trie"] -optimism = ["revm/optimism"] serde = ["revm/serde"] From 6ad1275e6b86b003eb51f794cd70de41559247f7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 16 Oct 2024 17:04:23 +0200 Subject: [PATCH 27/51] chore(sdk): move block traits into `reth-primitives-traits` (#11780) --- crates/primitives-traits/src/block.rs | 99 +++++++++++++++++++ .../src}/block/body.rs | 27 +++-- .../src}/block/mod.rs | 76 ++++---------- crates/primitives/src/traits/mod.rs | 7 -- 4 files changed, 135 insertions(+), 74 deletions(-) create mode 100644 crates/primitives-traits/src/block.rs rename crates/{primitives/src/traits => primitives-traits/src}/block/body.rs (84%) rename crates/{primitives/src/traits => primitives-traits/src}/block/mod.rs (58%) delete mode 100644 crates/primitives/src/traits/mod.rs diff --git a/crates/primitives-traits/src/block.rs b/crates/primitives-traits/src/block.rs new file mode 100644 index 000000000000..02f581801c93 --- /dev/null +++ b/crates/primitives-traits/src/block.rs @@ -0,0 +1,99 @@ +//! Block abstraction. + +pub mod body; + +use alloc::fmt; +use core::ops; + +use alloy_consensus::BlockHeader; +use alloy_primitives::{Address, Sealable, B256}; + +use crate::{traits::BlockBody, BlockWithSenders, SealedBlock, SealedHeader}; + +/// Abstraction of block data type. +pub trait Block: + fmt::Debug + + Clone + + PartialEq + + Eq + + Default + + serde::Serialize + + for<'a> serde::Deserialize<'a> + + From<(Self::Header, Self::Body)> + + Into<(Self::Header, Self::Body)> +{ + /// Header part of the block. + type Header: BlockHeader + Sealable; + + /// The block's body contains the transactions in the block. + type Body: BlockBody; + + /// A block and block hash. + type SealedBlock; + + /// A block and addresses of senders of transactions in it. + type BlockWithSenders; + + /// Returns reference to [`BlockHeader`] type. + fn header(&self) -> &Self::Header; + + /// Returns reference to [`BlockBody`] type. + fn body(&self) -> &Self::Body; + + /// Calculate the header hash and seal the block so that it can't be changed. 
+ // todo: can be default impl if sealed block type is made generic over header and body and + // migrated to alloy + fn seal_slow(self) -> Self::SealedBlock; + + /// Seal the block with a known hash. + /// + /// WARNING: This method does not perform validation whether the hash is correct. + // todo: can be default impl if sealed block type is made generic over header and body and + // migrated to alloy + fn seal(self, hash: B256) -> Self::SealedBlock; + + /// Expensive operation that recovers transaction signer. See + /// [`SealedBlockWithSenders`](reth_primitives::SealedBlockWithSenders). + fn senders(&self) -> Option> { + self.body().recover_signers() + } + + /// Transform into a [`BlockWithSenders`]. + /// + /// # Panics + /// + /// If the number of senders does not match the number of transactions in the block + /// and the signer recovery for one of the transactions fails. + /// + /// Note: this is expected to be called with blocks read from disk. + #[track_caller] + fn with_senders_unchecked(self, senders: Vec
) -> Self::BlockWithSenders { + self.try_with_senders_unchecked(senders).expect("stored block is valid") + } + + /// Transform into a [`BlockWithSenders`] using the given senders. + /// + /// If the number of senders does not match the number of transactions in the block, this falls + /// back to manual recovery, but _without ensuring that the signature has a low `s` value_. + /// See also [`TransactionSigned::recover_signer_unchecked`] + /// + /// Returns an error if a signature is invalid. + // todo: can be default impl if block with senders type is made generic over block and migrated + // to alloy + #[track_caller] + fn try_with_senders_unchecked( + self, + senders: Vec<Address>
, + ) -> Result; + + /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained + /// transactions. + /// + /// Returns `None` if a transaction is invalid. + // todo: can be default impl if sealed block type is made generic over header and body and + // migrated to alloy + fn with_recovered_senders(self) -> Option; + + /// Calculates a heuristic for the in-memory size of the [`Block`]. + fn size(&self) -> usize; +} diff --git a/crates/primitives/src/traits/block/body.rs b/crates/primitives-traits/src/block/body.rs similarity index 84% rename from crates/primitives/src/traits/block/body.rs rename to crates/primitives-traits/src/block/body.rs index ff8f71b76162..03246c68b454 100644 --- a/crates/primitives/src/traits/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -3,10 +3,11 @@ use alloc::fmt; use core::ops; -use alloy_consensus::{BlockHeader, Transaction, TxType}; +use alloy_consensus::{BlockHeader,Request, Transaction, TxType}; use alloy_primitives::{Address, B256}; +use alloy_eips::eip1559::Withdrawal; -use crate::{proofs, traits::Block, Requests, Withdrawals}; +use crate::Block; /// Abstraction for block's body. pub trait BlockBody: @@ -27,18 +28,24 @@ pub trait BlockBody: /// Header type (uncle blocks). type Header: BlockHeader; + /// Withdrawals in block. + type Withdrawals: Iterator; + + /// Requests in block. + type Requests: Iterator; + /// Returns reference to transactions in block. fn transactions(&self) -> &[Self::SignedTransaction]; /// Returns [`Withdrawals`] in the block, if any. // todo: branch out into extension trait - fn withdrawals(&self) -> Option<&Withdrawals>; + fn withdrawals(&self) -> Option<&Self::Withdrawals>; /// Returns reference to uncle block headers. fn ommers(&self) -> &[Self::Header]; /// Returns [`Request`] in block, if any. - fn requests(&self) -> Option<&Requests>; + fn requests(&self) -> Option<&Self::Requests>; /// Create a [`Block`] from the body and its header. fn into_block>(self, header: Self::Header) -> T { @@ -53,15 +60,15 @@ pub trait BlockBody: /// Calculate the withdrawals root for the block body, if withdrawals exist. If there are no /// withdrawals, this will return `None`. - fn calculate_withdrawals_root(&self) -> Option { - Some(proofs::calculate_withdrawals_root(self.withdrawals()?)) - } + // todo: can be default impl if `calculate_withdrawals_root` made into a method on + // `Withdrawals` and `Withdrawals` moved to alloy + fn calculate_withdrawals_root(&self) -> Option; /// Calculate the requests root for the block body, if requests exist. If there are no /// requests, this will return `None`. - fn calculate_requests_root(&self) -> Option { - Some(proofs::calculate_requests_root(self.requests()?)) - } + // todo: can be default impl if `calculate_requests_root` made into a method on + // `Requests` and `Requests` moved to alloy + fn calculate_requests_root(&self) -> Option; /// Recover signer addresses for all transactions in the block body. fn recover_signers(&self) -> Option>; diff --git a/crates/primitives/src/traits/block/mod.rs b/crates/primitives-traits/src/block/mod.rs similarity index 58% rename from crates/primitives/src/traits/block/mod.rs rename to crates/primitives-traits/src/block/mod.rs index 451a54c3457c..02f581801c93 100644 --- a/crates/primitives/src/traits/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -28,6 +28,12 @@ pub trait Block: /// The block's body contains the transactions in the block. type Body: BlockBody; + /// A block and block hash. 
+ type SealedBlock; + + /// A block and addresses of senders of transactions in it. + type BlockWithSenders; + /// Returns reference to [`BlockHeader`] type. fn header(&self) -> &Self::Header; @@ -35,20 +41,16 @@ pub trait Block: fn body(&self) -> &Self::Body; /// Calculate the header hash and seal the block so that it can't be changed. - fn seal_slow(self) -> SealedBlock { - let (header, body) = self.into(); - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedBlock { header: SealedHeader::new(header, seal), body } - } + // todo: can be default impl if sealed block type is made generic over header and body and + // migrated to alloy + fn seal_slow(self) -> Self::SealedBlock; /// Seal the block with a known hash. /// /// WARNING: This method does not perform validation whether the hash is correct. - fn seal(self, hash: B256) -> SealedBlock { - let (header, body) = self.into(); - SealedBlock { header: SealedHeader::new(header, hash), body } - } + // todo: can be default impl if sealed block type is made generic over header and body and + // migrated to alloy + fn seal(self, hash: B256) -> Self::SealedBlock; /// Expensive operation that recovers transaction signer. See /// [`SealedBlockWithSenders`](reth_primitives::SealedBlockWithSenders). @@ -65,7 +67,7 @@ pub trait Block: /// /// Note: this is expected to be called with blocks read from disk. #[track_caller] - fn with_senders_unchecked(self, senders: Vec
) -> BlockWithSenders<Self> { + fn with_senders_unchecked(self, senders: Vec<Address>
) -> Self::BlockWithSenders { self.try_with_senders_unchecked(senders).expect("stored block is valid") } /// Transform into a [`BlockWithSenders`] using the given senders. /// /// If the number of senders does not match the number of transactions in the block, this falls /// back to manual recovery, but _without ensuring that the signature has a low `s` value_. /// See also [`TransactionSigned::recover_signer_unchecked`] /// /// Returns an error if a signature is invalid. + // todo: can be default impl if block with senders type is made generic over block and migrated + // to alloy #[track_caller] fn try_with_senders_unchecked( self, senders: Vec<Address>
, - ) -> Result, Self> { - let senders = if self.body().transactions().len() == senders.len() { - senders - } else { - let Some(senders) = self.body().recover_signers() else { return Err(self) }; - senders - }; - - Ok(BlockWithSenders { block: self, senders }) - } + ) -> Result; /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained /// transactions. /// /// Returns `None` if a transaction is invalid. - fn with_recovered_senders(self) -> Option> { - let senders = self.senders()?; - Some(BlockWithSenders { block: self, senders }) - } + // todo: can be default impl if sealed block type is made generic over header and body and + // migrated to alloy + fn with_recovered_senders(self) -> Option; /// Calculates a heuristic for the in-memory size of the [`Block`]. fn size(&self) -> usize; } - -impl Block for T -where - T: ops::Deref - + fmt::Debug - + Clone - + PartialEq - + Eq - + Default - + serde::Serialize - + for<'a> serde::Deserialize<'a> - + From<(::Header, ::Body)> - + Into<(::Header, ::Body)>, -{ - type Header = ::Header; - type Body = ::Body; - - #[inline] - fn header(&self) -> &Self::Header { - self.deref().header() - } - - #[inline] - fn body(&self) -> &Self::Body { - self.deref().body() - } - - #[inline] - fn size(&self) -> usize { - self.deref().size() - } -} diff --git a/crates/primitives/src/traits/mod.rs b/crates/primitives/src/traits/mod.rs deleted file mode 100644 index 8c84c6729753..000000000000 --- a/crates/primitives/src/traits/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Abstractions of primitive data types - -pub mod block; - -pub use block::{body::BlockBody, Block}; - -pub use alloy_consensus::BlockHeader; From cb7fd084a673bae8c5b5c0c0fbf46b74fe89a23e Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Wed, 16 Oct 2024 17:05:59 +0200 Subject: [PATCH 28/51] chore: remove &self from update_estimated_gas_range (#11799) --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 101 ++++++++++----------- 1 file changed, 50 insertions(+), 51 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 6784f9327f09..b43b34305bd9 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -855,7 +855,7 @@ pub trait Call: LoadState + SpawnBlocking { // Update the gas used based on the new result. gas_used = res.result.gas_used(); // Update the gas limit estimates (highest and lowest) based on the execution result. - self.update_estimated_gas_range( + update_estimated_gas_range( res.result, optimistic_gas_limit, &mut highest_gas_limit, @@ -900,7 +900,7 @@ pub trait Call: LoadState + SpawnBlocking { // Unpack the result and environment if the transaction was successful. (res, env) = ethres?; // Update the estimated gas range based on the transaction result. - self.update_estimated_gas_range( + update_estimated_gas_range( res.result, mid_gas_limit, &mut highest_gas_limit, @@ -916,55 +916,6 @@ pub trait Call: LoadState + SpawnBlocking { Ok(U256::from(highest_gas_limit)) } - /// Updates the highest and lowest gas limits for binary search based on the execution result. - /// - /// This function refines the gas limit estimates used in a binary search to find the optimal - /// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on - /// whether the execution succeeded, reverted, or halted due to specific reasons. 
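The binary search this helper drives can be sketched with toy types (the real code operates on revm's `ExecutionResult`; this is only an illustration of the narrowing rule described above):

// Toy outcome type standing in for revm's ExecutionResult.
enum Outcome { Success, Revert, OutOfGas }

// Same rule as the doc above: success caps the upper bound, revert/out-of-gas
// raise the lower bound.
fn update_range(outcome: &Outcome, tried_gas_limit: u64, highest: &mut u64, lowest: &mut u64) {
    match outcome {
        Outcome::Success => *highest = tried_gas_limit,
        Outcome::Revert | Outcome::OutOfGas => *lowest = tried_gas_limit,
    }
}

// Narrow [lowest, highest] until the bounds meet; `execute` runs the call with
// the candidate gas limit and reports how it finished.
fn estimate_gas(mut lowest: u64, mut highest: u64, mut execute: impl FnMut(u64) -> Outcome) -> u64 {
    while lowest + 1 < highest {
        let mid = lowest + (highest - lowest) / 2;
        let outcome = execute(mid);
        update_range(&outcome, mid, &mut highest, &mut lowest);
    }
    highest
}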
- #[inline] - fn update_estimated_gas_range( - &self, - result: ExecutionResult, - tx_gas_limit: u64, - highest_gas_limit: &mut u64, - lowest_gas_limit: &mut u64, - ) -> Result<(), Self::Error> { - match result { - ExecutionResult::Success { .. } => { - // Cap the highest gas limit with the succeeding gas limit. - *highest_gas_limit = tx_gas_limit; - } - ExecutionResult::Revert { .. } => { - // Increase the lowest gas limit. - *lowest_gas_limit = tx_gas_limit; - } - ExecutionResult::Halt { reason, .. } => { - match reason { - HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { - // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas - // left is too low. Treat this as an out of gas - // condition, knowing that the call succeeds with a - // higher gas limit. - // - // Common usage of invalid opcode in OpenZeppelin: - // - - // Increase the lowest gas limit. - *lowest_gas_limit = tx_gas_limit; - } - err => { - // These cases should be unreachable because we know the transaction - // succeeds, but if they occur, treat them as an - // error. - return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) - } - } - } - }; - - Ok(()) - } - /// Executes the requests again after an out of gas error to check if the error is gas related /// or not #[inline] @@ -1163,3 +1114,51 @@ pub trait Call: LoadState + SpawnBlocking { Ok(env) } } + +/// Updates the highest and lowest gas limits for binary search based on the execution result. +/// +/// This function refines the gas limit estimates used in a binary search to find the optimal +/// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on +/// whether the execution succeeded, reverted, or halted due to specific reasons. +#[inline] +fn update_estimated_gas_range( + result: ExecutionResult, + tx_gas_limit: u64, + highest_gas_limit: &mut u64, + lowest_gas_limit: &mut u64, +) -> Result<(), EthApiError> { + match result { + ExecutionResult::Success { .. } => { + // Cap the highest gas limit with the succeeding gas limit. + *highest_gas_limit = tx_gas_limit; + } + ExecutionResult::Revert { .. } => { + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + ExecutionResult::Halt { reason, .. } => { + match reason { + HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { + // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas + // left is too low. Treat this as an out of gas + // condition, knowing that the call succeeds with a + // higher gas limit. + // + // Common usage of invalid opcode in OpenZeppelin: + // + + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + err => { + // These cases should be unreachable because we know the transaction + // succeeds, but if they occur, treat them as an + // error. 
+ return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) + } + } + } + }; + + Ok(()) +} From 281307fe4c25fef8cebbffcfff6eaf0f4d190d6b Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 16 Oct 2024 17:47:35 +0200 Subject: [PATCH 29/51] chore(ci): update list of crates excluded from wasm checks (#11787) --- .github/assets/check_wasm.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 8d53f457af91..1b1c0641fc0a 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -30,12 +30,10 @@ exclude_crates=( reth-engine-util reth-eth-wire reth-ethereum-cli - reth-ethereum-engine reth-ethereum-engine-primitives reth-ethereum-payload-builder reth-etl reth-evm-ethereum - reth-execution-errors reth-exex reth-exex-test-utils reth-ipc @@ -49,7 +47,6 @@ exclude_crates=( reth-node-events reth-node-metrics reth-optimism-cli - reth-optimism-evm reth-optimism-node reth-optimism-payload-builder reth-optimism-rpc @@ -63,9 +60,7 @@ exclude_crates=( reth-rpc-eth-api reth-rpc-eth-types reth-rpc-layer - reth-rpc-types reth-stages - reth-storage-errors reth-engine-local # The following are not supposed to be working reth # all of the crates below From 6b2ec42e48394a8ffa0dc471ec7b2a9ba2a697c4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Oct 2024 18:50:18 +0200 Subject: [PATCH 30/51] docs: clarify max rpc tracing requests (#11796) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- book/cli/help.rs | 2 +- book/cli/reth/node.md | 4 +++- crates/node/core/src/args/rpc_server.rs | 5 +++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/book/cli/help.rs b/book/cli/help.rs index e347e1ea5db6..963f53deb0a9 100755 --- a/book/cli/help.rs +++ b/book/cli/help.rs @@ -320,7 +320,7 @@ fn preprocess_help(s: &str) -> Cow<'_, str> { (r"default: reth/.*/\w+", "default: reth//"), // Remove rpc.max-tracing-requests default value ( - r"(rpc.max-tracing-requests \n.*\n.*\n.*)\[default: \d+\]", + r"(rpc.max-tracing-requests \n.*\n.*\n.*\n.*\n.*)\[default: \d+\]", r"$1[default: ]", ), ]; diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 73a8063a852b..ea10d9522161 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -331,7 +331,9 @@ RPC: [default: 500] --rpc.max-tracing-requests - Maximum number of concurrent tracing requests + Maximum number of concurrent tracing requests. + + By default this chooses a sensible value based on the number of available cores. Tracing requests are generally CPU bound. Choosing a value that is higher than the available CPU cores can have a negative impact on the performance of the node and affect the node's ability to maintain sync. [default: ] diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 15771e9897ef..382f22d37764 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -135,6 +135,11 @@ pub struct RpcServerArgs { pub rpc_max_connections: MaxU32, /// Maximum number of concurrent tracing requests. + /// + /// By default this chooses a sensible value based on the number of available cores. + /// Tracing requests are generally CPU bound. + /// Choosing a value that is higher than the available CPU cores can have a negative impact on + /// the performance of the node and affect the node's ability to maintain sync. 
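One plausible shape for the `default_max_tracing_requests()` helper referenced by the flag below, purely illustrative since the real formula may differ, is to subtract a small reserve from the available cores:

use std::thread::available_parallelism;

// Illustrative sketch: leave a couple of cores free for sync, networking and
// block processing; fall back to a fixed cap if parallelism cannot be queried.
fn default_max_tracing_requests_sketch() -> usize {
    available_parallelism().map_or(25, |cores| cores.get().saturating_sub(2).max(2))
}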
#[arg(long = "rpc.max-tracing-requests", alias = "rpc-max-tracing-requests", value_name = "COUNT", default_value_t = constants::default_max_tracing_requests())] pub rpc_max_tracing_requests: usize, From 12cab204b50a824bcfb55f6b6d46f2e9cda34c31 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 16 Oct 2024 19:21:25 +0200 Subject: [PATCH 31/51] fix(witness): branch node children decoding (#11599) --- crates/trie/db/tests/witness.rs | 52 +++++++++++++++++++++++++++++++++ crates/trie/trie/src/witness.rs | 22 ++++++++++---- 2 files changed, 68 insertions(+), 6 deletions(-) diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs index 59656383de4c..20f8cfbb9081 100644 --- a/crates/trie/db/tests/witness.rs +++ b/crates/trie/db/tests/witness.rs @@ -6,6 +6,8 @@ use alloy_primitives::{ Address, Bytes, B256, U256, }; use alloy_rlp::EMPTY_STRING_CODE; +use reth_db::{cursor::DbCursorRW, tables}; +use reth_db_api::transaction::DbTxMut; use reth_primitives::{constants::EMPTY_ROOT_HASH, Account, StorageEntry}; use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; use reth_trie::{proof::Proof, witness::TrieWitness, HashedPostState, HashedStorage, StateRoot}; @@ -91,3 +93,53 @@ fn includes_nodes_for_destroyed_storage_nodes() { assert_eq!(witness.get(&keccak256(node)), Some(node)); } } + +#[test] +fn correctly_decodes_branch_node_values() { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + + let address = Address::random(); + let hashed_address = keccak256(address); + let hashed_slot1 = B256::with_last_byte(1); + let hashed_slot2 = B256::with_last_byte(2); + + // Insert account and slots into database + provider.insert_account_for_hashing([(address, Some(Account::default()))]).unwrap(); + let mut hashed_storage_cursor = + provider.tx_ref().cursor_dup_write::().unwrap(); + hashed_storage_cursor + .upsert(hashed_address, StorageEntry { key: hashed_slot1, value: U256::from(1) }) + .unwrap(); + hashed_storage_cursor + .upsert(hashed_address, StorageEntry { key: hashed_slot2, value: U256::from(1) }) + .unwrap(); + + let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); + let multiproof = Proof::from_tx(provider.tx_ref()) + .multiproof(HashMap::from_iter([( + hashed_address, + HashSet::from_iter([hashed_slot1, hashed_slot2]), + )])) + .unwrap(); + + let witness = TrieWitness::from_tx(provider.tx_ref()) + .compute(HashedPostState { + accounts: HashMap::from([(hashed_address, Some(Account::default()))]), + storages: HashMap::from([( + hashed_address, + HashedStorage::from_iter( + false, + [hashed_slot1, hashed_slot2].map(|hashed_slot| (hashed_slot, U256::from(2))), + ), + )]), + }) + .unwrap(); + assert!(witness.contains_key(&state_root)); + for node in multiproof.account_subtree.values() { + assert_eq!(witness.get(&keccak256(node)), Some(node)); + } + for node in multiproof.storages.iter().flat_map(|(_, storage)| storage.subtree.values()) { + assert_eq!(witness.get(&keccak256(node)), Some(node)); + } +} diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 971f10cfbae1..f3b70e85ad6f 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -216,9 +216,14 @@ where TrieNode::Branch(branch) => { next_path.push(key[path.len()]); let children = branch_node_children(path.clone(), &branch); - for (child_path, node_hash) in children { + for (child_path, value) in children { if !key.starts_with(&child_path) { - trie_nodes.insert(child_path, 
Either::Left(node_hash)); + let value = if value.len() < B256::len_bytes() { + Either::Right(value.to_vec()) + } else { + Either::Left(B256::from_slice(&value[1..])) + }; + trie_nodes.insert(child_path, value); } } } @@ -312,8 +317,13 @@ where match TrieNode::decode(&mut &node[..])? { TrieNode::Branch(branch) => { let children = branch_node_children(path, &branch); - for (child_path, branch_hash) in children { - hash_builder.add_branch(child_path, branch_hash, false); + for (child_path, value) in children { + if value.len() < B256::len_bytes() { + hash_builder.add_leaf(child_path, value); + } else { + let hash = B256::from_slice(&value[1..]); + hash_builder.add_branch(child_path, hash, false); + } } break } @@ -343,14 +353,14 @@ where } /// Returned branch node children with keys in order. -fn branch_node_children(prefix: Nibbles, node: &BranchNode) -> Vec<(Nibbles, B256)> { +fn branch_node_children(prefix: Nibbles, node: &BranchNode) -> Vec<(Nibbles, &[u8])> { let mut children = Vec::with_capacity(node.state_mask.count_ones() as usize); let mut stack_ptr = node.as_ref().first_child_index(); for index in CHILD_INDEX_RANGE { if node.state_mask.is_bit_set(index) { let mut child_path = prefix.clone(); child_path.push(index); - children.push((child_path, B256::from_slice(&node.stack[stack_ptr][1..]))); + children.push((child_path, &node.stack[stack_ptr][..])); stack_ptr += 1; } } From 099987fc3d60f1c3bc6ca458242ae46988cba5bd Mon Sep 17 00:00:00 2001 From: Kunal Arora <55632507+aroralanuk@users.noreply.github.com> Date: Wed, 16 Oct 2024 23:00:26 +0530 Subject: [PATCH 32/51] chore(cli): add `default_client_version` to rethCli (#11773) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/cli/cli/Cargo.toml | 2 +- crates/cli/cli/src/lib.rs | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 4d23c5b42bf9..3211a92c615c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6551,6 +6551,7 @@ dependencies = [ "clap", "eyre", "reth-cli-runner", + "reth-db", "serde_json", "shellexpand", ] diff --git a/crates/cli/cli/Cargo.toml b/crates/cli/cli/Cargo.toml index 7eb1f43b1e58..5da51a1b2f80 100644 --- a/crates/cli/cli/Cargo.toml +++ b/crates/cli/cli/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-cli-runner.workspace = true - +reth-db.workspace = true alloy-genesis.workspace = true # misc diff --git a/crates/cli/cli/src/lib.rs b/crates/cli/cli/src/lib.rs index 1db5ebf86b13..f7bf716ea379 100644 --- a/crates/cli/cli/src/lib.rs +++ b/crates/cli/cli/src/lib.rs @@ -10,6 +10,7 @@ use clap::{Error, Parser}; use reth_cli_runner::CliRunner; +use reth_db::ClientVersion; use std::{borrow::Cow, ffi::OsString}; /// The chainspec module defines the different chainspecs that can be used by the node. @@ -66,4 +67,7 @@ pub trait RethCli: Sized { Ok(cli.with_runner(f)) } + + /// The client version of the node. 
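As a usage sketch only (the trait method itself is declared just below), a consumer generic over `RethCli` could thread the reported version into whatever component records client versions:

// Hypothetical helper, not part of the patch.
fn log_client_version<C: RethCli>() {
    // `ClientVersion` comes from `reth_db`, matching the new dependency added here.
    let version = C::client_version();
    let _ = version; // e.g. hand it to the database open routine that stores client versions
}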
+ fn client_version() -> ClientVersion; } From e5cd026e036e7c28fd76253f65a18402e8b8b6f2 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 16 Oct 2024 19:49:57 +0200 Subject: [PATCH 33/51] deps: `alloy-trie@0.7.2` (#11807) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3211a92c615c..a1e11216b3f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -741,9 +741,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fa8acead43cb238a7b7f47238c71137f4677a0b8d90e7e3be6e6ca59a28194e" +checksum = "cdd7f8b3a7c65ca09b3c7bdd7c7d72d7423d026f5247eda96af53d24e58315c1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -4464,7 +4464,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -11378,7 +11378,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] From dcaa432155931267a4010ab09d0d30b94ed30a41 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 16 Oct 2024 20:31:58 +0200 Subject: [PATCH 34/51] chore(trie): use `RlpNode::as_hash` (#11808) --- crates/trie/sparse/src/trie.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 1b83e07e48d6..2edaaf76b274 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -267,8 +267,8 @@ impl RevealedSparseTrie { // take the current prefix set. 
let mut prefix_set = std::mem::take(&mut self.prefix_set).freeze(); let root_rlp = self.rlp_node(Nibbles::default(), &mut prefix_set); - if root_rlp.len() == B256::len_bytes() + 1 { - B256::from_slice(&root_rlp[1..]) + if let Some(root_hash) = root_rlp.as_hash() { + root_hash } else { keccak256(root_rlp) } @@ -338,8 +338,8 @@ impl RevealedSparseTrie { } else { let value = self.values.get(&path).unwrap(); let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); - if rlp_node.len() == B256::len_bytes() + 1 { - *hash = Some(B256::from_slice(&rlp_node[1..])); + if let Some(node_hash) = rlp_node.as_hash() { + *hash = Some(node_hash); } rlp_node } @@ -353,8 +353,8 @@ impl RevealedSparseTrie { let (_, child) = rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); - if rlp_node.len() == B256::len_bytes() + 1 { - *hash = Some(B256::from_slice(&rlp_node[1..])); + if let Some(node_hash) = rlp_node.as_hash() { + *hash = Some(node_hash); } rlp_node } else { @@ -393,8 +393,8 @@ impl RevealedSparseTrie { self.rlp_buf.clear(); let rlp_node = BranchNodeRef::new(&branch_value_stack_buf, *state_mask) .rlp(&mut self.rlp_buf); - if rlp_node.len() == B256::len_bytes() + 1 { - *hash = Some(B256::from_slice(&rlp_node[1..])); + if let Some(node_hash) = rlp_node.as_hash() { + *hash = Some(node_hash); } rlp_node } From 756a47e4e2a8ab350cfc12d6621981312e12f715 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 16 Oct 2024 20:33:29 +0200 Subject: [PATCH 35/51] chore: add `shekhirin` to trie code owners (#11809) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index afb3d6776320..488e6c90cf7f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -43,6 +43,6 @@ crates/tasks/ @mattsse crates/tokio-util/ @fgimenez @emhane crates/tracing/ @onbjerg crates/transaction-pool/ @mattsse -crates/trie/ @rkrasiuk @Rjected +crates/trie/ @rkrasiuk @Rjected @shekhirin etc/ @Rjected @onbjerg @shekhirin .github/ @onbjerg @gakonst @DaniPopes From a2249b0b04f7e92ea3c4b632c34269d048d7da05 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 16 Oct 2024 20:23:15 +0100 Subject: [PATCH 36/51] fix(exex): filter only WAL files when walking the directory (#11802) --- crates/exex/exex/src/wal/storage.rs | 37 ++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs index af3a590e5860..aaa4398fd0b6 100644 --- a/crates/exex/exex/src/wal/storage.rs +++ b/crates/exex/exex/src/wal/storage.rs @@ -9,6 +9,8 @@ use reth_exex_types::ExExNotification; use reth_tracing::tracing::debug; use tracing::instrument; +static FILE_EXTENSION: &str = "wal"; + /// The underlying WAL storage backed by a directory of files. /// /// Each notification is represented by a single file that contains a MessagePack-encoded @@ -29,7 +31,7 @@ impl Storage { } fn file_path(&self, id: u32) -> PathBuf { - self.path.join(format!("{id}.wal")) + self.path.join(format!("{id}.{FILE_EXTENSION}")) } fn parse_filename(filename: &str) -> eyre::Result { @@ -70,11 +72,14 @@ impl Storage { for entry in reth_fs_util::read_dir(&self.path)? 
{ let entry = entry?; - let file_name = entry.file_name(); - let file_id = Self::parse_filename(&file_name.to_string_lossy())?; - min_id = min_id.map_or(Some(file_id), |min_id: u32| Some(min_id.min(file_id))); - max_id = max_id.map_or(Some(file_id), |max_id: u32| Some(max_id.max(file_id))); + if entry.path().extension() == Some(FILE_EXTENSION.as_ref()) { + let file_name = entry.file_name(); + let file_id = Self::parse_filename(&file_name.to_string_lossy())?; + + min_id = min_id.map_or(Some(file_id), |min_id: u32| Some(min_id.min(file_id))); + max_id = max_id.map_or(Some(file_id), |max_id: u32| Some(max_id.max(file_id))); + } } Ok(min_id.zip(max_id).map(|(min_id, max_id)| min_id..=max_id)) @@ -167,7 +172,7 @@ impl Storage { #[cfg(test)] mod tests { - use std::sync::Arc; + use std::{fs::File, sync::Arc}; use eyre::OptionExt; use reth_exex_types::ExExNotification; @@ -206,4 +211,24 @@ mod tests { Ok(()) } + + #[test] + fn test_files_range() -> eyre::Result<()> { + let temp_dir = tempfile::tempdir()?; + let storage = Storage::new(&temp_dir)?; + + // Create WAL files + File::create(storage.file_path(1))?; + File::create(storage.file_path(2))?; + File::create(storage.file_path(3))?; + + // Create non-WAL files that should be ignored + File::create(temp_dir.path().join("0.tmp"))?; + File::create(temp_dir.path().join("4.tmp"))?; + + // Check files range + assert_eq!(storage.files_range()?, Some(1..=3)); + + Ok(()) + } } From b1cc16809b9246f632d9cd8736a283af247b98fb Mon Sep 17 00:00:00 2001 From: Kien Trinh <51135161+kien6034@users.noreply.github.com> Date: Thu, 17 Oct 2024 03:13:16 +0700 Subject: [PATCH 37/51] feat(cli): make pruning block interval an option (#11810) --- book/cli/reth/node.md | 2 -- crates/node/builder/src/launch/common.rs | 2 +- crates/node/core/src/args/pruning.rs | 4 ++-- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index ea10d9522161..34d32209ada9 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -616,8 +616,6 @@ Pruning: --block-interval Minimum pruning interval measured in blocks - [default: 0] - --prune.senderrecovery.full Prunes all sender recovery data diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 3e8f92e707c0..ac2339fa6cfc 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -1072,7 +1072,7 @@ mod tests { let node_config = NodeConfig { pruning: PruningArgs { full: true, - block_interval: 0, + block_interval: None, sender_recovery_full: false, sender_recovery_distance: None, sender_recovery_before: None, diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 2bee5ec164fe..c0a3ae375c0a 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -17,8 +17,8 @@ pub struct PruningArgs { pub full: bool, /// Minimum pruning interval measured in blocks. - #[arg(long, default_value_t = 0)] - pub block_interval: u64, + #[arg(long, default_value = None)] + pub block_interval: Option, // Sender Recovery /// Prunes all sender recovery data. 
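Since `--block-interval` is now optional, the intended behaviour is to override the configured value only when the flag was actually passed; a minimal sketch of that pattern (hypothetical field names; the real wiring lands in the later `fix: update block interval properly` patch):

struct PruneConfigSketch { block_interval: usize }

// Apply the CLI value on top of the config default only when it was provided.
fn apply_block_interval(config: &mut PruneConfigSketch, cli_value: Option<u64>) {
    if let Some(interval) = cli_value {
        config.block_interval = interval as usize;
    }
}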
From 82862fabd762f2a2b00dd77d140c2c6f5b63cadf Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 16 Oct 2024 22:33:57 +0200 Subject: [PATCH 38/51] primitives: rm redundant functions for `Transaction` (#11747) --- Cargo.lock | 3 + crates/chain-state/src/test_utils.rs | 2 +- crates/engine/util/Cargo.toml | 1 + crates/engine/util/src/reorg.rs | 1 + crates/ethereum/evm/Cargo.toml | 2 +- crates/ethereum/evm/src/execute.rs | 1 + crates/ethereum/evm/src/strategy.rs | 1 + crates/optimism/evm/Cargo.toml | 1 + crates/optimism/evm/src/execute.rs | 1 + crates/optimism/evm/src/strategy.rs | 1 + crates/optimism/rpc/Cargo.toml | 1 + crates/optimism/rpc/src/eth/transaction.rs | 1 + crates/primitives/src/transaction/mod.rs | 107 +----------------- crates/rpc/rpc-eth-api/Cargo.toml | 1 + .../rpc-eth-api/src/helpers/transaction.rs | 1 + crates/rpc/rpc-eth-types/src/receipt.rs | 1 + crates/rpc/rpc-eth-types/src/simulate.rs | 2 +- crates/rpc/rpc/src/txpool.rs | 1 + .../provider/src/providers/static_file/mod.rs | 1 + 19 files changed, 21 insertions(+), 109 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a1e11216b3f9..aa7c704b462e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7158,6 +7158,7 @@ dependencies = [ name = "reth-engine-util" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-engine", "eyre", @@ -8235,6 +8236,7 @@ dependencies = [ name = "reth-optimism-rpc" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types", @@ -8726,6 +8728,7 @@ dependencies = [ name = "reth-rpc-eth-api" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-dyn-abi", "alloy-eips", "alloy-json-rpc", diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 4b0bfcdd996a..ad5f2dbdbcc2 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -2,7 +2,7 @@ use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, }; -use alloy_consensus::TxEip1559; +use alloy_consensus::{Transaction as _, TxEip1559}; use alloy_primitives::{Address, BlockNumber, Sealable, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 20a0acb8d428..c11948b9405f 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -29,6 +29,7 @@ reth-trie.workspace = true # alloy alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, default-features = false } diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 611095101ff3..abfa23a57b32 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -1,5 +1,6 @@ //! Stream wrapper that simulates reorgs. 
+use alloy_consensus::Transaction; use alloy_primitives::U256; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index a19cbc018c72..7215efa68c60 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -29,6 +29,7 @@ revm-primitives.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true alloy-sol-types.workspace = true +alloy-consensus.workspace = true [dev-dependencies] reth-testing-utils.workspace = true @@ -38,7 +39,6 @@ reth-primitives = { workspace = true, features = ["secp256k1"] } secp256k1.workspace = true serde_json.workspace = true alloy-genesis.workspace = true -alloy-consensus.workspace = true [features] default = ["std"] diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 108e1f87c455..f712389fe121 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -5,6 +5,7 @@ use crate::{ EthEvmConfig, }; use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; +use alloy_consensus::Transaction as _; use alloy_primitives::{BlockNumber, U256}; use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardforks, MAINNET}; diff --git a/crates/ethereum/evm/src/strategy.rs b/crates/ethereum/evm/src/strategy.rs index 1c284e068d03..7a297be498a3 100644 --- a/crates/ethereum/evm/src/strategy.rs +++ b/crates/ethereum/evm/src/strategy.rs @@ -5,6 +5,7 @@ use crate::{ EthEvmConfig, }; use alloc::sync::Arc; +use alloy_consensus::Transaction as _; use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; use reth_consensus::ConsensusError; diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 53f9ae033dd3..0a22dcfddb44 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -25,6 +25,7 @@ reth-consensus.workspace = true # ethereum alloy-primitives.workspace = true op-alloy-consensus.workspace = true +alloy-consensus.workspace = true # Optimism reth-optimism-consensus.workspace = true diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index ee0d028e4251..f7da1c250d9b 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -3,6 +3,7 @@ use crate::{ l1::ensure_create2_deployer, OpChainSpec, OptimismBlockExecutionError, OptimismEvmConfig, }; +use alloy_consensus::Transaction as _; use alloy_primitives::{BlockNumber, U256}; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_evm::{ diff --git a/crates/optimism/evm/src/strategy.rs b/crates/optimism/evm/src/strategy.rs index 33770714c4cb..fe8164cc7cf3 100644 --- a/crates/optimism/evm/src/strategy.rs +++ b/crates/optimism/evm/src/strategy.rs @@ -1,6 +1,7 @@ //! 
Optimism block execution strategy, use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; +use alloy_consensus::Transaction as _; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_evm::{ diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 65dce7510b0e..90984998ac7e 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -38,6 +38,7 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types.workspace = true +alloy-consensus.workspace = true op-alloy-network.workspace = true op-alloy-rpc-types.workspace = true op-alloy-consensus.workspace = true diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index e161504f8405..b7575c24416a 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,5 +1,6 @@ //! Loads and formats OP transaction RPC response. +use alloy_consensus::Transaction as _; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types::TransactionInfo; use op_alloy_rpc_types::Transaction; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 0463cd9ea7e1..aeee4232e05d 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -249,33 +249,6 @@ impl Transaction { } } - /// Gets the transaction's value field. - pub const fn value(&self) -> U256 { - *match self { - Self::Legacy(TxLegacy { value, .. }) | - Self::Eip2930(TxEip2930 { value, .. }) | - Self::Eip1559(TxEip1559 { value, .. }) | - Self::Eip4844(TxEip4844 { value, .. }) | - Self::Eip7702(TxEip7702 { value, .. }) => value, - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { value, .. }) => value, - } - } - - /// Get the transaction's nonce. - pub const fn nonce(&self) -> u64 { - match self { - Self::Legacy(TxLegacy { nonce, .. }) | - Self::Eip2930(TxEip2930 { nonce, .. }) | - Self::Eip1559(TxEip1559 { nonce, .. }) | - Self::Eip4844(TxEip4844 { nonce, .. }) | - Self::Eip7702(TxEip7702 { nonce, .. }) => *nonce, - // Deposit transactions do not have nonces. - #[cfg(feature = "optimism")] - Self::Deposit(_) => 0, - } - } - /// Returns the [`AccessList`] of the transaction. /// /// Returns `None` for legacy transactions. @@ -301,19 +274,6 @@ impl Transaction { } } - /// Get the gas limit of the transaction. - pub const fn gas_limit(&self) -> u64 { - match self { - Self::Legacy(TxLegacy { gas_limit, .. }) | - Self::Eip1559(TxEip1559 { gas_limit, .. }) | - Self::Eip4844(TxEip4844 { gas_limit, .. }) | - Self::Eip7702(TxEip7702 { gas_limit, .. }) | - Self::Eip2930(TxEip2930 { gas_limit, .. }) => *gas_limit, - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { gas_limit, .. }) => *gas_limit, - } - } - /// Returns true if the tx supports dynamic fees pub const fn is_dynamic_fee(&self) -> bool { match self { @@ -324,40 +284,6 @@ impl Transaction { } } - /// Max fee per gas for eip1559 transaction, for legacy transactions this is `gas_price`. - /// - /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). - pub const fn max_fee_per_gas(&self) -> u128 { - match self { - Self::Legacy(TxLegacy { gas_price, .. }) | - Self::Eip2930(TxEip2930 { gas_price, .. }) => *gas_price, - Self::Eip1559(TxEip1559 { max_fee_per_gas, .. }) | - Self::Eip4844(TxEip4844 { max_fee_per_gas, .. }) | - Self::Eip7702(TxEip7702 { max_fee_per_gas, .. 
}) => *max_fee_per_gas, - // Deposit transactions buy their L2 gas on L1 and, as such, the L2 gas is not - // refundable. - #[cfg(feature = "optimism")] - Self::Deposit(_) => 0, - } - } - - /// Max priority fee per gas for eip1559 transaction, for legacy and eip2930 transactions this - /// is `None` - /// - /// This is also commonly referred to as the "Gas Tip Cap" (`GasTipCap`). - pub const fn max_priority_fee_per_gas(&self) -> Option { - match self { - Self::Legacy(_) | Self::Eip2930(_) => None, - Self::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) | - Self::Eip4844(TxEip4844 { max_priority_fee_per_gas, .. }) | - Self::Eip7702(TxEip7702 { max_priority_fee_per_gas, .. }) => { - Some(*max_priority_fee_per_gas) - } - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - /// Blob versioned hashes for eip4844 transaction, for legacy, eip1559, eip2930 and eip7702 /// transactions this is `None` /// @@ -373,18 +299,6 @@ impl Transaction { } } - /// Max fee per blob gas for eip4844 transaction [`TxEip4844`]. - /// - /// Returns `None` for non-eip4844 transactions. - /// - /// This is also commonly referred to as the "Blob Gas Fee Cap" (`BlobGasFeeCap`). - pub const fn max_fee_per_blob_gas(&self) -> Option { - match self { - Self::Eip4844(TxEip4844 { max_fee_per_blob_gas, .. }) => Some(*max_fee_per_blob_gas), - _ => None, - } - } - /// Returns the blob gas used for all blobs of the EIP-4844 transaction if it is an EIP-4844 /// transaction. /// @@ -394,25 +308,6 @@ impl Transaction { self.as_eip4844().map(TxEip4844::blob_gas) } - /// Return the max priority fee per gas if the transaction is an EIP-1559 transaction, and - /// otherwise return the gas price. - /// - /// # Warning - /// - /// This is different than the `max_priority_fee_per_gas` method, which returns `None` for - /// non-EIP-1559 transactions. - pub const fn priority_fee_or_price(&self) -> u128 { - match self { - Self::Legacy(TxLegacy { gas_price, .. }) | - Self::Eip2930(TxEip2930 { gas_price, .. }) => *gas_price, - Self::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) | - Self::Eip4844(TxEip4844 { max_priority_fee_per_gas, .. }) | - Self::Eip7702(TxEip7702 { max_priority_fee_per_gas, .. }) => *max_priority_fee_per_gas, - #[cfg(feature = "optimism")] - Self::Deposit(_) => 0, - } - } - /// Returns the effective gas price for the given base fee. /// /// If the transaction is a legacy or EIP2930 transaction, the gas price is returned. @@ -923,7 +818,7 @@ impl AlloyTransaction for Transaction { } } - fn value(&self) -> alloy_primitives::U256 { + fn value(&self) -> U256 { match self { Self::Legacy(tx) => tx.value(), Self::Eip2930(tx) => tx.value(), diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index e59ee39a694b..9d0f6cfd83d6 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -40,6 +40,7 @@ alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types.workspace = true alloy-rpc-types-mev.workspace = true +alloy-consensus.workspace = true # rpc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 4c2493717a04..54d60cb7abdf 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -1,6 +1,7 @@ //! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t. //! network. 
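The `use alloy_consensus::Transaction;` imports added across these files are what replace the removed inherent accessors; a hedged sketch of the resulting call-site pattern for any type implementing that trait:

use alloy_consensus::Transaction;

// With the trait in scope, nonce, gas limit and value are read through the
// `alloy_consensus::Transaction` impl instead of inherent methods.
fn describe_tx<T: Transaction>(tx: &T) -> String {
    format!("nonce={} gas_limit={} value={}", tx.nonce(), tx.gas_limit(), tx.value())
}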
+use alloy_consensus::Transaction; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Encodable2718; use alloy_network::TransactionBuilder; diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index e95c92f24ae6..2668291e2c8c 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,5 +1,6 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. +use alloy_consensus::Transaction; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types::{ AnyReceiptEnvelope, AnyTransactionReceipt, Log, ReceiptWithBloom, TransactionReceipt, diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 561aa360d86f..a673da967202 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,6 +1,6 @@ //! Utilities for serving `eth_simulateV1` -use alloy_consensus::{TxEip4844Variant, TxType, TypedTransaction}; +use alloy_consensus::{Transaction as _, TxEip4844Variant, TxType, TypedTransaction}; use alloy_primitives::Parity; use alloy_rpc_types::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 5e26935ca1ba..47aaac0bbfd5 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -1,5 +1,6 @@ use std::{collections::BTreeMap, marker::PhantomData}; +use alloy_consensus::Transaction; use alloy_primitives::Address; use alloy_rpc_types_txpool::{ TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus, diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 45b7816af02a..52eb6ed666ea 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -56,6 +56,7 @@ impl Deref for LoadedJar { mod tests { use super::*; use crate::{test_utils::create_test_provider_factory, HeaderProvider}; + use alloy_consensus::Transaction; use alloy_primitives::{BlockHash, TxNumber, B256, U256}; use rand::seq::SliceRandom; use reth_db::{ From d2ca8f3a2b4139617fcafb05318447653a2a68af Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Oct 2024 23:59:58 +0200 Subject: [PATCH 39/51] fix: update block interval properly (#11546) --- crates/config/src/config.rs | 49 ++++++++++++++++------------ crates/node/core/src/args/pruning.rs | 9 +++-- 2 files changed, 35 insertions(+), 23 deletions(-) diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index e4a7fc9677aa..4c4b6066a3e9 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -14,6 +14,9 @@ use std::{ const EXTENSION: &str = "toml"; +/// The default prune block interval +pub const DEFAULT_BLOCK_INTERVAL: usize = 5; + /// Configuration for the reth node. #[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq, Serialize)] #[serde(default)] @@ -383,7 +386,7 @@ pub struct PruneConfig { impl Default for PruneConfig { fn default() -> Self { - Self { block_interval: 5, segments: PruneModes::none() } + Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::none() } } } @@ -397,27 +400,33 @@ impl PruneConfig { /// if the corresponding value in this config is not set. 
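A standalone sketch of the merge rule this comment describes (toy types; the actual implementation follows right below, combining `Option` segments via `or` and guarding the interval with the default sentinel):

const DEFAULT_BLOCK_INTERVAL_SKETCH: usize = 5;

struct SegmentsSketch { receipts: Option<u64>, account_history: Option<u64> }
struct PruneSketch { block_interval: usize, segments: SegmentsSketch }

fn merge_sketch(ours: &mut PruneSketch, other: PruneSketch) {
    // Only adopt the other interval if ours is still the default.
    if ours.block_interval == DEFAULT_BLOCK_INTERVAL_SKETCH {
        ours.block_interval = other.block_interval;
    }
    // `Option::or` keeps an already-set value and falls back to `other`.
    ours.segments.receipts = ours.segments.receipts.or(other.segments.receipts);
    ours.segments.account_history = ours.segments.account_history.or(other.segments.account_history);
}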
pub fn merge(&mut self, other: Option) { let Some(other) = other else { return }; - - // Merge block_interval - if self.block_interval == 0 { - self.block_interval = other.block_interval; + let Self { + block_interval, + segments: + PruneModes { + sender_recovery, + transaction_lookup, + receipts, + account_history, + storage_history, + receipts_log_filter, + }, + } = other; + + // Merge block_interval, only update if it's the default interval + if self.block_interval == DEFAULT_BLOCK_INTERVAL { + self.block_interval = block_interval; } // Merge the various segment prune modes - self.segments.sender_recovery = - self.segments.sender_recovery.or(other.segments.sender_recovery); - self.segments.transaction_lookup = - self.segments.transaction_lookup.or(other.segments.transaction_lookup); - self.segments.receipts = self.segments.receipts.or(other.segments.receipts); - self.segments.account_history = - self.segments.account_history.or(other.segments.account_history); - self.segments.storage_history = - self.segments.storage_history.or(other.segments.storage_history); - - if self.segments.receipts_log_filter.0.is_empty() && - !other.segments.receipts_log_filter.0.is_empty() - { - self.segments.receipts_log_filter = other.segments.receipts_log_filter; + self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery); + self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup); + self.segments.receipts = self.segments.receipts.or(receipts); + self.segments.account_history = self.segments.account_history.or(account_history); + self.segments.storage_history = self.segments.storage_history.or(storage_history); + + if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() { + self.segments.receipts_log_filter = receipts_log_filter; } } } @@ -961,7 +970,7 @@ receipts = 'full' // Check that the configuration has been merged. Any configuration present in config1 // should not be overwritten by config2 - assert_eq!(config1.block_interval, 5); + assert_eq!(config1.block_interval, 10); assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full)); assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full)); assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000))); diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index c0a3ae375c0a..cd852b9e1685 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -2,7 +2,7 @@ use crate::args::error::ReceiptsLogError; use alloy_primitives::{Address, BlockNumber}; -use clap::Args; +use clap::{builder::RangedU64ValueParser, Args}; use reth_chainspec::EthChainSpec; use reth_config::config::PruneConfig; use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE}; @@ -17,7 +17,7 @@ pub struct PruningArgs { pub full: bool, /// Minimum pruning interval measured in blocks. - #[arg(long, default_value = None)] + #[arg(long, value_parser = RangedU64ValueParser::::new().range(1..),)] pub block_interval: Option, // Sender Recovery @@ -99,7 +99,7 @@ impl PruningArgs { // If --full is set, use full node defaults. if self.full { config = PruneConfig { - block_interval: 5, + block_interval: config.block_interval, segments: PruneModes { sender_recovery: Some(PruneMode::Full), transaction_lookup: None, @@ -123,6 +123,9 @@ impl PruningArgs { } // Override with any explicitly set prune.* flags. 
+ if let Some(block_interval) = self.block_interval { + config.block_interval = block_interval as usize; + } if let Some(mode) = self.sender_recovery_prune_mode() { config.segments.sender_recovery = Some(mode); } From 5a82f20a294c817d7070ad8fa0118a18400f7b63 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Oct 2024 00:03:33 +0200 Subject: [PATCH 40/51] chore: rm 1 usage of optimism feature (#11813) --- crates/storage/codecs/src/alloy/transaction/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/storage/codecs/src/alloy/transaction/mod.rs b/crates/storage/codecs/src/alloy/transaction/mod.rs index 86edfee5bcca..5b1d173a528e 100644 --- a/crates/storage/codecs/src/alloy/transaction/mod.rs +++ b/crates/storage/codecs/src/alloy/transaction/mod.rs @@ -15,8 +15,6 @@ mod tests { // this check is to ensure we do not inadvertently add too many fields to a struct which would // expand the flags field and break backwards compatibility - #[cfg(feature = "optimism")] - use crate::alloy::transaction::optimism::TxDeposit; use crate::alloy::transaction::{ eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, legacy::TxLegacy, @@ -34,6 +32,6 @@ mod tests { #[cfg(feature = "optimism")] #[test] fn test_ensure_backwards_compatibility_optimism() { - assert_eq!(TxDeposit::bitflag_encoded_bytes(), 2); + assert_eq!(crate::alloy::transaction::optimism::TxDeposit::bitflag_encoded_bytes(), 2); } } From 025cb3b70e02ee99295a5f98a117906d2938c42d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 17 Oct 2024 00:07:27 +0200 Subject: [PATCH 41/51] primitive-traits: rm redundant `EMPTY_ROOT_HASH` definition (#11811) Co-authored-by: Matthias Seitz --- Cargo.lock | 2 ++ crates/blockchain-tree/src/blockchain_tree.rs | 4 ++-- crates/chain-state/src/test_utils.rs | 4 ++-- crates/ethereum/evm/src/execute.rs | 5 ++--- crates/primitives-traits/src/constants/mod.rs | 5 +---- crates/primitives/src/proofs.rs | 3 ++- crates/rpc/rpc-eth-api/src/helpers/pending_block.rs | 3 ++- crates/trie/db/Cargo.toml | 2 ++ crates/trie/db/tests/proof.rs | 3 ++- crates/trie/db/tests/trie.rs | 3 ++- crates/trie/db/tests/witness.rs | 3 ++- crates/trie/trie/Cargo.toml | 1 + crates/trie/trie/src/trie.rs | 2 +- crates/trie/trie/src/witness.rs | 2 +- 14 files changed, 24 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aa7c704b462e..bb2577577f83 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9119,6 +9119,7 @@ dependencies = [ name = "reth-trie" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", "auto_impl", @@ -9174,6 +9175,7 @@ dependencies = [ name = "reth-trie-db" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", "auto_impl", diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 4bed718aa0a6..71a58aa56288 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1376,7 +1376,7 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_consensus::TxEip1559; + use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{keccak256, Address, Sealable, B256}; use assert_matches::assert_matches; @@ -1388,7 +1388,7 @@ mod tests { use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::{ - 
constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_ROOT_HASH}, + constants::EIP1559_INITIAL_BASE_FEE, proofs::{calculate_receipt_root, calculate_transaction_root}, revm_primitives::AccountInfo, Account, BlockBody, Header, Signature, Transaction, TransactionSigned, diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index ad5f2dbdbcc2..a820bb5cf018 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -2,7 +2,7 @@ use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, }; -use alloy_consensus::{Transaction as _, TxEip1559}; +use alloy_consensus::{Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; use alloy_primitives::{Address, BlockNumber, Sealable, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; @@ -10,7 +10,7 @@ use rand::{thread_rng, Rng}; use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_ROOT_HASH}, + constants::EIP1559_INITIAL_BASE_FEE, proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, BlockBody, Header, Receipt, Receipts, Requests, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index f712389fe121..9c7748a561f0 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -494,7 +494,7 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_consensus::TxLegacy; + use alloy_consensus::{TxLegacy, EMPTY_ROOT_HASH}; use alloy_eips::{ eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, @@ -503,8 +503,7 @@ mod tests { use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256}; use reth_chainspec::{ChainSpecBuilder, ForkCondition}; use reth_primitives::{ - constants::{EMPTY_ROOT_HASH, ETH_TO_WEI}, - public_key_to_address, Account, Block, BlockBody, Transaction, + constants::ETH_TO_WEI, public_key_to_address, Account, Block, BlockBody, Transaction, }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index ed5c893dd6c2..d40abdd64ba7 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,5 +1,6 @@ //! 
Ethereum protocol-related constants +use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{address, b256, Address, B256, U256}; use core::time::Duration; @@ -122,10 +123,6 @@ pub const KECCAK_EMPTY: B256 = pub const EMPTY_OMMER_ROOT_HASH: B256 = b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"); -/// Root hash of an empty trie: `0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421` -pub const EMPTY_ROOT_HASH: B256 = - b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"); - /// From address from Optimism system txs: `0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001` pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeaddeaddeaddead0001"); diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index a12a5d6be89f..dc814804ec81 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -65,7 +65,8 @@ pub fn calculate_ommers_root(ommers: &[Header]) -> B256 { #[cfg(test)] mod tests { use super::*; - use crate::{constants::EMPTY_ROOT_HASH, Block}; + use crate::Block; + use alloy_consensus::EMPTY_ROOT_HASH; use alloy_genesis::GenesisAccount; use alloy_primitives::{b256, hex_literal::hex, Address, U256}; use alloy_rlp::Decodable; diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 26e0ffc7412a..81c6a567846b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -5,6 +5,7 @@ use std::time::{Duration, Instant}; use crate::{EthApiTypes, FromEthApiError, FromEvmError}; +use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rpc_types::BlockNumberOrTag; use futures::Future; @@ -12,7 +13,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_ROOT_HASH}, + constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, proofs::calculate_transaction_root, revm_primitives::{ BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index 6d322ba3ff67..a0e1acbce352 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -58,6 +58,8 @@ reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } reth-trie = { workspace = true, features = ["test-utils"] } reth-node-types.workspace = true +alloy-consensus.workspace = true + # trie triehash = "0.8" diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs index 5ffa6729b49a..79a2ce96fced 100644 --- a/crates/trie/db/tests/proof.rs +++ b/crates/trie/db/tests/proof.rs @@ -1,9 +1,10 @@ #![allow(missing_docs)] +use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use alloy_rlp::EMPTY_STRING_CODE; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET}; -use reth_primitives::{constants::EMPTY_ROOT_HASH, Account}; +use reth_primitives::Account; use reth_provider::test_utils::{create_test_provider_factory, insert_genesis}; use reth_trie::{proof::Proof, Nibbles}; use reth_trie_common::{AccountProof, StorageProof}; diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index 59fffec58d06..f5823404c899 100644 --- 
a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -1,5 +1,6 @@ #![allow(missing_docs)] +use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{hex_literal::hex, keccak256, Address, B256, U256}; use proptest::{prelude::ProptestConfig, proptest}; use proptest_arbitrary_interop::arb; @@ -8,7 +9,7 @@ use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, transaction::DbTxMut, }; -use reth_primitives::{constants::EMPTY_ROOT_HASH, Account, StorageEntry}; +use reth_primitives::{Account, StorageEntry}; use reth_provider::{ test_utils::create_test_provider_factory, DatabaseProviderRW, StorageTrieWriter, TrieWriter, }; diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs index 20f8cfbb9081..8e00472b4738 100644 --- a/crates/trie/db/tests/witness.rs +++ b/crates/trie/db/tests/witness.rs @@ -1,5 +1,6 @@ #![allow(missing_docs)] +use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, @@ -8,7 +9,7 @@ use alloy_primitives::{ use alloy_rlp::EMPTY_STRING_CODE; use reth_db::{cursor::DbCursorRW, tables}; use reth_db_api::transaction::DbTxMut; -use reth_primitives::{constants::EMPTY_ROOT_HASH, Account, StorageEntry}; +use reth_primitives::{Account, StorageEntry}; use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; use reth_trie::{proof::Proof, witness::TrieWitness, HashedPostState, HashedStorage, StateRoot}; use reth_trie_db::{DatabaseProof, DatabaseStateRoot, DatabaseTrieWitness}; diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index d0f0fa092a77..31b5ac3e25cd 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -24,6 +24,7 @@ revm.workspace = true # alloy alloy-rlp.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # tracing tracing.workspace = true diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index b8aa133d6fa3..1bf8cf1ce797 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -9,10 +9,10 @@ use crate::{ walker::TrieWalker, HashBuilder, Nibbles, TrieAccount, }; +use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{keccak256, Address, B256}; use alloy_rlp::{BufMut, Encodable}; use reth_execution_errors::{StateRootError, StorageRootError}; -use reth_primitives::constants::EMPTY_ROOT_HASH; use tracing::trace; #[cfg(feature = "metrics")] diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index f3b70e85ad6f..39d82a7bda7a 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -5,6 +5,7 @@ use crate::{ trie_cursor::TrieCursorFactory, HashedPostState, }; +use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, @@ -13,7 +14,6 @@ use alloy_primitives::{ use alloy_rlp::{BufMut, Decodable, Encodable}; use itertools::{Either, Itertools}; use reth_execution_errors::{StateProofError, TrieWitnessError}; -use reth_primitives::constants::EMPTY_ROOT_HASH; use reth_trie_common::{ BranchNode, HashBuilder, Nibbles, StorageMultiProof, TrieAccount, TrieNode, CHILD_INDEX_RANGE, }; From 24287e8562ab5e70d473e0c0b87f800277a84d8a Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 17 Oct 2024 01:02:44 +0200 Subject: [PATCH 42/51] primitives: use `EMPTY_ROOT_HASH` when possible (#11822) --- Cargo.lock | 1 + crates/chainspec/src/spec.rs | 4 +--- crates/consensus/common/src/validation.rs | 6 ++--- 
crates/net/eth-wire-types/src/header.rs | 28 ++++++----------------- crates/optimism/primitives/Cargo.toml | 1 + crates/optimism/primitives/src/bedrock.rs | 5 ++-- 6 files changed, 16 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bb2577577f83..740810d1b1b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8227,6 +8227,7 @@ dependencies = [ name = "reth-optimism-primitives" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "reth-primitives", "reth-primitives-traits", diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index e6e8a67d75d3..a8bae966b58e 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -1814,9 +1814,7 @@ Post-merge hard forks (timestamp based): hex!("078dc6061b1d8eaa8493384b59c9c65ceb917201221d08b80c4de6770b6ec7e7").into(); assert_eq!(chainspec.genesis_header().state_root, expected_state_root); - let expected_withdrawals_hash: B256 = - hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(); - assert_eq!(chainspec.genesis_header().withdrawals_root, Some(expected_withdrawals_hash)); + assert_eq!(chainspec.genesis_header().withdrawals_root, Some(EMPTY_ROOT_HASH)); let expected_hash: B256 = hex!("1fc027d65f820d3eef441ebeec139ebe09e471cf98516dce7b5643ccb27f418c").into(); diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 88a4cabe96c5..df66a00d1dfe 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -300,7 +300,7 @@ pub fn validate_against_parent_4844( #[cfg(test)] mod tests { use super::*; - use alloy_consensus::TxEip4844; + use alloy_consensus::{TxEip4844, EMPTY_ROOT_HASH}; use alloy_primitives::{ hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, Parity, Sealable, U256, }; @@ -444,8 +444,8 @@ mod tests { ommers_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(), beneficiary: hex!("4675c7e5baafbffbca748158becba61ef3b0a263").into(), state_root: hex!("8337403406e368b3e40411138f4868f79f6d835825d55fd0c2f6e17b1a3948e9").into(), - transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), + transactions_root: EMPTY_ROOT_HASH, + receipts_root: EMPTY_ROOT_HASH, logs_bloom: hex!("002400000000004000220000800002000000000000000000000000000000100000000000000000100000000000000021020000000800000006000000002100040000000c0004000000000008000008200000000000000000000000008000000001040000020000020000002000000800000002000020000000022010000000000000010002001000000000020200000000000001000200880000004000000900020000000000020000000040000000000000000000000000000080000000000001000002000000000000012000200020000000000000001000000000000020000010321400000000100000000000000000000000000000400000000000000000").into(), difficulty: U256::ZERO, // total difficulty: 0xc70d815d562d3cfa955).into(), number: 0xf21d20, diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs index 4075a4a92fb7..7ecfc802d8a8 100644 --- a/crates/net/eth-wire-types/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -87,6 +87,7 @@ impl From for bool { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{address, b256, bloom, bytes, hex, Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::Header; @@ 
-201,10 +202,7 @@ mod tests { gas_used: 0x0125b8, timestamp: 0x079e, extra_data: Bytes::from_str("42").unwrap(), - mix_hash: B256::from_str( - "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - ) - .unwrap(), + mix_hash: EMPTY_ROOT_HASH, base_fee_per_gas: Some(0x09), withdrawals_root: Some( B256::from_str("27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973") @@ -254,16 +252,10 @@ mod tests { gas_used: 0x02a865, timestamp: 0x079e, extra_data: Bytes::from(vec![0x42]), - mix_hash: B256::from_str( - "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - ) - .unwrap(), + mix_hash: EMPTY_ROOT_HASH, nonce: 0u64.into(), base_fee_per_gas: Some(9), - withdrawals_root: Some( - B256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - .unwrap(), - ), + withdrawals_root: Some(EMPTY_ROOT_HASH), blob_gas_used: Some(0x020000), excess_blob_gas: Some(0), parent_beacon_block_root: None, @@ -291,12 +283,8 @@ mod tests { ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), beneficiary: address!("f97e180c050e5ab072211ad2c213eb5aee4df134"), state_root: b256!("ec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068a"), - transactions_root: b256!( - "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - ), - receipts_root: b256!( - "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - ), + transactions_root: EMPTY_ROOT_HASH, + receipts_root: EMPTY_ROOT_HASH, logs_bloom: Default::default(), difficulty: U256::from(0), number: 0x30598, @@ -307,9 +295,7 @@ mod tests { mix_hash: b256!("70ccadc40b16e2094954b1064749cc6fbac783c1712f1b271a8aac3eda2f2325"), nonce: 0u64.into(), base_fee_per_gas: Some(7), - withdrawals_root: Some(b256!( - "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - )), + withdrawals_root: Some(EMPTY_ROOT_HASH), parent_beacon_block_root: None, blob_gas_used: Some(0), excess_blob_gas: Some(0x1600000), diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 73a3bab1e445..2054de7305ba 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -15,3 +15,4 @@ workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index 4ece12ad679e..1a347aecafe9 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ b/crates/optimism/primitives/src/bedrock.rs @@ -1,5 +1,6 @@ //! OP mainnet bedrock related data. 
+use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{address, b256, bloom, bytes, B256, B64, U256}; use reth_primitives::Header; use reth_primitives_traits::constants::EMPTY_OMMER_ROOT_HASH; @@ -73,10 +74,10 @@ pub const BEDROCK_HEADER: Header = Header { nonce: B64::ZERO, number: 105235063, parent_hash: b256!("21a168dfa5e727926063a28ba16fd5ee84c814e847c81a699c7a0ea551e4ca50"), - receipts_root: b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + receipts_root: EMPTY_ROOT_HASH, state_root: b256!("920314c198da844a041d63bf6cbe8b59583165fd2229d1b3f599da812fd424cb"), timestamp: 1686068903, - transactions_root: b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + transactions_root: EMPTY_ROOT_HASH, ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("4200000000000000000000000000000000000011"), withdrawals_root: None, From 1b97b1d942c671831bb81c0ccc53a9018c9febc5 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 17 Oct 2024 12:00:25 +0900 Subject: [PATCH 43/51] fix(staged-sync): prevent `StaticFileProducer` from running with an unwinded target on legacy engine (#11717) --- .../beacon/src/engine/hooks/static_file.rs | 18 +++++++++++++----- crates/stages/api/src/pipeline/mod.rs | 4 ++++ .../static-file/src/static_file_producer.rs | 14 ++++++++++++-- 3 files changed, 29 insertions(+), 7 deletions(-) diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 8a5a28f95741..89231ed55825 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -9,7 +9,8 @@ use futures::FutureExt; use reth_errors::RethResult; use reth_primitives::static_file::HighestStaticFiles; use reth_provider::{ - BlockReader, DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, + BlockReader, ChainStateBlockReader, DatabaseProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, }; use reth_static_file::{StaticFileProducer, StaticFileProducerWithResult}; use reth_tasks::TaskSpawner; @@ -31,8 +32,9 @@ pub struct StaticFileHook { impl StaticFileHook where Provider: StaticFileProviderFactory - + DatabaseProviderFactory - + 'static, + + DatabaseProviderFactory< + Provider: StageCheckpointReader + BlockReader + ChainStateBlockReader, + > + 'static, { /// Create a new instance pub fn new( @@ -104,6 +106,11 @@ where return Ok(None) }; + let finalized_block_number = locked_static_file_producer + .last_finalized_block()? 
+ .map(|on_disk| finalized_block_number.min(on_disk)) + .unwrap_or(finalized_block_number); + let targets = locked_static_file_producer.get_static_file_targets(HighestStaticFiles { headers: Some(finalized_block_number), @@ -137,8 +144,9 @@ where impl EngineHook for StaticFileHook where Provider: StaticFileProviderFactory - + DatabaseProviderFactory - + 'static, + + DatabaseProviderFactory< + Provider: StageCheckpointReader + BlockReader + ChainStateBlockReader, + > + 'static, { fn name(&self) -> &'static str { "StaticFile" diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 1f6d9341ad6d..14225a595285 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -276,6 +276,10 @@ impl Pipeline { // Unwind stages in reverse order of execution let unwind_pipeline = self.stages.iter_mut().rev(); + // Legacy Engine: This prevents a race condition in which the `StaticFileProducer` could + // attempt to proceed with a finalized block which has been unwinded + let _locked_sf_producer = self.static_file_producer.lock(); + let mut provider_rw = self.provider_factory.database_provider_rw()?; for stage in unwind_pipeline { diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 32565fd6dfa6..2c442aedfa34 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -5,8 +5,8 @@ use alloy_primitives::BlockNumber; use parking_lot::Mutex; use rayon::prelude::*; use reth_provider::{ - providers::StaticFileWriter, BlockReader, DBProvider, DatabaseProviderFactory, - StageCheckpointReader, StaticFileProviderFactory, + providers::StaticFileWriter, BlockReader, ChainStateBlockReader, DBProvider, + DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use reth_stages_types::StageId; @@ -106,6 +106,16 @@ impl StaticFileProducerInner { } } +impl StaticFileProducerInner +where + Provider: StaticFileProviderFactory + DatabaseProviderFactory, +{ + /// Returns the last finalized block number on disk. 
+ pub fn last_finalized_block(&self) -> ProviderResult> { + self.provider.database_provider_ro()?.last_finalized_block_number() + } +} + impl StaticFileProducerInner where Provider: StaticFileProviderFactory From b57a5607cbc47f4995fa567c27351f342e08147e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Oct 2024 05:05:26 +0200 Subject: [PATCH 44/51] test: make provider compile with cargo t (#11817) --- crates/storage/provider/Cargo.toml | 2 ++ .../provider/src/providers/blockchain_provider.rs | 9 ++------- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 7ef827a37042..00e1c9f098df 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -73,8 +73,10 @@ rayon.workspace = true [dev-dependencies] reth-db = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } +reth-chain-state = { workspace = true, features = ["test-utils"] } reth-trie = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true +reth-ethereum-engine-primitives.workspace = true parking_lot.workspace = true tempfile.workspace = true diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 3013be0603c8..9e6f32b33a3b 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1933,7 +1933,7 @@ mod tests { /// This simulates a RPC method having a different view than when its database transaction was /// created. fn persist_block_after_db_tx_creation( - provider: Arc>, + provider: BlockchainProvider2, block_number: BlockNumber, ) { let hook_provider = provider.clone(); @@ -3142,7 +3142,6 @@ mod tests { ..Default::default() }, )?; - let provider = Arc::new(provider); $( // Since data moves for each tried method, need to recalculate everything @@ -3257,7 +3256,6 @@ mod tests { ..Default::default() }, )?; - let provider = Arc::new(provider); $( // Since data moves for each tried method, need to recalculate everything @@ -3383,7 +3381,6 @@ mod tests { ..Default::default() }, )?; - let provider = Arc::new(provider); let mut in_memory_blocks: std::collections::VecDeque<_> = in_memory_blocks.into(); @@ -3685,8 +3682,6 @@ mod tests { }, )?; - let provider = Arc::new(provider); - // Old implementation was querying the database first. This is problematic, if there are // changes AFTER the database transaction is created. 
let old_transaction_hash_fn = @@ -3739,7 +3734,7 @@ mod tests { correct_transaction_hash_fn( to_be_persisted_tx.hash(), provider.canonical_in_memory_state(), - provider.database.clone() + provider.database ), Ok(Some(to_be_persisted_tx)) ); From e828b34ed1332832bcebb421a98684a894c49993 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Oct 2024 05:06:23 +0200 Subject: [PATCH 45/51] chore: rm features from test utils (#11816) --- .../storage/provider/src/test_utils/blocks.rs | 140 +++++++++--------- 1 file changed, 70 insertions(+), 70 deletions(-) diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index e6a88618792d..57e111d674ba 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -201,20 +201,20 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { .revert_account_info(number, account2, Some(None)) .state_storage(account1, HashMap::from_iter([(slot, (U256::ZERO, U256::from(10)))])) .build(), - vec![vec![Some(Receipt { - tx_type: TxType::Eip2930, - success: true, - cumulative_gas_used: 300, - logs: vec![Log::new_unchecked( - Address::new([0x60; 20]), - vec![B256::with_last_byte(1), B256::with_last_byte(2)], - Bytes::default(), - )], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })]] + vec![vec![Some( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: TxType::Eip2930, + success: true, + cumulative_gas_used: 300, + logs: vec![Log::new_unchecked( + Address::new([0x60; 20]), + vec![B256::with_last_byte(1), B256::with_last_byte(2)], + Bytes::default(), + )], + ..Default::default() + }, + )]] .into(), number, Vec::new(), @@ -263,20 +263,20 @@ fn block2( ) .revert_storage(number, account, Vec::from([(slot, U256::from(10))])) .build(), - vec![vec![Some(Receipt { - tx_type: TxType::Eip1559, - success: false, - cumulative_gas_used: 400, - logs: vec![Log::new_unchecked( - Address::new([0x61; 20]), - vec![B256::with_last_byte(3), B256::with_last_byte(4)], - Bytes::default(), - )], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })]] + vec![vec![Some( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: TxType::Eip1559, + success: false, + cumulative_gas_used: 400, + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], + ..Default::default() + }, + )]] .into(), number, Vec::new(), @@ -334,20 +334,20 @@ fn block3( } let execution_outcome = ExecutionOutcome::new( bundle_state_builder.build(), - vec![vec![Some(Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 400, - logs: vec![Log::new_unchecked( - Address::new([0x61; 20]), - vec![B256::with_last_byte(3), B256::with_last_byte(4)], - Bytes::default(), - )], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })]] + vec![vec![Some( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 400, + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], + ..Default::default() + }, + )]] .into(), number, Vec::new(), 
@@ -425,20 +425,20 @@ fn block4( } let execution_outcome = ExecutionOutcome::new( bundle_state_builder.build(), - vec![vec![Some(Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 400, - logs: vec![Log::new_unchecked( - Address::new([0x61; 20]), - vec![B256::with_last_byte(3), B256::with_last_byte(4)], - Bytes::default(), - )], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })]] + vec![vec![Some( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 400, + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], + ..Default::default() + }, + )]] .into(), number, Vec::new(), @@ -513,20 +513,20 @@ fn block5( } let execution_outcome = ExecutionOutcome::new( bundle_state_builder.build(), - vec![vec![Some(Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 400, - logs: vec![Log::new_unchecked( - Address::new([0x61; 20]), - vec![B256::with_last_byte(3), B256::with_last_byte(4)], - Bytes::default(), - )], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })]] + vec![vec![Some( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 400, + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], + ..Default::default() + }, + )]] .into(), number, Vec::new(), From e3e83b7e71c7d9f6a34b9d7b407b72ca3eda9b4e Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 17 Oct 2024 10:03:25 +0200 Subject: [PATCH 46/51] docs(trie): revealed sparse trie invariants (#11825) --- crates/trie/sparse/src/trie.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 2edaaf76b274..d8f4280e875a 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -69,6 +69,13 @@ impl SparseTrie { } /// The representation of revealed sparse trie. +/// +/// ## Invariants +/// +/// - The root node is always present in `nodes` collection. +/// - Each leaf entry in `nodes` collection must have a corresponding entry in `values` collection. +/// The opposite is also true. +/// - All keys in `values` collection are full leaf paths. #[derive(PartialEq, Eq)] pub struct RevealedSparseTrie { /// All trie nodes. 
From 63a75fdd95ea71028d9d65c277a514f7e518676e Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 17 Oct 2024 10:07:45 +0200 Subject: [PATCH 47/51] fix(trie): intermediate trie node hashes (#11826) --- crates/trie/sparse/src/trie.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index d8f4280e875a..e83522ca8904 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -345,9 +345,7 @@ impl RevealedSparseTrie { } else { let value = self.values.get(&path).unwrap(); let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); - if let Some(node_hash) = rlp_node.as_hash() { - *hash = Some(node_hash); - } + *hash = rlp_node.as_hash(); rlp_node } } @@ -360,9 +358,7 @@ impl RevealedSparseTrie { let (_, child) = rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); - if let Some(node_hash) = rlp_node.as_hash() { - *hash = Some(node_hash); - } + *hash = rlp_node.as_hash(); rlp_node } else { path_stack.extend([path, child_path]); // need to get rlp node for child first @@ -400,9 +396,7 @@ impl RevealedSparseTrie { self.rlp_buf.clear(); let rlp_node = BranchNodeRef::new(&branch_value_stack_buf, *state_mask) .rlp(&mut self.rlp_buf); - if let Some(node_hash) = rlp_node.as_hash() { - *hash = Some(node_hash); - } + *hash = rlp_node.as_hash(); rlp_node } }; From 491f154c3437d36d3a8add91f76efce8eea28a63 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 17 Oct 2024 11:30:26 +0200 Subject: [PATCH 48/51] primitives-traits: rm redundant definitions of `EMPTY_OMMER_ROOT_HASH` (#11820) --- Cargo.lock | 4 ++++ crates/ethereum/consensus/Cargo.toml | 1 + crates/ethereum/consensus/src/lib.rs | 2 +- crates/ethereum/payload/Cargo.toml | 1 + crates/ethereum/payload/src/lib.rs | 3 ++- crates/optimism/consensus/Cargo.toml | 1 + crates/optimism/consensus/src/lib.rs | 5 ++--- crates/optimism/payload/Cargo.toml | 1 + crates/optimism/payload/src/builder.rs | 3 ++- crates/optimism/primitives/src/bedrock.rs | 3 +-- crates/primitives-traits/src/constants/mod.rs | 4 ---- crates/primitives/src/lib.rs | 4 ++-- crates/primitives/src/proofs.rs | 4 ++-- crates/rpc/rpc-eth-api/src/helpers/pending_block.rs | 4 ++-- crates/rpc/rpc-types-compat/src/engine/payload.rs | 3 ++- 15 files changed, 24 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 740810d1b1b8..e434e80cfde2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7270,6 +7270,7 @@ dependencies = [ name = "reth-ethereum-consensus" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "reth-chainspec", "reth-consensus", @@ -7320,6 +7321,7 @@ dependencies = [ name = "reth-ethereum-payload-builder" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "reth-basic-payload-builder", "reth-chain-state", @@ -8086,6 +8088,7 @@ dependencies = [ name = "reth-optimism-consensus" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "reth-chainspec", "reth-consensus", @@ -8194,6 +8197,7 @@ dependencies = [ name = "reth-optimism-payload-builder" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", diff --git a/crates/ethereum/consensus/Cargo.toml b/crates/ethereum/consensus/Cargo.toml index 02d217b63b27..af934d3e2b62 100644 --- a/crates/ethereum/consensus/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml 
@@ -19,5 +19,6 @@ reth-consensus.workspace = true # alloy alloy-primitives.workspace = true +alloy-consensus.workspace = true tracing.workspace = true diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index e74f3498fa5f..8f2a8a720427 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,6 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; @@ -19,7 +20,6 @@ use reth_consensus_common::validation::{ }; use reth_primitives::{ constants::MINIMUM_GAS_LIMIT, BlockWithSenders, Header, SealedBlock, SealedHeader, - EMPTY_OMMER_ROOT_HASH, }; use std::{fmt::Debug, sync::Arc, time::SystemTime}; diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index f169d58f7e8d..ce37a4f8ea4f 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -34,6 +34,7 @@ revm-primitives.workspace = true # alloy alloy-primitives.workspace = true +alloy-consensus.workspace = true # misc tracing.workspace = true diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 248aa3486de4..dcf54fc02489 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -9,6 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, @@ -26,7 +27,7 @@ use reth_primitives::{ constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, proofs::{self, calculate_requests_root}, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, EthereumHardforks, Header, Receipt, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, EthereumHardforks, Header, Receipt, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index f5f061c59929..e2520c89340d 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -25,6 +25,7 @@ reth-optimism-chainspec.workspace = true # ethereum alloy-primitives.workspace = true +alloy-consensus.workspace = true tracing.workspace = true diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index fe67ff1bcd9e..16c1d5d37d7c 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,6 +9,7 @@ // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")] +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; @@ -20,9 +21,7 @@ use reth_consensus_common::validation::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OptimismHardforks; -use reth_primitives::{ - BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, -}; +use reth_primitives::{BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader}; use std::{sync::Arc, time::SystemTime}; mod proof; diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index e1d6fe47d291..46cc82edb6ce 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -41,6 +41,7 @@ alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true revm-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true # misc tracing.workspace = true diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 0a8dcdb12449..e590635f524e 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::U256; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; @@ -16,7 +17,7 @@ use reth_primitives::{ constants::BEACON_NONCE, proofs, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Header, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, Header, Receipt, TxType, }; use reth_provider::StateProviderFactory; use reth_revm::database::StateProviderDatabase; diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index 1a347aecafe9..bd42298588fe 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ b/crates/optimism/primitives/src/bedrock.rs @@ -1,9 +1,8 @@ //! OP mainnet bedrock related data. 
-use alloy_consensus::EMPTY_ROOT_HASH; +use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, B256, B64, U256}; use reth_primitives::Header; -use reth_primitives_traits::constants::EMPTY_OMMER_ROOT_HASH; /// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, /// replayed in blocks: diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index d40abdd64ba7..5d64b911b60a 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -119,10 +119,6 @@ pub const DEV_GENESIS_HASH: B256 = pub const KECCAK_EMPTY: B256 = b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); -/// Ommer root of empty list: `0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347` -pub const EMPTY_OMMER_ROOT_HASH: B256 = - b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"); - /// From address from Optimism system txs: `0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001` pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeaddeaddeaddead0001"); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index ec65cbf20e52..a59e72bbd556 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -40,8 +40,8 @@ pub use block::{ #[cfg(feature = "reth-codec")] pub use compression::*; pub use constants::{ - DEV_GENESIS_HASH, EMPTY_OMMER_ROOT_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, - MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + DEV_GENESIS_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, MAINNET_GENESIS_HASH, + SEPOLIA_GENESIS_HASH, }; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index dc814804ec81..4efbb588e10d 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,10 +1,10 @@ //! Helper function for calculating Merkle proofs and hashes. 
use crate::{ - constants::EMPTY_OMMER_ROOT_HASH, Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, - Request, TransactionSigned, Withdrawal, + Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Request, TransactionSigned, Withdrawal, }; use alloc::vec::Vec; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_eips::{eip2718::Encodable2718, eip7685::Encodable7685}; use alloy_primitives::{keccak256, B256}; use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 81c6a567846b..832cf17055a5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -5,7 +5,7 @@ use std::time::{Duration, Instant}; use crate::{EthApiTypes, FromEthApiError, FromEvmError}; -use alloy_consensus::EMPTY_ROOT_HASH; +use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rpc_types::BlockNumberOrTag; use futures::Future; @@ -20,7 +20,7 @@ use reth_primitives::{ ResultAndState, SpecId, }, Block, BlockBody, Header, Receipt, Requests, SealedBlockWithSenders, SealedHeader, - TransactionSignedEcRecovered, EMPTY_OMMER_ROOT_HASH, + TransactionSignedEcRecovered, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 84943b60e208..e6f2f97ca75c 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -1,6 +1,7 @@ //! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in //! 
Ethereum's Engine +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ @@ -9,7 +10,7 @@ use alloy_rpc_types_engine::{ ExecutionPayloadV3, ExecutionPayloadV4, PayloadError, }; use reth_primitives::{ - constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE}, + constants::MAXIMUM_EXTRA_DATA_SIZE, proofs::{self}, Block, BlockBody, Header, Request, SealedBlock, TransactionSigned, Withdrawals, }; From e4c6294fc5531513060887d822c5cc591d48ba8d Mon Sep 17 00:00:00 2001 From: frisitano Date: Thu, 17 Oct 2024 23:04:49 +0800 Subject: [PATCH 49/51] feat: introduce StateCommitment type --- Cargo.lock | 5 ++ crates/ethereum/node/Cargo.toml | 1 + crates/ethereum/node/src/node.rs | 2 + crates/exex/test-utils/Cargo.toml | 1 + crates/exex/test-utils/src/lib.rs | 1 + crates/node/builder/src/node.rs | 2 + crates/node/types/Cargo.toml | 3 +- crates/node/types/src/lib.rs | 49 +++++++++++++------ crates/optimism/node/Cargo.toml | 1 + crates/optimism/node/src/node.rs | 2 + crates/storage/provider/src/test_utils/mod.rs | 1 + crates/trie/common/src/key.rs | 18 +++++++ crates/trie/common/src/lib.rs | 3 ++ crates/trie/db/src/commitment.rs | 38 ++++++++++++++ crates/trie/db/src/lib.rs | 2 + examples/custom-engine-types/Cargo.toml | 1 + examples/custom-engine-types/src/main.rs | 2 + 17 files changed, 116 insertions(+), 16 deletions(-) create mode 100644 crates/trie/common/src/key.rs create mode 100644 crates/trie/db/src/commitment.rs diff --git a/Cargo.lock b/Cargo.lock index e434e80cfde2..fc07aed94ca2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2754,6 +2754,7 @@ dependencies = [ "reth-payload-builder", "reth-primitives", "reth-tracing", + "reth-trie-db", "serde", "thiserror", "tokio", @@ -7504,6 +7505,7 @@ dependencies = [ "reth-provider", "reth-tasks", "reth-transaction-pool", + "reth-trie-db", "tempfile", "thiserror", "tokio", @@ -7960,6 +7962,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", "revm", "serde_json", "tokio", @@ -8020,6 +8023,7 @@ dependencies = [ "reth-chainspec", "reth-db-api", "reth-engine-primitives", + "reth-trie-db", ] [[package]] @@ -8185,6 +8189,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", "revm", "serde", "serde_json", diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 7a323f91d877..d264f3daea56 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -30,6 +30,7 @@ reth-node-api.workspace = true reth-chainspec.workspace = true reth-primitives.workspace = true reth-revm = { workspace = true, features = ["std"] } +reth-trie-db.workspace = true # revm with required ethereum features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 82f313fbb0b2..27dca26c022c 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -30,6 +30,7 @@ use reth_transaction_pool::{ blobstore::DiskFileBlobStore, EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; @@ -71,6 +72,7 @@ impl EthereumNode { impl NodeTypes for EthereumNode { type Primitives = (); type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; } impl NodeTypesWithEngine for EthereumNode { diff --git 
a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index 8488cdb8b731..a88d0832632a 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -32,6 +32,7 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-ethereum-engine-primitives.workspace = true +reth-trie-db.workspace = true ## async futures-util.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 9e17013c4a5e..07db668aec52 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -123,6 +123,7 @@ pub struct TestNode; impl NodeTypes for TestNode { type Primitives = (); type ChainSpec = ChainSpec; + type StateCommitment = reth_trie_db::MerklePatriciaTrie; } impl NodeTypesWithEngine for TestNode { diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 3e3d5b696c39..b04b763125ba 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -69,6 +69,8 @@ where type Primitives = ::Primitives; type ChainSpec = ::ChainSpec; + + type StateCommitment = ::StateCommitment; } impl NodeTypesWithEngine for AnyNode diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml index f04925d9cd41..9e99287d844d 100644 --- a/crates/node/types/Cargo.toml +++ b/crates/node/types/Cargo.toml @@ -14,4 +14,5 @@ workspace = true # reth reth-chainspec.workspace = true reth-db-api.workspace = true -reth-engine-primitives.workspace = true \ No newline at end of file +reth-engine-primitives.workspace = true +reth-trie-db.workspace = true diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 2c72e02d3edc..36cb68ac8c65 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -16,6 +16,7 @@ use reth_db_api::{ Database, }; use reth_engine_primitives::EngineTypes; +use reth_trie_db::StateCommitment; /// Configures all the primitive types of the node. // TODO(mattsse): this is currently a placeholder @@ -34,6 +35,8 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static { type Primitives: NodePrimitives; /// The type used for configuration of the EVM. type ChainSpec: EthChainSpec; + /// The type used to perform state commitment operations. + type StateCommitment: StateCommitment; } /// The type that configures an Ethereum-like node with an engine for consensus. @@ -84,6 +87,7 @@ where { type Primitives = Types::Primitives; type ChainSpec = Types::ChainSpec; + type StateCommitment = Types::StateCommitment; } impl NodeTypesWithEngine for NodeTypesWithDBAdapter @@ -104,70 +108,85 @@ where /// A [`NodeTypes`] type builder. #[derive(Default, Debug)] -pub struct AnyNodeTypes
<P = (), C = ChainSpec>(PhantomData<P>, PhantomData<C>);
+pub struct AnyNodeTypes<P = (), C = ChainSpec, S = MerklePatriciaTrie>(PhantomData<P>, PhantomData<C>, PhantomData<S>);
 
-impl<P, C> AnyNodeTypes<P, C> {
+impl<P, C, S> AnyNodeTypes<P, C, S> {
     /// Sets the `Primitives` associated type.
-    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C> {
-        AnyNodeTypes::<T, C>(PhantomData::<T>, PhantomData::<C>)
+    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C, S> {
+        AnyNodeTypes::<T, C, S>(PhantomData::<T>, PhantomData::<C>, PhantomData::<S>)
     }
 
     /// Sets the `ChainSpec` associated type.
-    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T> {
-        AnyNodeTypes::<P, T>(PhantomData::<P>, PhantomData::<T>)
+    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T, S> {
+        AnyNodeTypes::<P, T, S>(PhantomData::<P>, PhantomData::<T>, PhantomData::<S>)
+    }
+
+    /// Sets the `StateCommitment` associated type.
+    pub const fn state<T>(self) -> AnyNodeTypes<P, C, T> {
+        AnyNodeTypes::<P, C, T>(PhantomData::<P>, PhantomData::<C>, PhantomData::<T>)
     }
 }
 
-impl<P, C> NodeTypes for AnyNodeTypes<P, C>
+impl<P, C, S> NodeTypes for AnyNodeTypes<P, C, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     C: EthChainSpec + 'static,
+    S: StateCommitment,
 {
     type Primitives = P;
     type ChainSpec = C;
+    type StateCommitment = S;
 }
 
 /// A [`NodeTypesWithEngine`] type builder.
 #[derive(Default, Debug)]
-pub struct AnyNodeTypesWithEngine<P = (), E = (), C = ChainSpec> {
+pub struct AnyNodeTypesWithEngine<P = (), E = (), C = ChainSpec, S = MerklePatriciaTrie>
{ /// Embedding the basic node types. - base: AnyNodeTypes, + base: AnyNodeTypes, /// Phantom data for the engine. _engine: PhantomData, } -impl AnyNodeTypesWithEngine { +impl AnyNodeTypesWithEngine { /// Sets the `Primitives` associated type. - pub const fn primitives(self) -> AnyNodeTypesWithEngine { + pub const fn primitives(self) -> AnyNodeTypesWithEngine { AnyNodeTypesWithEngine { base: self.base.primitives::(), _engine: PhantomData } } /// Sets the `Engine` associated type. - pub const fn engine(self) -> AnyNodeTypesWithEngine { + pub const fn engine(self) -> AnyNodeTypesWithEngine { AnyNodeTypesWithEngine { base: self.base, _engine: PhantomData:: } } /// Sets the `ChainSpec` associated type. - pub const fn chain_spec(self) -> AnyNodeTypesWithEngine { + pub const fn chain_spec(self) -> AnyNodeTypesWithEngine { AnyNodeTypesWithEngine { base: self.base.chain_spec::(), _engine: PhantomData } } + + /// Sets the `StateCommitment` associated type. + pub const fn state(self) -> AnyNodeTypesWithEngine { + AnyNodeTypesWithEngine { base: self.base.state::(), _engine: PhantomData } + } } -impl NodeTypes for AnyNodeTypesWithEngine +impl NodeTypes for AnyNodeTypesWithEngine where P: NodePrimitives + Send + Sync + Unpin + 'static, E: EngineTypes + Send + Sync + Unpin, C: EthChainSpec + 'static, + S: StateCommitment, { type Primitives = P; type ChainSpec = C; + type StateCommitment = S; } -impl NodeTypesWithEngine for AnyNodeTypesWithEngine +impl NodeTypesWithEngine for AnyNodeTypesWithEngine where P: NodePrimitives + Send + Sync + Unpin + 'static, E: EngineTypes + Send + Sync + Unpin, C: EthChainSpec + 'static, + S: StateCommitment, { type Engine = E; } diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 8e359e602657..d1338e4c4591 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -33,6 +33,7 @@ reth-discv5.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-eth-api.workspace = true reth-rpc.workspace = true +reth-trie-db.workspace = true # op-reth reth-optimism-payload-builder.workspace = true diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 648da85d0bb4..7f61e0b71df5 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -28,6 +28,7 @@ use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; use crate::{ args::RollupArgs, @@ -115,6 +116,7 @@ where impl NodeTypes for OptimismNode { type Primitives = (); type ChainSpec = OpChainSpec; + type StateCommitment = MerklePatriciaTrie; } impl NodeTypesWithEngine for OptimismNode { diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index 2200781096d0..c0e80930b318 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -25,6 +25,7 @@ pub type MockNodeTypes = reth_node_types::AnyNodeTypesWithEngine< (), reth_ethereum_engine_primitives::EthEngineTypes, reth_chainspec::ChainSpec, + reth_trie_db::MerklePatriciaTrie, >; /// Mock [`reth_node_types::NodeTypesWithDB`] for testing. diff --git a/crates/trie/common/src/key.rs b/crates/trie/common/src/key.rs new file mode 100644 index 000000000000..9e440d199fa1 --- /dev/null +++ b/crates/trie/common/src/key.rs @@ -0,0 +1,18 @@ +use alloy_primitives::B256; +use revm_primitives::keccak256; + +/// Trait for hashing keys in state. 
+pub trait KeyHasher: Default + Clone + Send + Sync + 'static { + /// Hashes the given bytes into a 256-bit hash. + fn hash_key>(bytes: T) -> B256; +} + +/// A key hasher that uses the Keccak-256 hash function. +#[derive(Clone, Debug, Default)] +pub struct KeccakKeyHasher; + +impl KeyHasher for KeccakKeyHasher { + fn hash_key>(bytes: T) -> B256 { + keccak256(bytes) + } +} diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index bdec36028b94..7645ebd3a1cb 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -14,6 +14,9 @@ pub mod hash_builder; mod account; pub use account::TrieAccount; +mod key; +pub use key::{KeccakKeyHasher, KeyHasher}; + mod nibbles; pub use nibbles::{Nibbles, StoredNibbles, StoredNibblesSubKey}; diff --git a/crates/trie/db/src/commitment.rs b/crates/trie/db/src/commitment.rs new file mode 100644 index 000000000000..30b1d07469af --- /dev/null +++ b/crates/trie/db/src/commitment.rs @@ -0,0 +1,38 @@ +use crate::{ + DatabaseHashedCursorFactory, DatabaseProof, DatabaseStateRoot, DatabaseStorageRoot, + DatabaseTrieCursorFactory, DatabaseTrieWitness, +}; +use reth_db::transaction::DbTx; +use reth_trie::{ + proof::Proof, witness::TrieWitness, KeccakKeyHasher, KeyHasher, StateRoot, StorageRoot, +}; + +/// The `StateCommitment` trait provides associated types for state commitment operations. +pub trait StateCommitment: std::fmt::Debug + Send + Sync + Unpin + 'static { + /// The state root type. + type StateRoot<'a, TX: DbTx + 'a>: DatabaseStateRoot<'a, TX>; + /// The storage root type. + type StorageRoot<'a, TX: DbTx + 'a>: DatabaseStorageRoot<'a, TX>; + /// The state proof type. + type StateProof<'a, TX: DbTx + 'a>: DatabaseProof<'a, TX>; + /// The state witness type. + type StateWitness<'a, TX: DbTx + 'a>: DatabaseTrieWitness<'a, TX>; + /// The key hasher type. + type KeyHasher: KeyHasher; +} + +/// The state commitment type for Ethereum's Merkle Patricia Trie. +#[derive(Debug)] +pub struct MerklePatriciaTrie; + +impl StateCommitment for MerklePatriciaTrie { + type StateRoot<'a, TX: DbTx + 'a> = + StateRoot, DatabaseHashedCursorFactory<'a, TX>>; + type StorageRoot<'a, TX: DbTx + 'a> = + StorageRoot, DatabaseHashedCursorFactory<'a, TX>>; + type StateProof<'a, TX: DbTx + 'a> = + Proof, DatabaseHashedCursorFactory<'a, TX>>; + type StateWitness<'a, TX: DbTx + 'a> = + TrieWitness, DatabaseHashedCursorFactory<'a, TX>>; + type KeyHasher = KeccakKeyHasher; +} diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs index 3a9b1e328239..27c18af6cbfd 100644 --- a/crates/trie/db/src/lib.rs +++ b/crates/trie/db/src/lib.rs @@ -1,5 +1,6 @@ //! An integration of [`reth-trie`] with [`reth-db`]. 
+mod commitment; mod hashed_cursor; mod prefix_set; mod proof; @@ -8,6 +9,7 @@ mod storage; mod trie_cursor; mod witness; +pub use commitment::{MerklePatriciaTrie, StateCommitment}; pub use hashed_cursor::{ DatabaseHashedAccountCursor, DatabaseHashedCursorFactory, DatabaseHashedStorageCursor, }; diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index f826451d2038..1fbb3c4947ac 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -16,6 +16,7 @@ reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true +reth-trie-db.workspace = true alloy-genesis.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } alloy-primitives.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index f833da86236e..ab0ec5f52dbe 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -67,6 +67,7 @@ use reth_payload_builder::{ }; use reth_primitives::Withdrawals; use reth_tracing::{RethTracer, Tracer}; +use reth_trie_db::MerklePatriciaTrie; /// A custom payload attributes type. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -219,6 +220,7 @@ struct MyCustomNode; impl NodeTypes for MyCustomNode { type Primitives = (); type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; } /// Configure the node types with the custom engine types From cb77bce0454a5810fcae515c929408261f5f5107 Mon Sep 17 00:00:00 2001 From: frisitano Date: Fri, 18 Oct 2024 02:25:26 +0800 Subject: [PATCH 50/51] feat: introduce StateCommitment in StateProviders --- .../commands/debug_cmd/in_memory_merkle.rs | 9 +- bin/reth/src/commands/debug_cmd/merkle.rs | 12 +- crates/exex/exex/src/backfill/test_utils.rs | 15 +- crates/stages/stages/benches/criterion.rs | 10 +- crates/stages/stages/benches/setup/mod.rs | 20 +- crates/stages/stages/src/stages/execution.rs | 22 +- .../stages/src/stages/hashing_account.rs | 3 +- crates/stages/stages/src/test_utils/runner.rs | 10 +- .../src/providers/blockchain_provider.rs | 10 +- .../provider/src/providers/database/mod.rs | 18 +- .../src/providers/database/provider.rs | 235 +++++++++++------- .../src/providers/state/historical.rs | 120 +++++---- .../provider/src/providers/state/latest.rs | 39 +-- .../storage/provider/src/test_utils/blocks.rs | 5 +- .../storage/provider/src/test_utils/mock.rs | 5 +- crates/storage/storage-api/src/state.rs | 7 + crates/trie/db/tests/trie.rs | 8 +- 17 files changed, 345 insertions(+), 203 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 51851c0b0ad2..81c84819af62 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -22,8 +22,8 @@ use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, - HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, - StageCheckpointReader, StateWriter, StaticFileProviderFactory, StorageReader, + HeaderProvider, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, + StorageReader, ToLatestStateProviderRef, }; use 
reth_revm::database::StateProviderDatabase; use reth_stages::StageId; @@ -131,10 +131,7 @@ impl> Command { ) .await?; - let db = StateProviderDatabase::new(LatestStateProviderRef::new( - provider.tx_ref(), - provider_factory.static_file_provider(), - )); + let db = StateProviderDatabase::new(provider.latest()); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 8e02a52eaf07..9923321ae1c0 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -21,8 +21,8 @@ use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, - DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, ProviderFactory, StateWriter, StaticFileProviderFactory, + DatabaseProviderFactory, HeaderProvider, OriginalValuesKnown, ProviderError, ProviderFactory, + StateWriter, ToLatestStateProviderRef, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -152,12 +152,8 @@ impl> Command { provider_rw.insert_block(sealed_block.clone())?; td += sealed_block.difficulty; - let mut executor = executor_provider.batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new( - provider_rw.tx_ref(), - provider_rw.static_file_provider().clone(), - ), - )); + let mut executor = + executor_provider.batch_executor(StateProviderDatabase::new(provider_rw.latest())); executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; let execution_outcome = executor.finalize(); diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 1c793975c755..f01afa75398d 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -14,8 +14,8 @@ use reth_primitives::{ SealedBlockWithSenders, Transaction, }; use reth_provider::{ - providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, - ProviderFactory, StaticFileProviderFactory, + providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, ProviderFactory, + ToLatestStateProviderRef, }; use reth_revm::database::StateProviderDatabase; use reth_testing_utils::generators::sign_tx_with_key_pair; @@ -64,10 +64,7 @@ where // Execute the block to produce a block execution output let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec) - .executor(StateProviderDatabase::new(LatestStateProviderRef::new( - provider.tx_ref(), - provider.static_file_provider(), - ))) + .executor(StateProviderDatabase::new(provider.latest())) .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?; block_execution_output.state.reverts.sort(); @@ -192,10 +189,8 @@ where let provider = provider_factory.provider()?; - let executor = - EthExecutorProvider::ethereum(chain_spec).batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new(provider.tx_ref(), provider.static_file_provider()), - )); + let executor = EthExecutorProvider::ethereum(chain_spec) + .batch_executor(StateProviderDatabase::new(provider.latest())); let mut execution_outcome = executor.execute_and_verify_batch(vec![ (&block1, U256::ZERO).into(), diff --git a/crates/stages/stages/benches/criterion.rs b/crates/stages/stages/benches/criterion.rs index 7519d81a3622..9d40f52908a1 100644 --- 
a/crates/stages/stages/benches/criterion.rs +++ b/crates/stages/stages/benches/criterion.rs @@ -14,6 +14,7 @@ use reth_stages::{ StageCheckpoint, }; use reth_stages_api::{ExecInput, Stage, StageExt, UnwindInput}; +use reth_trie_db::MerklePatriciaTrie; use std::ops::RangeInclusive; use tokio::runtime::Runtime; @@ -148,7 +149,14 @@ fn measure_stage( block_interval: RangeInclusive, label: String, ) where - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage< + DatabaseProvider< + as Database>::TXMut, + ChainSpec, + MerklePatriciaTrie, + >, + >, F: Fn(S, &TestStageDB, StageRange), { let stage_range = ( diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index 4812fb13c39a..bbb3e29096f9 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -26,12 +26,19 @@ mod constants; mod account_hashing; pub use account_hashing::*; use reth_stages_api::{ExecInput, Stage, UnwindInput}; -use reth_trie_db::DatabaseStateRoot; +use reth_trie_db::{DatabaseStateRoot, MerklePatriciaTrie}; pub(crate) type StageRange = (ExecInput, UnwindInput); pub(crate) fn stage_unwind< - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage< + DatabaseProvider< + as Database>::TXMut, + ChainSpec, + MerklePatriciaTrie, + >, + >, >( stage: S, db: &TestStageDB, @@ -63,7 +70,14 @@ pub(crate) fn stage_unwind< pub(crate) fn unwind_hashes(stage: S, db: &TestStageDB, range: StageRange) where - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage< + DatabaseProvider< + as Database>::TXMut, + ChainSpec, + MerklePatriciaTrie, + >, + >, { let (input, unwind) = range; diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 7bb6ebc59e09..b040c55622a3 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -15,8 +15,8 @@ use reth_primitives_traits::format_gas_throughput; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, writer::UnifiedStorageWriter, - BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, StateChangeWriter, StateWriter, StaticFileProviderFactory, StatsReader, + BlockReader, DBProvider, HeaderProvider, OriginalValuesKnown, ProviderError, StateChangeWriter, + StateWriter, StaticFileProviderFactory, StatsReader, ToLatestStateProviderRef, TransactionVariant, }; use reth_prune_types::PruneModes; @@ -45,8 +45,9 @@ use tracing::*; /// - [`tables::BlockBodyIndices`] to get tx number /// - [`tables::Transactions`] to execute /// -/// For state access [`LatestStateProviderRef`] provides us latest state and history state -/// For latest most recent state [`LatestStateProviderRef`] would need (Used for execution Stage): +/// For state access [`reth_provider::LatestStateProviderRef`] provides us latest state and history +/// state For latest most recent state [`reth_provider::LatestStateProviderRef`] would need (Used +/// for execution Stage): /// - [`tables::PlainAccountState`] /// - [`tables::Bytecodes`] /// - [`tables::PlainStorageState`] @@ -174,8 +175,12 @@ impl ExecutionStage { impl Stage for ExecutionStage where E: BlockExecutorProvider, - Provider: - DBProvider + BlockReader + StaticFileProviderFactory + StatsReader + StateChangeWriter, + Provider: DBProvider + + BlockReader + + StaticFileProviderFactory + + StatsReader + + StateChangeWriter + + ToLatestStateProviderRef, 
for<'a> UnifiedStorageWriter<'a, Provider, StaticFileProviderRWRefMut<'a>>: StateWriter, { /// Return the id of the stage @@ -219,10 +224,7 @@ where None }; - let db = StateProviderDatabase(LatestStateProviderRef::new( - provider.tx_ref(), - provider.static_file_provider(), - )); + let db = StateProviderDatabase(provider.latest()); let mut executor = self.executor_provider.batch_executor(db); executor.set_tip(max_block); executor.set_prune_modes(prune_modes); diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 14afb37d81db..0087eae3e99c 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -61,8 +61,9 @@ impl AccountHashingStage { pub fn seed< Tx: DbTx + DbTxMut + 'static, Spec: Send + Sync + 'static + reth_chainspec::EthereumHardforks, + SC: Send + Sync + 'static, >( - provider: &reth_provider::DatabaseProvider, + provider: &reth_provider::DatabaseProvider, opts: SeedOpts, ) -> Result, StageError> { use alloy_primitives::U256; diff --git a/crates/stages/stages/src/test_utils/runner.rs b/crates/stages/stages/src/test_utils/runner.rs index 26f245c1304d..b5f91e8664b5 100644 --- a/crates/stages/stages/src/test_utils/runner.rs +++ b/crates/stages/stages/src/test_utils/runner.rs @@ -6,6 +6,7 @@ use reth_stages_api::{ ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput, }; use reth_storage_errors::db::DatabaseError; +use reth_trie_db::MerklePatriciaTrie; use tokio::sync::oneshot; #[derive(thiserror::Error, Debug)] @@ -20,8 +21,13 @@ pub(crate) enum TestRunnerError { /// A generic test runner for stages. pub(crate) trait StageTestRunner { - type S: Stage as Database>::TXMut, ChainSpec>> - + 'static; + type S: Stage< + DatabaseProvider< + as Database>::TXMut, + ChainSpec, + MerklePatriciaTrie, + >, + > + 'static; /// Return a reference to the database. fn db(&self) -> &TestStageDB; diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 9e6f32b33a3b..6e39a3697b30 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -302,7 +302,7 @@ impl BlockchainProvider2 { ) -> ProviderResult> where F: FnOnce( - &DatabaseProviderRO, + &DatabaseProviderRO, RangeInclusive, &mut P, ) -> ProviderResult>, @@ -418,7 +418,7 @@ impl BlockchainProvider2 { ) -> ProviderResult> where S: FnOnce( - DatabaseProviderRO, + DatabaseProviderRO, RangeInclusive, ) -> ProviderResult>, M: Fn(RangeInclusive, Arc) -> ProviderResult>, @@ -516,7 +516,9 @@ impl BlockchainProvider2 { fetch_from_block_state: M, ) -> ProviderResult> where - S: FnOnce(DatabaseProviderRO) -> ProviderResult>, + S: FnOnce( + DatabaseProviderRO, + ) -> ProviderResult>, M: Fn(usize, TxNumber, Arc) -> ProviderResult>, { // Order of instantiation matters. 
More information on: @@ -585,7 +587,7 @@ impl BlockchainProvider2 { fetch_from_block_state: M, ) -> ProviderResult where - S: FnOnce(DatabaseProviderRO) -> ProviderResult, + S: FnOnce(DatabaseProviderRO) -> ProviderResult, M: Fn(Arc) -> ProviderResult, { let block_state = match id { diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 520b514527b2..62e04c2cf651 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -130,7 +130,9 @@ impl ProviderFactory { /// This sets the [`PruneModes`] to [`None`], because they should only be relevant for writing /// data. #[track_caller] - pub fn provider(&self) -> ProviderResult> { + pub fn provider( + &self, + ) -> ProviderResult> { Ok(DatabaseProvider::new( self.db.tx()?, self.chain_spec.clone(), @@ -144,7 +146,9 @@ impl ProviderFactory { /// [`BlockHashReader`]. This may fail if the inner read/write database transaction fails to /// open. #[track_caller] - pub fn provider_rw(&self) -> ProviderResult> { + pub fn provider_rw( + &self, + ) -> ProviderResult> { Ok(DatabaseProviderRW(DatabaseProvider::new_rw( self.db.tx_mut()?, self.chain_spec.clone(), @@ -157,7 +161,10 @@ impl ProviderFactory { #[track_caller] pub fn latest(&self) -> ProviderResult { trace!(target: "providers::db", "Returning latest state provider"); - Ok(Box::new(LatestStateProvider::new(self.db.tx()?, self.static_file_provider()))) + Ok(Box::new(LatestStateProvider::<_, N::StateCommitment>::new( + self.db.tx()?, + self.static_file_provider(), + ))) } /// Storage provider for state at that given block @@ -186,8 +193,9 @@ impl ProviderFactory { impl DatabaseProviderFactory for ProviderFactory { type DB = N::DB; - type Provider = DatabaseProvider<::TX, N::ChainSpec>; - type ProviderRW = DatabaseProvider<::TXMut, N::ChainSpec>; + type Provider = DatabaseProvider<::TX, N::ChainSpec, N::StateCommitment>; + type ProviderRW = + DatabaseProvider<::TXMut, N::ChainSpec, N::StateCommitment>; fn database_provider_ro(&self) -> ProviderResult { self.provider() diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8140700fabac..1c3042d5e4a6 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -13,8 +13,9 @@ use crate::{ LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, RevertsInit, StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, - StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, + ToLatestStateProviderRef, TransactionVariant, TransactionsProvider, TransactionsProviderExt, + TrieWriter, WithdrawalsProvider, }; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; @@ -54,7 +55,7 @@ use reth_trie::{ updates::{StorageTrieUpdates, TrieUpdates}, HashedPostStateSorted, Nibbles, StateRoot, StoredNibbles, }; -use reth_trie_db::{DatabaseStateRoot, DatabaseStorageTrieCursor}; +use reth_trie_db::{DatabaseStateRoot, DatabaseStorageTrieCursor, StateCommitment}; use revm::{ 
db::states::{PlainStateReverts, PlainStorageChangeset, PlainStorageRevert, StateChangeset}, primitives::{BlockEnv, CfgEnvWithHandlerCfg}, @@ -63,6 +64,7 @@ use std::{ cmp::Ordering, collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, fmt::Debug, + marker::PhantomData, ops::{Bound, Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::{mpsc, Arc}, time::{Duration, Instant}, @@ -71,40 +73,40 @@ use tokio::sync::watch; use tracing::{debug, error, trace, warn}; /// A [`DatabaseProvider`] that holds a read-only database transaction. -pub type DatabaseProviderRO = DatabaseProvider<::TX, Spec>; +pub type DatabaseProviderRO = DatabaseProvider<::TX, Spec, SC>; /// A [`DatabaseProvider`] that holds a read-write database transaction. /// /// Ideally this would be an alias type. However, there's some weird compiler error (), that forces us to wrap this in a struct instead. /// Once that issue is solved, we can probably revert back to being an alias type. #[derive(Debug)] -pub struct DatabaseProviderRW( - pub DatabaseProvider<::TXMut, Spec>, +pub struct DatabaseProviderRW( + pub DatabaseProvider<::TXMut, Spec, SC>, ); -impl Deref for DatabaseProviderRW { - type Target = DatabaseProvider<::TXMut, Spec>; +impl Deref for DatabaseProviderRW { + type Target = DatabaseProvider<::TXMut, Spec, SC>; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for DatabaseProviderRW { +impl DerefMut for DatabaseProviderRW { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl AsRef::TXMut, Spec>> - for DatabaseProviderRW +impl AsRef::TXMut, Spec, SC>> + for DatabaseProviderRW { - fn as_ref(&self) -> &DatabaseProvider<::TXMut, Spec> { + fn as_ref(&self) -> &DatabaseProvider<::TXMut, Spec, SC> { &self.0 } } -impl DatabaseProviderRW { +impl DatabaseProviderRW { /// Commit database transaction and static file if it exists. pub fn commit(self) -> ProviderResult { self.0.commit() @@ -116,10 +118,10 @@ impl DatabaseProviderRW { } } -impl From> - for DatabaseProvider<::TXMut, Spec> +impl From> + for DatabaseProvider<::TXMut, Spec, SC> { - fn from(provider: DatabaseProviderRW) -> Self { + fn from(provider: DatabaseProviderRW) -> Self { provider.0 } } @@ -127,7 +129,7 @@ impl From> /// A provider struct that fetches data from the database. /// Wrapper around [`DbTx`] and [`DbTxMut`]. Example: [`HeaderProvider`] [`BlockHashReader`] #[derive(Debug)] -pub struct DatabaseProvider { +pub struct DatabaseProvider { /// Database transaction. tx: TX, /// Chain spec @@ -136,22 +138,18 @@ pub struct DatabaseProvider { static_file_provider: StaticFileProvider, /// Pruning configuration prune_modes: PruneModes, + /// Marker to associate the `StateCommitment` type with this provider. + _marker: std::marker::PhantomData, } -impl DatabaseProvider { +impl DatabaseProvider { /// Returns reference to prune modes. 
pub const fn prune_modes_ref(&self) -> &PruneModes { &self.prune_modes } } -impl DatabaseProvider { - /// State provider for latest block - pub fn latest<'a>(&'a self) -> ProviderResult> { - trace!(target: "providers::db", "Returning latest state provider"); - Ok(Box::new(LatestStateProviderRef::new(&self.tx, self.static_file_provider.clone()))) - } - +impl DatabaseProvider { /// Storage provider for state at that given block hash pub fn history_by_block_hash<'a>( &'a self, @@ -162,7 +160,7 @@ impl DatabaseProvider { if block_number == self.best_block_number().unwrap_or_default() && block_number == self.last_block_number().unwrap_or_default() { - return Ok(Box::new(LatestStateProviderRef::new( + return Ok(Box::new(LatestStateProviderRef::<'_, _, SC>::new( &self.tx, self.static_file_provider.clone(), ))) @@ -176,7 +174,7 @@ impl DatabaseProvider { let storage_history_prune_checkpoint = self.get_prune_checkpoint(PruneSegment::StorageHistory)?; - let mut state_provider = HistoricalStateProviderRef::new( + let mut state_provider = HistoricalStateProviderRef::<'_, _, SC>::new( &self.tx, block_number, self.static_file_provider.clone(), @@ -203,15 +201,15 @@ impl DatabaseProvider { } } -impl StaticFileProviderFactory for DatabaseProvider { +impl StaticFileProviderFactory for DatabaseProvider { /// Returns a static file provider fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } -impl ChainSpecProvider - for DatabaseProvider +impl ChainSpecProvider + for DatabaseProvider { type ChainSpec = Spec; @@ -220,7 +218,7 @@ impl ChainSpecProvider } } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-write transaction. pub const fn new_rw( tx: TX, @@ -228,18 +226,18 @@ impl DatabaseProvider { static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes } + Self { tx, chain_spec, static_file_provider, prune_modes, _marker: PhantomData } } } -impl AsRef for DatabaseProvider { +impl AsRef for DatabaseProvider { fn as_ref(&self) -> &Self { self } } -impl TryIntoHistoricalStateProvider - for DatabaseProvider +impl TryIntoHistoricalStateProvider + for DatabaseProvider { fn try_into_history_at_block( self, @@ -248,7 +246,10 @@ impl TryIntoHistoricalStateProvider if block_number == self.best_block_number().unwrap_or_default() && block_number == self.last_block_number().unwrap_or_default() { - return Ok(Box::new(LatestStateProvider::new(self.tx, self.static_file_provider))) + return Ok(Box::new(LatestStateProvider::<_, SC>::new( + self.tx, + self.static_file_provider, + ))) } // +1 as the changeset that we want is the one that was applied after this block. @@ -260,7 +261,7 @@ impl TryIntoHistoricalStateProvider self.get_prune_checkpoint(PruneSegment::StorageHistory)?; let mut state_provider = - HistoricalStateProvider::new(self.tx, block_number, self.static_file_provider); + HistoricalStateProvider::<_, SC>::new(self.tx, block_number, self.static_file_provider); // If we pruned account or storage history, we can't return state on every historical block. // Instead, we should cap it at the latest prune checkpoint for corresponding prune segment. 
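The recurring change in this provider is the extra `SC` type parameter threaded through `DatabaseProvider` with a zero-sized `PhantomData` marker, so the state-commitment implementation is selected at the type level without storing any value at runtime. A minimal, self-contained sketch of that pattern (illustrative only: the `Commitment` trait, `MerklePatricia` type, and `Provider` struct below are invented stand-ins, not reth APIs):

use std::marker::PhantomData;

// Invented stand-in for a commitment scheme trait.
trait Commitment {
    fn name() -> &'static str;
}

struct MerklePatricia;

impl Commitment for MerklePatricia {
    fn name() -> &'static str {
        "merkle-patricia"
    }
}

// Generic over the commitment scheme `C`, but no `C` value is ever stored;
// the `PhantomData` field only fixes the type parameter.
struct Provider<Tx, C> {
    tx: Tx,
    _marker: PhantomData<C>,
}

impl<Tx, C> Provider<Tx, C> {
    const fn new(tx: Tx) -> Self {
        Self { tx, _marker: PhantomData }
    }
}

impl<Tx, C: Commitment> Provider<Tx, C> {
    // Dispatch on `C` statically, e.g. to pick a state-root algorithm.
    fn commitment_name(&self) -> &'static str {
        C::name()
    }
}

fn main() {
    let provider: Provider<(), MerklePatricia> = Provider::new(());
    assert_eq!(provider.commitment_name(), "merkle-patricia");
    let _ = provider.tx; // keep the otherwise-unused field exercised in this toy example
}

The marker costs nothing at runtime; callers such as `ProviderFactory` name the commitment type once (via `N::StateCommitment`) and every provider built from it picks the choice up through the generics.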
@@ -283,8 +284,24 @@ impl TryIntoHistoricalStateProvider } } -impl - DatabaseProvider +impl ToLatestStateProviderRef + for DatabaseProvider +{ + /// State provider for latest block + fn latest<'a>(&'a self) -> Box { + trace!(target: "providers::db", "Returning latest state provider"); + Box::new(LatestStateProviderRef::<'_, _, SC>::new( + &self.tx, + self.static_file_provider.clone(), + )) + } +} + +impl< + Tx: DbTx + DbTxMut + 'static, + Spec: Send + Sync + EthereumHardforks + 'static, + SC: Send + Sync + 'static, + > DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] @@ -366,7 +383,7 @@ where Ok(Vec::new()) } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. pub const fn new( tx: TX, @@ -374,7 +391,7 @@ impl DatabaseProvider { static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes } + Self { tx, chain_spec, static_file_provider, prune_modes, _marker: PhantomData } } /// Consume `DbTx` or `DbTxMut`. @@ -1037,7 +1054,7 @@ impl DatabaseProvider { } } -impl DatabaseProvider { +impl DatabaseProvider { /// Commit database transaction. pub fn commit(self) -> ProviderResult { Ok(self.tx.commit()?) @@ -1423,13 +1440,17 @@ impl DatabaseProvider { } } -impl AccountReader for DatabaseProvider { +impl AccountReader + for DatabaseProvider +{ fn basic_account(&self, address: Address) -> ProviderResult> { Ok(self.tx.get::(address)?) } } -impl AccountExtReader for DatabaseProvider { +impl AccountExtReader + for DatabaseProvider +{ fn changed_accounts_with_range( &self, range: impl RangeBounds, @@ -1473,7 +1494,9 @@ impl AccountExtReader for DatabaseProvider StorageChangeSetReader for DatabaseProvider { +impl StorageChangeSetReader + for DatabaseProvider +{ fn storage_changeset( &self, block_number: BlockNumber, @@ -1488,7 +1511,9 @@ impl StorageChangeSetReader for DatabaseProvider ChangeSetReader for DatabaseProvider { +impl ChangeSetReader + for DatabaseProvider +{ fn account_block_changeset( &self, block_number: BlockNumber, @@ -1505,7 +1530,9 @@ impl ChangeSetReader for DatabaseProvider } } -impl HeaderSyncGapProvider for DatabaseProvider { +impl HeaderSyncGapProvider + for DatabaseProvider +{ fn sync_gap( &self, tip: watch::Receiver, @@ -1549,8 +1576,8 @@ impl HeaderSyncGapProvider for DatabaseProvider HeaderProvider - for DatabaseProvider +impl HeaderProvider + for DatabaseProvider { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? 
{ @@ -1649,7 +1676,9 @@ impl HeaderProvider } } -impl BlockHashReader for DatabaseProvider { +impl BlockHashReader + for DatabaseProvider +{ fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, @@ -1676,7 +1705,9 @@ impl BlockHashReader for DatabaseProvider } } -impl BlockNumReader for DatabaseProvider { +impl BlockNumReader + for DatabaseProvider +{ fn chain_info(&self) -> ProviderResult { let best_number = self.best_block_number()?; let best_hash = self.block_hash(best_number)?.unwrap_or_default(); @@ -1707,7 +1738,9 @@ impl BlockNumReader for DatabaseProvider } } -impl BlockReader for DatabaseProvider { +impl BlockReader + for DatabaseProvider +{ fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) @@ -1892,8 +1925,8 @@ impl BlockReader for DatabasePr } } -impl TransactionsProviderExt - for DatabaseProvider +impl TransactionsProviderExt + for DatabaseProvider { /// Recovers transaction hashes by walking through `Transactions` table and /// calculating them in a parallel manner. Returned unsorted. @@ -1962,8 +1995,8 @@ impl TransactionsProviderExt } // Calculates the hash of the given transaction -impl TransactionsProvider - for DatabaseProvider +impl TransactionsProvider + for DatabaseProvider { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { Ok(self.tx.get::(tx_hash)?) @@ -2122,8 +2155,8 @@ impl TransactionsProvider } } -impl ReceiptProvider - for DatabaseProvider +impl ReceiptProvider + for DatabaseProvider { fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( @@ -2170,8 +2203,8 @@ impl ReceiptProvider } } -impl WithdrawalsProvider - for DatabaseProvider +impl WithdrawalsProvider + for DatabaseProvider { fn withdrawals_by_block( &self, @@ -2200,8 +2233,8 @@ impl WithdrawalsProvider } } -impl RequestsProvider - for DatabaseProvider +impl RequestsProvider + for DatabaseProvider { fn requests_by_block( &self, @@ -2218,8 +2251,8 @@ impl RequestsProvider } } -impl EvmEnvProvider - for DatabaseProvider +impl EvmEnvProvider + for DatabaseProvider { fn fill_env_at( &self, @@ -2284,7 +2317,9 @@ impl EvmEnvProvider } } -impl StageCheckpointReader for DatabaseProvider { +impl StageCheckpointReader + for DatabaseProvider +{ fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { Ok(self.tx.get::(id.to_string())?) } @@ -2303,7 +2338,9 @@ impl StageCheckpointReader for DatabaseProvider StageCheckpointWriter for DatabaseProvider { +impl StageCheckpointWriter + for DatabaseProvider +{ /// Save stage checkpoint. fn save_stage_checkpoint( &self, @@ -2344,7 +2381,9 @@ impl StageCheckpointWriter for DatabaseProvider< } } -impl StorageReader for DatabaseProvider { +impl StorageReader + for DatabaseProvider +{ fn plain_state_storages( &self, addresses_with_keys: impl IntoIterator)>, @@ -2407,7 +2446,9 @@ impl StorageReader for DatabaseProvider { } } -impl StateChangeWriter for DatabaseProvider { +impl StateChangeWriter + for DatabaseProvider +{ fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2784,7 +2825,9 @@ impl StateChangeWriter for DatabaseProvid } } -impl TrieWriter for DatabaseProvider { +impl TrieWriter + for DatabaseProvider +{ /// Writes trie updates. Returns the number of entries modified. 
fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { if trie_updates.is_empty() { @@ -2834,7 +2877,9 @@ impl TrieWriter for DatabaseProvider StorageTrieWriter for DatabaseProvider { +impl StorageTrieWriter + for DatabaseProvider +{ /// Writes storage trie updates from the given storage trie map. First sorts the storage trie /// updates by the hashed address, writing in sorted order. fn write_storage_trie_updates( @@ -2871,7 +2916,9 @@ impl StorageTrieWriter for DatabaseProvid } } -impl HashingWriter for DatabaseProvider { +impl HashingWriter + for DatabaseProvider +{ fn unwind_account_hashing( &self, range: RangeInclusive, @@ -3077,7 +3124,9 @@ impl HashingWriter for DatabaseProvider HistoryWriter for DatabaseProvider { +impl HistoryWriter + for DatabaseProvider +{ fn unwind_account_history_indices( &self, range: RangeInclusive, @@ -3193,8 +3242,8 @@ impl HistoryWriter for DatabaseProvider BlockExecutionReader - for DatabaseProvider +impl BlockExecutionReader + for DatabaseProvider { fn get_block_and_execution_range( &self, @@ -3210,14 +3259,17 @@ impl BlockExecutionReader } } -impl StateReader for DatabaseProvider { +impl StateReader for DatabaseProvider { fn get_state(&self, block: BlockNumber) -> ProviderResult> { self.get_state(block..=block) } } -impl - BlockExecutionWriter for DatabaseProvider +impl< + TX: DbTxMut + DbTx + 'static, + Spec: Send + Sync + EthereumHardforks + 'static, + SC: Send + Sync + 'static, + > BlockExecutionWriter for DatabaseProvider { fn take_block_and_execution_range( &self, @@ -3396,8 +3448,11 @@ impl BlockWriter - for DatabaseProvider +impl< + TX: DbTxMut + DbTx + 'static, + Spec: Send + Sync + EthereumHardforks + 'static, + SC: Send + Sync + 'static, + > BlockWriter for DatabaseProvider { /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) @@ -3615,7 +3670,9 @@ impl PruneCheckpointReader for DatabaseProvider { +impl PruneCheckpointReader + for DatabaseProvider +{ fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -3632,7 +3689,9 @@ impl PruneCheckpointReader for DatabaseProvider PruneCheckpointWriter for DatabaseProvider { +impl PruneCheckpointWriter + for DatabaseProvider +{ fn save_prune_checkpoint( &self, segment: PruneSegment, @@ -3642,7 +3701,7 @@ impl PruneCheckpointWriter for DatabaseProvider< } } -impl StatsReader for DatabaseProvider { +impl StatsReader for DatabaseProvider { fn count_entries(&self) -> ProviderResult { let db_entries = self.tx.entries::()?; let static_file_entries = match self.static_file_provider.count_entries::() { @@ -3655,7 +3714,9 @@ impl StatsReader for DatabaseProvider { } } -impl ChainStateBlockReader for DatabaseProvider { +impl ChainStateBlockReader + for DatabaseProvider +{ fn last_finalized_block_number(&self) -> ProviderResult> { let mut finalized_blocks = self .tx @@ -3681,7 +3742,9 @@ impl ChainStateBlockReader for DatabaseProvider ChainStateBlockWriter for DatabaseProvider { +impl ChainStateBlockWriter + for DatabaseProvider +{ fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()> { Ok(self .tx @@ -3695,7 +3758,9 @@ impl ChainStateBlockWriter for DatabaseProvider< } } -impl DBProvider for DatabaseProvider { +impl DBProvider + for DatabaseProvider +{ type Tx = TX; fn tx_ref(&self) -> &Self::Tx { diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 640041e0801f..967343a9763c 100644 --- 
a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -24,9 +24,9 @@ use reth_trie::{ }; use reth_trie_db::{ DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, - DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, + DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, StateCommitment, }; -use std::fmt::Debug; +use std::{fmt::Debug, marker::PhantomData}; /// State provider for a given block number which takes a tx reference. /// @@ -40,7 +40,7 @@ use std::fmt::Debug; /// - [`tables::AccountChangeSets`] /// - [`tables::StorageChangeSets`] #[derive(Debug)] -pub struct HistoricalStateProviderRef<'b, TX: DbTx> { +pub struct HistoricalStateProviderRef<'b, TX: DbTx, SC: StateCommitment> { /// Transaction tx: &'b TX, /// Block number is main index for the history state of accounts and storages. @@ -49,6 +49,8 @@ pub struct HistoricalStateProviderRef<'b, TX: DbTx> { lowest_available_blocks: LowestAvailableBlocks, /// Static File provider static_file_provider: StaticFileProvider, + /// Marker to associate the `StateCommitment` type with this provider. + _marker: PhantomData, } #[derive(Debug, Eq, PartialEq)] @@ -59,14 +61,20 @@ pub enum HistoryInfo { MaybeInPlainState, } -impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { +impl<'b, TX: DbTx, SC: StateCommitment> HistoricalStateProviderRef<'b, TX, SC> { /// Create new `StateProvider` for historical block number pub fn new( tx: &'b TX, block_number: BlockNumber, static_file_provider: StaticFileProvider, ) -> Self { - Self { tx, block_number, lowest_available_blocks: Default::default(), static_file_provider } + Self { + tx, + block_number, + lowest_available_blocks: Default::default(), + static_file_provider, + _marker: PhantomData, + } } /// Create new `StateProvider` for historical block number and lowest block numbers at which @@ -77,7 +85,13 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { lowest_available_blocks: LowestAvailableBlocks, static_file_provider: StaticFileProvider, ) -> Self { - Self { tx, block_number, lowest_available_blocks, static_file_provider } + Self { + tx, + block_number, + lowest_available_blocks, + static_file_provider, + _marker: PhantomData, + } } /// Lookup an account in the `AccountsHistory` table @@ -247,7 +261,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { } } -impl AccountReader for HistoricalStateProviderRef<'_, TX> { +impl AccountReader for HistoricalStateProviderRef<'_, TX, SC> { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { match self.account_history_lookup(address)? { @@ -269,7 +283,7 @@ impl AccountReader for HistoricalStateProviderRef<'_, TX> { } } -impl BlockHashReader for HistoricalStateProviderRef<'_, TX> { +impl BlockHashReader for HistoricalStateProviderRef<'_, TX, SC> { /// Get block hash by number. 
fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( @@ -305,7 +319,7 @@ impl BlockHashReader for HistoricalStateProviderRef<'_, TX> { } } -impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { +impl StateRootProvider for HistoricalStateProviderRef<'_, TX, SC> { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state); @@ -339,7 +353,7 @@ impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { } } -impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { +impl StorageRootProvider for HistoricalStateProviderRef<'_, TX, SC> { fn storage_root( &self, address: Address, @@ -364,7 +378,7 @@ impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { } } -impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { +impl StateProofProvider for HistoricalStateProviderRef<'_, TX, SC> { /// Get account and storage proofs. fn proof( &self, @@ -396,7 +410,7 @@ impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { } } -impl StateProvider for HistoricalStateProviderRef<'_, TX> { +impl StateProvider for HistoricalStateProviderRef<'_, TX, SC> { /// Get storage. fn storage( &self, @@ -436,7 +450,7 @@ impl StateProvider for HistoricalStateProviderRef<'_, TX> { /// State provider for a given block number. /// For more detailed description, see [`HistoricalStateProviderRef`]. #[derive(Debug)] -pub struct HistoricalStateProvider { +pub struct HistoricalStateProvider { /// Database transaction tx: TX, /// State at the block number is the main indexer of the state. @@ -445,16 +459,24 @@ pub struct HistoricalStateProvider { lowest_available_blocks: LowestAvailableBlocks, /// Static File provider static_file_provider: StaticFileProvider, + /// Marker to associate the `StateCommitment` type with this provider. + _marker: PhantomData, } -impl HistoricalStateProvider { +impl HistoricalStateProvider { /// Create new `StateProvider` for historical block number pub fn new( tx: TX, block_number: BlockNumber, static_file_provider: StaticFileProvider, ) -> Self { - Self { tx, block_number, lowest_available_blocks: Default::default(), static_file_provider } + Self { + tx, + block_number, + lowest_available_blocks: Default::default(), + static_file_provider, + _marker: PhantomData, + } } /// Set the lowest block number at which the account history is available. @@ -477,7 +499,7 @@ impl HistoricalStateProvider { /// Returns a new provider that takes the `TX` as reference #[inline(always)] - fn as_ref(&self) -> HistoricalStateProviderRef<'_, TX> { + fn as_ref(&self) -> HistoricalStateProviderRef<'_, TX, SC> { HistoricalStateProviderRef::new_with_lowest_available_blocks( &self.tx, self.block_number, @@ -488,7 +510,7 @@ impl HistoricalStateProvider { } // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [TX: DbTx]); +delegate_provider_impls!(HistoricalStateProvider where [TX: DbTx, SC: StateCommitment]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. 
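`LowestAvailableBlocks` captures the pruning rule noted earlier in this file: once account or storage history has been pruned, a historical lookup below the lowest retained block cannot be answered and the provider has to bail out. A rough sketch of that availability check, using invented field and method names rather than the actual reth API:

// Illustrative only: simplified stand-in for the availability bookkeeping described above.
struct LowestAvailable {
    // `Some(n)` means history for blocks below `n` has been pruned away;
    // `None` means the corresponding history is fully retained.
    account_history: Option<u64>,
    storage_history: Option<u64>,
}

impl LowestAvailable {
    fn account_history_available(&self, at_block: u64) -> bool {
        // With no pruning configured, every historical block can be served.
        self.account_history.map_or(true, |lowest| at_block >= lowest)
    }

    fn storage_history_available(&self, at_block: u64) -> bool {
        self.storage_history.map_or(true, |lowest| at_block >= lowest)
    }
}

fn main() {
    let lowest = LowestAvailable { account_history: Some(100), storage_history: None };
    assert!(!lowest.account_history_available(50)); // pruned: the provider should error here
    assert!(lowest.account_history_available(150)); // still retained
    assert!(lowest.storage_history_available(1)); // storage history never pruned in this setup
}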
@@ -534,6 +556,18 @@ mod tests { }; use reth_primitives::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderError; + use reth_trie_db::{MerklePatriciaTrie, StateCommitment}; + + type TestHistoricalStateProviderRefRO<'a> = HistoricalStateProviderRef< + 'a, + reth_db::mdbx::tx::Tx, + MerklePatriciaTrie, + >; + type TestHistoricalStateProviderRefRW<'a> = HistoricalStateProviderRef< + 'a, + reth_db::mdbx::tx::Tx, + MerklePatriciaTrie, + >; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); const HIGHER_ADDRESS: Address = address!("0000000000000000000000000000000000000005"); @@ -541,8 +575,8 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_historical_state_provider() { - assert_state_provider::>(); + const fn assert_historical_state_provider() { + assert_state_provider::>(); } #[test] @@ -613,58 +647,58 @@ mod tests { // run assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 1, static_file_provider.clone()) .basic_account(ADDRESS), Ok(None) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 2, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 2, static_file_provider.clone()) .basic_account(ADDRESS), Ok(Some(acc_at3)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 3, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 3, static_file_provider.clone()) .basic_account(ADDRESS), Ok(Some(acc_at3)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 4, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 4, static_file_provider.clone()) .basic_account(ADDRESS), Ok(Some(acc_at7)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 7, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 7, static_file_provider.clone()) .basic_account(ADDRESS), Ok(Some(acc_at7)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 9, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 9, static_file_provider.clone()) .basic_account(ADDRESS), Ok(Some(acc_at10)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 10, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 10, static_file_provider.clone()) .basic_account(ADDRESS), Ok(Some(acc_at10)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 11, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 11, static_file_provider.clone()) .basic_account(ADDRESS), Ok(Some(acc_at15)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 16, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 16, static_file_provider.clone()) .basic_account(ADDRESS), Ok(Some(acc_plain)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 1, static_file_provider.clone()) .basic_account(HIGHER_ADDRESS), Ok(None) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1000, static_file_provider) + TestHistoricalStateProviderRefRO::new(&tx, 1000, static_file_provider) .basic_account(HIGHER_ADDRESS), Ok(Some(higher_acc_plain)) ); @@ -725,52 +759,52 @@ mod tests { // run assert_eq!( - HistoricalStateProviderRef::new(&tx, 0, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 0, static_file_provider.clone()) .storage(ADDRESS, STORAGE), Ok(None) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 3, static_file_provider.clone()) + 
TestHistoricalStateProviderRefRO::new(&tx, 3, static_file_provider.clone()) .storage(ADDRESS, STORAGE), Ok(Some(U256::ZERO)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 4, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 4, static_file_provider.clone()) .storage(ADDRESS, STORAGE), Ok(Some(entry_at7.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 7, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 7, static_file_provider.clone()) .storage(ADDRESS, STORAGE), Ok(Some(entry_at7.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 9, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 9, static_file_provider.clone()) .storage(ADDRESS, STORAGE), Ok(Some(entry_at10.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 10, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 10, static_file_provider.clone()) .storage(ADDRESS, STORAGE), Ok(Some(entry_at10.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 11, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 11, static_file_provider.clone()) .storage(ADDRESS, STORAGE), Ok(Some(entry_at15.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 16, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 16, static_file_provider.clone()) .storage(ADDRESS, STORAGE), Ok(Some(entry_plain.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) + TestHistoricalStateProviderRefRO::new(&tx, 1, static_file_provider.clone()) .storage(HIGHER_ADDRESS, STORAGE), Ok(None) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1000, static_file_provider) + TestHistoricalStateProviderRefRO::new(&tx, 1000, static_file_provider) .storage(HIGHER_ADDRESS, STORAGE), Ok(Some(higher_entry_plain.value)) ); @@ -784,7 +818,7 @@ mod tests { // provider block_number < lowest available block number, // i.e. state at provider block is pruned - let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( + let provider = TestHistoricalStateProviderRefRW::new_with_lowest_available_blocks( &tx, 2, LowestAvailableBlocks { @@ -804,7 +838,7 @@ mod tests { // provider block_number == lowest available block number, // i.e. state at provider block is available - let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( + let provider = TestHistoricalStateProviderRefRW::new_with_lowest_available_blocks( &tx, 2, LowestAvailableBlocks { @@ -821,7 +855,7 @@ mod tests { // provider block_number == lowest available block number, // i.e. state at provider block is available - let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( + let provider = TestHistoricalStateProviderRefRW::new_with_lowest_available_blocks( &tx, 2, LowestAvailableBlocks { diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index fdcbfc4937fe..1299db28b911 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -22,33 +22,36 @@ use reth_trie::{ }; use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, - DatabaseTrieWitness, + DatabaseTrieWitness, StateCommitment, }; +use std::marker::PhantomData; /// State provider over latest state that takes tx reference. 
#[derive(Debug)] -pub struct LatestStateProviderRef<'b, TX: DbTx> { +pub struct LatestStateProviderRef<'b, TX: DbTx, SC: StateCommitment> { /// database transaction tx: &'b TX, /// Static File provider static_file_provider: StaticFileProvider, + /// Marker to associate the `StateCommitment` type with this provider. + _marker: PhantomData, } -impl<'b, TX: DbTx> LatestStateProviderRef<'b, TX> { +impl<'b, TX: DbTx, SC: StateCommitment> LatestStateProviderRef<'b, TX, SC> { /// Create new state provider pub const fn new(tx: &'b TX, static_file_provider: StaticFileProvider) -> Self { - Self { tx, static_file_provider } + Self { tx, static_file_provider, _marker: PhantomData } } } -impl AccountReader for LatestStateProviderRef<'_, TX> { +impl AccountReader for LatestStateProviderRef<'_, TX, SC> { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { self.tx.get::(address).map_err(Into::into) } } -impl BlockHashReader for LatestStateProviderRef<'_, TX> { +impl BlockHashReader for LatestStateProviderRef<'_, TX, SC> { /// Get block hash by number. fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( @@ -84,7 +87,7 @@ impl BlockHashReader for LatestStateProviderRef<'_, TX> { } } -impl StateRootProvider for LatestStateProviderRef<'_, TX> { +impl StateRootProvider for LatestStateProviderRef<'_, TX, SC> { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { StateRoot::overlay_root(self.tx, hashed_state) .map_err(|err| ProviderError::Database(err.into())) @@ -112,7 +115,7 @@ impl StateRootProvider for LatestStateProviderRef<'_, TX> { } } -impl StorageRootProvider for LatestStateProviderRef<'_, TX> { +impl StorageRootProvider for LatestStateProviderRef<'_, TX, SC> { fn storage_root( &self, address: Address, @@ -133,7 +136,7 @@ impl StorageRootProvider for LatestStateProviderRef<'_, TX> { } } -impl StateProofProvider for LatestStateProviderRef<'_, TX> { +impl StateProofProvider for LatestStateProviderRef<'_, TX, SC> { fn proof( &self, input: TrieInput, @@ -161,7 +164,7 @@ impl StateProofProvider for LatestStateProviderRef<'_, TX> { } } -impl StateProvider for LatestStateProviderRef<'_, TX> { +impl StateProvider for LatestStateProviderRef<'_, TX, SC> { /// Get storage. fn storage( &self, @@ -185,28 +188,30 @@ impl StateProvider for LatestStateProviderRef<'_, TX> { /// State provider for the latest state. #[derive(Debug)] -pub struct LatestStateProvider { +pub struct LatestStateProvider { /// database transaction db: TX, /// Static File provider static_file_provider: StaticFileProvider, + /// Marker to associate the `StateCommitment` type with this provider. 
+ _marker: PhantomData, } -impl LatestStateProvider { +impl LatestStateProvider { /// Create new state provider pub const fn new(db: TX, static_file_provider: StaticFileProvider) -> Self { - Self { db, static_file_provider } + Self { db, static_file_provider, _marker: PhantomData } } /// Returns a new provider that takes the `TX` as reference #[inline(always)] - fn as_ref(&self) -> LatestStateProviderRef<'_, TX> { + fn as_ref(&self) -> LatestStateProviderRef<'_, TX, SC> { LatestStateProviderRef::new(&self.db, self.static_file_provider.clone()) } } // Delegates all provider impls to [LatestStateProviderRef] -delegate_provider_impls!(LatestStateProvider where [TX: DbTx]); +delegate_provider_impls!(LatestStateProvider where [TX: DbTx, SC: StateCommitment]); #[cfg(test)] mod tests { @@ -214,7 +219,7 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_latest_state_provider() { - assert_state_provider::>(); + const fn assert_latest_state_provider() { + assert_state_provider::>(); } } diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 57e111d674ba..6f45dce63245 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -13,12 +13,13 @@ use reth_primitives::{ Signature, Transaction, TransactionSigned, TxType, Withdrawal, Withdrawals, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; +use reth_trie_db::StateCommitment; use revm::{db::BundleState, primitives::AccountInfo}; use std::{str::FromStr, sync::LazyLock}; /// Assert genesis block -pub fn assert_genesis_block( - provider: &DatabaseProviderRW, +pub fn assert_genesis_block( + provider: &DatabaseProviderRW, g: SealedBlock, ) { let n = g.number; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index c7c94b939ac3..d99093ac80a5 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -34,6 +34,7 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, TrieInput, }; +use reth_trie_db::MerklePatriciaTrie; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ collections::BTreeMap, @@ -152,8 +153,8 @@ impl MockEthProvider { impl DatabaseProviderFactory for MockEthProvider { type DB = DatabaseMock; - type Provider = DatabaseProvider; - type ProviderRW = DatabaseProvider; + type Provider = DatabaseProvider; + type ProviderRW = DatabaseProvider; fn database_provider_ro(&self) -> ProviderResult { Err(ConsistentViewError::Syncing { best_block: GotExpected::new(0, 0) }.into()) diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 9a3b855ff14d..1bbcd614e77e 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -90,6 +90,13 @@ pub trait TryIntoHistoricalStateProvider { ) -> ProviderResult; } +/// Trait implemented for database providers that can be converted into a latest state provider +/// reference. +pub trait ToLatestStateProviderRef { + /// Returns a [`StateProvider`] for the latest state. + fn latest<'a>(&'a self) -> Box; +} + /// Light wrapper that returns `StateProvider` implementations that correspond to the given /// `BlockNumber`, the latest state, or the pending state. 
/// diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index f5823404c899..fffcf224d86c 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -693,8 +693,8 @@ fn storage_trie_around_extension_node() { assert_trie_updates(updates.storage_nodes_ref()); } -fn extension_node_storage_trie( - tx: &DatabaseProviderRW>, Spec>, +fn extension_node_storage_trie( + tx: &DatabaseProviderRW>, Spec, SC>, hashed_address: B256, ) -> (B256, StorageTrieUpdates) { let value = U256::from(1); @@ -721,8 +721,8 @@ fn extension_node_storage_trie( (root, trie_updates) } -fn extension_node_trie( - tx: &DatabaseProviderRW>, Spec>, +fn extension_node_trie( + tx: &DatabaseProviderRW>, Spec, SC>, ) -> B256 { let a = Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) }; let val = encode_account(a, None); From d3db6416c433e51c5f2b61c9c1c74e6df6391580 Mon Sep 17 00:00:00 2001 From: frisitano Date: Fri, 18 Oct 2024 20:09:29 +0800 Subject: [PATCH 51/51] feat: introduce HashedPostStateProvider --- Cargo.lock | 4 --- .../src/commands/debug_cmd/build_block.rs | 5 +-- .../commands/debug_cmd/in_memory_merkle.rs | 8 ++--- crates/blockchain-tree/src/blockchain_tree.rs | 14 +++++--- crates/blockchain-tree/src/chain.rs | 16 +++++++--- crates/chain-state/src/in_memory.rs | 20 ++++++++++-- crates/chain-state/src/memory_overlay.rs | 20 ++++++++++-- crates/consensus/auto-seal/Cargo.toml | 1 - crates/consensus/auto-seal/src/lib.rs | 3 +- .../engine/invalid-block-hooks/src/witness.rs | 4 +-- crates/engine/tree/src/tree/mod.rs | 19 +++++++---- crates/engine/util/src/reorg.rs | 3 +- crates/ethereum/payload/Cargo.toml | 1 - crates/ethereum/payload/src/lib.rs | 23 +++++++------ .../execution-types/src/execution_outcome.rs | 6 ++-- crates/evm/execution-types/src/lib.rs | 3 ++ crates/optimism/payload/Cargo.toml | 1 - crates/optimism/payload/src/builder.rs | 22 +++++++------ crates/revm/src/test_utils.rs | 22 ++++++++++--- crates/rpc/rpc-eth-api/Cargo.toml | 1 - .../rpc-eth-api/src/helpers/pending_block.rs | 7 ++-- crates/rpc/rpc-eth-types/src/cache/db.rs | 16 ++++++++++ .../src/providers/blockchain_provider.rs | 18 ++++++++++- .../src/providers/bundle_state_provider.rs | 32 +++++++++++++++---- .../provider/src/providers/consistent_view.rs | 7 ++-- .../provider/src/providers/database/mod.rs | 26 ++++++++++++++- .../src/providers/database/provider.rs | 29 +++++++++++++++-- .../src/providers/state/historical.rs | 22 +++++++++++-- .../provider/src/providers/state/latest.rs | 22 +++++++++++-- .../provider/src/providers/state/macros.rs | 4 +++ .../storage/provider/src/test_utils/mock.rs | 23 +++++++++++-- .../storage/provider/src/test_utils/noop.rs | 18 ++++++++++- crates/storage/provider/src/writer/mod.rs | 4 +-- crates/storage/storage-api/src/state.rs | 17 +++++++++- crates/trie/db/src/state.rs | 18 ++++++----- crates/trie/parallel/src/parallel_root.rs | 9 ++++-- crates/trie/trie/benches/hash_post_state.rs | 4 +-- crates/trie/trie/src/state.rs | 16 ++++++---- 38 files changed, 372 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fc07aed94ca2..60dc918df378 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6329,7 +6329,6 @@ dependencies = [ "reth-stages-api", "reth-tokio-util", "reth-transaction-pool", - "reth-trie", "revm-primitives", "tokio", "tokio-stream", @@ -7337,7 +7336,6 @@ dependencies = [ "reth-provider", "reth-revm", "reth-transaction-pool", - "reth-trie", "revm", "revm-primitives", "tracing", @@ -8224,7 +8222,6 @@ dependencies = [ 
"reth-revm", "reth-rpc-types-compat", "reth-transaction-pool", - "reth-trie", "revm", "revm-primitives", "sha2 0.10.8", @@ -8767,7 +8764,6 @@ dependencies = [ "reth-rpc-types-compat", "reth-tasks", "reth-transaction-pool", - "reth-trie", "revm", "revm-inspectors", "revm-primitives", diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 455d8356aff9..1940ae1e83b8 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -31,7 +31,7 @@ use reth_primitives::{ }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, - ProviderFactory, StageCheckpointReader, StateProviderFactory, + HashedPostStateProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; use reth_revm::{database::StateProviderDatabase, primitives::EnvKzgSettings}; use reth_stages::StageId; @@ -266,7 +266,8 @@ impl> Command { ExecutionOutcome::from((block_execution_output, block.number)); debug!(target: "reth::cli", ?execution_outcome, "Executed block"); - let hashed_post_state = execution_outcome.hash_state_slow(); + let hashed_post_state = + provider_factory.hashed_post_state_from_bundle_state(execution_outcome.state()); let (state_root, trie_updates) = StateRoot::overlay_root_with_updates( provider_factory.provider()?.tx_ref(), hashed_post_state.clone(), diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 81c84819af62..5cb3562d0e1c 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -21,9 +21,9 @@ use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ - writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, - HeaderProvider, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, - StorageReader, ToLatestStateProviderRef, + writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashedPostStateProvider, + HashingWriter, HeaderProvider, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StateWriter, StorageReader, ToLatestStateProviderRef, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; @@ -153,7 +153,7 @@ impl> Command { // Unpacked `BundleState::state_root_slow` function let (in_memory_state_root, in_memory_updates) = StateRoot::overlay_root_with_updates( provider.tx_ref(), - execution_outcome.hash_state_slow(), + provider.hashed_post_state_from_bundle_state(execution_outcome.state()), )?; if in_memory_state_root == block.state_root { diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 71a58aa56288..475d800feb29 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -23,8 +23,8 @@ use reth_primitives::{ use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - ChainSpecProvider, ChainSplit, ChainSplitTarget, DisplayBlocksChain, HeaderProvider, - ProviderError, StaticFileProviderFactory, + ChainSpecProvider, ChainSplit, ChainSplitTarget, DisplayBlocksChain, HashedPostStateProvider, + HeaderProvider, ProviderError, StaticFileProviderFactory, }; use 
reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; @@ -1216,7 +1216,8 @@ where recorder: &mut MakeCanonicalDurationsRecorder, ) -> Result<(), CanonicalError> { let (blocks, state, chain_trie_updates) = chain.into_inner(); - let hashed_state = state.hash_state_slow(); + let hashed_state = + self.externals.provider_factory.hashed_post_state_from_bundle_state(state.state()); let prefix_sets = hashed_state.construct_prefix_sets().freeze(); let hashed_state_sorted = hashed_state.into_sorted(); @@ -1875,7 +1876,12 @@ mod tests { ); let provider = tree.externals.provider_factory.provider().unwrap(); - let prefix_sets = exec5.hash_state_slow().construct_prefix_sets().freeze(); + let prefix_sets = tree + .externals + .provider_factory + .hashed_post_state_from_bundle_state(exec5.state()) + .construct_prefix_sets() + .freeze(); let state_root = StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap(); assert_eq!(state_root, block5.state_root); diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 393e525d5ae2..db350e0185b2 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -18,10 +18,11 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView, ProviderNodeTypes}, - FullExecutionDataProvider, ProviderError, StateRootProvider, TryIntoHistoricalStateProvider, + FullExecutionDataProvider, HashedPostStateProvider, ProviderError, StateRootProvider, + TryIntoHistoricalStateProvider, }; use reth_revm::database::StateProviderDatabase; -use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; +use reth_trie::{updates::TrieUpdates, TrieInput}; use reth_trie_parallel::parallel_root::ParallelStateRoot; use std::{ collections::BTreeMap, @@ -227,14 +228,19 @@ impl AppendableChain { execution_outcome.extend(initial_execution_outcome.clone()); ParallelStateRoot::new( consistent_view, - TrieInput::from_state(execution_outcome.hash_state_slow()), + TrieInput::from_state( + externals + .provider_factory + .hashed_post_state_from_bundle_state(execution_outcome.state()), + ), ) .incremental_root_with_updates() .map(|(root, updates)| (root, Some(updates))) .map_err(ProviderError::from)? 
} else { - let hashed_state = - HashedPostState::from_bundle_state(&initial_execution_outcome.state().state); + let hashed_state = externals + .provider_factory + .hashed_post_state_from_bundle_state(initial_execution_outcome.state()); let state_root = provider.state_root(hashed_state)?; (state_root, None) }; diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index f157da5ff450..195d2b5fbf67 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -870,8 +870,8 @@ mod tests { use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode, Receipt, Requests}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, - StorageRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateRootProvider, StorageRootProvider, }; use reth_trie::{AccountProof, HashedStorage, MultiProof, StorageProof, TrieInput}; @@ -1012,6 +1012,22 @@ mod tests { } } + impl HashedPostStateProvider for MockStateProvider { + fn hashed_post_state_from_bundle_state( + &self, + _bundle_state: &reth_execution_types::BundleState, + ) -> HashedPostState { + HashedPostState::default() + } + + fn hashed_post_state_from_reverts( + &self, + _block_number: BlockNumber, + ) -> ProviderResult { + Ok(HashedPostState::default()) + } + } + #[test] fn test_in_memory_state_impl_state_by_hash() { let mut state_by_hash = HashMap::default(); diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index eb125dad115e..b19ae8508be9 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -7,8 +7,8 @@ use alloy_primitives::{ use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateProviderBox, - StateRootProvider, StorageRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateProviderBox, StateRootProvider, StorageRootProvider, }; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, @@ -188,6 +188,22 @@ impl StateProofProvider for MemoryOverlayStateProvider { } } +impl HashedPostStateProvider for MemoryOverlayStateProvider { + fn hashed_post_state_from_bundle_state( + &self, + bundle_state: &reth_execution_types::BundleState, + ) -> HashedPostState { + self.historical.hashed_post_state_from_bundle_state(bundle_state) + } + + fn hashed_post_state_from_reverts( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + self.historical.hashed_post_state_from_reverts(block_number) + } +} + impl StateProvider for MemoryOverlayStateProvider { fn storage( &self, diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index b4b281230336..74ee7447d231 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -28,7 +28,6 @@ reth-engine-primitives.workspace = true reth-consensus.workspace = true reth-network-peers.workspace = true reth-tokio-util.workspace = true -reth-trie.workspace = true # ethereum alloy-primitives.workspace = true diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 261227f1074e..7b4d0acc0b40 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -31,7 +31,6 @@ use 
reth_primitives::{ use reth_provider::{BlockReaderIdExt, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::TransactionPool; -use reth_trie::HashedPostState; use revm_primitives::calc_excess_blob_gas; use std::{ collections::HashMap, @@ -383,7 +382,7 @@ impl StorageInner { executor.executor(&mut db).execute((&block, U256::ZERO).into())?; let gas_used = block_execution_output.gas_used; let execution_outcome = ExecutionOutcome::from((block_execution_output, block.number)); - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + let hashed_state = db.0.hashed_post_state_from_bundle_state(execution_outcome.state()); // todo(onbjerg): we should not pass requests around as this is building a block, which // means we need to extract the requests from the execution output and compute the requests diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index bb227e304198..6ea682970e55 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -18,7 +18,7 @@ use reth_revm::{ }; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; -use reth_trie::{updates::TrieUpdates, HashedPostState, HashedStorage}; +use reth_trie::{updates::TrieUpdates, HashedStorage}; use serde::Serialize; /// Generates a witness for the given block and saves it to a file. @@ -128,7 +128,7 @@ where // // Note: We grab *all* accounts in the cache here, as the `BundleState` prunes // referenced accounts + storage slots. - let mut hashed_state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut hashed_state = db.database.hashed_post_state_from_bundle_state(&bundle_state); for (address, account) in db.cache.accounts { let hashed_address = keccak256(address); hashed_state diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 3eadbbd522db..aeaf25289762 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -37,8 +37,8 @@ use reth_primitives::{ }; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, - ProviderError, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - TransactionVariant, + HashedPostStateProvider, ProviderError, StateProviderBox, StateProviderFactory, StateReader, + StateRootProvider, TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; @@ -531,8 +531,14 @@ impl std::fmt::Debug impl EngineApiTreeHandler where - P: DatabaseProviderFactory + BlockReader + StateProviderFactory + StateReader + Clone + 'static, -
<P as DatabaseProviderFactory>
::Provider: BlockReader, + P: DatabaseProviderFactory + + BlockReader + + StateProviderFactory + + StateReader + + HashedPostStateProvider + + Clone + + 'static, +
<P as DatabaseProviderFactory>
::Provider: BlockReader + HashedPostStateProvider, E: BlockExecutorProvider, T: EngineTypes, Spec: Send + Sync + EthereumHardforks + 'static, @@ -1530,7 +1536,8 @@ where .provider .get_state(block.number)? .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number))?; - let hashed_state = execution_output.hash_state_slow(); + let hashed_state = + self.provider.hashed_post_state_from_bundle_state(execution_output.state()); Ok(Some(ExecutedBlock { block: Arc::new(block), @@ -2194,7 +2201,7 @@ where return Err(err.into()) } - let hashed_state = HashedPostState::from_bundle_state(&output.state.state); + let hashed_state = self.provider.hashed_post_state_from_bundle_state(&output.state); trace!(target: "engine::tree", block=?BlockNumHash::new(block_number, block_hash), "Calculating block state root"); let root_time = Instant::now(); diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index abfa23a57b32..1c52d944b53d 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -22,7 +22,6 @@ use reth_revm::{ DatabaseCommit, }; use reth_rpc_types_compat::engine::payload::block_to_payload; -use reth_trie::HashedPostState; use revm_primitives::{ calc_excess_blob_gas, BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, }; @@ -365,7 +364,7 @@ where reorg_target.number, Default::default(), ); - let hashed_state = HashedPostState::from_bundle_state(&outcome.state().state); + let hashed_state = state_provider.hashed_post_state_from_bundle_state(outcome.state()); let (blob_gas_used, excess_blob_gas) = if chain_spec.is_cancun_active_at_timestamp(reorg_target.timestamp) { diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index ce37a4f8ea4f..721a99f049d9 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -24,7 +24,6 @@ reth-basic-payload-builder.workspace = true reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-errors.workspace = true -reth-trie.workspace = true reth-chain-state.workspace = true reth-chainspec.workspace = true diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index dcf54fc02489..36652a1c098b 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -34,7 +34,6 @@ use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, }; -use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, @@ -353,16 +352,20 @@ where let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); - let (state_root, trie_output) = { + + let (state_root, trie_output, hashed_state) = { let state_provider = db.database.0.inner.borrow_mut(); - state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to calculate state root for payload" - ); - })? 
+ let hashed_state = + state_provider.db.hashed_post_state_from_bundle_state(execution_outcome.state()); + let (state_root, trie_output) = + state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to calculate state root for payload" + ); + })?; + (state_root, trie_output, hashed_state) }; // create the block header diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 08ddf9e4167b..c58411fcdded 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,7 +1,7 @@ use crate::BlockExecutionOutput; use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; use reth_primitives::{logs_bloom, Account, Bytecode, Receipt, Receipts, Requests, StorageEntry}; -use reth_trie::HashedPostState; +use reth_trie::{HashedPostState, KeyHasher}; use revm::{ db::{states::BundleState, BundleAccount}, primitives::AccountInfo, @@ -162,8 +162,8 @@ impl ExecutionOutcome { /// Returns [`HashedPostState`] for this execution outcome. /// See [`HashedPostState::from_bundle_state`] for more info. - pub fn hash_state_slow(&self) -> HashedPostState { - HashedPostState::from_bundle_state(&self.bundle.state) + pub fn hash_state_slow(&self) -> HashedPostState { + HashedPostState::from_bundle_state::(&self.bundle.state) } /// Transform block number to the index of block. diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index f98ebfe73a5f..c5adafecb1da 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -19,6 +19,9 @@ pub use execute::*; mod execution_outcome; pub use execution_outcome::*; +// Re-export commonly used execution types from `revm`. +pub use revm::db::{BundleAccount, BundleState}; + /// Bincode-compatible serde implementations for commonly used types for (EVM) block execution. 
/// /// `bincode` crate doesn't work with optionally serializable serde fields, but some of the diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 46cc82edb6ce..4f7db968ecff 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -24,7 +24,6 @@ reth-execution-types.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-basic-payload-builder.workspace = true -reth-trie.workspace = true reth-chain-state.workspace = true # op-reth diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index e590635f524e..c4cf45b9e3b4 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -24,7 +24,6 @@ use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, }; -use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, @@ -451,16 +450,19 @@ where let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); - let (state_root, trie_output) = { + let (state_root, trie_output, hashed_state) = { let state_provider = db.database.0.inner.borrow_mut(); - state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to calculate state root for payload" - ); - })? + let hashed_state = + state_provider.db.hashed_post_state_from_bundle_state(execution_outcome.state()); + let (state_root, trie_output) = + state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to calculate state root for payload" + ); + })?; + (state_root, trie_output, hashed_state) }; // create the block header diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 813997c72d11..f15398a81cfa 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -6,14 +6,15 @@ use alloy_primitives::{ }; use reth_primitives::{Account, Bytecode}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, - StorageRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateRootProvider, StorageRootProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, - TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, KeccakKeyHasher, + MultiProof, StorageProof, TrieInput, }; +use revm::db::BundleState; /// Mock state for testing #[derive(Debug, Default, Clone, Eq, PartialEq)] @@ -141,6 +142,19 @@ impl StateProofProvider for StateProviderTest { } } +impl HashedPostStateProvider for StateProviderTest { + fn hashed_post_state_from_bundle_state(&self, bundle_state: &BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::(&bundle_state.state) + } + + fn hashed_post_state_from_reverts( + &self, + _block_number: BlockNumber, + ) -> ProviderResult { + unimplemented!("reverts are not 
supported") + } +} + impl StateProvider for StateProviderTest { fn storage( &self, diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 9d0f6cfd83d6..2044fe132678 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -29,7 +29,6 @@ reth-execution-types.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-api.workspace = true -reth-trie.workspace = true # ethereum alloy-eips.workspace = true diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 832cf17055a5..78187b3e2c5b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -31,7 +31,6 @@ use reth_revm::{ }; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; -use reth_trie::HashedPostState; use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; use tokio::sync::Mutex; use tracing::debug; @@ -398,7 +397,10 @@ pub trait LoadPendingBlock: EthApiTypes { block_number, Vec::new(), ); - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + + let state_provider = &db.database; + let hashed_state = + state_provider.hashed_post_state_from_bundle_state(execution_outcome.state()); let receipts_root = self.receipts_root(&block_env, &execution_outcome, block_number); @@ -406,7 +408,6 @@ pub trait LoadPendingBlock: EthApiTypes { execution_outcome.block_logs_bloom(block_number).expect("Block is present"); // calculate the state root - let state_provider = &db.database; let state_root = state_provider.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 7422dcfb8a7b..fafa9fe7d721 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -130,6 +130,22 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { } } +impl reth_storage_api::HashedPostStateProvider for StateProviderTraitObjWrapper<'_> { + fn hashed_post_state_from_bundle_state( + &self, + bundle_state: &reth_execution_types::BundleState, + ) -> reth_trie::HashedPostState { + self.0.hashed_post_state_from_bundle_state(bundle_state) + } + + fn hashed_post_state_from_reverts( + &self, + block_number: alloy_primitives::BlockNumber, + ) -> ProviderResult { + self.0.hashed_post_state_from_reverts(block_number) + } +} + impl StateProvider for StateProviderTraitObjWrapper<'_> { fn account_balance( &self, diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 6e39a3697b30..0be97cc67008 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -27,7 +27,7 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::StorageChangeSetReader; +use reth_storage_api::{HashedPostStateProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; use revm::{ db::states::PlainStorageRevert, @@ -1707,6 +1707,22 @@ impl StateReader for BlockchainProvider2 { } } +impl HashedPostStateProvider for BlockchainProvider2 { 
+ fn hashed_post_state_from_bundle_state( + &self, + bundle_state: &reth_execution_types::BundleState, + ) -> reth_trie::HashedPostState { + self.database.hashed_post_state_from_bundle_state(bundle_state) + } + + fn hashed_post_state_from_reverts( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + self.database.hashed_post_state_from_reverts(block_number) + } +} + #[cfg(test)] mod tests { use std::{ diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index be6549033cde..8c45a0133093 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -6,7 +6,7 @@ use alloy_primitives::{ Address, BlockNumber, Bytes, B256, }; use reth_primitives::{Account, Bytecode}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{HashedPostStateProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, @@ -86,7 +86,7 @@ impl StateRootProvider { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut state = self.state_provider.hashed_post_state_from_bundle_state(bundle_state); state.extend(hashed_state); self.state_provider.state_root(state) } @@ -100,7 +100,7 @@ impl StateRootProvider hashed_state: HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut state = self.state_provider.hashed_post_state_from_bundle_state(bundle_state); state.extend(hashed_state); self.state_provider.state_root_with_updates(state) } @@ -110,7 +110,7 @@ impl StateRootProvider mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.state_provider.hashed_post_state_from_bundle_state(bundle_state)); self.state_provider.state_root_from_nodes_with_updates(input) } } @@ -150,7 +150,7 @@ impl StateProofProvider slots: &[B256], ) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.state_provider.hashed_post_state_from_bundle_state(bundle_state)); self.state_provider.proof(input, address, slots) } @@ -160,7 +160,7 @@ impl StateProofProvider targets: HashMap>, ) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.state_provider.hashed_post_state_from_bundle_state(bundle_state)); self.state_provider.multiproof(input, targets) } @@ -170,11 +170,29 @@ impl StateProofProvider target: HashedPostState, ) -> ProviderResult> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + 
input.prepend(self.state_provider.hashed_post_state_from_bundle_state(bundle_state)); self.state_provider.witness(input, target) } } +impl HashedPostStateProvider + for BundleStateProvider +{ + fn hashed_post_state_from_bundle_state( + &self, + bundle_state: &reth_execution_types::BundleState, + ) -> HashedPostState { + self.state_provider.hashed_post_state_from_bundle_state(bundle_state) + } + + fn hashed_post_state_from_reverts( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + self.state_provider.hashed_post_state_from_reverts(block_number) + } +} + impl StateProvider for BundleStateProvider { fn storage( &self, diff --git a/crates/storage/provider/src/providers/consistent_view.rs b/crates/storage/provider/src/providers/consistent_view.rs index 4640f4603354..330b3abba3cd 100644 --- a/crates/storage/provider/src/providers/consistent_view.rs +++ b/crates/storage/provider/src/providers/consistent_view.rs @@ -2,11 +2,10 @@ use crate::{BlockNumReader, DatabaseProviderFactory, HeaderProvider}; use alloy_primitives::B256; use reth_errors::ProviderError; use reth_primitives::GotExpected; -use reth_storage_api::{BlockReader, DBProvider}; +use reth_storage_api::{BlockReader, HashedPostStateProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::HashedPostState; -use reth_trie_db::DatabaseHashedPostState; pub use reth_storage_errors::provider::ConsistentViewError; @@ -33,7 +32,7 @@ pub struct ConsistentDbView { impl ConsistentDbView where - Factory: DatabaseProviderFactory, + Factory: DatabaseProviderFactory, { /// Creates new consistent database view. pub const fn new(factory: Factory, tip: Option) -> Self { @@ -59,7 +58,7 @@ where { Ok(HashedPostState::default()) } else { - Ok(HashedPostState::from_reverts(provider.tx_ref(), block_number + 1)?) + Ok(provider.hashed_post_state_from_reverts(block_number + 1)?) 
} } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 62e04c2cf651..62a5829258b3 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -23,8 +23,10 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::TryIntoHistoricalStateProvider; +use reth_storage_api::{HashedPostStateProvider, TryIntoHistoricalStateProvider}; use reth_storage_errors::provider::ProviderResult; +use reth_trie::HashedPostState; +use reth_trie_db::{DatabaseHashedPostState, StateCommitment}; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ ops::{RangeBounds, RangeInclusive}, @@ -623,6 +625,28 @@ impl PruneCheckpointReader for ProviderFactory { } } +impl HashedPostStateProvider for ProviderFactory { + fn hashed_post_state_from_bundle_state( + &self, + bundle_state: &reth_execution_types::BundleState, + ) -> reth_trie::HashedPostState { + HashedPostState::from_bundle_state::<::KeyHasher>( + &bundle_state.state, + ) + } + + fn hashed_post_state_from_reverts( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + HashedPostState::from_reverts::<::KeyHasher>( + &self.db.tx().map_err(Into::::into)?, + block_number, + ) + .map_err(Into::into) + } +} + impl Clone for ProviderFactory { fn clone(&self) -> Self { Self { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 1c3042d5e4a6..aa9fe269e0e6 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -48,14 +48,18 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider}; +use reth_storage_api::{ + HashedPostStateProvider, StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider, +}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, updates::{StorageTrieUpdates, TrieUpdates}, - HashedPostStateSorted, Nibbles, StateRoot, StoredNibbles, + HashedPostState, HashedPostStateSorted, Nibbles, StateRoot, StoredNibbles, +}; +use reth_trie_db::{ + DatabaseHashedPostState, DatabaseStateRoot, DatabaseStorageTrieCursor, StateCommitment, }; -use reth_trie_db::{DatabaseStateRoot, DatabaseStorageTrieCursor, StateCommitment}; use revm::{ db::states::{PlainStateReverts, PlainStorageChangeset, PlainStorageRevert, StateChangeset}, primitives::{BlockEnv, CfgEnvWithHandlerCfg}, @@ -3758,6 +3762,25 @@ impl ChainStateBlockWriter } } +impl HashedPostStateProvider + for DatabaseProvider +{ + fn hashed_post_state_from_bundle_state( + &self, + bundle_state: &reth_execution_types::BundleState, + ) -> reth_trie::HashedPostState { + HashedPostState::from_bundle_state::(&bundle_state.state) + } + + fn hashed_post_state_from_reverts( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + HashedPostState::from_reverts::(self.tx_ref(), block_number) + .map_err(Into::into) + } +} + impl DBProvider for DatabaseProvider { diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 
967343a9763c..6f5ee4403e5f 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -14,7 +14,7 @@ use reth_db_api::{ transaction::DbTx, }; use reth_primitives::{constants::EPOCH_SLOTS, Account, Bytecode, StaticFileSegment}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{HashedPostStateProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ proof::{Proof, StorageProof}, @@ -159,7 +159,7 @@ impl<'b, TX: DbTx, SC: StateCommitment> HistoricalStateProviderRef<'b, TX, SC> { ); } - Ok(HashedPostState::from_reverts(self.tx, self.block_number)?) + Ok(HashedPostState::from_reverts::(self.tx, self.block_number)?) } /// Retrieve revert hashed storage for this history provider and target address. @@ -410,6 +410,24 @@ impl StateProofProvider for HistoricalStateProvid } } +impl HashedPostStateProvider + for HistoricalStateProviderRef<'_, TX, SC> +{ + fn hashed_post_state_from_bundle_state( + &self, + bundle_state: &reth_execution_types::BundleState, + ) -> HashedPostState { + HashedPostState::from_bundle_state::(&bundle_state.state) + } + + fn hashed_post_state_from_reverts( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + HashedPostState::from_reverts::(self.tx, block_number).map_err(Into::into) + } +} + impl StateProvider for HistoricalStateProviderRef<'_, TX, SC> { /// Get storage. fn storage( diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 1299db28b911..f20f00954fab 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -12,7 +12,7 @@ use reth_db_api::{ transaction::DbTx, }; use reth_primitives::{Account, Bytecode, StaticFileSegment}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{HashedPostStateProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ proof::{Proof, StorageProof}, @@ -21,8 +21,8 @@ use reth_trie::{ AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, }; use reth_trie_db::{ - DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, - DatabaseTrieWitness, StateCommitment, + DatabaseHashedPostState, DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, + DatabaseStorageRoot, DatabaseTrieWitness, StateCommitment, }; use std::marker::PhantomData; @@ -164,6 +164,22 @@ impl StateProofProvider for LatestStateProviderRe } } +impl HashedPostStateProvider for LatestStateProviderRef<'_, TX, SC> { + fn hashed_post_state_from_bundle_state( + &self, + bundle_state: &reth_execution_types::BundleState, + ) -> HashedPostState { + HashedPostState::from_bundle_state::(&bundle_state.state) + } + + fn hashed_post_state_from_reverts( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + HashedPostState::from_reverts::(self.tx, block_number).map_err(Into::into) + } +} + impl StateProvider for LatestStateProviderRef<'_, TX, SC> { /// Get storage. 
fn storage( diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index b90924354c43..424565340b11 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -56,6 +56,10 @@ macro_rules! delegate_provider_impls { fn multiproof(&self, input: reth_trie::TrieInput, targets: alloy_primitives::map::HashMap>) -> reth_storage_errors::provider::ProviderResult; fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult>; } + HashedPostStateProvider $(where [$($generics)*])? { + fn hashed_post_state_from_reverts(&self, block_number: alloy_primitives::BlockNumber) -> reth_storage_errors::provider::ProviderResult; + fn hashed_post_state_from_bundle_state(&self, bundle_state: &revm::db::BundleState) -> reth_trie::HashedPostState; + } ); } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index d99093ac80a5..479b075deced 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -27,12 +27,13 @@ use reth_primitives::{ }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ - DatabaseProviderFactory, StageCheckpointReader, StateProofProvider, StorageRootProvider, + DatabaseProviderFactory, HashedPostStateProvider, StageCheckpointReader, StateProofProvider, + StorageRootProvider, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, - TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, KeccakKeyHasher, + MultiProof, StorageProof, TrieInput, }; use reth_trie_db::MerklePatriciaTrie; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; @@ -670,6 +671,22 @@ impl StateProofProvider for MockEthProvider { } } +impl HashedPostStateProvider for MockEthProvider { + fn hashed_post_state_from_bundle_state( + &self, + bundle_state: &reth_execution_types::BundleState, + ) -> HashedPostState { + HashedPostState::from_bundle_state::(&bundle_state.state) + } + + fn hashed_post_state_from_reverts( + &self, + _block_number: BlockNumber, + ) -> ProviderResult { + unimplemented!("hashed_post_state_from_reverts not supported for MockEthProvider") + } +} + impl StateProvider for MockEthProvider { fn storage( &self, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 0a205389c9b6..dd4091a2c07e 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -24,7 +24,7 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{HashedPostStateProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, @@ -394,6 +394,22 @@ impl StateProofProvider for NoopProvider { } } +impl HashedPostStateProvider for NoopProvider { + fn hashed_post_state_from_bundle_state( + &self, + _bundle_state: &reth_execution_types::BundleState, + ) -> HashedPostState { + HashedPostState::default() + } + + fn 
hashed_post_state_from_reverts( + &self, + _block_number: BlockNumber, + ) -> ProviderResult { + Ok(HashedPostState::default()) + } +} + impl StateProvider for NoopProvider { fn storage( &self, diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 5b16b2da4e5a..562f77788735 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -555,7 +555,7 @@ mod tests { use reth_storage_api::DatabaseProviderFactory; use reth_trie::{ test_utils::{state_root, storage_root_prehashed}, - HashedPostState, HashedStorage, StateRoot, StorageRoot, + HashedPostState, HashedStorage, KeccakKeyHasher, StateRoot, StorageRoot, }; use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot}; use revm::{ @@ -1447,7 +1447,7 @@ mod tests { 0, Vec::new() ) - .hash_state_slow(), + .hash_state_slow::(), ) .unwrap(), state_root(expected.clone().into_iter().map(|(address, (account, storage))| ( diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 1bbcd614e77e..f2eebdaed548 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -5,9 +5,10 @@ use super::{ use alloy_eips::{BlockId, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue, B256, U256}; use auto_impl::auto_impl; -use reth_execution_types::ExecutionOutcome; +use reth_execution_types::{BundleState, ExecutionOutcome}; use reth_primitives::{Bytecode, KECCAK_EMPTY}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_trie::HashedPostState; /// Type alias of boxed [`StateProvider`]. pub type StateProviderBox = Box; @@ -20,6 +21,7 @@ pub trait StateProvider: + StateRootProvider + StorageRootProvider + StateProofProvider + + HashedPostStateProvider + Send + Sync { @@ -174,6 +176,19 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult>; } +/// Trait that provides the hashed state from various sources. +#[auto_impl::auto_impl(&, Box, Arc)] +pub trait HashedPostStateProvider { + /// Returns the `HashedPostState` of the provided `BundleState`. + fn hashed_post_state_from_bundle_state(&self, bundle_state: &BundleState) -> HashedPostState; + + /// Returns the `HashedPostState` for the given block number. + fn hashed_post_state_from_reverts( + &self, + block_number: BlockNumber, + ) -> ProviderResult; +} + /// Blockchain trait provider that gives access to the blockchain state that is not yet committed /// (pending). 
pub trait BlockchainTreePendingStateProvider: Send + Sync { diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 4d46183dfda4..0a1aa5fbce95 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -1,5 +1,5 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory, PrefixSetLoader}; -use alloy_primitives::{keccak256, Address, BlockNumber, B256, U256}; +use alloy_primitives::{Address, BlockNumber, B256, U256}; use reth_db::tables; use reth_db_api::{ cursor::DbCursorRO, @@ -11,7 +11,8 @@ use reth_primitives::Account; use reth_storage_errors::db::DatabaseError; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory, - updates::TrieUpdates, HashedPostState, HashedStorage, StateRoot, StateRootProgress, TrieInput, + updates::TrieUpdates, HashedPostState, HashedStorage, KeyHasher, StateRoot, StateRootProgress, + TrieInput, }; use std::{ collections::{hash_map, HashMap}, @@ -126,7 +127,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { pub trait DatabaseHashedPostState: Sized { /// Initializes [`HashedPostState`] from reverts. Iterates over state reverts from the specified /// block up to the current tip and aggregates them into hashed state in reverse. - fn from_reverts(tx: &TX, from: BlockNumber) -> Result; + fn from_reverts(tx: &TX, from: BlockNumber) -> Result; } impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> @@ -220,7 +221,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } impl DatabaseHashedPostState for HashedPostState { - fn from_reverts(tx: &TX, from: BlockNumber) -> Result { + fn from_reverts(tx: &TX, from: BlockNumber) -> Result { // Iterate over account changesets and record value before first occurring account change. let mut accounts = HashMap::>::default(); let mut account_changesets_cursor = tx.cursor_read::()?; @@ -245,19 +246,19 @@ impl DatabaseHashedPostState for HashedPostState { } let hashed_accounts = - accounts.into_iter().map(|(address, info)| (keccak256(address), info)).collect(); + accounts.into_iter().map(|(address, info)| (KH::hash_key(address), info)).collect(); let hashed_storages = storages .into_iter() .map(|(address, storage)| { ( - keccak256(address), + KH::hash_key(address), HashedStorage::from_iter( // The `wiped` flag indicates only whether previous storage entries // should be looked up in db or not. For reverts it's a noop since all // wiped changes had been written as storage reverts. 
false, - storage.into_iter().map(|(slot, value)| (keccak256(slot), value)), + storage.into_iter().map(|(slot, value)| (KH::hash_key(slot), value)), ), ) }) @@ -274,6 +275,7 @@ mod tests { use reth_db::test_utils::create_test_rw_db; use reth_db_api::database::Database; use reth_primitives::revm_primitives::AccountInfo; + use reth_trie::KeccakKeyHasher; use revm::db::BundleState; #[test] @@ -294,7 +296,7 @@ mod tests { .build(); assert_eq!(bundle_state.reverts.len(), 1); - let post_state = HashedPostState::from_bundle_state(&bundle_state.state); + let post_state = HashedPostState::from_bundle_state::(&bundle_state.state); assert_eq!(post_state.accounts.len(), 2); assert_eq!(post_state.storages.len(), 2); diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/parallel_root.rs index e432b91062ca..3ac976b5ca98 100644 --- a/crates/trie/parallel/src/parallel_root.rs +++ b/crates/trie/parallel/src/parallel_root.rs @@ -6,7 +6,8 @@ use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; use reth_execution_errors::StorageRootError; use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, + HashedPostStateProvider, ProviderError, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -57,7 +58,11 @@ impl ParallelStateRoot { impl ParallelStateRoot where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, + Factory: DatabaseProviderFactory + + Clone + + Send + + Sync + + 'static, { /// Calculate incremental state root in parallel. pub fn incremental_root(self) -> Result { diff --git a/crates/trie/trie/benches/hash_post_state.rs b/crates/trie/trie/benches/hash_post_state.rs index 6e913ef78a3c..7111a785f469 100644 --- a/crates/trie/trie/benches/hash_post_state.rs +++ b/crates/trie/trie/benches/hash_post_state.rs @@ -2,7 +2,7 @@ use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; -use reth_trie::{HashedPostState, HashedStorage}; +use reth_trie::{HashedPostState, HashedStorage, KeccakKeyHasher}; use revm::db::{states::BundleBuilder, BundleAccount}; pub fn hash_post_state(c: &mut Criterion) { @@ -19,7 +19,7 @@ pub fn hash_post_state(c: &mut Criterion) { // parallel group.bench_function(BenchmarkId::new("parallel hashing", size), |b| { - b.iter(|| HashedPostState::from_bundle_state(&state)) + b.iter(|| HashedPostState::from_bundle_state::(&state)) }); } } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 2af48dfff798..c53d2fd71aa8 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -6,6 +6,7 @@ use alloy_primitives::{keccak256, Address, B256, U256}; use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_primitives::Account; +use reth_trie_common::KeyHasher; use revm::db::{states::CacheAccount, AccountStatus, BundleAccount}; use std::{ borrow::Cow, @@ -25,13 +26,13 @@ impl HashedPostState { /// Initialize [`HashedPostState`] from bundle state. /// Hashes all changed accounts and storage entries that are currently stored in the bundle /// state. 
- pub fn from_bundle_state<'a>( + pub fn from_bundle_state<'a, KH: KeyHasher>( state: impl IntoParallelIterator, ) -> Self { let hashed = state .into_par_iter() .map(|(address, account)| { - let hashed_address = keccak256(address); + let hashed_address = KH::hash_key(address); let hashed_account = account.info.clone().map(Into::into); let hashed_storage = HashedStorage::from_plain_storage( account.status, @@ -52,13 +53,13 @@ impl HashedPostState { /// Initialize [`HashedPostState`] from cached state. /// Hashes all changed accounts and storage entries that are currently stored in cache. - pub fn from_cache_state<'a>( + pub fn from_cache_state<'a, KH: KeyHasher>( state: impl IntoParallelIterator, ) -> Self { let hashed = state .into_par_iter() .map(|(address, account)| { - let hashed_address = keccak256(address); + let hashed_address = KH::hash_key(address); let hashed_account = account.account.as_ref().map(|a| a.info.clone().into()); let hashed_storage = HashedStorage::from_plain_storage( account.status, @@ -347,7 +348,8 @@ impl HashedStorageSorted { #[cfg(test)] mod tests { - use alloy_primitives::Bytes; + use alloy_primitives::{keccak256, Bytes}; + use reth_trie_common::KeccakKeyHasher; use revm::{ db::{ states::{plain_account::PlainStorage, StorageSlot}, @@ -463,7 +465,7 @@ mod tests { let state = vec![(&address, &account)]; // Convert the bundle state into a hashed post state. - let hashed_state = HashedPostState::from_bundle_state(state); + let hashed_state = HashedPostState::from_bundle_state::(state); // Validate the hashed post state. assert_eq!(hashed_state.accounts.len(), 1); @@ -502,7 +504,7 @@ mod tests { let state = vec![(&address, &account)]; // Convert the cache state into a hashed post state. - let hashed_state = HashedPostState::from_cache_state(state); + let hashed_state = HashedPostState::from_cache_state::(state); // Validate the hashed post state. assert_eq!(hashed_state.accounts.len(), 1);
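For context on the trait introduced in `crates/storage/storage-api/src/state.rs`, the sketch below shows what a minimal implementation can look like. It is not part of the patch: `ExampleProvider` is a hypothetical type, and the body simply delegates to `HashedPostState::from_bundle_state` with the keccak-based key hasher, mirroring the mock and test providers touched by this series; a database-backed provider would instead derive the hasher from its `StateCommitment` and walk changesets for the reverts case.

```rust
use alloy_primitives::BlockNumber;
use reth_storage_api::HashedPostStateProvider;
use reth_storage_errors::provider::ProviderResult;
use reth_trie::{HashedPostState, KeccakKeyHasher};
use revm::db::BundleState;

/// Hypothetical in-memory provider used only for this sketch.
struct ExampleProvider;

impl HashedPostStateProvider for ExampleProvider {
    fn hashed_post_state_from_bundle_state(&self, bundle_state: &BundleState) -> HashedPostState {
        // Hash every changed account and storage slot with the keccak key hasher,
        // as the database-backed providers in this series do.
        HashedPostState::from_bundle_state::<KeccakKeyHasher>(&bundle_state.state)
    }

    fn hashed_post_state_from_reverts(
        &self,
        _block_number: BlockNumber,
    ) -> ProviderResult<HashedPostState> {
        // A real provider would aggregate state reverts from `block_number` up to
        // the tip (see `DatabaseHashedPostState::from_reverts`); this in-memory
        // sketch has no reverts to replay.
        Ok(HashedPostState::default())
    }
}
```

Callers then obtain the hashed state via `provider.hashed_post_state_from_bundle_state(execution_outcome.state())` rather than hashing with keccak unconditionally, which is what allows alternative `StateCommitment`/`KeyHasher` implementations to plug in.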