diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index b313c32ce33d..8d53f457af91 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -35,7 +35,6 @@ exclude_crates=(
   reth-ethereum-payload-builder
   reth-etl
   reth-evm-ethereum
-  reth-evm-optimism
   reth-execution-errors
   reth-exex
   reth-exex-test-utils
@@ -49,8 +48,9 @@ exclude_crates=(
   reth-node-ethereum
   reth-node-events
   reth-node-metrics
-  reth-node-optimism
   reth-optimism-cli
+  reth-optimism-evm
+  reth-optimism-node
   reth-optimism-payload-builder
   reth-optimism-rpc
   reth-payload-builder
diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml
index 09b20fb1ef8d..ddf1383ff4d9 100644
--- a/.github/assets/hive/expected_failures.yaml
+++ b/.github/assets/hive/expected_failures.yaml
@@ -20,7 +20,24 @@ rpc-compat:
   - eth_getBlockByNumber/get-latest (reth)
   - eth_getBlockByNumber/get-safe (reth)
 
-# https://github.com/paradigmxyz/reth/issues/8732
+  - eth_createAccessList/create-al-contract-eip1559 (reth)
+  - eth_createAccessList/create-al-contract (reth)
+  - eth_getProof/get-account-proof-blockhash (reth)
+  - eth_getProof/get-account-proof-latest (reth)
+  - eth_getProof/get-account-proof-with-storage (reth)
+  - eth_getTransactionByBlockHashAndIndex/get-block-n (reth)
+  - eth_getTransactionByBlockNumberAndIndex/get-block-n (reth)
+  - eth_getTransactionByHash/get-access-list (reth)
+  - eth_getTransactionByHash/get-blob-tx (reth)
+  - eth_getTransactionByHash/get-dynamic-fee (reth)
+  - eth_getTransactionByHash/get-legacy-create (reth)
+  - eth_getTransactionByHash/get-legacy-input (reth)
+  - eth_getTransactionByHash/get-legacy-tx (reth)
+  - eth_getTransactionReceipt/get-legacy-contract (reth)
+  - eth_getTransactionReceipt/get-legacy-input (reth)
+  - eth_getTransactionReceipt/get-legacy-receipt (reth)
+
+# https://github.com/paradigmxyz/reth/issues/8732
 engine-withdrawals:
   - Withdrawals Fork On Genesis (Paris) (reth)
   - Withdrawals Fork on Block 1 (Paris) (reth)
diff --git a/.github/assets/hive/expected_failures_experimental.yaml b/.github/assets/hive/expected_failures_experimental.yaml
index d4b3d2bcbd3c..91fd1a88ab83 100644
--- a/.github/assets/hive/expected_failures_experimental.yaml
+++ b/.github/assets/hive/expected_failures_experimental.yaml
@@ -20,6 +20,23 @@ rpc-compat:
   - eth_getBlockByNumber/get-latest (reth)
   - eth_getBlockByNumber/get-safe (reth)
 
+  - eth_createAccessList/create-al-contract-eip1559 (reth)
+  - eth_createAccessList/create-al-contract (reth)
+  - eth_getProof/get-account-proof-blockhash (reth)
+  - eth_getProof/get-account-proof-latest (reth)
+  - eth_getProof/get-account-proof-with-storage (reth)
+  - eth_getTransactionByBlockHashAndIndex/get-block-n (reth)
+  - eth_getTransactionByBlockNumberAndIndex/get-block-n (reth)
+  - eth_getTransactionByHash/get-access-list (reth)
+  - eth_getTransactionByHash/get-blob-tx (reth)
+  - eth_getTransactionByHash/get-dynamic-fee (reth)
+  - eth_getTransactionByHash/get-legacy-create (reth)
+  - eth_getTransactionByHash/get-legacy-input (reth)
+  - eth_getTransactionByHash/get-legacy-tx (reth)
+  - eth_getTransactionReceipt/get-legacy-contract (reth)
+  - eth_getTransactionReceipt/get-legacy-input (reth)
+  - eth_getTransactionReceipt/get-legacy-receipt (reth)
+
 # https://github.com/paradigmxyz/reth/issues/8732
 engine-withdrawals:
   - Withdrawals Fork On Genesis (Paris) (reth)
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
index aa225516ae72..7ba3395cce84 100644
--- a/.github/workflows/integration.yml
+++ b/.github/workflows/integration.yml
@@ -48,7 +48,7 @@ jobs:
       name: Run tests
       run: |
         cargo nextest run \
-          --locked -p reth-node-optimism --features "optimism"
+          --locked -p reth-optimism-node --features "optimism"
 
   integration-success:
     name: integration success
diff --git a/Cargo.lock b/Cargo.lock
index da895a2b6f7c..d4fa4c6a6a26 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -97,9 +97,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
 
 [[package]]
 name = "alloy-chains"
-version = "0.1.33"
+version = "0.1.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "805f7a974de5804f5c053edc6ca43b20883bdd3a733b3691200ae3a4b454a2db"
+checksum = "8158b4878c67837e5413721cc44298e6a2d88d39203175ea025e51892a16ba4c"
 dependencies = [
  "alloy-rlp",
  "arbitrary",
@@ -111,31 +111,34 @@
 
 [[package]]
 name = "alloy-consensus"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1"
+checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
  "alloy-serde",
  "arbitrary",
+ "auto_impl",
  "c-kzg",
+ "derive_more 1.0.0",
  "serde",
+ "serde_with",
 ]
 
 [[package]]
 name = "alloy-dyn-abi"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4004925bff5ba0a11739ae84dbb6601a981ea692f3bd45b626935ee90a6b8471"
+checksum = "0b499852e1d0e9b8c6db0f24c48998e647c0d5762a01090f955106a7700e4611"
 dependencies = [
  "alloy-json-abi",
  "alloy-primitives",
  "alloy-sol-type-parser",
  "alloy-sol-types",
  "const-hex",
- "derive_more",
+ "derive_more 1.0.0",
  "itoa",
  "serde",
  "serde_json",
@@ -157,9 +160,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-eip7702"
-version = "0.1.0"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37d319bb544ca6caeab58c39cea8921c55d924d4f68f2c60f24f914673f9a74a"
+checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -167,13 +170,14 @@
  "k256",
  "rand 0.8.5",
  "serde",
+ "serde_with",
 ]
 
 [[package]]
 name = "alloy-eips"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f"
+checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85"
 dependencies = [
  "alloy-eip2930",
  "alloy-eip7702",
@@ -182,7 +186,7 @@
  "alloy-serde",
  "arbitrary",
  "c-kzg",
- "derive_more",
+ "derive_more 1.0.0",
  "once_cell",
  "serde",
  "sha2 0.10.8",
@@ -190,9 +194,9 @@
 ]
 
 [[package]]
 name = "alloy-genesis"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3a7a18afb0b318616b6b2b0e2e7ac5529d32a966c673b48091c9919e284e6aca"
+checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3"
 dependencies = [
  "alloy-primitives",
  "alloy-serde",
@@ -201,9 +205,9 @@
 ]
 
 [[package]]
 name = "alloy-json-abi"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9996daf962fd0a90d3c93b388033228865953b92de7bb1959b891d78750a4091"
+checksum = "a438d4486b5d525df3b3004188f9d5cd1d65cd30ecc41e5a3ccef6f6342e8af9"
 dependencies = [
  "alloy-primitives",
  "alloy-sol-type-parser",
@@ -213,9 +217,9 @@
 
 [[package]]
 name = "alloy-json-rpc"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3c717b5298fad078cd3a418335b266eba91b511383ca9bd497f742d5975d5ab"
+checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7"
 dependencies = [
  "alloy-primitives",
  "alloy-sol-types",
@@ -227,9 +231,9 @@
 
 [[package]]
 name = "alloy-network"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb3705ce7d8602132bcf5ac7a1dd293a42adc2f183abf5907c30ac535ceca049"
+checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -248,10 +252,11 @@
 
 [[package]]
 name = "alloy-network-primitives"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94ad40869867ed2d9cd3842b1e800889e5b49e6b92da346e93862b4a741bedf3"
+checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd"
 dependencies = [
+ "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-serde",
@@ -260,9 +265,9 @@
 ]
 
 [[package]]
 name = "alloy-node-bindings"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5988a227293f949525f0a1b3e1ef728d2ef24afa96bad2b7788c6c9617fa3eec"
+checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454"
 dependencies = [
  "alloy-genesis",
  "alloy-primitives",
@@ -277,9 +282,9 @@
 
 [[package]]
 name = "alloy-primitives"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "411aff151f2a73124ee473708e82ed51b2535f68928b6a1caa8bc1246ae6f7cd"
+checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861"
 dependencies = [
  "alloy-rlp",
  "arbitrary",
@@ -287,25 +292,30 @@
  "cfg-if",
  "const-hex",
  "derive_arbitrary",
- "derive_more",
+ "derive_more 1.0.0",
  "getrandom 0.2.15",
+ "hashbrown 0.14.5",
  "hex-literal",
+ "indexmap 2.5.0",
  "itoa",
  "k256",
  "keccak-asm",
+ "paste",
  "proptest",
  "proptest-derive",
  "rand 0.8.5",
  "ruint",
+ "rustc-hash 2.0.0",
  "serde",
+ "sha3",
  "tiny-keccak",
 ]
 
 [[package]]
 name = "alloy-provider"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "927f708dd457ed63420400ee5f06945df9632d5d101851952056840426a10dc5"
+checksum = "fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6"
 dependencies = [
  "alloy-chains",
  "alloy-consensus",
@@ -341,9 +351,9 @@
 
 [[package]]
 name = "alloy-pubsub"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d05f63677e210d758cd5d6d1ce10f20c980c3560ccfbe79ba1997791862a04f"
+checksum = "f32cef487122ae75c91eb50154c70801d71fabdb976fec6c49e0af5e6486ab15"
 dependencies = [
  "alloy-json-rpc",
  "alloy-primitives",
@@ -377,14 +387,14 @@ checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
 name = "alloy-rpc-client"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d82952dca71173813d4e5733e2c986d8b04aea9e0f3b0a576664c232ad050a5"
+checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc"
 dependencies = [
  "alloy-json-rpc",
  "alloy-primitives",
@@ -406,10 +416,11 @@
 
 [[package]]
 name = "alloy-rpc-types"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64333d639f2a0cf73491813c629a405744e16343a4bc5640931be707c345ecc5"
+checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06"
 dependencies = [
+ "alloy-primitives",
  "alloy-rpc-types-engine",
  "alloy-rpc-types-eth",
  "alloy-serde",
@@ -418,9 +429,9 @@
 ]
 
 [[package]]
 name = "alloy-rpc-types-admin"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fefd12e99dd6b7de387ed13ad047ce2c90d8950ca62fc48b8a457ebb8f936c61"
+checksum = "cb520ed46cc5b7d8c014a73fdd77b6a310383a2a5c0a5ae3c9b8055881f062b7"
 dependencies = [
  "alloy-genesis",
  "alloy-primitives",
@@ -430,9 +441,9 @@
 ]
 
 [[package]]
 name = "alloy-rpc-types-anvil"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d25cb45ad7c0930dd62eecf164d2afe4c3d2dd2c82af85680ad1f118e1e5cb83"
+checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07"
 dependencies = [
  "alloy-primitives",
  "alloy-serde",
@@ -441,9 +452,9 @@
 ]
 
 [[package]]
 name = "alloy-rpc-types-beacon"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e7081d2206dca51ce23a06338d78d9b536931cc3f15134fc1c6535eb2b77f18"
+checksum = "7a8dc5980fe30203d698627cddb5f0cedc57f900c8b5e1229c8b9448e37acb4a"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -455,9 +466,9 @@
 
 [[package]]
 name = "alloy-rpc-types-debug"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f9f9033796bb3078d11cc9c839f00e277431ef997db2849a46045fcffee3835"
+checksum = "59d8f8c5bfb160081a772f1f68eb9a37e8929c4ef74e5d01f5b78c2b645a5c5e"
 dependencies = [
  "alloy-primitives",
  "serde",
@@ -465,27 +476,28 @@
 ]
 
 [[package]]
 name = "alloy-rpc-types-engine"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1464c4dd646e1bdfde86ae65ce5ba168dbb29180b478011fe87117ae46b1629b"
+checksum = "e0285c4c09f838ab830048b780d7f4a4f460f309aa1194bb049843309524c64c"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
  "alloy-serde",
- "derive_more",
+ "derive_more 1.0.0",
  "jsonrpsee-types",
  "jsonwebtoken",
  "rand 0.8.5",
  "serde",
+ "strum",
 ]
 
 [[package]]
 name = "alloy-rpc-types-eth"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83aa984386deda02482660aa31cb8ca1e63d533f1c31a52d7d181ac5ec68e9b8"
+checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -494,9 +506,7 @@
  "alloy-rlp",
  "alloy-serde",
  "alloy-sol-types",
- "cfg-if",
- "derive_more",
- "hashbrown 0.14.5",
+ "derive_more 1.0.0",
  "itertools 0.13.0",
  "jsonrpsee-types",
  "serde",
@@ -505,9 +515,9 @@
 ]
 
 [[package]]
 name = "alloy-rpc-types-mev"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "922d92389e5022650c4c60ffd2f9b2467c3f853764f0f74ff16a23106f9017d5"
+checksum = "7cec23ce56c869eec5f6b6fd6a8a92b5aa0cfaf8d7be3a96502e537554dc7430"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -518,9 +528,9 @@
 
 [[package]]
 name = "alloy-rpc-types-trace"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98db35cd42c90b484377e6bc44d95377a7a38a5ebee996e67754ac0446d542ab"
+checksum = "017cad3e5793c5613588c1f9732bcbad77e820ba7d0feaba3527749f856fdbc5"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-eth",
@@ -532,9 +542,9 @@
 
 [[package]]
 name = "alloy-rpc-types-txpool"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6bac37082c3b21283b3faf5cc0e08974272aee2f756ce1adeb26db56a5fce0d5"
+checksum = "2b230e321c416be7f50530159392b4c41a45596d40d97e185575bcd0b545e521"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-eth",
@@ -544,9 +554,9 @@
 ]
 
 [[package]]
 name = "alloy-serde"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "731f75ec5d383107fd745d781619bd9cedf145836c51ecb991623d41278e71fa"
+checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600"
 dependencies = [
  "alloy-primitives",
  "arbitrary",
@@ -556,9 +566,9 @@
 ]
 
 [[package]]
 name = "alloy-signer"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "307324cca94354cd654d6713629f0383ec037e1ff9e3e3d547212471209860c0"
+checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504"
 dependencies = [
  "alloy-primitives",
  "async-trait",
@@ -570,9 +580,9 @@
 
 [[package]]
 name = "alloy-signer-local"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fabe917ab1778e760b4701628d1cae8e028ee9d52ac6307de4e1e9286ab6b5f"
+checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f"
 dependencies = [
  "alloy-consensus",
  "alloy-network",
@@ -588,23 +598,23 @@
 
 [[package]]
 name = "alloy-sol-macro"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0458ccb02a564228fcd76efb8eb5a520521a8347becde37b402afec9a1b83859"
+checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b"
 dependencies = [
  "alloy-sol-macro-expander",
  "alloy-sol-macro-input",
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
 name = "alloy-sol-macro-expander"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2bc65475025fc1e84bf86fc840f04f63fcccdcf3cf12053c99918e4054dfbc69"
+checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f"
 dependencies = [
  "alloy-sol-macro-input",
  "const-hex",
@@ -613,31 +623,31 @@
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
  "syn-solidity",
  "tiny-keccak",
 ]
 
 [[package]]
 name = "alloy-sol-macro-input"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ed10f0715a0b69fde3236ff3b9ae5f6f7c97db5a387747100070d3016b9266b"
+checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90"
 dependencies = [
  "const-hex",
  "dunce",
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
  "syn-solidity",
 ]
 
 [[package]]
 name = "alloy-sol-type-parser"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3edae8ea1de519ccba896b6834dec874230f72fe695ff3c9c118e90ec7cff783"
+checksum = "bc85178909a49c8827ffccfc9103a7ce1767ae66a801b69bdc326913870bf8e6"
 dependencies = [
  "serde",
  "winnow",
@@ -645,9 +655,9 @@
 ]
 
 [[package]]
 name = "alloy-sol-types"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1eb88e4da0a1b697ed6a9f811fdba223cf4d5c21410804fd1707836af73a462b"
+checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae"
 dependencies = [
  "alloy-json-abi",
  "alloy-primitives",
@@ -658,9 +668,9 @@
 
 [[package]]
 name = "alloy-transport"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33616b2edf7454302a1d48084db185e52c309f73f6c10be99b0fe39354b3f1e9"
+checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904"
 dependencies = [
  "alloy-json-rpc",
  "base64 0.22.1",
@@ -677,9 +687,9 @@
 
 [[package]]
 name = "alloy-transport-http"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a944f5310c690b62bbb3e7e5ce34527cbd36b2d18532a797af123271ce595a49"
+checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212"
 dependencies = [
  "alloy-json-rpc",
  "alloy-transport",
@@ -692,9 +702,9 @@
 
 [[package]]
 name = "alloy-transport-ipc"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09fd8491249f74d16ec979b1f5672377b12ebb818e6056478ffa386954dbd350"
+checksum = "b90cf9cde7f2fce617da52768ee28f522264b282d148384a4ca0ea85af04fa3a"
 dependencies = [
  "alloy-json-rpc",
  "alloy-pubsub",
@@ -711,9 +721,9 @@
 
 [[package]]
 name = "alloy-transport-ws"
-version = "0.3.6"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9704761f6297fe482276bee7f77a93cb42bd541c2bd6c1c560b6f3a9ece672e"
+checksum = "7153b88690de6a50bba81c11e1d706bc41dbb90126d607404d60b763f6a3947f"
 dependencies = [
  "alloy-pubsub",
  "alloy-transport",
@@ -729,16 +739,15 @@
 
 [[package]]
 name = "alloy-trie"
-version = "0.5.3"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a46c9c4fdccda7982e7928904bd85fe235a0404ee3d7e197fff13d61eac8b4f"
+checksum = "e9703ce68b97f8faae6f7739d1e003fc97621b856953cbcdbb2b515743f23288"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
  "arbitrary",
  "derive_arbitrary",
- "derive_more",
- "hashbrown 0.14.5",
+ "derive_more 1.0.0",
  "nybbles",
  "proptest",
  "proptest-derive",
@@ -834,7 +843,7 @@
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -1037,9 +1046,9 @@
 
 [[package]]
 name = "async-stream"
-version = "0.3.5"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51"
+checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
 dependencies = [
  "async-stream-impl",
  "futures-core",
@@ -1048,24 +1057,24 @@
 ]
 
 [[package]]
 name = "async-stream-impl"
-version = "0.3.5"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
+checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
 name = "async-trait"
-version = "0.1.82"
+version = "0.1.83"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1"
+checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -1103,24 +1112,22 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
 name = "autocfg"
-version = "1.3.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
 
 [[package]]
 name = "backon"
-version = "0.4.4"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d67782c3f868daa71d3533538e98a8e13713231969def7536e8039606fc46bf0"
+checksum = "e4fa97bb310c33c811334143cf64c5bb2b7b3c06e453db6b095d7061eff8f113"
 dependencies = [
  "fastrand 2.1.1",
- "futures-core",
- "pin-project",
  "tokio",
 ]
 
@@ -1213,7 +1220,25 @@
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn 2.0.77",
+ "syn 2.0.79",
+]
+
+[[package]]
+name = "bindgen"
+version = "0.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f"
+dependencies = [
+ "bitflags 2.6.0",
+ "cexpr",
+ "clang-sys",
+ "itertools 0.13.0",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "rustc-hash 1.1.0",
+ "shlex",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -1395,7 +1420,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
  "synstructure",
 ]
 
@@ -1477,17 +1502,6 @@
  "tinyvec",
 ]
 
-[[package]]
-name = "bstr"
-version = "1.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c"
-dependencies = [
- "memchr",
- "regex-automata 0.4.7",
- "serde",
-]
-
 [[package]]
 name = "bumpalo"
 version = "3.16.0"
@@ -1517,7 +1531,7 @@ checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -1605,9 +1619,9 @@
 
 [[package]]
 name = "cc"
-version = "1.1.21"
+version = "1.1.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0"
+checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938"
 dependencies = [
  "jobserver",
  "libc",
@@ -1700,9 +1714,9 @@
 
 [[package]]
 name = "clap"
-version = "4.5.18"
+version = "4.5.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3"
+checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -1710,9 +1724,9 @@
 ]
 
 [[package]]
 name = "clap_builder"
-version = "4.5.18"
+version = "4.5.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b"
+checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b"
 dependencies = [
  "anstream",
  "anstyle",
@@ -1729,7 +1743,7 @@
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -1811,7 +1825,7 @@ version = "7.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7"
 dependencies = [
- "crossterm",
+ "crossterm 0.27.0",
  "strum",
  "strum_macros",
  "unicode-width",
@@ -1819,13 +1833,14 @@
 ]
 
 [[package]]
 name = "compact_str"
-version = "0.7.1"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f"
+checksum = "6050c3a16ddab2e412160b31f2c871015704239bca62f72f6e5f0be631d3f644"
 dependencies = [
  "castaway",
  "cfg-if",
  "itoa",
+ "rustversion",
  "ryu",
  "static_assertions",
 ]
@@ -1862,9 +1877,9 @@
 
 [[package]]
 name = "const-hex"
-version = "1.12.0"
+version = "1.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6"
+checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586"
 dependencies = [
  "cfg-if",
  "cpufeatures",
@@ -2057,8 +2072,21 @@
  "bitflags 2.6.0",
  "crossterm_winapi",
  "libc",
- "mio 0.8.11",
  "parking_lot 0.12.3",
+ "winapi",
+]
+
+[[package]]
+name = "crossterm"
+version = "0.28.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6"
+dependencies = [
+ "bitflags 2.6.0",
+ "crossterm_winapi",
+ "mio 1.0.2",
+ "parking_lot 0.12.3",
+ "rustix",
  "signal-hook",
  "signal-hook-mio",
  "winapi",
@@ -2166,7 +2194,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -2190,7 +2218,7 @@
  "proc-macro2",
  "quote",
  "strsim",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -2201,7 +2229,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
 dependencies = [
  "darling_core",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -2321,7 +2349,18 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
+]
+
+[[package]]
+name = "derive_more"
+version = "0.99.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -2342,7 +2381,7 @@
  "convert_case",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
  "unicode-xid",
 ]
 
@@ -2456,7 +2495,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -2573,12 +2612,6 @@ version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
 
-[[package]]
-name = "endian-type"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d"
-
 [[package]]
 name = "enr"
 version = "0.12.1"
@@ -2608,7 +2641,7 @@
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -2619,7 +2652,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -2648,6 +2681,7 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
 name = "example-beacon-api-sidecar-fetcher"
 version = "0.1.0"
 dependencies = [
+ "alloy-primitives",
  "alloy-rpc-types-beacon",
  "clap",
  "eyre",
@@ -2678,6 +2712,7 @@
 name = "example-bsc-p2p"
 version = "0.0.0"
 dependencies = [
+ "alloy-primitives",
  "reth-chainspec",
  "reth-discv4",
  "reth-network",
@@ -2696,13 +2731,13 @@ name = "example-custom-dev-node"
 version = "0.0.0"
 dependencies = [
  "alloy-genesis",
+ "alloy-primitives",
  "eyre",
  "futures-util",
  "reth",
  "reth-chainspec",
  "reth-node-core",
  "reth-node-ethereum",
- "reth-primitives",
  "serde_json",
  "tokio",
 ]
@@ -2712,6 +2747,7 @@ name = "example-custom-engine-types"
 version = "0.0.0"
 dependencies = [
  "alloy-genesis",
+ "alloy-primitives",
  "alloy-rpc-types",
  "eyre",
  "reth",
@@ -2734,6 +2770,7 @@ name = "example-custom-evm"
 version = "0.0.0"
 dependencies = [
  "alloy-genesis",
+ "alloy-primitives",
  "eyre",
  "reth",
  "reth-chainspec",
@@ -2750,12 +2787,12 @@
 name = "example-custom-inspector"
 version = "0.0.0"
 dependencies = [
+ "alloy-primitives",
  "alloy-rpc-types",
  "clap",
  "futures-util",
  "reth",
  "reth-node-ethereum",
- "reth-rpc-types",
 ]
 
 [[package]]
@@ -2773,6 +2810,7 @@
 name = "example-custom-payload-builder"
 version = "0.0.0"
 dependencies = [
+ "alloy-primitives",
  "eyre",
  "futures-util",
  "reth",
@@ -2790,6 +2828,7 @@
 name = "example-custom-rlpx-subprotocol"
 version = "0.0.0"
 dependencies = [
+ "alloy-primitives",
  "eyre",
  "futures",
  "rand 0.8.5",
@@ -2800,7 +2839,6 @@
  "reth-node-ethereum",
  "reth-primitives",
  "reth-provider",
- "reth-rpc-types",
  "tokio",
  "tokio-stream",
  "tracing",
@@ -2810,6 +2848,7 @@
 name = "example-db-access"
 version = "0.0.0"
 dependencies = [
+ "alloy-primitives",
  "alloy-rpc-types",
  "eyre",
  "reth-chainspec",
@@ -2818,7 +2857,6 @@
  "reth-node-types",
  "reth-primitives",
  "reth-provider",
- "reth-rpc-types",
 ]
 
 [[package]]
@@ -2884,6 +2922,7 @@
 name = "example-polygon-p2p"
 version = "0.0.0"
 dependencies = [
+ "alloy-primitives",
  "reth-chainspec",
  "reth-discv4",
  "reth-network",
@@ -2916,6 +2955,7 @@ name = "example-stateful-precompile"
 version = "0.0.0"
 dependencies = [
  "alloy-genesis",
+ "alloy-primitives",
  "eyre",
  "parking_lot 0.12.3",
  "reth",
@@ -2933,6 +2973,7 @@
 name = "example-txpool-tracing"
 version = "0.0.0"
 dependencies = [
+ "alloy-primitives",
  "alloy-rpc-types-trace",
  "clap",
  "futures-util",
@@ -3046,9 +3087,9 @@
 
 [[package]]
 name = "flate2"
-version = "1.0.33"
+version = "1.0.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253"
+checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0"
 dependencies = [
  "crc32fast",
  "miniz_oxide",
@@ -3161,7 +3202,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -3559,9 +3600,9 @@
 
 [[package]]
 name = "httparse"
-version = "1.9.4"
+version = "1.9.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9"
+checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946"
 
 [[package]]
 name = "httpdate"
@@ -3634,9 +3675,9 @@
 
 [[package]]
 name = "hyper-util"
-version = "0.1.8"
+version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba"
+checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b"
 dependencies = [
  "bytes",
  "futures-channel",
@@ -3647,39 +3688,42 @@
  "pin-project-lite",
  "socket2 0.5.7",
  "tokio",
- "tower 0.4.13",
  "tower-service",
  "tracing",
 ]
 
 [[package]]
 name = "iai-callgrind"
-version = "0.11.1"
+version = "0.13.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "146bf76de95f03c5f4b118f0f2f350ef18df47cc0595755bd29d8f668209466c"
+checksum = "4bd871e6374d5ca2d9b48dd23b3c7ef63a4201728621f6d75937dfcc66e91809"
 dependencies = [
  "bincode",
+ "derive_more 0.99.18",
  "iai-callgrind-macros",
  "iai-callgrind-runner",
 ]
 
 [[package]]
 name = "iai-callgrind-macros"
-version = "0.2.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2a4bb39225592c0a28cfca6f70af52ebd8da23f533c2cdd0a3329c1fa252d56"
+checksum = "397649417510422ded7033f86132f833cca8c2e5081d0dfbec939b2353da7021"
 dependencies = [
- "proc-macro-error",
+ "derive_more 0.99.18",
+ "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "serde",
+ "serde_json",
+ "syn 2.0.79",
 ]
 
 [[package]]
 name = "iai-callgrind-runner"
-version = "0.11.1"
+version = "0.13.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "60484b2e469ef4f1af6f196af738889ff375151dd11ac223647ed8a97529107d"
+checksum = "f3783c337f9e931af702b5d5835ff2a6824bf55e416461a4e042dfb4b8fdbbea"
 dependencies = [
  "serde",
 ]
@@ -3822,7 +3866,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -3923,6 +3967,7 @@ version = "2.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5"
 dependencies = [
+ "arbitrary",
  "equivalent",
  "hashbrown 0.14.5",
  "serde",
@@ -3982,6 +4027,16 @@
  "generic-array",
 ]
 
+[[package]]
+name = "instability"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c"
+dependencies = [
+ "quote",
+ "syn 2.0.79",
+]
+
 [[package]]
 name = "instant"
 version = "0.1.13"
@@ -4035,9 +4090,9 @@ checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4"
 
 [[package]]
 name = "iri-string"
-version = "0.7.5"
+version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c25163201be6ded9e686703e85532f8f852ea1f92ba625cb3c51f7fe6d07a4a"
+checksum = "44bd7eced44cfe2cebc674adb2a7124a754a4b5269288d22e9f39f8fada3562d"
 dependencies = [
  "memchr",
  "serde",
@@ -4133,9 +4188,9 @@
 
 [[package]]
 name = "jsonrpsee"
-version = "0.24.4"
+version = "0.24.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fd1ead9fb95614e8dc5556d12a8681c2f6d352d0c1d3efc8708c7ccbba47bc6"
+checksum = "126b48a5acc3c52fbd5381a77898cb60e145123179588a29e7ac48f9c06e401b"
 dependencies = [
  "jsonrpsee-client-transport",
  "jsonrpsee-core",
@@ -4151,9 +4206,9 @@
 
 [[package]]
 name = "jsonrpsee-client-transport"
-version = "0.24.4"
+version = "0.24.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89841d4f03a14c055eb41d4f41901819573ef948e8ee0d5c86466fd286b2ce7f"
+checksum = "bf679a8e0e083c77997f7c4bb4ca826577105906027ae462aac70ff348d02c6a"
 dependencies = [
  "base64 0.22.1",
  "futures-channel",
@@ -4176,9 +4231,9 @@
 
 [[package]]
 name = "jsonrpsee-core"
-version = "0.24.4"
+version = "0.24.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff79651479f69ada7bda604ef2acf3f1aa50755d97cc36d25ff04c2664f9d96f"
+checksum = "b0e503369a76e195b65af35058add0e6900b794a4e9a9316900ddd3a87a80477"
 dependencies = [
  "async-trait",
  "bytes",
@@ -4203,9 +4258,9 @@
 
 [[package]]
 name = "jsonrpsee-http-client"
-version = "0.24.4"
+version = "0.24.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68ed8b301b19f4dad8ddc66ed956a70fc227def5c19b3898e0a29ce8f0edee06"
+checksum = "f2c0caba4a6a8efbafeec9baa986aa22a75a96c29d3e4b0091b0098d6470efb5"
 dependencies = [
  "async-trait",
  "base64 0.22.1",
@@ -4228,22 +4283,22 @@
 
 [[package]]
 name = "jsonrpsee-proc-macros"
-version = "0.24.4"
+version = "0.24.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0d4c6bec4909c966f59f52db3655c0e9d4685faae8b49185973d9d7389bb884"
+checksum = "fc660a9389e2748e794a40673a4155d501f32db667757cdb80edeff0306b489b"
 dependencies = [
  "heck",
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
 name = "jsonrpsee-server"
-version = "0.24.4"
+version = "0.24.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebe2198e5fd96cf2153ecc123364f699b6e2151317ea09c7bf799c43c2fe1415"
+checksum = "af6e6c9b6d975edcb443565d648b605f3e85a04ec63aa6941811a8894cc9cded"
 dependencies = [
  "futures-util",
  "http",
@@ -4268,9 +4323,9 @@
 
 [[package]]
 name = "jsonrpsee-types"
-version = "0.24.4"
+version = "0.24.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "531e386460425e49679587871a056f2895a47dade21457324ad1262cd78ef6d9"
+checksum = "d8fb16314327cbc94fdf7965ef7e4422509cd5597f76d137bd104eb34aeede67"
 dependencies = [
  "http",
  "serde",
@@ -4280,9 +4335,9 @@
 ]
 
 [[package]]
 name = "jsonrpsee-wasm-client"
-version = "0.24.4"
+version = "0.24.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a2d2206c8f04c6b79a11bd1d92d6726b6f7fd3dec57c91e07fa53e867268bbb"
+checksum = "e0da62b43702bd5640ea305d35df95da30abc878e79a7b4b01feda3beaf35d3c"
 dependencies = [
  "jsonrpsee-client-transport",
  "jsonrpsee-core",
@@ -4291,9 +4346,9 @@
 ]
 
 [[package]]
 name = "jsonrpsee-ws-client"
-version = "0.24.4"
+version = "0.24.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87bc869e143d430e748988261d19b630e8f1692074e68f1a7f0eb4c521d2fc58"
+checksum = "39aabf5d6c6f22da8d5b808eea1fab0736059f11fb42f71f141b14f404e5046a"
 dependencies = [
  "http",
  "jsonrpsee-client-transport",
@@ -4402,9 +4457,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
 
 [[package]]
 name = "libc"
-version = "0.2.158"
+version = "0.2.159"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5"
 
 [[package]]
 name = "libloading"
@@ -4447,7 +4502,7 @@ version = "0.14.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ae9ea4b75e1a81675429dafe43441df1caea70081e82246a8cccf514884a88bb"
 dependencies = [
- "bindgen",
+ "bindgen 0.69.4",
  "errno",
  "libc",
 ]
@@ -4460,7 +4515,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
 dependencies = [
  "bitflags 2.6.0",
  "libc",
- "redox_syscall 0.5.4",
+ "redox_syscall 0.5.7",
 ]
 
 [[package]]
@@ -4649,6 +4704,18 @@
  "portable-atomic",
 ]
 
+[[package]]
+name = "metrics-derive"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3dbdd96ed57d565ec744cba02862d707acf373c5772d152abae6ec5c4e24f6c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "regex",
+ "syn 2.0.79",
+]
+
 [[package]]
 name = "metrics-exporter-prometheus"
 version = "0.15.3"
@@ -4684,16 +4751,12 @@ version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828"
 dependencies = [
- "aho-corasick",
  "crossbeam-epoch",
  "crossbeam-utils",
  "hashbrown 0.14.5",
- "indexmap 2.5.0",
  "metrics",
  "num_cpus",
- "ordered-float",
  "quanta",
- "radix_trie",
  "sketches-ddsketch",
 ]
 
@@ -4768,20 +4831,20 @@ checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec"
 dependencies = [
  "hermit-abi 0.3.9",
  "libc",
+ "log",
  "wasi 0.11.0+wasi-snapshot-preview1",
  "windows-sys 0.52.0",
 ]
 
 [[package]]
 name = "mockall"
-version = "0.12.1"
+version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48"
+checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a"
 dependencies = [
  "cfg-if",
  "downcast",
  "fragile",
- "lazy_static",
  "mockall_derive",
  "predicates",
  "predicates-tree",
@@ -4789,14 +4852,14 @@
 ]
 
 [[package]]
 name = "mockall_derive"
-version = "0.12.1"
+version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2"
+checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020"
 dependencies = [
  "cfg-if",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -4828,9 +4891,9 @@ checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e"
 
 [[package]]
 name = "multiaddr"
-version = "0.18.1"
+version = "0.18.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070"
+checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961"
 dependencies = [
  "arrayref",
  "byteorder",
@@ -4841,7 +4904,7 @@
  "percent-encoding",
  "serde",
  "static_assertions",
- "unsigned-varint",
+ "unsigned-varint 0.8.0",
  "url",
 ]
 
@@ -4863,16 +4926,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492"
 dependencies = [
  "core2",
- "unsigned-varint",
-]
-
-[[package]]
-name = "nibble_vec"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43"
-dependencies = [
- "smallvec",
+ "unsigned-varint 0.7.2",
 ]
 
 [[package]]
@@ -5052,7 +5106,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -5089,9 +5143,12 @@
 
 [[package]]
 name = "once_cell"
-version = "1.19.0"
+version = "1.20.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1"
+dependencies = [
+ "portable-atomic",
+]
 
 [[package]]
 name = "oorandom"
@@ -5101,9 +5158,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
 
 [[package]]
 name = "op-alloy-consensus"
-version = "0.2.12"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21aad1fbf80d2bcd7406880efc7ba109365f44bbb72896758ddcbfa46bf1592c"
+checksum = "c4f7f318f885db6e1455370ca91f74b7faed152c8142f6418f0936d606e582ff"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -5111,16 +5168,17 @@
  "alloy-rlp",
  "alloy-serde",
  "arbitrary",
- "derive_more",
+ "derive_more 1.0.0",
  "serde",
+ "serde_with",
  "spin",
 ]
 
 [[package]]
 name = "op-alloy-genesis"
-version = "0.2.12"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e1b8a9b70da0e027242ec1762f0f3a386278b6291d00d12ff5a64929dc19f68"
+checksum = "c8215c87b74d2fbbaff0fd2887868a8341df33a3c495ee01f813e5ddd5be9c46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -5132,9 +5190,9 @@
 
 [[package]]
 name = "op-alloy-network"
-version = "0.2.12"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "783ce4ebc0a994eee2188431511b16692b704e1e8fff0c77d8c0354d3c2b1fc8"
+checksum = "3cd514c4ccd0b3c69fa3e7050cde77db842d4c308ae48f9a3e1ce263e823e45e"
 dependencies = [
  "alloy-consensus",
  "alloy-network",
@@ -5146,16 +5204,16 @@
 
 [[package]]
 name = "op-alloy-protocol"
-version = "0.2.12"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf300a82ae2d30e2255bfea87a2259da49f63a25a44db561ae64cc9e3084139f"
+checksum = "fa5c397fbe35e07f9c95a571440ca2e90df754e198496d82ff4127de00b89dd9"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
  "alloy-serde",
- "hashbrown 0.14.5",
+ "derive_more 1.0.0",
  "op-alloy-consensus",
  "op-alloy-genesis",
  "serde",
@@ -5163,17 +5221,16 @@
 ]
 
 [[package]]
 name = "op-alloy-rpc-types"
-version = "0.2.12"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e281fbfc2198b7c0c16457d6524f83d192662bc9f3df70f24c3038d4521616df"
+checksum = "547d29c5ab957ff32e14edddb93652dad748d2ef6cbe4b0fe8615ce06b0a3ddb"
 dependencies = [
+ "alloy-consensus",
  "alloy-eips",
  "alloy-network-primitives",
  "alloy-primitives",
  "alloy-rpc-types-eth",
  "alloy-serde",
- "cfg-if",
- "hashbrown 0.14.5",
  "op-alloy-consensus",
  "serde",
  "serde_json",
@@ -5181,17 +5238,14 @@
 ]
 
 [[package]]
 name = "op-alloy-rpc-types-engine"
-version = "0.2.12"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2947272a81ebf988f4804b6f0f6a7c0b2f6f89a908cb410e36f8f3828f81c778"
+checksum = "5041122e20b76644cc690bba688671eecdc4626e6384a76eb740535d6ddcef14"
 dependencies = [
- "alloy-eips",
  "alloy-primitives",
  "alloy-rpc-types-engine",
  "alloy-serde",
- "derive_more",
- "op-alloy-consensus",
- "op-alloy-genesis",
+ "derive_more 1.0.0",
  "op-alloy-protocol",
  "serde",
 ]
@@ -5203,10 +5257,11 @@
 dependencies = [
  "clap",
  "reth-cli-util",
  "reth-node-builder",
- "reth-node-optimism",
  "reth-optimism-cli",
+ "reth-optimism-node",
  "reth-optimism-rpc",
  "reth-provider",
+ "tracing",
 ]
 
 [[package]]
@@ -5227,15 +5282,6 @@ version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
 
-[[package]]
-name = "ordered-float"
-version = "4.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a91171844676f8c7990ce64959210cd2eaef32c2612c50f9fae9f8aaa6065a6"
-dependencies = [
- "num-traits",
-]
-
 [[package]]
 name = "overload"
 version = "0.1.1"
@@ -5340,7 +5386,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
 dependencies = [
  "cfg-if",
  "libc",
- "redox_syscall 0.5.4",
+ "redox_syscall 0.5.7",
  "smallvec",
  "windows-targets 0.52.6",
 ]
 
@@ -5428,7 +5474,7 @@
  "phf_shared",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -5457,7 +5503,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -5484,9 +5530,9 @@
 
 [[package]]
 name = "pkg-config"
-version = "0.3.30"
+version = "0.3.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
+checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2"
 
 [[package]]
 name = "plain_hasher"
@@ -5545,9 +5591,9 @@
 
 [[package]]
 name = "portable-atomic"
-version = "1.8.0"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d30538d42559de6b034bc76fd6dd4c38961b1ee5c6c56e3808c50128fdbc22ce"
+checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2"
 
 [[package]]
 name = "powerfmt"
@@ -5629,7 +5675,7 @@ checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba"
 dependencies = [
  "proc-macro2",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -5704,7 +5750,7 @@
  "proc-macro-error-attr2",
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -5756,7 +5802,7 @@
  "rand 0.8.5",
  "rand_chacha 0.3.1",
  "rand_xorshift",
- "regex-syntax 0.8.4",
+ "regex-syntax 0.8.5",
  "rusty-fork",
  "tempfile",
  "unarray",
@@ -5780,7 +5826,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
@@ -5885,16 +5931,6 @@ version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
 
-[[package]]
-name = "radix_trie"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd"
-dependencies = [
- "endian-type",
- "nibble_vec",
-]
-
 [[package]]
 name = "rand"
 version = "0.7.3"
@@ -5917,6 +5953,7 @@
  "libc",
  "rand_chacha 0.3.1",
  "rand_core 0.6.4",
+ "serde",
 ]
 
 [[package]]
@@ -5977,18 +6014,18 @@
 
 [[package]]
 name = "ratatui"
-version = "0.27.0"
+version = "0.28.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d16546c5b5962abf8ce6e2881e722b4e0ae3b6f1a08a26ae3573c55853ca68d3"
+checksum = "fdef7f9be5c0122f890d58bdf4d964349ba6a6161f705907526d891efabba57d"
 dependencies = [
  "bitflags 2.6.0",
  "cassowary",
  "compact_str",
- "crossterm",
+ "crossterm 0.28.1",
+ "instability",
  "itertools 0.13.0",
  "lru",
  "paste",
- "stability",
  "strum",
  "strum_macros",
  "unicode-segmentation",
@@ -6042,9 +6079,9 @@
 
 [[package]]
 name = "redox_syscall"
-version = "0.5.4"
+version = "0.5.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853"
+checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f"
 dependencies = [
  "bitflags 2.6.0",
 ]
 
@@ -6062,14 +6099,14 @@
 
 [[package]]
 name = "regex"
-version = "1.10.6"
+version = "1.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-automata 0.4.7",
- "regex-syntax 0.8.4",
+ "regex-automata 0.4.8",
+ "regex-syntax 0.8.5",
 ]
 
 [[package]]
@@ -6083,13 +6120,13 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.4.7"
+version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-syntax 0.8.4",
+ "regex-syntax 0.8.5",
 ]
 
 [[package]]
@@ -6100,9 +6137,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
 
 [[package]]
 name = "regex-syntax"
-version = "0.8.4"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
 
 [[package]]
 name = "regress"
@@ -6116,9 +6153,9 @@
 
 [[package]]
 name = "reqwest"
-version = "0.12.7"
+version = "0.12.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63"
+checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b"
 dependencies = [
  "base64 0.22.1",
  "bytes",
@@ -6139,7 +6176,7 @@
  "pin-project-lite",
  "quinn",
  "rustls",
- "rustls-native-certs 0.7.3",
+ "rustls-native-certs 0.8.0",
  "rustls-pemfile",
  "rustls-pki-types",
  "serde",
@@ -6173,6 +6210,9 @@
 name = "reth"
 version = "1.0.7"
 dependencies = [
+ "alloy-consensus",
+ "alloy-eips",
+ "alloy-primitives",
  "alloy-rlp",
  "alloy-rpc-types",
  "aquamarine",
@@ -6243,6 +6283,7 @@ name = "reth-auto-seal-consensus"
 version = "1.0.7"
 dependencies = [
  "alloy-primitives",
+ "alloy-rpc-types-engine",
  "futures-util",
  "reth-beacon-consensus",
  "reth-chainspec",
@@ -6253,10 +6294,10 @@
  "reth-execution-types",
  "reth-network-p2p",
  "reth-network-peers",
+ "reth-optimism-consensus",
  "reth-primitives",
  "reth-provider",
  "reth-revm",
- "reth-rpc-types",
  "reth-stages-api",
  "reth-tokio-util",
  "reth-transaction-pool",
@@ -6296,6 +6337,7 @@ version = "1.0.7"
 dependencies = [
  "alloy-genesis",
  "alloy-primitives",
+ "alloy-rpc-types-engine",
  "assert_matches",
  "futures",
  "itertools 0.13.0",
@@ -6325,7 +6367,6 @@
  "reth-provider",
  "reth-prune",
  "reth-prune-types",
- "reth-rpc-types",
  "reth-rpc-types-compat",
  "reth-stages",
  "reth-stages-api",
@@ -6347,6 +6388,7 @@ version = "1.0.7"
 dependencies = [
  "alloy-eips",
  "alloy-json-rpc",
+ "alloy-primitives",
  "alloy-provider",
  "alloy-pubsub",
  "alloy-rpc-client",
@@ -6379,6 +6421,8 @@
 name = "reth-blockchain-tree"
 version = "1.0.7"
 dependencies = [
+ "alloy-consensus",
+ "alloy-eips",
  "alloy-genesis",
  "alloy-primitives",
  "aquamarine",
@@ -6427,12 +6471,13 @@
 name = "reth-chain-state"
 version = "1.0.7"
 dependencies = [
+ "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-signer",
  "alloy-signer-local",
  "auto_impl",
- "derive_more",
+ "derive_more 1.0.0",
  "metrics",
  "parking_lot 0.12.3",
  "pin-project",
@@ -6461,7 +6506,7 @@
  "alloy-rlp",
  "alloy-trie",
  "auto_impl",
- "derive_more",
+ "derive_more 1.0.0",
  "once_cell",
  "op-alloy-rpc-types",
  "reth-ethereum-forks",
@@ -6493,7 +6538,7 @@
  "backon",
  "clap",
  "comfy-table",
- "crossterm",
+ "crossterm 0.28.1",
  "eyre",
  "fdlimit",
  "futures",
@@ -6599,18 +6644,18 @@
  "proc-macro2",
  "quote",
  "similar-asserts",
- "syn 2.0.77",
+ "syn 2.0.79",
 ]
 
 [[package]]
 name = "reth-config"
 version = "1.0.7"
 dependencies = [
+ "alloy-primitives",
  "eyre",
  "humantime-serde",
  "reth-network-peers",
  "reth-network-types",
- "reth-primitives",
  "reth-prune-types",
  "reth-stages-types",
  "serde",
@@ -6624,7 +6669,7 @@ version = "1.0.7"
 dependencies = [
  "alloy-primitives",
  "auto_impl",
- "derive_more",
+ "derive_more 1.0.0",
  "reth-primitives",
 ]
 
@@ -6632,6 +6677,7 @@
 name = "reth-consensus-common"
 version = "1.0.7"
 dependencies = [
+ "alloy-consensus",
  "alloy-primitives",
  "mockall",
  "rand 0.8.5",
@@ -6648,17 +6694,17 @@ version = "1.0.7"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
+ "alloy-primitives",
  "alloy-provider",
  "alloy-rpc-types",
+ "alloy-rpc-types-engine",
  "auto_impl",
  "eyre",
  "futures",
  "reqwest",
  "reth-node-api",
- "reth-node-core",
  "reth-rpc-api",
  "reth-rpc-builder",
- "reth-rpc-types",
  "reth-tracing",
  "ringbuffer",
  "serde",
@@ -6674,7 +6720,7 @@
  "assert_matches",
  "bytes",
  "criterion",
- "derive_more",
+ "derive_more 1.0.0",
  "eyre",
  "iai-callgrind",
  "metrics",
@@ -6713,7 +6759,7 @@
  "alloy-primitives",
  "arbitrary",
  "bytes",
- "derive_more",
+ "derive_more 1.0.0",
  "metrics",
  "modular-bitfield",
  "parity-scale-codec",
@@ -6808,7 +6854,7 @@ version = "1.0.7"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
- "derive_more",
+ "derive_more 1.0.0",
  "discv5",
  "enr",
  "futures",
@@ -6964,6 +7010,7 @@ name = "reth-engine-local"
 version = "1.0.7"
 dependencies = [
  "alloy-primitives",
+ "alloy-rpc-types-engine",
  "eyre",
  "futures-util",
  "reth-beacon-consensus",
@@ -6980,7 +7027,6 @@
  "reth-primitives",
  "reth-provider",
  "reth-prune",
- "reth-rpc-types",
  "reth-stages-api",
  "reth-tracing",
  "reth-transaction-pool",
@@ -7036,6 +7082,7 @@
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
+ "alloy-rpc-types-engine",
  "assert_matches",
  "futures",
  "metrics",
@@ -7062,7 +7109,6 @@
  "reth-prune",
  "reth-prune-types",
  "reth-revm",
- "reth-rpc-types",
  "reth-rpc-types-compat",
  "reth-stages",
  "reth-stages-api",
@@ -7081,6 +7127,7 @@ name = "reth-engine-util"
 version = "1.0.7"
 dependencies = [
  "alloy-primitives",
+ "alloy-rpc-types-engine",
  "eyre",
  "futures",
  "itertools 0.13.0",
@@ -7095,7 +7142,6 @@
  "reth-primitives",
  "reth-provider",
  "reth-revm",
- "reth-rpc-types",
  "reth-rpc-types-compat",
  "reth-trie",
  "revm-primitives",
@@ -7127,7 +7173,7 @@ dependencies = [
  "arbitrary",
  "async-stream",
  "bytes",
- "derive_more",
+ "derive_more 1.0.0",
  "futures",
  "pin-project",
  "proptest",
@@ -7157,13 +7203,14 @@ name = "reth-eth-wire-types"
 version = "1.0.7"
 dependencies = [
  "alloy-chains",
+ "alloy-consensus",
  "alloy-eips",
  "alloy-genesis",
  "alloy-primitives",
  "alloy-rlp",
  "arbitrary",
  "bytes",
- "derive_more",
+ "derive_more 1.0.0",
  "proptest",
  "proptest-arbitrary-interop",
  "rand 0.8.5",
@@ -7207,12 +7254,12 @@
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
+ "alloy-rpc-types-engine",
  "reth-chain-state",
  "reth-chainspec",
  "reth-engine-primitives",
  "reth-payload-primitives",
  "reth-primitives",
- "reth-rpc-types",
  "reth-rpc-types-compat",
  "serde",
  "serde_json",
@@ -7277,6 +7324,7 @@ name = "reth-evm"
 version = "1.0.7"
 dependencies = [
  "alloy-eips",
+ "alloy-primitives",
  "auto_impl",
  "futures-util",
  "metrics",
@@ -7297,6 +7345,7 @@
 name = "reth-evm-ethereum"
 version = "1.0.7"
 dependencies = [
+ "alloy-consensus",
  "alloy-eips",
  "alloy-genesis",
  "alloy-primitives",
@@ -7315,29 +7364,6 @@
  "serde_json",
 ]
 
-[[package]]
-name = "reth-evm-optimism"
-version = "1.0.7"
-dependencies = [
- "alloy-genesis",
- "alloy-primitives",
- "reth-chainspec",
- "reth-ethereum-forks",
- "reth-evm",
- "reth-execution-errors",
- "reth-execution-types",
- "reth-optimism-chainspec",
- "reth-optimism-consensus",
- "reth-optimism-forks",
- "reth-primitives",
- "reth-prune-types",
- "reth-revm",
- "revm",
- "revm-primitives",
- "thiserror",
- "tracing",
-]
-
 [[package]]
 name = "reth-execution-errors"
 version = "1.0.7"
@@ -7345,7 +7371,7 @@ dependencies = [
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
- "derive_more",
+ "derive_more 1.0.0",
  "nybbles",
  "reth-consensus",
  "reth-prune-types",
@@ -7359,24 +7385,33 @@ version = "1.0.7"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
- "reth-chainspec",
+ "arbitrary",
+ "bincode",
+ "rand 0.8.5",
  "reth-execution-errors",
  "reth-primitives",
  "reth-trie",
  "revm",
  "serde",
+ "serde_with",
 ]
 
 [[package]]
 name = "reth-exex"
 version = "1.0.7"
 dependencies = [
+ "alloy-consensus",
+ "alloy-eips",
  "alloy-genesis",
  "alloy-primitives",
  "eyre",
  "futures",
+ "itertools 0.13.0",
  "metrics",
+ "parking_lot 0.12.3",
+ "rand 0.8.5",
  "reth-blockchain-tree",
+ "reth-chain-state",
  "reth-chainspec",
  "reth-config",
  "reth-db-api",
@@ -7398,8 +7433,8 @@
  "reth-tasks",
  "reth-testing-utils",
  "reth-tracing",
+ "rmp-serde",
  "secp256k1",
- "serde_json",
  "tempfile",
  "tokio",
  "tokio-util",
@@ -7433,6 +7468,7 @@
  "reth-provider",
  "reth-tasks",
  "reth-transaction-pool",
+ "tempfile",
  "thiserror",
  "tokio",
 ]
@@ -7443,8 +7479,14 @@ version = "1.0.7"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
- "reth-provider",
+ "arbitrary",
+ "bincode",
+ "rand 0.8.5",
+ "reth-chain-state",
+ "reth-execution-types",
+ "reth-primitives",
  "serde",
+ "serde_with",
 ]
 
 [[package]]
@@ -7510,13 +7552,14 @@
  "byteorder",
  "criterion",
  "dashmap 6.1.0",
- "derive_more",
+ "derive_more 1.0.0",
  "indexmap 2.5.0",
  "parking_lot 0.12.3",
  "pprof",
  "rand 0.8.5",
  "rand_xorshift",
  "reth-mdbx-sys",
+ "smallvec",
  "tempfile",
  "thiserror",
  "tracing",
@@ -7526,7 +7569,7 @@ name = "reth-mdbx-sys"
 version = "1.0.7"
 dependencies = [
- "bindgen",
+ "bindgen 0.70.1",
  "cc",
 ]
 
@@ -7536,24 +7579,11 @@ name = "reth-metrics"
 version = "1.0.7"
 dependencies = [
  "futures",
  "metrics",
- "reth-metrics-derive",
+ "metrics-derive",
  "tokio",
  "tokio-util",
 ]
 
-[[package]]
-name = "reth-metrics-derive"
-version = "1.0.7"
-dependencies = [
- "metrics",
- "proc-macro2",
- "quote",
- "regex",
- "serial_test",
- "syn 2.0.77",
- "trybuild",
-]
-
 [[package]]
 name = "reth-net-banlist"
 version = "1.0.7"
@@ -7579,6 +7609,7 @@
 name = "reth-network"
 version = "1.0.7"
 dependencies = [
+ "alloy-consensus",
  "alloy-eips",
  "alloy-node-bindings",
  "alloy-primitives",
@@ -7587,7 +7618,7 @@
  "aquamarine",
  "auto_impl",
  "criterion",
- "derive_more",
+ "derive_more 1.0.0",
  "discv5",
  "enr",
  "futures",
@@ -7641,7 +7672,7 @@
  "alloy-primitives",
  "alloy-rpc-types-admin",
  "auto_impl",
- "derive_more",
+ "derive_more 1.0.0",
  "enr",
  "futures",
  "reth-eth-wire-types",
@@ -7663,7 +7694,7 @@
  "alloy-eips",
  "alloy-primitives",
  "auto_impl",
- "derive_more",
+ "derive_more 1.0.0",
  "futures",
  "parking_lot 0.12.3",
  "reth-consensus",
@@ -7711,7 +7742,7 @@ version = "1.0.7"
 dependencies = [
  "anyhow",
  "bincode",
- "derive_more",
+ "derive_more 1.0.0",
  "lz4_flex",
  "memmap2",
  "rand 0.8.5",
@@ -7755,6 +7786,7 @@
  "reth-auto-seal-consensus",
  "reth-beacon-consensus",
  "reth-blockchain-tree",
+ "reth-chain-state",
  "reth-chainspec",
  "reth-cli-util",
  "reth-config",
@@ -7812,7 +7844,7 @@
  "alloy-rpc-types-engine",
  "clap",
  "const_format",
- "derive_more",
+ "derive_more 1.0.0",
  "dirs-next",
  "eyre",
  "futures",
@@ -7839,7 +7871,6 @@
  "reth-rpc-eth-api",
  "reth-rpc-eth-types",
  "reth-rpc-server-types",
- "reth-rpc-types",
  "reth-rpc-types-compat",
  "reth-stages-types",
  "reth-storage-api",
@@ -7935,7 +7966,7 @@
  "reth-metrics",
  "reth-provider",
  "reth-tasks",
- "socket2 0.4.10",
+ "socket2 0.5.7",
  "tikv-jemalloc-ctl",
  "tokio",
  "tower 0.4.13",
@@ -7943,57 +7974,6 @@
  "vergen",
 ]
 
-[[package]]
-name = "reth-node-optimism"
-version = "1.0.7"
-dependencies = [
- "alloy-genesis",
- "alloy-primitives",
- "async-trait",
- "clap",
- "eyre",
- "jsonrpsee",
- "jsonrpsee-types",
- "op-alloy-rpc-types-engine",
- "parking_lot 0.12.3",
- "reqwest",
- "reth",
- "reth-auto-seal-consensus",
- "reth-basic-payload-builder",
- "reth-beacon-consensus",
- "reth-chainspec",
- "reth-consensus",
- "reth-db",
- "reth-discv5",
- "reth-e2e-test-utils",
- "reth-evm",
- "reth-evm-optimism",
- "reth-network",
- "reth-node-api",
- "reth-node-builder",
- "reth-optimism-chainspec",
- "reth-optimism-consensus",
- "reth-optimism-forks",
- "reth-optimism-payload-builder",
- "reth-optimism-rpc",
- "reth-payload-builder",
- "reth-primitives",
- "reth-provider",
- "reth-revm",
- "reth-rpc",
- "reth-rpc-eth-api",
- "reth-rpc-eth-types",
- "reth-rpc-types",
- "reth-rpc-types-compat",
- "reth-tracing",
- "reth-transaction-pool",
- "serde",
- "serde_json",
- "thiserror",
- "tokio",
- "tracing",
-]
-
 [[package]]
 name = "reth-node-types"
 version = "1.0.7"
@@ -8010,11 +7990,12 @@
  "alloy-chains",
  "alloy-genesis",
  "alloy-primitives",
- "derive_more",
+ "derive_more 1.0.0",
  "once_cell",
  "op-alloy-rpc-types",
  "reth-chainspec",
  "reth-ethereum-forks",
+ "reth-network-peers",
  "reth-optimism-forks",
  "reth-primitives-traits",
  "serde_json",
@@ -8040,14 +8021,14 @@
  "reth-db-common",
  "reth-downloaders",
  "reth-errors",
- "reth-evm-optimism",
  "reth-execution-types",
  "reth-network-p2p",
  "reth-node-builder",
  "reth-node-core",
  "reth-node-events",
- "reth-node-optimism",
  "reth-optimism-chainspec",
+ "reth-optimism-evm",
+ "reth-optimism-node",
  "reth-optimism-primitives",
  "reth-primitives",
  "reth-provider",
@@ -8078,6 +8059,32 @@
  "tracing",
 ]
 
+[[package]]
+name = "reth-optimism-evm"
+version = "1.0.7"
+dependencies = [
+ "alloy-consensus",
+ "alloy-eips",
+ "alloy-genesis",
+ "alloy-primitives",
+ "op-alloy-consensus",
+ "reth-chainspec",
+ "reth-ethereum-forks",
+ "reth-evm",
+ "reth-execution-errors",
+ "reth-execution-types",
+ "reth-optimism-chainspec",
+ "reth-optimism-consensus",
+ "reth-optimism-forks",
+ "reth-primitives",
+ "reth-prune-types",
+ "reth-revm",
+ "revm",
+ "revm-primitives",
+ "thiserror",
+ "tracing",
+]
+
 [[package]]
 name = "reth-optimism-forks"
 version = "1.0.7"
@@ -8089,26 +8096,82 @@
  "serde",
 ]
 
+[[package]]
+name = "reth-optimism-node"
+version = "1.0.7"
+dependencies = [
+ "alloy-eips",
+ "alloy-genesis",
+ "alloy-primitives",
+ "alloy-rpc-types-engine",
+ "async-trait",
+ "clap",
+ "eyre",
+ "jsonrpsee",
+ "jsonrpsee-types",
+ "op-alloy-consensus",
+ "op-alloy-rpc-types-engine",
+ "parking_lot 0.12.3",
+ "reqwest",
+ "reth",
+ "reth-auto-seal-consensus",
+ "reth-basic-payload-builder",
+ "reth-beacon-consensus",
+ "reth-chainspec",
+ "reth-consensus",
+ "reth-db",
+ "reth-discv5",
+ "reth-e2e-test-utils",
+ "reth-evm",
+ "reth-network",
+ "reth-node-api",
+ "reth-node-builder",
+ "reth-optimism-chainspec",
+ "reth-optimism-consensus",
+ "reth-optimism-evm",
+ "reth-optimism-forks",
+ "reth-optimism-payload-builder",
+ "reth-optimism-rpc",
+ "reth-payload-builder",
+ "reth-primitives",
+ "reth-provider",
+ "reth-revm",
+ "reth-rpc",
+ "reth-rpc-eth-api",
+ "reth-rpc-eth-types",
+ "reth-rpc-types-compat",
+ "reth-tracing",
+ "reth-transaction-pool",
+ "serde",
+ "serde_json",
+ "thiserror",
+ "tokio",
+ "tracing",
+]
+
 [[package]]
 name = "reth-optimism-payload-builder"
 version = "1.0.7"
 dependencies = [
+ "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
+ "alloy-rpc-types-engine",
  "op-alloy-rpc-types-engine",
  "reth-basic-payload-builder",
  "reth-chain-state",
  "reth-chainspec",
  "reth-evm",
- "reth-evm-optimism",
  "reth-execution-types",
+ "reth-optimism-chainspec",
+ "reth-optimism-consensus",
+ "reth-optimism-evm",
  "reth-optimism-forks",
  "reth-payload-builder",
  "reth-payload-primitives",
  "reth-primitives",
  "reth-provider",
  "reth-revm",
- "reth-rpc-types",
  "reth-rpc-types-compat",
  "reth-transaction-pool",
  "reth-trie",
@@ -8132,10 +8195,11 @@
 name = "reth-optimism-rpc"
 version = "1.0.7"
 dependencies = [
+ "alloy-eips",
  "alloy-primitives",
  "alloy-rpc-types",
  "alloy-rpc-types-eth",
- "derive_more",
+ "derive_more 1.0.0",
  "jsonrpsee-types",
  "op-alloy-consensus",
  "op-alloy-network",
@@ -8144,11 +8208,12 @@
  "reqwest",
  "reth-chainspec",
  "reth-evm",
- "reth-evm-optimism",
  "reth-network-api",
  "reth-node-api",
  "reth-node-builder",
  "reth-optimism-chainspec",
+ "reth-optimism-consensus",
+ "reth-optimism-evm",
  "reth-optimism-forks",
  "reth-primitives",
  "reth-provider",
@@ -8165,6 +8230,17 @@
  "tracing",
 ]
 
+[[package]]
+name = "reth-optimism-storage"
+version = "1.0.7"
+dependencies = [
+ "reth-codecs",
+ "reth-db-api",
+ "reth-primitives",
+ "reth-prune-types",
+ "reth-stages-types",
+]
+
 [[package]]
 name = "reth-payload-builder"
 version = "1.0.7"
@@ -8230,10 +8306,11 @@
  "alloy-serde",
  "arbitrary",
  "assert_matches",
+ "bincode",
  "bytes",
  "c-kzg",
  "criterion",
- "derive_more",
+ "derive_more 1.0.0",
  "k256",
  "modular-bitfield",
  "once_cell",
@@ -8248,14 +8325,15 @@
  "reth-codecs",
  "reth-ethereum-forks",
  "reth-optimism-chainspec",
- "reth-optimism-forks",
  "reth-primitives-traits",
  "reth-static-file-types",
+ "reth-testing-utils",
"reth-trie-common", "revm-primitives", "secp256k1", "serde", "serde_json", + "serde_with", "test-fuzz", "zstd", ] @@ -8270,18 +8348,21 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", + "bincode", "byteorder", "bytes", - "derive_more", + "derive_more 1.0.0", "modular-bitfield", "proptest", "proptest-arbitrary-interop", "rand 0.8.5", "reth-codecs", + "reth-testing-utils", "revm-primitives", "roaring", "serde", "serde_json", + "serde_with", "test-fuzz", ] @@ -8289,6 +8370,7 @@ dependencies = [ name = "reth-provider" version = "1.0.7" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", @@ -8371,7 +8453,7 @@ dependencies = [ "arbitrary", "assert_matches", "bytes", - "derive_more", + "derive_more 1.0.0", "modular-bitfield", "proptest", "proptest-arbitrary-interop", @@ -8387,6 +8469,7 @@ dependencies = [ name = "reth-revm" version = "1.0.7" dependencies = [ + "alloy-primitives", "reth-chainspec", "reth-consensus-common", "reth-ethereum-forks", @@ -8405,6 +8488,7 @@ version = "1.0.7" dependencies = [ "alloy-consensus", "alloy-dyn-abi", + "alloy-eips", "alloy-genesis", "alloy-network", "alloy-primitives", @@ -8420,7 +8504,7 @@ dependencies = [ "alloy-signer", "alloy-signer-local", "async-trait", - "derive_more", + "derive_more 1.0.0", "futures", "http", "http-body", @@ -8478,6 +8562,7 @@ dependencies = [ "alloy-rpc-types-anvil", "alloy-rpc-types-beacon", "alloy-rpc-types-debug", + "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-rpc-types-mev", "alloy-rpc-types-trace", @@ -8488,7 +8573,6 @@ dependencies = [ "reth-network-peers", "reth-primitives", "reth-rpc-eth-api", - "reth-rpc-types", "serde_json", ] @@ -8518,6 +8602,7 @@ dependencies = [ "alloy-network", "alloy-primitives", "alloy-rpc-types", + "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-rpc-types-trace", "alloy-serde", @@ -8548,7 +8633,6 @@ dependencies = [ "reth-rpc-eth-types", "reth-rpc-layer", "reth-rpc-server-types", - "reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", "reth-tokio-util", @@ -8570,6 +8654,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types-engine", "assert_matches", "async-trait", "jsonrpsee-core", @@ -8586,7 +8671,6 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-rpc-api", - "reth-rpc-types", "reth-rpc-types-compat", "reth-storage-api", "reth-tasks", @@ -8650,7 +8734,7 @@ dependencies = [ "alloy-rpc-types-eth", "alloy-serde", "alloy-sol-types", - "derive_more", + "derive_more 1.0.0", "futures", "jsonrpsee-core", "jsonrpsee-types", @@ -8665,7 +8749,6 @@ dependencies = [ "reth-primitives", "reth-revm", "reth-rpc-server-types", - "reth-rpc-types", "reth-rpc-types-compat", "reth-storage-api", "reth-tasks", @@ -8703,35 +8786,28 @@ name = "reth-rpc-server-types" version = "1.0.7" dependencies = [ "alloy-primitives", + "alloy-rpc-types-engine", "jsonrpsee-core", "jsonrpsee-types", "reth-errors", "reth-network-api", "reth-primitives", - "reth-rpc-types", "serde", "strum", ] -[[package]] -name = "reth-rpc-types" -version = "1.0.7" -dependencies = [ - "alloy-rpc-types-engine", - "jsonrpsee-types", -] - [[package]] name = "reth-rpc-types-compat" version = "1.0.7" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-rpc-types", + "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-serde", "reth-primitives", - "reth-rpc-types", "reth-trie-common", "serde_json", ] @@ -8740,8 +8816,10 @@ dependencies = [ name = "reth-stages" version = "1.0.7" dependencies = [ + 
"alloy-primitives", "alloy-rlp", "assert_matches", + "bincode", "criterion", "futures-util", "itertools 0.13.0", @@ -8860,7 +8938,7 @@ version = "1.0.7" dependencies = [ "alloy-primitives", "clap", - "derive_more", + "derive_more 1.0.0", "serde", "strum", ] @@ -8890,7 +8968,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "derive_more", + "derive_more 1.0.0", "reth-fs-util", "reth-primitives", ] @@ -8916,6 +8994,7 @@ dependencies = [ name = "reth-testing-utils" version = "1.0.7" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", @@ -8951,6 +9030,7 @@ dependencies = [ name = "reth-transaction-pool" version = "1.0.7" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", @@ -8998,8 +9078,9 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "auto_impl", + "bincode", "criterion", - "derive_more", + "derive_more 1.0.0", "itertools 0.13.0", "metrics", "proptest", @@ -9015,6 +9096,7 @@ dependencies = [ "revm", "serde", "serde_json", + "serde_with", "tokio", "tracing", "triehash", @@ -9031,7 +9113,7 @@ dependencies = [ "alloy-trie", "arbitrary", "bytes", - "derive_more", + "derive_more 1.0.0", "hash-db", "itertools 0.13.0", "nybbles", @@ -9051,7 +9133,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "auto_impl", - "derive_more", + "derive_more 1.0.0", "itertools 0.13.0", "metrics", "proptest", @@ -9086,7 +9168,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "criterion", - "derive_more", + "derive_more 1.0.0", "itertools 0.13.0", "metrics", "proptest", @@ -9108,9 +9190,9 @@ dependencies = [ [[package]] name = "revm" -version = "14.0.2" +version = "14.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9f3f55d0414c3d73902d876ba3d55a654f05fe937089fbf5f34b1ced26d78d5" +checksum = "641702b12847f9ed418d552f4fcabe536d867a2c980e96b6e7e25d7b992f929f" dependencies = [ "auto_impl", "cfg-if", @@ -9123,9 +9205,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.7.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b57b33a24b5b8b8efa1da3f60d44f02d6e649f06ef925d7780723ff14ff55321" +checksum = "43c44af0bf801f48d25f7baf25cf72aff4c02d610f83b428175228162fef0246" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9142,9 +9224,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "10.0.2" +version = "10.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713dbb271acd13afb06dcd460c1dc43da211e7ac9bc73cdf13528f615f55f96b" +checksum = "2e5e14002afae20b5bf1566f22316122f42f57517000e559c55b25bf7a49cba2" dependencies = [ "revm-primitives", "serde", @@ -9152,9 +9234,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "11.0.2" +version = "11.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73010c271d53fa7904e9845338e95f3955eb1200a0355e0abfdb89c41aaa9cd" +checksum = "3198c06247e8d4ad0d1312591edf049b0de4ddffa9fecb625c318fd67db8639b" dependencies = [ "aurora-engine-modexp", "blst", @@ -9172,11 +9254,12 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "9.0.2" +version = "10.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7a6bff9dbde3370a5ac9555104117f7e6039b3cc76e8d5d9d01899088beca2a" +checksum = "6f1525851a03aff9a9d6a1d018b414d76252d6802ab54695b27093ecd7e7a101" dependencies = [ - "alloy-eips", + "alloy-eip2930", + "alloy-eip7702", "alloy-primitives", "auto_impl", 
"bitflags 2.6.0", @@ -9185,7 +9268,6 @@ dependencies = [ "cfg-if", "dyn-clone", "enumn", - "hashbrown 0.14.5", "hex", "serde", ] @@ -9258,6 +9340,28 @@ dependencies = [ "rustc-hex", ] +[[package]] +name = "rmp" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4" +dependencies = [ + "byteorder", + "num-traits", + "paste", +] + +[[package]] +name = "rmp-serde" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e599a477cf9840e92f2cde9a7189e67b42c57532749bf90aea6ec10facd4db" +dependencies = [ + "byteorder", + "rmp", + "serde", +] + [[package]] name = "roaring" version = "0.10.6" @@ -9331,6 +9435,9 @@ name = "rustc-hash" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +dependencies = [ + "rand 0.8.5", +] [[package]] name = "rustc-hex" @@ -9412,19 +9519,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = "rustls-platform-verifier" @@ -9505,9 +9611,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.17" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c947adb109a8afce5fc9c7bf951f87f146e9147b3a6a58413105628fb1d1e66" +checksum = "836f1e0f4963ef5288b539b643b35e043e76a32d0f4e47e67febf69576527f50" dependencies = [ "sdd", ] @@ -9664,7 +9770,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9699,14 +9805,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -9725,9 +9831,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.9.0" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" +checksum = "9720086b3357bcb44fce40117d769a4d068c70ecfa190850a980a71755f66fcc" dependencies = [ "base64 0.22.1", "chrono", @@ -9743,14 +9849,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.9.0" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" +checksum = "5f1abbfe725f27678f4663bcacb75a83e829fd464c25d78dd038a3a29e307cec" dependencies = [ "darling", 
"proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9759,8 +9865,6 @@ version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" dependencies = [ - "futures", - "log", "once_cell", "parking_lot 0.12.3", "scc", @@ -9775,7 +9879,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9874,7 +9978,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", - "mio 0.8.11", + "mio 1.0.2", "signal-hook", ] @@ -9902,10 +10006,6 @@ name = "similar" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" -dependencies = [ - "bstr", - "unicode-segmentation", -] [[package]] name = "similar-asserts" @@ -10027,16 +10127,6 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" -[[package]] -name = "stability" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" -dependencies = [ - "quote", - "syn 2.0.77", -] - [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -10080,7 +10170,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10104,9 +10194,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.11.1" +version = "12.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fdf97c441f18a4f92425b896a4ec7a27e03631a0b1047ec4e34e9916a9a167e" +checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" dependencies = [ "debugid", "memmap2", @@ -10116,9 +10206,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.11.1" +version = "12.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc8ece6b129e97e53d1fbb3f61d33a6a9e5369b11d01228c068094d6d134eaea" +checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -10138,9 +10228,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.77" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -10149,14 +10239,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b95156f8b577cb59dc0b1df15c6f29a10afc5f8a7ac9786b0b5c68c19149278" +checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10182,21 +10272,20 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "sysinfo" -version = "0.30.13" +version = 
"0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a5b4ddaee55fb2bea2bf0e5000747e5f5c0de765e5a5ff87f4cd106439f4bb3" +checksum = "355dbe4f8799b304b05e1b0f05fc59b2a18d36645cf169607da45bde2f69a1be" dependencies = [ - "cfg-if", "core-foundation-sys", "libc", + "memchr", "ntapi", - "once_cell", - "windows 0.52.0", + "windows 0.57.0", ] [[package]] @@ -10207,9 +10296,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand 2.1.1", @@ -10218,15 +10307,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - [[package]] name = "termtree" version = "0.4.1" @@ -10235,9 +10315,9 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-fuzz" -version = "5.2.2" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9c7343da240e16d7ed8e7a11a6f8e7535c634c38a40736f3b001b38b274962c" +checksum = "3ab7a9bb33d134e863862ab9dad2ac7e022ac89707914627f498fe0f29248d9b" dependencies = [ "serde", "test-fuzz-internal", @@ -10247,9 +10327,9 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "5.2.2" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7fbf19217465046777a6c53186f5a72e397a6485a438e6778a283bdd4be720" +checksum = "d0bef5dd380747bd7b6e636a8032a24aa34fcecaf843e59fc97d299681922e86" dependencies = [ "bincode", "cargo_metadata", @@ -10258,24 +10338,25 @@ dependencies = [ [[package]] name = "test-fuzz-macro" -version = "5.2.2" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7fa70c261f748327f21d189b20f447a4306bc1f85ed2571adf185765b14e0a" +checksum = "a7e6b4c7391a38f0f026972ec2200bcfd1ec45533aa266fdae5858d011afc500" dependencies = [ "darling", + "heck", "itertools 0.13.0", "once_cell", "prettyplease", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "test-fuzz-runtime" -version = "5.2.2" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ba7fe4e29cb917d48fcd18324ed745b6d5e43e9bea76f5f85eb8352f5829ac1" +checksum = "c9fbe6fb7481ec6d9bf64ae2c5d49cb1b40f8da624a91031482af7b08168c679" dependencies = [ "hex", "num-traits", @@ -10292,22 +10373,22 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = 
"08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10484,7 +10565,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10512,9 +10593,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.23.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" +checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9" dependencies = [ "futures-util", "log", @@ -10564,9 +10645,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.21" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap 2.5.0", "serde", @@ -10685,7 +10766,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10775,9 +10856,9 @@ dependencies = [ [[package]] name = "tracy-client" -version = "0.17.3" +version = "0.17.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373db47331c3407b343538df77eea2516884a0b126cdfb4b135acfd400015dd7" +checksum = "746b078c6a09ebfd5594609049e07116735c304671eaab06ce749854d23435bc" dependencies = [ "loom", "once_cell", @@ -10787,9 +10868,9 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49cf0064dcb31c99aa1244c1b93439359e53f72ed217eef5db50abd442241e9a" +checksum = "68613466112302fdbeabc5fa55f7d57462a0b247d5a6b7d7e09401fb471a144d" dependencies = [ "cc", ] @@ -10856,25 +10937,11 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" -[[package]] -name = "trybuild" -version = "1.0.99" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "207aa50d36c4be8d8c6ea829478be44a372c6a77669937bb39c698e52f1491e8" -dependencies = [ - "glob", - "serde", - "serde_derive", - "serde_json", - "termcolor", - "toml", -] - [[package]] name = "tungstenite" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" +checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a" dependencies = [ "byteorder", "bytes", @@ -10898,9 +10965,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "uint" @@ -10995,6 +11062,12 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" + [[package]] name = "untrusted" version = "0.9.0" @@ -11140,7 +11213,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "wasm-bindgen-shared", ] @@ -11174,7 +11247,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11187,9 +11260,9 @@ checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "4e072d4e72f700fb3443d8fe94a39315df013eef1104903cdb0a2abd322bbecd" dependencies = [ "futures-util", "js-sys", @@ -11254,16 +11327,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" -dependencies = [ - "windows-core 0.52.0", - "windows-targets 0.52.6", -] - [[package]] name = "windows" version = "0.57.0" @@ -11326,7 +11389,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -11337,7 +11400,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -11348,7 +11411,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -11359,7 +11422,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -11551,9 +11614,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -11634,7 +11697,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "synstructure", ] @@ -11656,7 +11719,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -11676,7 +11739,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "synstructure", ] @@ -11697,7 +11760,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -11719,7 +11782,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c386abc6f679..6a268b0684d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,6 @@ members = [ "crates/exex/test-utils/", "crates/exex/types/", "crates/metrics/", - "crates/metrics/metrics-derive/", "crates/net/banlist/", "crates/net/discv4/", "crates/net/discv5/", @@ -79,6 +78,7 @@ members = [ "crates/optimism/payload/", "crates/optimism/primitives/", "crates/optimism/rpc/", + "crates/optimism/storage", "crates/payload/basic/", "crates/payload/builder/", "crates/payload/primitives/", @@ -98,7 +98,6 @@ members = [ "crates/rpc/rpc-server-types/", "crates/rpc/rpc-testing-util/", "crates/rpc/rpc-types-compat/", - "crates/rpc/rpc-types/", "crates/rpc/rpc/", "crates/stages/api/", "crates/stages/stages/", @@ -332,7 +331,7 @@ reth-ethereum-payload-builder = { path = "crates/ethereum/payload" } reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } -reth-evm-optimism = { path = "crates/optimism/evm" } +reth-optimism-evm = { path = "crates/optimism/evm" } reth-execution-errors = { path = "crates/evm/execution-errors" } reth-execution-types = { path = "crates/evm/execution-types" } reth-exex = { path = "crates/exex/exex" } @@ -344,7 +343,6 @@ reth-ipc = { path = "crates/rpc/ipc" } reth-libmdbx = { path = "crates/storage/libmdbx-rs" } reth-mdbx-sys = { path = "crates/storage/libmdbx-rs/mdbx-sys" } reth-metrics = { path = "crates/metrics" } -reth-metrics-derive = { path = "crates/metrics/metrics-derive" } reth-net-banlist = { path = "crates/net/banlist" } reth-net-nat = { path = "crates/net/nat" } reth-network = { path = "crates/net/network" } @@ -359,7 +357,7 @@ reth-node-core = { path = "crates/node/core" } reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-events = { path = "crates/node/events" } reth-node-metrics = { path = "crates/node/metrics" } -reth-node-optimism = { path = "crates/optimism/node" } +reth-optimism-node = { path = "crates/optimism/node" } reth-node-types = { path = "crates/node/types" } reth-optimism-chainspec = { path = "crates/optimism/chainspec" } reth-optimism-cli = { path = "crates/optimism/cli" } @@ -368,6 +366,7 @@ reth-optimism-forks = { path = "crates/optimism/hardforks" } reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-optimism-primitives = { path = "crates/optimism/primitives" } reth-optimism-rpc = { path = "crates/optimism/rpc" } +reth-optimism-storage = { path = "crates/optimism/storage" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-primitives = { path = "crates/payload/primitives" } reth-payload-validator = { path = "crates/payload/validator" } @@ -388,7 +387,6 @@ reth-rpc-eth-api = { path = "crates/rpc/rpc-eth-api" } reth-rpc-eth-types = { path = "crates/rpc/rpc-eth-types", default-features = false } reth-rpc-layer = { path = "crates/rpc/rpc-layer" } reth-rpc-server-types = { path = "crates/rpc/rpc-server-types" } -reth-rpc-types = { path = "crates/rpc/rpc-types" } reth-rpc-types-compat = { path = "crates/rpc/rpc-types-compat" } reth-stages = { path = "crates/stages/stages" } reth-stages-api = { path = "crates/stages/api" } @@ -408,73 +406,77 @@ reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } # revm -revm = { version = "14.0.2", features = [ +revm = { version = "14.0.3", features = [ "std", "secp256k1", "blst", ], default-features = false } -revm-inspectors = "0.7.6" 
-revm-primitives = { version = "9.0.2", features = [ +revm-inspectors = "0.8.1" +revm-primitives = { version = "10.0.0", features = [ "std", ], default-features = false } # eth alloy-chains = "0.1.32" alloy-dyn-abi = "0.8.0" -alloy-primitives = { version = "0.8.3", default-features = false } +alloy-primitives = { version = "0.8.4", default-features = false } alloy-rlp = "0.3.4" alloy-sol-types = "0.8.0" -alloy-trie = { version = "0.5", default-features = false } +alloy-trie = { version = "0.6", default-features = false } -alloy-consensus = { version = "0.3.6", default-features = false } -alloy-eips = { version = "0.3.6", default-features = false } -alloy-genesis = { version = "0.3.6", default-features = false } -alloy-json-rpc = { version = "0.3.6", default-features = false } -alloy-network = { version = "0.3.6", default-features = false } -alloy-network-primitives = { version = "0.3.6", default-features = false } -alloy-node-bindings = { version = "0.3.6", default-features = false } -alloy-provider = { version = "0.3.6", features = [ +alloy-consensus = { version = "0.4.2", default-features = false } +alloy-eips = { version = "0.4.2", default-features = false } +alloy-genesis = { version = "0.4.2", default-features = false } +alloy-json-rpc = { version = "0.4.2", default-features = false } +alloy-network = { version = "0.4.2", default-features = false } +alloy-network-primitives = { version = "0.4.2", default-features = false } +alloy-node-bindings = { version = "0.4.2", default-features = false } +alloy-provider = { version = "0.4.2", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.3.6", default-features = false } -alloy-rpc-client = { version = "0.3.6", default-features = false } -alloy-rpc-types = { version = "0.3.6", features = [ +alloy-pubsub = { version = "0.4.2", default-features = false } +alloy-rpc-client = { version = "0.4.2", default-features = false } +alloy-rpc-types = { version = "0.4.2", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.3.6", default-features = false } -alloy-rpc-types-anvil = { version = "0.3.6", default-features = false } -alloy-rpc-types-beacon = { version = "0.3.6", default-features = false } -alloy-rpc-types-debug = { version = "0.3.6", default-features = false } -alloy-rpc-types-engine = { version = "0.3.6", default-features = false } -alloy-rpc-types-eth = { version = "0.3.6", default-features = false } -alloy-rpc-types-mev = { version = "0.3.6", default-features = false } -alloy-rpc-types-trace = { version = "0.3.6", default-features = false } -alloy-rpc-types-txpool = { version = "0.3.6", default-features = false } -alloy-serde = { version = "0.3.6", default-features = false } -alloy-signer = { version = "0.3.6", default-features = false } -alloy-signer-local = { version = "0.3.6", default-features = false } -alloy-transport = { version = "0.3.6" } -alloy-transport-http = { version = "0.3.6", features = [ +alloy-rpc-types-admin = { version = "0.4.2", default-features = false } +alloy-rpc-types-anvil = { version = "0.4.2", default-features = false } +alloy-rpc-types-beacon = { version = "0.4.2", default-features = false } +alloy-rpc-types-debug = { version = "0.4.2", default-features = false } +alloy-rpc-types-engine = { version = "0.4.2", default-features = false } +alloy-rpc-types-eth = { version = "0.4.2", default-features = false } +alloy-rpc-types-mev = { version = "0.4.2", default-features = false } +alloy-rpc-types-trace = { version = "0.4.2", default-features = false 
} +alloy-rpc-types-txpool = { version = "0.4.2", default-features = false } +alloy-serde = { version = "0.4.2", default-features = false } +alloy-signer = { version = "0.4.2", default-features = false } +alloy-signer-local = { version = "0.4.2", default-features = false } +alloy-transport = { version = "0.4.2" } +alloy-transport-http = { version = "0.4.2", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.3.6", default-features = false } -alloy-transport-ws = { version = "0.3.6", default-features = false } +alloy-transport-ipc = { version = "0.4.2", default-features = false } +alloy-transport-ws = { version = "0.4.2", default-features = false } # op -op-alloy-rpc-types = "0.2.12" -op-alloy-rpc-types-engine = "0.2.12" -op-alloy-network = "0.2.12" -op-alloy-consensus = "0.2.12" +op-alloy-rpc-types = "0.3.2" +op-alloy-rpc-types-engine = "0.3.2" +op-alloy-network = "0.3.2" +op-alloy-consensus = "0.3.2" # misc aquamarine = "0.5" auto_impl = "1" -backon = "0.4" +backon = { version = "1.2", default-features = false, features = [ + "std-blocking-sleep", + "tokio-sleep", +] } +bincode = "1.3" bitflags = "2.4" boyer-moore-magiclen = "0.2.16" bytes = "1.5" -clap = "4" cfg-if = "1.0" +clap = "4" const_format = { version = "0.2.32", features = ["rust_1_64"] } dashmap = "6.0" derive_more = { version = "1", features = ["full"] } @@ -515,9 +517,10 @@ zstd = "0.13" # metrics metrics = "0.23.0" +metrics-derive = "0.1" metrics-exporter-prometheus = { version = "0.15.0", default-features = false } metrics-process = "2.1.0" -metrics-util = "0.17.0" +metrics-util = { default-features = false, version = "0.17.0" } # proc-macros proc-macro2 = "1.0" @@ -575,20 +578,20 @@ toml = "0.8" arbitrary = "1.3" assert_matches = "1.5.0" criterion = "0.5" -iai-callgrind = "0.11" +iai-callgrind = "0.13" pprof = "0.13" proptest = "1.4" proptest-derive = "0.5" -serial_test = "3" -similar-asserts = "1.5.0" +serial_test = { default-features = false, version = "3" } +similar-asserts = { default-features = false, version = "1.5.0" } tempfile = "3.8" -test-fuzz = "5" +test-fuzz = "6" tikv-jemalloc-ctl = "0.6" tikv-jemallocator = "0.6" tracy-client = "0.17.3" -#[patch.crates-io] +[patch.crates-io] #alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} #alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} #alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} diff --git a/Makefile b/Makefile index 4d26e7872ed1..4d897c7ee482 100644 --- a/Makefile +++ b/Makefile @@ -13,9 +13,9 @@ CARGO_TARGET_DIR ?= target # List of features to use when building. Can be overridden via the environment. # No jemalloc on Windows ifeq ($(OS),Windows_NT) - FEATURES ?= asm-keccak + FEATURES ?= asm-keccak min-debug-logs else - FEATURES ?= jemalloc asm-keccak + FEATURES ?= jemalloc asm-keccak min-debug-logs endif # Cargo profile for builds. Default is for local builds, CI uses an override. @@ -53,7 +53,7 @@ install: ## Build and install the reth binary under `~/.cargo/bin`. .PHONY: install-op install-op: ## Build and install the op-reth binary under `~/.cargo/bin`. cargo install --path crates/optimism/bin --bin op-reth --force --locked \ - --features "optimism,$(FEATURES)" \ + --features "optimism $(FEATURES)" \ --profile "$(PROFILE)" \ $(CARGO_INSTALL_EXTRA_FLAGS) @@ -67,14 +67,14 @@ build-debug: ## Build the reth binary into `target/debug` directory. .PHONY: build-op build-op: ## Build the op-reth binary into `target` directory. 
- cargo build --bin op-reth --features "optimism,$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml + cargo build --bin op-reth --features "optimism $(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml # Builds the reth binary natively. build-native-%: cargo build --bin reth --target $* --features "$(FEATURES)" --profile "$(PROFILE)" op-build-native-%: - cargo build --bin op-reth --target $* --features "optimism,$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml + cargo build --bin op-reth --target $* --features "optimism $(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml # The following commands use `cross` to build a cross-compile. # @@ -106,7 +106,7 @@ build-%: op-build-%: RUSTFLAGS="-C link-arg=-lgcc -Clink-arg=-static-libgcc" \ - cross build --bin op-reth --target $* --features "optimism,$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml + cross build --bin op-reth --target $* --features "optimism $(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml # Unfortunately we can't easily use cross to build for Darwin because of licensing issues. # If we wanted to, we would need to build a custom Docker image with the SDK available. @@ -477,5 +477,4 @@ check-features: --package reth-codecs \ --package reth-primitives-traits \ --package reth-primitives \ - --package reth-rpc-types \ --feature-powerset diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index d9e980f3b0b8..e4e40daeca91 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -36,6 +36,7 @@ alloy-pubsub.workspace = true alloy-json-rpc.workspace = true alloy-rpc-client.workspace = true alloy-eips.workspace = true +alloy-primitives.workspace = true # reqwest reqwest = { workspace = true, default-features = false, features = [ diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index 61780b3ab208..a8c18b48a2b7 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -11,13 +11,14 @@ use crate::{ }, valid_payload::{call_forkchoice_updated, call_new_payload}, }; +use alloy_primitives::B256; use alloy_provider::Provider; use alloy_rpc_types_engine::ForkchoiceState; use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::{Block, B256}; +use reth_primitives::Block; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -100,8 +101,7 @@ impl Command { ) .await?; - let new_payload_result = - NewPayloadResult { gas_used: gas_used as u64, latency: start.elapsed() }; + let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() }; call_forkchoice_updated(&auth_provider, message_version, forkchoice_state, None) .await?; @@ -119,8 +119,7 @@ impl Command { info!(%combined_result); // record the current result - let gas_row = - TotalGasRow { block_number, gas_used: gas_used as u64, time: current_duration }; + let gas_row = TotalGasRow { block_number, gas_used, time: current_duration }; results.push((gas_row, combined_result)); } diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 866ffa5b46e8..e6392318a542 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ 
-10,12 +10,13 @@ use crate::{ }, valid_payload::call_new_payload, }; +use alloy_primitives::B256; use alloy_provider::Provider; use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::{Block, B256}; +use reth_primitives::Block; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -77,16 +78,14 @@ impl Command { call_new_payload(&auth_provider, payload, parent_beacon_block_root, versioned_hashes) .await?; - let new_payload_result = - NewPayloadResult { gas_used: gas_used as u64, latency: start.elapsed() }; + let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() }; info!(%new_payload_result); // current duration since the start of the benchmark let current_duration = total_benchmark_duration.elapsed(); // record the current result - let row = - TotalGasRow { block_number, gas_used: gas_used as u64, time: current_duration }; + let row = TotalGasRow { block_number, gas_used, time: current_duration }; results.push((row, new_payload_result)); } diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs index 51b48227cb2f..6353aea71233 100644 --- a/bin/reth-bench/src/valid_payload.rs +++ b/bin/reth-bench/src/valid_payload.rs @@ -2,6 +2,7 @@ //! response. This is useful for benchmarking, as it allows us to wait for a payload to be valid //! before sending additional calls. +use alloy_primitives::B256; use alloy_provider::{ext::EngineApi, Network}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, @@ -9,7 +10,6 @@ use alloy_rpc_types_engine::{ }; use alloy_transport::{Transport, TransportResult}; use reth_node_api::EngineApiMessageVersion; -use reth_primitives::B256; use tracing::error; /// An extension trait for providers that implement the engine API, to wait for a VALID response. diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index a78f0f958695..3d3cbd06f4d7 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -66,8 +66,11 @@ reth-engine-util.workspace = true reth-prune.workspace = true # crypto +alloy-eips.workspace = true alloy-rlp.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } +alloy-consensus.workspace = true +alloy-primitives.workspace = true # tracing tracing.workspace = true diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index f06cb716989d..8bb0971ec081 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -16,7 +16,7 @@ use reth_cli_commands::{ use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_node_core::args::utils::DefaultChainSpecParser; +use reth_node_core::args::utils::EthereumChainSpecParser; use reth_node_ethereum::{EthExecutorProvider, EthereumNode}; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; @@ -34,7 +34,8 @@ pub use crate::core::cli::*; /// This is the entrypoint to the executable. 
#[derive(Debug, Parser)] #[command(author, version = SHORT_VERSION, long_version = LONG_VERSION, about = "Reth", long_about = None)] -pub struct Cli<C: ChainSpecParser = DefaultChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs> { +pub struct Cli<C: ChainSpecParser = EthereumChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs> +{ /// The command to run #[command(subcommand)] command: Commands, @@ -116,14 +117,14 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>, Ext: clap::Args + fmt::Debug> Cl /// /// ```no_run /// use clap::Parser; - /// use reth::{args::utils::DefaultChainSpecParser, cli::Cli}; + /// use reth::{args::utils::EthereumChainSpecParser, cli::Cli}; /// /// #[derive(Debug, Parser)] /// pub struct MyArgs { /// pub enable: bool, /// } /// - /// Cli::<DefaultChainSpecParser, MyArgs>::parse() + /// Cli::<EthereumChainSpecParser, MyArgs>::parse() /// .run(|builder, my_args: MyArgs| async move { /// // launch the node /// @@ -250,7 +251,7 @@ mod tests { /// runtime #[test] fn test_parse_help_all_subcommands() { - let reth = Cli::<DefaultChainSpecParser>::command(); + let reth = Cli::<EthereumChainSpecParser>::command(); for sub_command in reth.get_subcommands() { let err = Cli::try_parse_args_from(["reth", sub_command.get_name(), "--help"]) .err() diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index ab613dad6a71..a7f75c02a8f8 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -1,4 +1,7 @@ //! Command for debugging block building. +use alloy_consensus::TxEip4844; +use alloy_eips::eip2718::Encodable2718; +use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::Decodable; use alloy_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; use clap::Parser; @@ -23,9 +26,8 @@ use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttribut use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::database::CachedReads; use reth_primitives::{ - revm_primitives::KzgSettings, Address, BlobTransaction, BlobTransactionSidecar, Bytes, + revm_primitives::KzgSettings, BlobTransaction, BlobTransactionSidecar, PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, - TxEip4844, B256, U256, }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, @@ -199,7 +201,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { encoded_length } - _ => transaction.length_without_header(), + _ => transaction.encode_2718_len(), }; debug!(target: "reth::cli", ?transaction, "Adding transaction to the pool"); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 64ca4dc2dc84..215afacb583c 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -1,6 +1,7 @@ //! Command for debugging execution.
use crate::{args::NetworkArgs, utils::get_single_header}; +use alloy_primitives::{BlockNumber, B256}; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; use reth_beacon_consensus::EthBeaconConsensus; @@ -22,7 +23,7 @@ use reth_network_api::NetworkInfo; use reth_network_p2p::{headers::client::HeadersClient, BlockClient}; use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; +use reth_primitives::BlockHashOrNumber; use reth_provider::{ BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index d46665e48137..51851c0b0ad2 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -118,14 +118,14 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { let header = (move || { get_single_header(client.clone(), BlockHashOrNumber::Number(target_block_number)) }) - .retry(&backoff) + .retry(backoff) .notify(|err, _| warn!(target: "reth::cli", "Error requesting header: {err}. Retrying...")) .await?; let client = fetch_client.clone(); let chain = provider_factory.chain_spec(); let block = (move || get_single_body(client.clone(), Arc::clone(&chain), header.clone())) - .retry(&backoff) + .retry(backoff) .notify( |err, _| warn!(target: "reth::cli", "Error requesting body: {err}. Retrying..."), ) diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 0eb77ae0ab7b..8e02a52eaf07 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -113,7 +113,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { let to_header = (move || { get_single_header(client.clone(), BlockHashOrNumber::Number(self.to)) }) - .retry(&backoff) + .retry(backoff) .notify(|err, _| warn!(target: "reth::cli", "Error requesting header: {err}.
Retrying...")) .await?; info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range"); diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index 67a8919e27ae..578f2987d73f 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -4,7 +4,7 @@ static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); use clap::{Args, Parser}; -use reth::{args::utils::DefaultChainSpecParser, cli::Cli}; +use reth::{args::utils::EthereumChainSpecParser, cli::Cli}; use reth_node_builder::{ engine_tree_config::{ TreeConfig, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, @@ -50,7 +50,7 @@ fn main() { } if let Err(err) = - Cli::::parse().run(|builder, engine_args| async move { + Cli::::parse().run(|builder, engine_args| async move { let enable_engine2 = engine_args.experimental; match enable_engine2 { true => { diff --git a/book/developers/exex/hello-world.md b/book/developers/exex/hello-world.md index 3c90e5a693d0..facb07e5307f 100644 --- a/book/developers/exex/hello-world.md +++ b/book/developers/exex/hello-world.md @@ -125,7 +125,7 @@ async fn my_exex(mut ctx: ExExContext) -> eyre:: if let Some(committed_chain) = notification.committed_chain() { ctx.events - .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; } } diff --git a/book/developers/exex/how-it-works.md b/book/developers/exex/how-it-works.md index 7fd179bf9155..228711d3fea4 100644 --- a/book/developers/exex/how-it-works.md +++ b/book/developers/exex/how-it-works.md @@ -23,4 +23,4 @@ event to signify what blocks have been processed. This event is used by Reth to An ExEx will only receive notifications for block numbers greater than the block in the most recently emitted `FinishedHeight` event. -To clarify: if an ExEx emits `ExExEvent::FinishedHeight(0)` it will receive notifications for any `block_number > 0`. +To clarify: if an ExEx emits `ExExEvent::FinishedHeight` for `block #0` it will receive notifications for any `block_number > 0`. 
diff --git a/book/developers/exex/remote.md b/book/developers/exex/remote.md index 2db5074e1df7..4344e28b34fc 100644 --- a/book/developers/exex/remote.md +++ b/book/developers/exex/remote.md @@ -279,7 +279,7 @@ async fn remote_exex<Node: FullNodeComponents>( while let Some(notification) = ctx.notifications.next().await { if let Some(committed_chain) = notification.committed_chain() { ctx.events - .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; } info!("Notification sent to the gRPC server"); @@ -388,7 +388,7 @@ async fn remote_exex<Node: FullNodeComponents>( while let Some(notification) = ctx.notifications.next().await { if let Some(committed_chain) = notification.committed_chain() { ctx.events - .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; } info!(?notification, "Notification sent to the gRPC server"); diff --git a/book/developers/exex/tracking-state.md b/book/developers/exex/tracking-state.md index 4d3bbd0a35ae..52c73e618029 100644 --- a/book/developers/exex/tracking-state.md +++ b/book/developers/exex/tracking-state.md @@ -57,7 +57,7 @@ impl<Node: FullNodeComponents> Future for MyExEx<Node> { if let Some(committed_chain) = notification.committed_chain() { this.ctx .events - .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; } } @@ -152,7 +152,7 @@ impl<Node: FullNodeComponents> Future for MyExEx<Node> { this.ctx .events - .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; } if let Some(first_block) = this.first_block { diff --git a/book/run/sync-op-mainnet.md b/book/run/sync-op-mainnet.md index ebdacdf6167f..057860c33494 100644 --- a/book/run/sync-op-mainnet.md +++ b/book/run/sync-op-mainnet.md @@ -49,7 +49,7 @@ Imports a `.rlp` file of blocks. Import of >100 million OVM blocks, from genesis to Bedrock, completes in 45 minutes. ```bash -$ op-reth import-op +$ op-reth import-op --chain optimism ``` #### 2. Import Receipts @@ -63,7 +63,7 @@ Imports a `.rlp` file of receipts, that has been exported with command specified Import of >100 million OVM receipts, from genesis to Bedrock, completes in 30 minutes. ```bash -$ op-reth import-receipts-op +$ op-reth import-receipts-op --chain optimism ``` #### 3. Import State diff --git a/crates/blockchain-tree-api/src/error.rs b/crates/blockchain-tree-api/src/error.rs index 867580e3c409..4dd42c889a36 100644 --- a/crates/blockchain-tree-api/src/error.rs +++ b/crates/blockchain-tree-api/src/error.rs @@ -194,8 +194,8 @@ impl std::fmt::Debug for InsertBlockErrorData { } } -impl std::error::Error for InsertBlockErrorData { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for InsertBlockErrorData { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { Some(&self.kind) } } @@ -240,8 +240,8 @@ impl std::fmt::Debug for InsertBlockErrorDataTwo { } } -impl std::error::Error for InsertBlockErrorDataTwo { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for InsertBlockErrorDataTwo { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { Some(&self.kind) } } @@ -333,6 +333,9 @@ pub enum InsertBlockErrorKindTwo { /// Provider error. #[error(transparent)] Provider(#[from] ProviderError), + /// Other errors.
+ #[error(transparent)] + Other(#[from] Box<dyn core::error::Error + Send + Sync>), } impl InsertBlockErrorKindTwo { @@ -365,6 +368,7 @@ impl InsertBlockErrorKindTwo { } } Self::Provider(err) => Err(InsertBlockFatalError::Provider(err)), + Self::Other(err) => Err(InternalBlockExecutionError::Other(err).into()), } } } @@ -421,7 +425,7 @@ pub enum InsertBlockErrorKind { Provider(#[from] ProviderError), /// An internal error occurred, like interacting with the database. #[error(transparent)] - Internal(#[from] Box<dyn std::error::Error + Send + Sync>), + Internal(#[from] Box<dyn core::error::Error + Send + Sync>), /// Canonical error. #[error(transparent)] Canonical(#[from] CanonicalError), diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 6cf22f1c8c93..cff117c92b05 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -25,13 +25,14 @@ reth-execution-types.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } -reth-trie-parallel = { workspace = true, features = ["parallel"] } +reth-trie-parallel.workspace = true reth-network.workspace = true reth-consensus.workspace = true reth-node-types.workspace = true # ethereum alloy-primitives.workspace = true +alloy-eips.workspace = true # common parking_lot.workspace = true @@ -59,6 +60,7 @@ reth-evm-ethereum.workspace = true parking_lot.workspace = true assert_matches.workspace = true alloy-genesis.workspace = true +alloy-consensus.workspace = true [features] test-utils = [] diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 99729af0fae5..e116463e4af6 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -183,8 +183,9 @@ impl BlockBuffer { #[cfg(test)] mod tests { use crate::BlockBuffer; + use alloy_eips::BlockNumHash; use alloy_primitives::BlockHash; - use reth_primitives::{BlockNumHash, SealedBlockWithSenders}; + use reth_primitives::SealedBlockWithSenders; use reth_testing_utils::generators::{self, random_block, BlockParams, Rng}; use std::collections::HashMap; diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index fb132bdedc4c..0c48b3b9ce85 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -2,10 +2,11 @@ use super::state::SidechainId; use crate::canonical_chain::CanonicalChain; +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; use linked_hash_set::LinkedHashSet; use reth_execution_types::Chain; -use reth_primitives::{BlockNumHash, SealedBlockWithSenders}; +use reth_primitives::SealedBlockWithSenders; use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}; /// Internal indices of the blocks and chains. @@ -533,7 +534,7 @@ mod tests { block_indices.insert_non_fork_block(block_number_2, block_hash_3, chain_id_2); // Block number 1 should have two block hashes associated with it. - let mut expected_hashes_for_block_1 = HashSet::new(); + let mut expected_hashes_for_block_1 = HashSet::default(); expected_hashes_for_block_1.insert(block_hash_1); expected_hashes_for_block_1.insert(block_hash_2); assert_eq!( @@ -601,11 +602,11 @@ mod tests { assert_eq!(block_indices.blocks_to_chain.get(&block_hash_2), Some(&chain_id)); // Check that block numbers map to their respective hashes.
- let mut expected_hashes_1 = HashSet::new(); + let mut expected_hashes_1 = HashSet::default(); expected_hashes_1.insert(block_hash_1); assert_eq!(block_indices.block_number_to_block_hashes.get(&1), Some(&expected_hashes_1)); - let mut expected_hashes_2 = HashSet::new(); + let mut expected_hashes_2 = HashSet::default(); expected_hashes_2.insert(block_hash_2); assert_eq!(block_indices.block_number_to_block_hashes.get(&2), Some(&expected_hashes_2)); diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index ed702bff1a0f..4bed718aa0a6 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -5,6 +5,7 @@ use crate::{ state::{SidechainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals, }; +use alloy_eips::{BlockNumHash, ForkBlock}; use alloy_primitives::{BlockHash, BlockNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, @@ -16,8 +17,8 @@ use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - BlockNumHash, EthereumHardfork, ForkBlock, GotExpected, Hardforks, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, StaticFileSegment, + EthereumHardfork, GotExpected, Hardforks, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, StaticFileSegment, }; use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter, @@ -589,7 +590,7 @@ where // Find all forks of given block. let mut dependent_block = self.block_indices().fork_to_child().get(block).cloned().unwrap_or_default(); - let mut dependent_chains = HashSet::new(); + let mut dependent_chains = HashSet::default(); while let Some(block) = dependent_block.pop_back() { // Get chain of dependent block. 
@@ -1375,6 +1376,7 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_consensus::TxEip1559; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{keccak256, Address, Sealable, B256}; use assert_matches::assert_matches; @@ -1390,7 +1392,7 @@ mod tests { proofs::{calculate_receipt_root, calculate_transaction_root}, revm_primitives::AccountInfo, Account, BlockBody, Header, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, TxEip1559, Withdrawals, + TransactionSignedEcRecovered, Withdrawals, }; use reth_provider::{ test_utils::{ @@ -1566,7 +1568,7 @@ mod tests { Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce, - gas_limit: MIN_TRANSACTION_GAS as u128, + gas_limit: MIN_TRANSACTION_GAS, to: Address::ZERO.into(), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() @@ -1602,10 +1604,10 @@ mod tests { let sealed = Header { number, parent_hash: parent.unwrap_or_default(), - gas_used: (body.len() as u64 * MIN_TRANSACTION_GAS) as u128, - gas_limit: chain_spec.max_gas_limit.into(), + gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, + gas_limit: chain_spec.max_gas_limit, mix_hash: B256::random(), - base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE.into()), + base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE), transactions_root, receipts_root, state_root: state_root_unhashed(HashMap::from([( @@ -2179,7 +2181,7 @@ mod tests { (block1.parent_hash, HashSet::from([block1a_hash])), (block1.hash(), HashSet::from([block2.hash()])), ])) - .with_pending_blocks((block2.number + 1, HashSet::new())) + .with_pending_blocks((block2.number + 1, HashSet::default())) .assert(&tree); assert_matches!(tree.make_canonical(block1a_hash), Ok(_)); @@ -2203,7 +2205,7 @@ mod tests { (block1.parent_hash, HashSet::from([block1.hash()])), (block1.hash(), HashSet::from([block2.hash()])), ])) - .with_pending_blocks((block1a.number + 1, HashSet::new())) + .with_pending_blocks((block1a.number + 1, HashSet::default())) .assert(&tree); // check notification. @@ -2240,7 +2242,7 @@ mod tests { (block1.parent_hash, HashSet::from([block1a_hash])), (block1.hash(), HashSet::from([block2a_hash])), ])) - .with_pending_blocks((block2.number + 1, HashSet::new())) + .with_pending_blocks((block2.number + 1, HashSet::default())) .assert(&tree); // check notification. @@ -2309,7 +2311,7 @@ mod tests { .with_chain_num(1) .with_block_to_chain(HashMap::from([(block2a_hash, 4.into())])) .with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))])) - .with_pending_blocks((block2.number + 1, HashSet::new())) + .with_pending_blocks((block2.number + 1, HashSet::default())) .assert(&tree); // check notification. diff --git a/crates/blockchain-tree/src/bundle.rs b/crates/blockchain-tree/src/bundle.rs index 226afd8fab59..6f62d4136bb7 100644 --- a/crates/blockchain-tree/src/bundle.rs +++ b/crates/blockchain-tree/src/bundle.rs @@ -1,7 +1,7 @@ //! [`ExecutionDataProvider`] implementations used by the tree. 
+use alloy_eips::ForkBlock; use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::ForkBlock; use reth_provider::{BlockExecutionForkProvider, ExecutionDataProvider, ExecutionOutcome}; use std::collections::BTreeMap; diff --git a/crates/blockchain-tree/src/canonical_chain.rs b/crates/blockchain-tree/src/canonical_chain.rs index e3dc596ba0e7..7dcd466f7d64 100644 --- a/crates/blockchain-tree/src/canonical_chain.rs +++ b/crates/blockchain-tree/src/canonical_chain.rs @@ -1,5 +1,5 @@ +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::BlockNumHash; use std::collections::BTreeMap; /// This keeps track of (non-finalized) blocks of the canonical chain. diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 596458e20390..393e525d5ae2 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -5,6 +5,7 @@ use super::externals::TreeExternals; use crate::BundleStateDataRef; +use alloy_eips::ForkBlock; use alloy_primitives::{BlockHash, BlockNumber, U256}; use reth_blockchain_tree_api::{ error::{BlockchainTreeError, InsertBlockErrorKind}, @@ -14,7 +15,7 @@ use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_primitives::{ForkBlock, GotExpected, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView, ProviderNodeTypes}, FullExecutionDataProvider, ProviderError, StateRootProvider, TryIntoHistoricalStateProvider, diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs index 5d44a6391178..121d0a69786f 100644 --- a/crates/blockchain-tree/src/metrics.rs +++ b/crates/blockchain-tree/src/metrics.rs @@ -5,26 +5,6 @@ use reth_metrics::{ }; use std::time::{Duration, Instant}; -/// Metrics for the entire blockchain tree -#[derive(Metrics)] -#[metrics(scope = "blockchain_tree")] -pub struct TreeMetrics { - /// Total number of sidechains (not including the canonical chain) - pub sidechains: Gauge, - /// The highest block number in the canonical chain - pub canonical_chain_height: Gauge, - /// The number of reorgs - pub reorgs: Counter, - /// The latest reorg depth - pub latest_reorg_depth: Gauge, - /// Longest sidechain height - pub longest_sidechain_height: Gauge, - /// The number of times cached trie updates were used for insert. - pub trie_updates_insert_cached: Counter, - /// The number of times trie updates were recomputed for insert. - pub trie_updates_insert_recomputed: Counter, -} - /// Metrics for the blockchain tree block buffer #[derive(Metrics)] #[metrics(scope = "blockchain_tree.block_buffer")] @@ -65,6 +45,26 @@ impl MakeCanonicalDurationsRecorder { } } +/// Metrics for the entire blockchain tree +#[derive(Metrics)] +#[metrics(scope = "blockchain_tree")] +pub struct TreeMetrics { + /// Total number of sidechains (not including the canonical chain) + pub sidechains: Gauge, + /// The highest block number in the canonical chain + pub canonical_chain_height: Gauge, + /// The number of reorgs + pub reorgs: Counter, + /// The latest reorg depth + pub latest_reorg_depth: Gauge, + /// Longest sidechain height + pub longest_sidechain_height: Gauge, + /// The number of times cached trie updates were used for insert. 
+ pub trie_updates_insert_cached: Counter, + /// The number of times trie updates were recomputed for insert. + pub trie_updates_insert_recomputed: Counter, +} + /// Represents actions for making a canonical chain. #[derive(Debug, Copy, Clone)] pub(crate) enum MakeCanonicalAction { diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 76e59a47792f..925b8f03add7 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -1,3 +1,4 @@ +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; use reth_blockchain_tree_api::{ self, @@ -5,7 +6,7 @@ use reth_blockchain_tree_api::{ BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; -use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions, FullExecutionDataProvider, diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 333527b83ef7..8e6cceccdd19 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -1,6 +1,7 @@ //! Wrapper around `BlockchainTree` that allows for it to be shared. use super::BlockchainTree; +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; use parking_lot::RwLock; use reth_blockchain_tree_api::{ @@ -10,7 +11,7 @@ use reth_blockchain_tree_api::{ }; use reth_evm::execute::BlockExecutorProvider; use reth_node_types::NodeTypesWithDB; -use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateSubscriptions, FullExecutionDataProvider, ProviderError, diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 91f68b761514..63016918c5cb 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -42,12 +42,14 @@ pin-project.workspace = true # optional deps for test-utils alloy-signer = { workspace = true, optional = true } alloy-signer-local = { workspace = true, optional = true } +alloy-consensus = { workspace = true, optional = true } rand = { workspace = true, optional = true } revm = { workspace = true, optional = true } [dev-dependencies] alloy-signer.workspace = true alloy-signer-local.workspace = true +alloy-consensus.workspace = true rand.workspace = true revm.workspace = true @@ -55,6 +57,7 @@ revm.workspace = true test-utils = [ "alloy-signer", "alloy-signer-local", + "alloy-consensus", "rand", "revm" ] diff --git a/crates/chain-state/src/chain_info.rs b/crates/chain-state/src/chain_info.rs index f01d4727198b..d9e2c03e2738 100644 --- a/crates/chain-state/src/chain_info.rs +++ b/crates/chain-state/src/chain_info.rs @@ -112,15 +112,25 @@ impl ChainInfoTracker { /// Sets the safe header of the chain. 
pub fn set_safe(&self, header: SealedHeader) { - self.inner.safe_block.send_modify(|h| { - let _ = h.replace(header); + self.inner.safe_block.send_if_modified(|current_header| { + if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) { + let _ = current_header.replace(header); + return true + } + + false }); } /// Sets the finalized header of the chain. pub fn set_finalized(&self, header: SealedHeader) { - self.inner.finalized_block.send_modify(|h| { - let _ = h.replace(header); + self.inner.finalized_block.send_if_modified(|current_header| { + if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) { + let _ = current_header.replace(header); + return true + } + + false }); } diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 2051fb308cf0..f2a73d27fa21 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -5,22 +5,18 @@ use crate::{ ChainInfoTracker, MemoryOverlayStateProvider, }; use alloy_eips::BlockNumHash; -use alloy_primitives::{Address, TxHash, B256}; +use alloy_primitives::{map::HashMap, Address, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, - TransactionSigned, + BlockWithSenders, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, + TransactionMeta, TransactionSigned, }; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; -use std::{ - collections::{BTreeMap, HashMap}, - sync::Arc, - time::Instant, -}; +use std::{collections::BTreeMap, sync::Arc, time::Instant}; use tokio::sync::{broadcast, watch}; /// Size of the broadcast channel used to notify canonical state events. @@ -197,7 +193,7 @@ impl CanonicalInMemoryState { /// Create an empty state. pub fn empty() -> Self { - Self::new(HashMap::new(), BTreeMap::new(), None, None) + Self::new(HashMap::default(), BTreeMap::new(), None, None) } /// Create a new in memory state with the given local head and finalized header @@ -237,7 +233,7 @@ impl CanonicalInMemoryState { pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block let parent = self.state_by_hash(pending.block().parent_hash); - let pending = BlockState::with_parent(pending, parent.map(|p| (*p).clone())); + let pending = BlockState::with_parent(pending, parent); self.inner.in_memory_state.pending.send_modify(|p| { p.replace(pending); }); @@ -245,6 +241,9 @@ impl CanonicalInMemoryState { } /// Append new blocks to the in memory state. + /// + /// This removes all reorged blocks and appends the new blocks to the tracked chain and connects + /// them to their parent blocks. 
fn update_blocks<I>(&self, new_blocks: I, reorged: I) where I: IntoIterator<Item = ExecutedBlock>, { @@ -265,8 +264,7 @@ impl CanonicalInMemoryState { // insert the new blocks for block in new_blocks { let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = - BlockState::with_parent(block.clone(), parent.map(|p| (*p).clone())); + let block_state = BlockState::with_parent(block.clone(), parent); let hash = block_state.hash(); let number = block_state.number(); @@ -321,20 +319,21 @@ impl CanonicalInMemoryState { // clear all numbers numbers.clear(); - // drain all blocks and only keep the ones that are not persisted + // drain all blocks and only keep the ones that are not persisted (below the persisted + // height) let mut old_blocks = blocks .drain() + .filter(|(_, b)| b.block().block().number > persisted_height) .map(|(_, b)| b.block.clone()) - .filter(|b| b.block().number > persisted_height) .collect::<Vec<_>>(); // sort the blocks by number so we can insert them back in natural order (low -> high) old_blocks.sort_unstable_by_key(|block| block.block().number); + // re-insert the blocks in natural order and connect them to their parent blocks for block in old_blocks { let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = - BlockState::with_parent(block.clone(), parent.map(|p| (*p).clone())); + let block_state = BlockState::with_parent(block.clone(), parent); let hash = block_state.hash(); let number = block_state.number(); @@ -346,10 +345,7 @@ impl CanonicalInMemoryState { // also shift the pending state if it exists self.inner.in_memory_state.pending.send_modify(|p| { if let Some(p) = p.as_mut() { - p.parent = blocks - .get(&p.block().block.parent_hash) - .cloned() - .map(|p| Box::new((*p).clone())); + p.parent = blocks.get(&p.block().block.parent_hash).cloned(); } }); } @@ -419,7 +415,7 @@ impl CanonicalInMemoryState { self.inner.chain_info_tracker.on_transition_configuration_exchanged(); } - /// Returns the timepstamp of the last transition configuration exchanged, + /// Returns the timestamp of the last transition configuration exchanged, pub fn last_exchanged_transition_configuration_timestamp(&self) -> Option<Instant> { self.inner.chain_info_tracker.last_transition_configuration_exchanged_at() } @@ -503,6 +499,20 @@ impl CanonicalInMemoryState { self.inner.canon_state_notification_sender.send(event).ok(); } + /// Return state provider with reference to in-memory blocks that overlay database state. + /// + /// This merges the state of all blocks that are part of the chain that the requested block is + /// the head of. This includes all blocks that connect back to the canonical block on disk. + pub fn state_provider_from_state( + &self, + state: &BlockState, + historical: StateProviderBox, + ) -> MemoryOverlayStateProvider { + let in_memory = state.chain().into_iter().map(|block_state| block_state.block()).collect(); + + MemoryOverlayStateProvider::new(historical, in_memory) + } + /// Return state provider with reference to in-memory blocks that overlay database state. /// /// This merges the state of all blocks that are part of the chain that the requested block is @@ -521,22 +531,12 @@ impl CanonicalInMemoryState { MemoryOverlayStateProvider::new(historical, in_memory) } - /// Returns an iterator over all canonical blocks in the in-memory state, from newest to oldest. + /// Returns an iterator over all __canonical blocks__ in the in-memory state, from newest to + /// oldest (highest to lowest).
+ /// + /// This iterator contains a snapshot of the in-memory state at the time of the call. pub fn canonical_chain(&self) -> impl Iterator<Item = Arc<BlockState>> { - let pending = self.inner.in_memory_state.pending.borrow().clone(); - let head = self.inner.in_memory_state.head_state(); - - // this clone is cheap because we only expect to keep in memory a few - // blocks and all of them are Arcs. - let blocks = self.inner.in_memory_state.blocks.read().clone(); - - std::iter::once(pending).filter_map(|p| p.map(Arc::new)).chain(std::iter::successors( - head, - move |state| { - let parent_hash = state.block().block().parent_hash; - blocks.get(&parent_hash).cloned() - }, - )) + self.inner.in_memory_state.head_state().into_iter().flat_map(|head| head.iter()) } /// Returns a `TransactionSigned` for the given `TxHash` if found. @@ -571,18 +571,9 @@ impl CanonicalInMemoryState { index: index as u64, block_hash: block_state.hash(), block_number: block_state.block().block.number, - base_fee: block_state - .block() - .block() - .header - .base_fee_per_gas - .map(|base_fee| base_fee as u64), + base_fee: block_state.block().block().header.base_fee_per_gas, timestamp: block_state.block().block.timestamp, - excess_blob_gas: block_state - .block() - .block - .excess_blob_gas - .map(|excess_blob| excess_blob as u64), + excess_blob_gas: block_state.block().block.excess_blob_gas, }; return Some((tx.clone(), meta)) } @@ -598,7 +589,7 @@ pub struct BlockState { /// The executed block that determines the state after this block has been executed. block: ExecutedBlock, /// The block's parent block if it exists. - parent: Option<Box<BlockState>>, + parent: Option<Arc<BlockState>>, } #[allow(dead_code)] @@ -609,8 +600,8 @@ impl BlockState { } /// [`BlockState`] constructor with parent. - pub fn with_parent(block: ExecutedBlock, parent: Option<BlockState>) -> Self { - Self { block, parent: parent.map(Box::new) } + pub const fn with_parent(block: ExecutedBlock, parent: Option<Arc<BlockState>>) -> Self { + Self { block, parent } } /// Returns the hash and block of the on disk block this state can be traced back to. @@ -627,6 +618,25 @@ impl BlockState { self.block.clone() } + /// Returns a reference to the executed block that determines the state. + pub const fn block_ref(&self) -> &ExecutedBlock { + &self.block + } + + /// Returns the block with senders for the state. + pub fn block_with_senders(&self) -> BlockWithSenders { + let block = self.block.block().clone(); + let senders = self.block.senders().clone(); + BlockWithSenders { block: block.unseal(), senders } + } + + /// Returns the sealed block with senders for the state. + pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { + let block = self.block.block().clone(); + let senders = self.block.senders().clone(); + SealedBlockWithSenders { block, senders } + } + /// Returns the hash of executed block that determines the state. pub fn hash(&self) -> B256 { self.block.block().hash() @@ -670,8 +680,12 @@ impl BlockState { .unwrap_or_default() } - /// Returns a vector of parent `BlockStates`. - /// The block state order in the output vector is newest to oldest. + /// Returns a vector of __parent__ `BlockStates`. + /// + /// The block state order in the output vector is newest to oldest (highest to lowest): + /// `[5,4,3,2,1]` + /// + /// Note: This does not include self. pub fn parent_state_chain(&self) -> Vec<&Self> { let mut parents = Vec::new(); let mut current = self.parent.as_deref(); @@ -685,8 +699,8 @@ impl BlockState { } /// Returns a vector of `BlockStates` representing the entire in memory chain.
- /// The block state order in the output vector is newest to oldest, including - /// self as the first element. + /// The block state order in the output vector is newest to oldest (highest to lowest), + /// including self as the first element. pub fn chain(&self) -> Vec<&Self> { let mut chain = vec![self]; self.append_parent_chain(&mut chain); @@ -697,6 +711,13 @@ impl BlockState { pub fn append_parent_chain<'a>(&'a self, chain: &mut Vec<&'a Self>) { chain.extend(self.parent_state_chain()); } + + /// Returns an iterator over the atomically captured chain of in memory blocks. + /// + /// This yields the blocks from newest to oldest (highest to lowest). + pub fn iter(self: Arc<Self>) -> impl Iterator<Item = Arc<BlockState>> { + std::iter::successors(Some(self), |state| state.parent.clone()) + } } /// Represents an executed block stored in-memory. @@ -843,7 +864,7 @@ impl NewCanonicalChain { mod tests { use super::*; use crate::test_utils::TestBlockBuilder; - use alloy_primitives::{BlockNumber, Bytes, StorageKey, StorageValue}; + use alloy_primitives::{map::HashSet, BlockNumber, Bytes, StorageKey, StorageValue}; use rand::Rng; use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode, Receipt, Requests}; @@ -852,7 +873,6 @@ mod tests { StorageRootProvider, }; use reth_trie::{AccountProof, HashedStorage, MultiProof, TrieInput}; - use std::collections::HashSet; fn create_mock_state( test_block_builder: &mut TestBlockBuilder, @@ -875,7 +895,7 @@ mod tests { for i in 1..=num_blocks { let mut state = create_mock_state(test_block_builder, i, parent_hash); if let Some(parent) = parent_state { - state.parent = Some(Box::new(parent)); + state.parent = Some(Arc::new(parent)); } parent_hash = state.hash(); parent_state = Some(state.clone()); @@ -984,7 +1004,7 @@ mod tests { #[test] fn test_in_memory_state_impl_state_by_hash() { - let mut state_by_hash = HashMap::new(); + let mut state_by_hash = HashMap::default(); let number = rand::thread_rng().gen::<u64>(); let mut test_block_builder = TestBlockBuilder::default(); let state = Arc::new(create_mock_state(&mut test_block_builder, number, B256::random())); @@ -998,7 +1018,7 @@ mod tests { #[test] fn test_in_memory_state_impl_state_by_number() { - let mut state_by_hash = HashMap::new(); + let mut state_by_hash = HashMap::default(); let mut hash_by_number = BTreeMap::new(); let number = rand::thread_rng().gen::<u64>(); @@ -1017,7 +1037,7 @@ mod tests { #[test] fn test_in_memory_state_impl_head_state() { - let mut state_by_hash = HashMap::new(); + let mut state_by_hash = HashMap::default(); let mut hash_by_number = BTreeMap::new(); let mut test_block_builder = TestBlockBuilder::default(); let state1 = Arc::new(create_mock_state(&mut test_block_builder, 1, B256::random())); @@ -1045,7 +1065,7 @@ mod tests { let pending_hash = pending_state.hash(); let in_memory_state = - InMemoryState::new(HashMap::new(), BTreeMap::new(), Some(pending_state)); + InMemoryState::new(HashMap::default(), BTreeMap::new(), Some(pending_state)); let result = in_memory_state.pending_state(); assert!(result.is_some()); @@ -1056,7 +1076,7 @@ mod tests { #[test] fn test_in_memory_state_impl_no_pending_state() { - let in_memory_state = InMemoryState::new(HashMap::new(), BTreeMap::new(), None); + let in_memory_state = InMemoryState::new(HashMap::default(), BTreeMap::new(), None); assert_eq!(in_memory_state.pending_state(), None); } @@ -1171,7 +1191,7 @@ mod tests { // Check the pending state assert_eq!( state.pending_state().unwrap(), - BlockState::with_parent(block2.clone(), Some(BlockState::new(block1))) +
BlockState::with_parent(block2.clone(), Some(Arc::new(BlockState::new(block1)))) ); // Check the pending block @@ -1206,14 +1226,14 @@ mod tests { let block2 = test_block_builder.get_executed_block_with_number(2, block1.block().hash()); let block3 = test_block_builder.get_executed_block_with_number(3, block2.block().hash()); - let state1 = BlockState::new(block1.clone()); - let state2 = BlockState::with_parent(block2.clone(), Some(state1.clone())); - let state3 = BlockState::with_parent(block3.clone(), Some(state2.clone())); + let state1 = Arc::new(BlockState::new(block1.clone())); + let state2 = Arc::new(BlockState::with_parent(block2.clone(), Some(state1.clone()))); + let state3 = Arc::new(BlockState::with_parent(block3.clone(), Some(state2.clone()))); - let mut blocks = HashMap::new(); - blocks.insert(block1.block().hash(), Arc::new(state1)); - blocks.insert(block2.block().hash(), Arc::new(state2)); - blocks.insert(block3.block().hash(), Arc::new(state3)); + let mut blocks = HashMap::default(); + blocks.insert(block1.block().hash(), state1); + blocks.insert(block2.block().hash(), state2); + blocks.insert(block3.block().hash(), state3); let mut numbers = BTreeMap::new(); numbers.insert(1, block1.block().hash()); @@ -1257,7 +1277,7 @@ mod tests { fn test_canonical_in_memory_state_canonical_chain_single_block() { let block = TestBlockBuilder::default().get_executed_block_with_number(1, B256::random()); let hash = block.block().hash(); - let mut blocks = HashMap::new(); + let mut blocks = HashMap::default(); blocks.insert(hash, Arc::new(BlockState::new(block))); let mut numbers = BTreeMap::new(); numbers.insert(1, hash); @@ -1272,20 +1292,17 @@ mod tests { #[test] fn test_canonical_in_memory_state_canonical_chain_multiple_blocks() { - let mut blocks = HashMap::new(); - let mut numbers = BTreeMap::new(); let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); + let state = CanonicalInMemoryState::empty(); for i in 1..=3 { let block = block_builder.get_executed_block_with_number(i, parent_hash); let hash = block.block().hash(); - blocks.insert(hash, Arc::new(BlockState::new(block.clone()))); - numbers.insert(i, hash); + state.update_blocks(Some(block), None); parent_hash = hash; } - let state = CanonicalInMemoryState::new(blocks, numbers, None, None); let chain: Vec<_> = state.canonical_chain().collect(); assert_eq!(chain.len(), 3); @@ -1294,31 +1311,27 @@ mod tests { assert_eq!(chain[2].number(), 1); } + // ensures the pending block is not part of the canonical chain #[test] fn test_canonical_in_memory_state_canonical_chain_with_pending_block() { - let mut blocks = HashMap::new(); - let mut numbers = BTreeMap::new(); let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); + let state = CanonicalInMemoryState::empty(); for i in 1..=2 { let block = block_builder.get_executed_block_with_number(i, parent_hash); let hash = block.block().hash(); - blocks.insert(hash, Arc::new(BlockState::new(block.clone()))); - numbers.insert(i, hash); + state.update_blocks(Some(block), None); parent_hash = hash; } let pending_block = block_builder.get_executed_block_with_number(3, parent_hash); - let pending_state = BlockState::new(pending_block); - - let state = CanonicalInMemoryState::new(blocks, numbers, Some(pending_state), None); + state.set_pending_block(pending_block); let chain: Vec<_> = state.canonical_chain().collect(); - assert_eq!(chain.len(), 3); - assert_eq!(chain[0].number(), 3); - assert_eq!(chain[1].number(), 2); - 
assert_eq!(chain[2].number(), 1); + assert_eq!(chain.len(), 2); + assert_eq!(chain[0].number(), 2); + assert_eq!(chain[1].number(), 1); } #[test] diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 35315fb52158..2712d1259e85 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -1,5 +1,9 @@ use super::ExecutedBlock; -use alloy_primitives::{keccak256, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; +use alloy_primitives::{ + keccak256, + map::{HashMap, HashSet}, + Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, +}; use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode}; use reth_storage_api::{ @@ -9,10 +13,7 @@ use reth_storage_api::{ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; -use std::{ - collections::{HashMap, HashSet}, - sync::OnceLock, -}; +use std::sync::OnceLock; /// A state provider that stores references to in-memory blocks along with their state as well as /// the historical state provider for fallback lookups. diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 6fa072f82940..4b0bfcdd996a 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -2,18 +2,18 @@ use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, }; -use alloy_primitives::{Address, BlockNumber, B256, U256}; +use alloy_consensus::TxEip1559; +use alloy_primitives::{Address, BlockNumber, Sealable, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use rand::{thread_rng, Rng}; use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - alloy_primitives::Sealable, constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_ROOT_HASH}, proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, BlockBody, Header, Receipt, Receipts, Requests, SealedBlock, SealedBlockWithSenders, - SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, + SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; @@ -89,7 +89,7 @@ impl TestBlockBuilder { let tx = Transaction::Eip1559(TxEip1559 { chain_id: self.chain_spec.chain.id(), nonce, - gas_limit: MIN_TRANSACTION_GAS as u128, + gas_limit: MIN_TRANSACTION_GAS, to: Address::random().into(), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, max_priority_fee_per_gas: 1, @@ -132,10 +132,10 @@ impl TestBlockBuilder { let header = Header { number, parent_hash, - gas_used: transactions.len() as u128 * MIN_TRANSACTION_GAS as u128, - gas_limit: self.chain_spec.max_gas_limit.into(), + gas_used: transactions.len() as u64 * MIN_TRANSACTION_GAS, + gas_limit: self.chain_spec.max_gas_limit, mix_hash: B256::random(), - base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE.into()), + base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE), transactions_root: calculate_transaction_root(&transactions), receipts_root: calculate_receipt_root(&receipts), beneficiary: Address::random(), diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index 539ea0c5e93b..de04ffd1e6aa 100644 --- a/crates/chainspec/Cargo.toml +++ 
b/crates/chainspec/Cargo.toml @@ -49,15 +49,13 @@ op-alloy-rpc-types.workspace = true [features] default = ["std"] -optimism = [ - "serde", - "dep:op-alloy-rpc-types", - "reth-optimism-forks", -] -std = [] -arbitrary = [ - "alloy-chains/arbitrary" +optimism = ["serde", "dep:op-alloy-rpc-types", "reth-optimism-forks"] +std = [ + "alloy-chains/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-trie/std", ] +arbitrary = ["alloy-chains/arbitrary"] test-utils = [] - - diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 15a3a024494c..fb64e08ae1ef 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,14 +1,16 @@ use crate::{ChainSpec, DepositContract}; +use alloc::vec::Vec; use alloy_chains::Chain; use alloy_eips::eip1559::BaseFeeParams; use alloy_genesis::Genesis; use alloy_primitives::B256; use core::fmt::{Debug, Display}; +use reth_network_peers::NodeRecord; use reth_primitives_traits::Header; /// Trait representing type configuring a chain spec. #[auto_impl::auto_impl(&, Arc)] -pub trait EthChainSpec: Send + Sync + Unpin + Debug + 'static { +pub trait EthChainSpec: Send + Sync + Unpin + Debug { // todo: make chain spec type generic over hardfork //type Hardfork: Clone + Copy + 'static; @@ -41,6 +43,9 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug + 'static { /// The block gas limit. fn max_gas_limit(&self) -> u64; + + /// The bootnodes for the chain, if any. + fn bootnodes(&self) -> Option<Vec<NodeRecord>>; } impl EthChainSpec for ChainSpec { @@ -83,4 +88,8 @@ impl EthChainSpec for ChainSpec { fn max_gas_limit(&self) -> u64 { self.max_gas_limit } + + fn bootnodes(&self) -> Option<Vec<NodeRecord>> { + self.bootnodes() + } } diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 463501ee4df6..45070db197d4 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -411,10 +411,7 @@ impl ChainSpec { /// Returns the hardfork display helper. pub fn display_hardforks(&self) -> DisplayHardforks { - DisplayHardforks::new( - &self.hardforks, - self.paris_block_and_final_difficulty.map(|(block, _)| block), - ) + DisplayHardforks::new(&self, self.paris_block_and_final_difficulty.map(|(block, _)| block)) } /// Get the fork id for the given hardfork. @@ -613,6 +610,18 @@ impl Hardforks for ChainSpec { fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)> { self.hardforks.forks_iter() } + + fn fork_id(&self, head: &Head) -> ForkId { + self.fork_id(head) + } + + fn latest_fork_id(&self) -> ForkId { + self.latest_fork_id() + } + + fn fork_filter(&self, head: Head) -> ForkFilter { + self.fork_filter(head) + } } impl EthereumHardforks for ChainSpec { @@ -816,13 +825,13 @@ fn into_optimism_chain_spec(genesis: Genesis) -> ChainSpec { } } -/// A trait for reading the current [`ChainSpec`]. +/// A trait for reading the current chainspec. #[auto_impl::auto_impl(&, Arc)] pub trait ChainSpecProvider: Send + Sync { /// The chain spec type. - type ChainSpec: EthChainSpec; + type ChainSpec: EthChainSpec + 'static; - /// Get an [`Arc`] to the [`ChainSpec`]. + /// Get an [`Arc`] to the chainspec.
fn chain_spec(&self) -> Arc<Self::ChainSpec>; } @@ -2327,7 +2336,7 @@ Post-merge hard forks (timestamp based): #[test] fn test_paris_block_and_total_difficulty() { - let genesis = Genesis { gas_limit: 0x2fefd8u128, ..Default::default() }; + let genesis = Genesis { gas_limit: 0x2fefd8u64, ..Default::default() }; let paris_chainspec = ChainSpecBuilder::default() .chain(Chain::from_id(1337)) .genesis(genesis) @@ -2339,7 +2348,7 @@ Post-merge hard forks (timestamp based): #[test] fn test_default_cancun_header_forkhash() { // set the gas limit from the hive test genesis according to the hash - let genesis = Genesis { gas_limit: 0x2fefd8u128, ..Default::default() }; + let genesis = Genesis { gas_limit: 0x2fefd8u64, ..Default::default() }; let default_chainspec = ChainSpecBuilder::default() .chain(Chain::from_id(1337)) .genesis(genesis) diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index dd300229ff9a..4835c3d0fa29 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -60,7 +60,11 @@ serde.workspace = true serde_json.workspace = true tracing.workspace = true backon.workspace = true -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", +] } # io fdlimit.workspace = true @@ -68,8 +72,8 @@ toml = { workspace = true, features = ["display"] } # tui comfy-table = "7.0" -crossterm = "0.27.0" -ratatui = { version = "0.27", default-features = false, features = [ +crossterm = "0.28.0" +ratatui = { version = "0.28", default-features = false, features = [ "crossterm", ] } @@ -88,5 +92,5 @@ dev = [ "dep:arbitrary", "dep:proptest-arbitrary-interop", "reth-primitives/arbitrary", - "reth-db-api/arbitrary" + "reth-db-api/arbitrary", ] diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 3b4a00b2d2b2..956a63a5aa0e 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -3,7 +3,7 @@ use alloy_primitives::B256; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_config::{config::EtlConfig, Config}; use reth_db::{init_db, open_db_read_only, DatabaseEnv}; @@ -50,14 +50,14 @@ pub struct EnvironmentArgs<C: ChainSpecParser> { pub db: DatabaseArgs, } -impl<C: ChainSpecParser<ChainSpec = ChainSpec>> EnvironmentArgs<C> { +impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> EnvironmentArgs<C> { /// Initializes environment according to [`AccessRights`] and returns an instance of /// [`Environment`].
pub fn init<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( &self, access: AccessRights, ) -> eyre::Result<Environment<N>> { - let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain); + let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); let db_path = data_dir.db(); let sf_path = data_dir.static_files(); @@ -93,7 +93,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> EnvironmentArgs<C> { let provider_factory = self.create_provider_factory(&config, db, sfp)?; if access.is_read_write() { - debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); + debug!(target: "reth::cli", chain=%self.chain.chain(), genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(&provider_factory)?; } diff --git a/crates/cli/commands/src/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs index 7aeed6dfe141..60ec09c9606e 100644 --- a/crates/cli/commands/src/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -1,11 +1,12 @@ use crate::db::get::{maybe_json_value_parser, table_key}; use ahash::RandomState; use clap::Parser; -use reth_chainspec::ChainSpec; +use reth_chainspec::EthereumHardforks; use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables}; use reth_db_api::{cursor::DbCursorRO, table::Table, transaction::DbTx}; use reth_db_common::DbTool; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_provider::providers::ProviderNodeTypes; use std::{ hash::{BuildHasher, Hasher}, sync::Arc, @@ -35,7 +36,7 @@ pub struct Command { impl Command { /// Execute `db checksum` command - pub fn execute<N: NodeTypesWithEngine<ChainSpec = ChainSpec>>( + pub fn execute<N: NodeTypesWithEngine<ChainSpec: EthereumHardforks>>( self, tool: &DbTool<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>, ) -> eyre::Result<()> { @@ -63,9 +64,7 @@ impl<N: NodeTypesWithDB> ChecksumViewer<'_, N> { } } -impl<N: NodeTypesWithDB<ChainSpec = ChainSpec>> TableViewer<(u64, Duration)> - for ChecksumViewer<'_, N> -{ +impl<N: ProviderNodeTypes> TableViewer<(u64, Duration)> for ChecksumViewer<'_, N> { type Error = eyre::Report; fn view(&self) -> Result<(u64, Duration), Self::Error> { diff --git a/crates/cli/commands/src/db/diff.rs b/crates/cli/commands/src/db/diff.rs index 0b7b7790732a..c1346b3742a3 100644 --- a/crates/cli/commands/src/db/diff.rs +++ b/crates/cli/commands/src/db/diff.rs @@ -267,7 +267,7 @@ where T::Key: Hash, { fn default() -> Self { - Self { discrepancies: HashMap::new(), extra_elements: HashMap::new() } + Self { discrepancies: HashMap::default(), extra_elements: HashMap::default() } } } diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 2734e1da1854..5b794feeada2 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,6 +1,5 @@ -use alloy_primitives::BlockHash; +use alloy_primitives::{hex, BlockHash}; use clap::Parser; -use reth_chainspec::ChainSpec; use reth_db::{ static_file::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, TransactionMask}, tables, RawKey, RawTable, Receipts, TableViewer, Transactions, @@ -8,8 +7,8 @@ use reth_db_api::table::{Decompress, DupSort, Table}; use reth_db_common::DbTool; use reth_node_builder::NodeTypesWithDB; -use reth_primitives::{hex, Header}; -use reth_provider::StaticFileProviderFactory; +use reth_primitives::Header; +use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use tracing::error; @@ -54,10 +53,7 @@ enum Subcommand { impl Command { /// Execute `db get` command - pub fn execute<N: NodeTypesWithDB<ChainSpec = ChainSpec>>( - self, - tool: &DbTool<N>, - ) -> eyre::Result<()> { + pub fn execute<N: ProviderNodeTypes>(self, tool: &DbTool<N>) -> eyre::Result<()> { match self.subcommand { Subcommand::Mdbx { table, key, subkey, raw } => {
table.view(&GetValueViewer { tool, key, subkey, raw })? } @@ -148,7 +144,7 @@ struct GetValueViewer<'a, N: NodeTypesWithDB> { raw: bool, } -impl<N: NodeTypesWithDB<ChainSpec = ChainSpec>> TableViewer<()> for GetValueViewer<'_, N> { +impl<N: ProviderNodeTypes> TableViewer<()> for GetValueViewer<'_, N> { type Error = eyre::Report; fn view(&self) -> Result<(), Self::Error> { diff --git a/crates/cli/commands/src/db/list.rs b/crates/cli/commands/src/db/list.rs index 3dfa4f388486..63eca1d8683b 100644 --- a/crates/cli/commands/src/db/list.rs +++ b/crates/cli/commands/src/db/list.rs @@ -2,7 +2,7 @@ use super::tui::DbListTUI; use alloy_primitives::hex; use clap::Parser; use eyre::WrapErr; -use reth_chainspec::ChainSpec; +use reth_chainspec::EthereumHardforks; use reth_db::{DatabaseEnv, RawValue, TableViewer, Tables}; use reth_db_api::{database::Database, table::Table}; use reth_db_common::{DbTool, ListFilter}; @@ -53,7 +53,7 @@ pub struct Command { impl Command { /// Execute `db list` command - pub fn execute<N: NodeTypesWithEngine<ChainSpec = ChainSpec>>( + pub fn execute<N: NodeTypesWithEngine<ChainSpec: EthereumHardforks>>( self, tool: &DbTool<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/db/mod.rs b/crates/cli/commands/src/db/mod.rs index 6d48256101fb..1c000f56bc25 100644 --- a/crates/cli/commands/src/db/mod.rs +++ b/crates/cli/commands/src/db/mod.rs @@ -1,6 +1,6 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::{Parser, Subcommand}; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION}; use reth_db_common::DbTool; @@ -63,12 +63,12 @@ macro_rules! db_ro_exec { }; } -impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { +impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C> { /// Execute `db` command pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( self, ) -> eyre::Result<()> { - let data_dir = self.env.datadir.clone().resolve_datadir(self.env.chain.chain); + let data_dir = self.env.datadir.clone().resolve_datadir(self.env.chain.chain()); let db_path = data_dir.db(); let static_files_path = data_dir.static_files(); @@ -160,13 +160,13 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { #[cfg(test)] mod tests { use super::*; - use reth_node_core::args::utils::{DefaultChainSpecParser, SUPPORTED_CHAINS}; + use reth_node_core::args::utils::{EthereumChainSpecParser, SUPPORTED_CHAINS}; use std::path::Path; #[test] fn parse_stats_globals() { let path = format!("../{}", SUPPORTED_CHAINS[0]); - let cmd = Command::<DefaultChainSpecParser>::try_parse_from([ + let cmd = Command::<EthereumChainSpecParser>::try_parse_from([ "reth", "--datadir", &path, diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index 76fb69b4a956..ac36b866b07a 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -4,14 +4,14 @@ use comfy_table::{Cell, Row, Table as ComfyTable}; use eyre::WrapErr; use human_bytes::human_bytes; use itertools::Itertools; -use reth_chainspec::ChainSpec; +use reth_chainspec::EthereumHardforks; use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv, TableViewer, Tables}; use reth_db_api::database::Database; use reth_db_common::DbTool; use reth_fs_util as fs; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::providers::StaticFileProvider; +use reth_provider::providers::{ProviderNodeTypes, StaticFileProvider}; use reth_static_file_types::SegmentRangeInclusive; use std::{sync::Arc, time::Duration}; @@ -38,7 +38,7 @@ pub struct Command { impl Command { /// Execute `db stats` command - pub fn execute<N: NodeTypesWithEngine<ChainSpec = ChainSpec>>( + pub fn execute<N: NodeTypesWithEngine<ChainSpec: EthereumHardforks>>( self, data_dir: ChainPath<DataDirPath>, tool: &DbTool<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>, @@ -325,10
+325,7 @@ impl Command { Ok(table) } - fn checksum_report<N: NodeTypesWithEngine<ChainSpec = ChainSpec>>( - &self, - tool: &DbTool<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>, - ) -> eyre::Result<ComfyTable> { + fn checksum_report<N: ProviderNodeTypes>(&self, tool: &DbTool<N>) -> eyre::Result<ComfyTable> { let mut table = ComfyTable::new(); table.load_preset(comfy_table::presets::ASCII_MARKDOWN); table.set_header(vec![Cell::new("Table"), Cell::new("Checksum"), Cell::new("Elapsed")]); diff --git a/crates/cli/commands/src/db/tui.rs b/crates/cli/commands/src/db/tui.rs index 746f2cd974f1..240ca376970c 100644 --- a/crates/cli/commands/src/db/tui.rs +++ b/crates/cli/commands/src/db/tui.rs @@ -347,7 +347,7 @@ where let outer_chunks = Layout::default() .direction(Direction::Vertical) .constraints([Constraint::Percentage(95), Constraint::Percentage(5)].as_ref()) - .split(f.size()); + .split(f.area()); // Columns { diff --git a/crates/cli/commands/src/dump_genesis.rs b/crates/cli/commands/src/dump_genesis.rs index 30d3bc9651df..a5c0675cc7e8 100644 --- a/crates/cli/commands/src/dump_genesis.rs +++ b/crates/cli/commands/src/dump_genesis.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use clap::Parser; -use reth_chainspec::ChainSpec; +use reth_chainspec::EthChainSpec; use reth_cli::chainspec::ChainSpecParser; /// Dumps genesis block JSON configuration to stdout @@ -21,7 +21,7 @@ pub struct DumpGenesisCommand<C: ChainSpecParser> { chain: Arc<C::ChainSpec>, } -impl<C: ChainSpecParser<ChainSpec = ChainSpec>> DumpGenesisCommand<C> { +impl<C: ChainSpecParser<ChainSpec: EthChainSpec>> DumpGenesisCommand<C> { /// Execute the `dump-genesis` command pub async fn execute(self) -> eyre::Result<()> { println!("{}", serde_json::to_string_pretty(self.chain.genesis())?); @@ -32,12 +32,12 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> DumpGenesisCommand<C> { #[cfg(test)] mod tests { use super::*; - use reth_node_core::args::utils::{DefaultChainSpecParser, SUPPORTED_CHAINS}; + use reth_node_core::args::utils::{EthereumChainSpecParser, SUPPORTED_CHAINS}; #[test] fn parse_dump_genesis_command_chain_args() { for chain in SUPPORTED_CHAINS { - let args: DumpGenesisCommand<DefaultChainSpecParser> = + let args: DumpGenesisCommand<EthereumChainSpecParser> = DumpGenesisCommand::parse_from(["reth", "--chain", chain]); assert_eq!( Ok(args.chain.chain), diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index 5b35e8aa1c7b..31c6cdc69157 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -4,7 +4,7 @@ use alloy_primitives::B256; use clap::Parser; use futures::{Stream, StreamExt}; use reth_beacon_consensus::EthBeaconConsensus; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_config::Config; use reth_consensus::Consensus; @@ -20,12 +20,12 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_builder::NodeTypesWithEngine; use reth_node_core::version::SHORT_VERSION; use reth_node_events::node::NodeEvent; use reth_provider::{ - BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderError, ProviderFactory, - StageCheckpointReader, + providers::ProviderNodeTypes, BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderError, + ProviderFactory, StageCheckpointReader, }; use reth_prune::PruneModes; use reth_stages::{prelude::*, Pipeline, StageId, StageSet}; @@ -56,7 +56,7 @@ pub struct ImportCommand<C: ChainSpecParser> { path: PathBuf, } -impl<C: ChainSpecParser<ChainSpec = ChainSpec>> ImportCommand<C> { +impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> ImportCommand<C> { /// Execute `import` command pub async fn execute<N, E, F>(self, executor: F) -> eyre::Result<()> where @@ -168,7 +168,7 @@ pub fn build_import_pipeline<N, C, E>( executor: E, ) -> eyre::Result<(Pipeline<N>, impl Stream<Item = NodeEvent>)> where - N: NodeTypesWithDB, + N:
ProviderNodeTypes, C: Consensus + 'static, E: BlockExecutorProvider, { @@ -180,7 +180,7 @@ where let last_block_number = provider_factory.last_block_number()?; let local_head = provider_factory .sealed_header(last_block_number)? - .ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?; + .ok_or_else(|| ProviderError::HeaderNotFound(last_block_number.into()))?; let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) .build(file_client.clone(), consensus.clone()) @@ -231,12 +231,12 @@ where #[cfg(test)] mod tests { use super::*; - use reth_node_core::args::utils::{DefaultChainSpecParser, SUPPORTED_CHAINS}; + use reth_node_core::args::utils::{EthereumChainSpecParser, SUPPORTED_CHAINS}; #[test] fn parse_common_import_command_chain_args() { for chain in SUPPORTED_CHAINS { - let args: ImportCommand<DefaultChainSpecParser> = + let args: ImportCommand<EthereumChainSpecParser> = ImportCommand::parse_from(["reth", "--chain", chain, "."]); assert_eq!( Ok(args.env.chain.chain), diff --git a/crates/cli/commands/src/init_cmd.rs b/crates/cli/commands/src/init_cmd.rs index 63a8827eb24c..5fde9ac0d0ba 100644 --- a/crates/cli/commands/src/init_cmd.rs +++ b/crates/cli/commands/src/init_cmd.rs @@ -2,7 +2,7 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_node_builder::NodeTypesWithEngine; use reth_provider::BlockHashReader; @@ -15,7 +15,7 @@ pub struct InitCommand<C: ChainSpecParser> { env: EnvironmentArgs<C>, } -impl<C: ChainSpecParser<ChainSpec = ChainSpec>> InitCommand<C> { +impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> InitCommand<C> { /// Execute the `init` command pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( self, diff --git a/crates/cli/commands/src/init_state.rs b/crates/cli/commands/src/init_state.rs index 67955d714aff..16e99f8fe976 100644 --- a/crates/cli/commands/src/init_state.rs +++ b/crates/cli/commands/src/init_state.rs @@ -3,12 +3,12 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use alloy_primitives::B256; use clap::Parser; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_config::config::EtlConfig; use reth_db_common::init::init_from_state_dump; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; -use reth_provider::ProviderFactory; +use reth_node_builder::NodeTypesWithEngine; +use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use std::{fs::File, io::BufReader, path::PathBuf}; use tracing::info; @@ -40,7 +40,7 @@ pub struct InitStateCommand<C: ChainSpecParser> { pub state: PathBuf, } -impl<C: ChainSpecParser<ChainSpec = ChainSpec>> InitStateCommand<C> { +impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> InitStateCommand<C> { /// Execute the `init` command pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( self, @@ -59,7 +59,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> InitStateCommand<C> { } /// Initialize chain with state at specific block, from a file with state dump. -pub fn init_at_state<N: NodeTypesWithDB<ChainSpec = ChainSpec>>( +pub fn init_at_state<N: ProviderNodeTypes>( state_dump_path: PathBuf, factory: ProviderFactory<N>, etl_config: EtlConfig, diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index ae85e0acf73d..fe49b769a3d3 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -1,7 +1,7 @@ //!
Main node command for launching a node use clap::{value_parser, Args, Parser}; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::parse_socket_address; @@ -9,7 +9,7 @@ use reth_db::{init_db, DatabaseEnv}; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::{ - utils::DefaultChainSpecParser, DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, + utils::EthereumChainSpecParser, DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, node_config::NodeConfig, @@ -21,7 +21,7 @@ use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sy /// Start the node #[derive(Debug, Parser)] pub struct NodeCommand< - C: ChainSpecParser = DefaultChainSpecParser, + C: ChainSpecParser = EthereumChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs, > { /// The path to the configuration file to use. @@ -112,7 +112,7 @@ pub struct NodeCommand< pub ext: Ext, } -impl<C: ChainSpecParser<ChainSpec = ChainSpec>> NodeCommand<C> { +impl NodeCommand { /// Parsers only the default CLI arguments pub fn parse_args() -> Self { Self::parse() @@ -128,7 +128,11 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> NodeCommand<C> { } } -impl<C: ChainSpecParser<ChainSpec = ChainSpec>, Ext: clap::Args + fmt::Debug> NodeCommand<C, Ext> { +impl< + C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>, + Ext: clap::Args + fmt::Debug, + > NodeCommand<C, Ext> +{ /// Launches the node /// /// This transforms the node command into a node config and launches the node using the given @@ -214,7 +218,7 @@ mod tests { #[test] fn parse_help_node_command() { - let err = NodeCommand::<DefaultChainSpecParser>::try_parse_args_from(["reth", "--help"]) + let err = NodeCommand::<EthereumChainSpecParser>::try_parse_args_from(["reth", "--help"]) .unwrap_err(); assert_eq!(err.kind(), clap::error::ErrorKind::DisplayHelp); } @@ -355,7 +359,7 @@ mod tests { #[test] fn with_unused_ports_conflicts_with_instance() { - let err = NodeCommand::<DefaultChainSpecParser>::try_parse_args_from([ + let err = NodeCommand::<EthereumChainSpecParser>::try_parse_args_from([ "reth", "--with-unused-ports", "--instance", diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index e8eadddbd906..898bfbf8e34a 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -5,7 +5,7 @@ use std::{path::PathBuf, sync::Arc}; use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::{Parser, Subcommand}; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_util::{get_secret_key, hash_or_num_value_parser}; use reth_config::Config; @@ -73,10 +73,10 @@ pub enum Subcommands<C: ChainSpecParser> { Rlpx(rlpx::Command), } -impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { +impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C> { /// Execute `p2p` command pub async fn execute(self) -> eyre::Result<()> { - let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain); + let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); // Load configuration @@ -100,13 +100,12 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { let net = NetworkConfigBuilder::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) .external_ip_resolver(self.network.nat) - .chain_spec(self.chain.clone()) - .disable_discv4_discovery_if(self.chain.chain.is_optimism()) + .disable_discv4_discovery_if(self.chain.chain().is_optimism()) .boot_nodes(boot_nodes.clone()) .apply(|builder| { self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes) }) -
.build_with_noop_provider() + .build_with_noop_provider(self.chain) .manager() .await?; let network = net.handle().clone(); @@ -119,7 +118,7 @@ impl> Command { match self.command { Subcommands::Header { id } => { let header = (move || get_single_header(fetch_client.clone(), id)) - .retry(&backoff) + .retry(backoff) .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) .await?; println!("Successfully downloaded header: {header:?}"); @@ -133,7 +132,7 @@ impl> Command { let header = (move || { get_single_header(client.clone(), BlockHashOrNumber::Number(number)) }) - .retry(&backoff) + .retry(backoff) .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) .await?; header.hash() @@ -143,7 +142,7 @@ impl> Command { let client = fetch_client.clone(); client.get_block_bodies(vec![hash]) }) - .retry(&backoff) + .retry(backoff) .notify(|err, _| println!("Error requesting block: {err}. Retrying...")) .await? .split(); diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs index d19247e21a7b..7dbb66fc2faf 100644 --- a/crates/cli/commands/src/prune.rs +++ b/crates/cli/commands/src/prune.rs @@ -1,7 +1,7 @@ //! Command that runs pruning without any limits. use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_node_builder::NodeTypesWithEngine; use reth_prune::PrunerBuilder; @@ -15,7 +15,7 @@ pub struct PruneCommand { env: EnvironmentArgs, } -impl> PruneCommand { +impl> PruneCommand { /// Execute the `prune` command pub async fn execute>( self, diff --git a/crates/cli/commands/src/recover/mod.rs b/crates/cli/commands/src/recover/mod.rs index 9bf81817458d..3216449e49b6 100644 --- a/crates/cli/commands/src/recover/mod.rs +++ b/crates/cli/commands/src/recover/mod.rs @@ -1,7 +1,7 @@ //! `reth recover` command. use clap::{Parser, Subcommand}; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_node_builder::NodeTypesWithEngine; @@ -22,7 +22,7 @@ pub enum Subcommands { StorageTries(storage_tries::Command), } -impl> Command { +impl> Command { /// Execute `recover` command pub async fn execute>( self, diff --git a/crates/cli/commands/src/recover/storage_tries.rs b/crates/cli/commands/src/recover/storage_tries.rs index 65cb741f324e..794058fac1d8 100644 --- a/crates/cli/commands/src/recover/storage_tries.rs +++ b/crates/cli/commands/src/recover/storage_tries.rs @@ -1,6 +1,6 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_db::tables; @@ -21,7 +21,7 @@ pub struct Command { env: EnvironmentArgs, } -impl> Command { +impl> Command { /// Execute `storage-tries` recovery command pub async fn execute>( self, @@ -33,7 +33,7 @@ impl> Command { let best_block = provider.best_block_number()?; let best_header = provider .sealed_header(best_block)? 
- .ok_or(ProviderError::HeaderNotFound(best_block.into()))?; + .ok_or_else(|| ProviderError::HeaderNotFound(best_block.into()))?; let mut deleted_tries = 0; let tx_mut = provider.tx_mut(); diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 6571cbaae864..e324c9851502 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -2,10 +2,10 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use itertools::Itertools; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db::{static_file::iter_static_files, tables}; -use reth_db_api::transaction::DbTxMut; +use reth_db_api::transaction::{DbTx, DbTxMut}; use reth_db_common::{ init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}, DbTool, @@ -13,6 +13,7 @@ use reth_db_common::{ use reth_node_builder::NodeTypesWithEngine; use reth_node_core::args::StageEnum; use reth_provider::{writer::UnifiedStorageWriter, StaticFileProviderFactory}; +use reth_prune::PruneSegment; use reth_stages::StageId; use reth_static_file_types::StaticFileSegment; @@ -25,7 +26,7 @@ pub struct Command { stage: StageEnum, } -impl> Command { +impl> Command { /// Execute `db` command pub async fn execute>( self, @@ -89,6 +90,17 @@ impl> Command { } StageEnum::Senders => { tx.clear::()?; + // Reset pruned numbers to not count them in the next rerun's stage progress + if let Some(mut prune_checkpoint) = + tx.get::(PruneSegment::SenderRecovery)? + { + prune_checkpoint.block_number = None; + prune_checkpoint.tx_number = None; + tx.put::( + PruneSegment::SenderRecovery, + prune_checkpoint, + )?; + } tx.put::( StageId::SenderRecovery.to_string(), Default::default(), @@ -164,7 +176,7 @@ impl> Command { StageId::IndexStorageHistory.to_string(), Default::default(), )?; - insert_genesis_history(&provider_rw.0, self.env.chain.genesis.alloc.iter())?; + insert_genesis_history(&provider_rw.0, self.env.chain.genesis().alloc.iter())?; } StageEnum::TxLookup => { tx.clear::()?; diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index c807ac94145d..709fc59190d4 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -1,7 +1,6 @@ use std::sync::Arc; use super::setup; -use reth_chainspec::ChainSpec; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, database::Database, table::TableImporter, transaction::DbTx, @@ -10,7 +9,10 @@ use reth_db_common::DbTool; use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider}; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::{providers::StaticFileProvider, DatabaseProviderFactory, ProviderFactory}; +use reth_provider::{ + providers::{ProviderNodeTypes, StaticFileProvider}, + DatabaseProviderFactory, ProviderFactory, +}; use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; @@ -23,7 +25,7 @@ pub(crate) async fn dump_execution_stage( executor: E, ) -> eyre::Result<()> where - N: NodeTypesWithDB, + N: ProviderNodeTypes, E: BlockExecutorProvider, { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; @@ -129,7 +131,7 @@ fn import_tables_with_range( /// Dry-run an unwind to FROM block, so we can get the 
`PlainStorageState` and /// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. -fn unwind_and_copy>( +fn unwind_and_copy( db_tool: &DbTool, from: u64, tip_block_number: u64, @@ -166,7 +168,7 @@ fn dry_run( executor: E, ) -> eyre::Result<()> where - N: NodeTypesWithDB, + N: ProviderNodeTypes, E: BlockExecutorProvider, { info!(target: "reth::cli", "Executing stage. [dry-run]"); diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 94d8129e0382..738dcabafa70 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -3,17 +3,19 @@ use std::sync::Arc; use super::setup; use alloy_primitives::BlockNumber; use eyre::Result; -use reth_chainspec::ChainSpec; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_node_builder::NodeTypesWithDBAdapter; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::{providers::StaticFileProvider, DatabaseProviderFactory, ProviderFactory}; +use reth_provider::{ + providers::{ProviderNodeTypes, StaticFileProvider}, + DatabaseProviderFactory, ProviderFactory, +}; use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn dump_hashing_account_stage>( +pub(crate) async fn dump_hashing_account_stage( db_tool: &DbTool, from: BlockNumber, to: BlockNumber, @@ -49,7 +51,7 @@ pub(crate) async fn dump_hashing_account_stage>( +fn unwind_and_copy( db_tool: &DbTool, from: u64, tip_block_number: u64, @@ -74,7 +76,7 @@ fn unwind_and_copy>( } /// Try to re-execute the stage straight away -fn dry_run>( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, diff --git a/crates/cli/commands/src/stage/dump/hashing_storage.rs b/crates/cli/commands/src/stage/dump/hashing_storage.rs index 16a90eeedcb3..204c087a234d 100644 --- a/crates/cli/commands/src/stage/dump/hashing_storage.rs +++ b/crates/cli/commands/src/stage/dump/hashing_storage.rs @@ -2,17 +2,19 @@ use std::sync::Arc; use super::setup; use eyre::Result; -use reth_chainspec::ChainSpec; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_node_builder::NodeTypesWithDBAdapter; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::{providers::StaticFileProvider, DatabaseProviderFactory, ProviderFactory}; +use reth_provider::{ + providers::{ProviderNodeTypes, StaticFileProvider}, + DatabaseProviderFactory, ProviderFactory, +}; use reth_stages::{stages::StorageHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn dump_hashing_storage_stage>( +pub(crate) async fn dump_hashing_storage_stage( db_tool: &DbTool, from: u64, to: u64, @@ -39,7 +41,7 @@ pub(crate) async fn dump_hashing_storage_stage>( +fn unwind_and_copy( db_tool: &DbTool, from: u64, tip_block_number: u64, @@ -69,7 +71,7 @@ fn unwind_and_copy>( } /// Try to re-execute the stage straight away -fn dry_run>( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index 
4b3d9c30331e..f7e9e2fc1afc 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -3,16 +3,18 @@ use std::sync::Arc; use super::setup; use alloy_primitives::BlockNumber; use eyre::Result; -use reth_chainspec::ChainSpec; use reth_config::config::EtlConfig; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_node_builder::NodeTypesWithDBAdapter; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::{providers::StaticFileProvider, DatabaseProviderFactory, ProviderFactory}; +use reth_provider::{ + providers::{ProviderNodeTypes, StaticFileProvider}, + DatabaseProviderFactory, ProviderFactory, +}; use reth_prune::PruneModes; use reth_stages::{ stages::{ @@ -23,7 +25,7 @@ use reth_stages::{ }; use tracing::info; -pub(crate) async fn dump_merkle_stage>( +pub(crate) async fn dump_merkle_stage( db_tool: &DbTool, from: BlockNumber, to: BlockNumber, @@ -66,7 +68,7 @@ pub(crate) async fn dump_merkle_stage> } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. -fn unwind_and_copy>( +fn unwind_and_copy( db_tool: &DbTool, range: (u64, u64), tip_block_number: u64, @@ -144,7 +146,7 @@ fn unwind_and_copy>( } /// Try to re-execute the stage straight away -fn dry_run>( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, diff --git a/crates/cli/commands/src/stage/dump/mod.rs b/crates/cli/commands/src/stage/dump/mod.rs index 44161d9b3bb2..6fd2f23aa0e5 100644 --- a/crates/cli/commands/src/stage/dump/mod.rs +++ b/crates/cli/commands/src/stage/dump/mod.rs @@ -1,7 +1,7 @@ //! Database debugging tool use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db::{init_db, mdbx::DatabaseArguments, tables, DatabaseEnv}; use reth_db_api::{ @@ -75,24 +75,26 @@ pub struct StageCommand { macro_rules! handle_stage { ($stage_fn:ident, $tool:expr, $command:expr) => {{ let StageCommand { output_datadir, from, to, dry_run, .. } = $command; - let output_datadir = output_datadir.with_chain($tool.chain().chain, DatadirArgs::default()); + let output_datadir = + output_datadir.with_chain($tool.chain().chain(), DatadirArgs::default()); $stage_fn($tool, *from, *to, output_datadir, *dry_run).await? }}; ($stage_fn:ident, $tool:expr, $command:expr, $executor:expr) => {{ let StageCommand { output_datadir, from, to, dry_run, .. } = $command; - let output_datadir = output_datadir.with_chain($tool.chain().chain, DatadirArgs::default()); + let output_datadir = + output_datadir.with_chain($tool.chain().chain(), DatadirArgs::default()); $stage_fn($tool, *from, *to, output_datadir, *dry_run, $executor).await? }}; } -impl> Command { +impl> Command { /// Execute `dump-stage` command pub async fn execute(self, executor: F) -> eyre::Result<()> where N: NodeTypesWithEngine, E: BlockExecutorProvider, - F: FnOnce(Arc) -> E, + F: FnOnce(Arc) -> E, { let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RO)?; let tool = DbTool::new(provider_factory)?; diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index a4e0d088ac94..562bd73a28d7 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use clap::{Parser, Subcommand}; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_evm::execute::BlockExecutorProvider; @@ -39,13 +39,13 @@ pub enum Subcommands { Unwind(unwind::Command), } -impl> Command { +impl> Command { /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where N: NodeTypesWithEngine, E: BlockExecutorProvider, - F: FnOnce(Arc) -> E, + F: FnOnce(Arc) -> E, { match self.command { Subcommands::Run(command) => command.execute::(ctx, executor).await, diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 9bc0fa04a365..23d6f6f28ac6 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -6,7 +6,7 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; @@ -102,13 +102,13 @@ pub struct Command { network: NetworkArgs, } -impl> Command { +impl> Command { /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where N: NodeTypesWithEngine, E: BlockExecutorProvider, - F: FnOnce(Arc) -> E, + F: FnOnce(Arc) -> E, { // Raise the fd limit of the process. // Does not do anything on windows. 
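Aside: the `stage dump` and `stage run` hunks above keep threading an executor factory bounded by `FnOnce(Arc<_>) -> E`, so the commands never construct a `BlockExecutorProvider` themselves; this patch only re-parameterizes that closure over the parser's associated chain spec type. A minimal sketch of the pattern, using hypothetical `MyChainSpec`/`MyExecutor` stand-ins rather than the real reth types:

    use std::sync::Arc;

    // Hypothetical stand-ins for the command's generic parameters.
    struct MyChainSpec;
    struct MyExecutor;

    // Mirrors the `F: FnOnce(Arc<..ChainSpec>) -> E` bound seen in the hunks
    // above: the command stays agnostic over executor construction and defers
    // it to a caller-supplied factory that receives the shared chain spec.
    fn execute_with_executor<E, F>(chain_spec: Arc<MyChainSpec>, make_executor: F) -> E
    where
        F: FnOnce(Arc<MyChainSpec>) -> E,
    {
        // The command resolves its environment first, then builds the
        // executor from the shared chain spec exactly once.
        make_executor(chain_spec)
    }

    fn main() {
        let spec = Arc::new(MyChainSpec);
        let _executor: MyExecutor = execute_with_executor(spec, |_spec| MyExecutor);
    }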
@@ -131,7 +131,7 @@ impl> Command { target_triple: VERGEN_CARGO_TARGET_TRIPLE, build_profile: BUILD_PROFILE_NAME, }, - ChainSpecInfo { name: provider_factory.chain_spec().chain.to_string() }, + ChainSpecInfo { name: provider_factory.chain_spec().chain().to_string() }, ctx.task_executor, Hooks::new( provider_factory.db_ref().clone(), diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index 6c7bdebd1184..ae3ae2500874 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -5,7 +5,7 @@ use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use clap::{Parser, Subcommand}; use reth_beacon_consensus::EthBeaconConsensus; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_config::Config; use reth_consensus::Consensus; @@ -16,8 +16,8 @@ use reth_exex::ExExManagerHandle; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::args::NetworkArgs; use reth_provider::{ - BlockExecutionWriter, BlockNumReader, ChainSpecProvider, FinalizedBlockReader, - FinalizedBlockWriter, ProviderFactory, StaticFileProviderFactory, + providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainSpecProvider, + FinalizedBlockReader, FinalizedBlockWriter, ProviderFactory, StaticFileProviderFactory, }; use reth_prune::PruneModes; use reth_stages::{ @@ -48,7 +48,7 @@ pub struct Command { offline: bool, } -impl> Command { +impl> Command { /// Execute `db stage unwind` command pub async fn execute>( self, @@ -189,7 +189,7 @@ impl Subcommands { /// Returns the block range to unwind. /// /// This returns an inclusive range: [target..=latest] - fn unwind_range>>( + fn unwind_range>>( &self, factory: ProviderFactory, ) -> eyre::Result> { @@ -213,13 +213,13 @@ impl Subcommands { #[cfg(test)] mod tests { - use reth_node_core::args::utils::DefaultChainSpecParser; + use reth_node_core::args::utils::EthereumChainSpecParser; use super::*; #[test] fn parse_unwind() { - let cmd = Command::::parse_from([ + let cmd = Command::::parse_from([ "reth", "--datadir", "dir", @@ -228,7 +228,7 @@ mod tests { ]); assert_eq!(cmd.command, Subcommands::ToBlock { target: BlockHashOrNumber::Number(100) }); - let cmd = Command::::parse_from([ + let cmd = Command::::parse_from([ "reth", "--datadir", "dir", diff --git a/crates/cli/util/src/allocator.rs b/crates/cli/util/src/allocator.rs index b5974e2245f8..ee13e7c61cb5 100644 --- a/crates/cli/util/src/allocator.rs +++ b/crates/cli/util/src/allocator.rs @@ -12,6 +12,7 @@ cfg_if::cfg_if! { cfg_if::cfg_if! { if #[cfg(feature = "tracy-allocator")] { type AllocatorWrapper = tracy_client::ProfiledAllocator; + tracy_client::register_demangler!(); const fn new_allocator_wrapper() -> AllocatorWrapper { AllocatorWrapper::new(AllocatorInner {}, 100) } @@ -23,9 +24,6 @@ cfg_if::cfg_if! { } } -#[cfg(feature = "tracy-allocator")] -tracy_client::register_demangler!(); - /// Custom allocator. 
pub type Allocator = AllocatorWrapper; diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index d8224a3d62ed..a186b8407a8a 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -27,4 +27,4 @@ eyre.workspace = true [dev-dependencies] tempfile.workspace = true reth-network-peers.workspace = true -reth-primitives.workspace = true +alloy-primitives.workspace = true diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 24c992fddd82..e4a7fc9677aa 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -444,8 +444,8 @@ where mod tests { use super::{Config, EXTENSION}; use crate::PruneConfig; + use alloy_primitives::Address; use reth_network_peers::TrustedPeer; - use reth_primitives::Address; use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig}; use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration}; diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index ca35937a4739..b4b281230336 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -26,7 +26,6 @@ reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true reth-consensus.workspace = true -reth-rpc-types.workspace = true reth-network-peers.workspace = true reth-tokio-util.workspace = true reth-trie.workspace = true @@ -34,6 +33,10 @@ reth-trie.workspace = true # ethereum alloy-primitives.workspace = true revm-primitives.workspace = true +alloy-rpc-types-engine.workspace = true + +# optimism +reth-optimism-consensus = { workspace = true, optional = true } # async futures-util.workspace = true @@ -42,4 +45,4 @@ tokio-stream.workspace = true tracing.workspace = true [features] -optimism = ["reth-provider/optimism"] +optimism = ["reth-provider/optimism", "reth-optimism-consensus"] diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index fea15f84d510..f1ef64c8c0fa 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -301,7 +301,7 @@ impl StorageInner { withdrawals_root: withdrawals.map(|w| proofs::calculate_withdrawals_root(w)), difficulty: U256::from(2), number: self.best_block + 1, - gas_limit: chain_spec.max_gas_limit().into(), + gas_limit: chain_spec.max_gas_limit(), timestamp, base_fee_per_gas, blob_gas_used: blob_gas_used.map(Into::into), @@ -326,10 +326,8 @@ impl StorageInner { } _ => (0, 0), }; - header.excess_blob_gas = Some( - calc_excess_blob_gas(parent_excess_blob_gas as u64, parent_blob_gas_used as u64) - .into(), - ) + header.excess_blob_gas = + Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) } header @@ -406,7 +404,7 @@ impl StorageInner { // now we need to update certain header fields with the results of the execution header.state_root = db.state_root(hashed_state)?; - header.gas_used = gas_used.into(); + header.gas_used = gas_used; let receipts = execution_outcome.receipts_by_block(header.number); @@ -419,7 +417,13 @@ impl StorageInner { header.receipts_root = { #[cfg(feature = "optimism")] let receipts_root = execution_outcome - .optimism_receipts_root_slow(header.number, &chain_spec, header.timestamp) + .generic_receipts_root_slow(header.number, |receipts| { + reth_optimism_consensus::calculate_receipt_root_no_memo_optimism( + receipts, + &chain_spec, + header.timestamp, + ) + }) .expect("Receipts is present"); #[cfg(not(feature = "optimism"))] @@ -588,7 +592,7 @@ mod tests { 
assert_eq!(header.parent_hash, best_block_hash); assert_eq!(header.number, best_block_number + 1); assert_eq!(header.timestamp, timestamp); - assert_eq!(header.gas_limit, chain_spec.max_gas_limit.into()); + assert_eq!(header.gas_limit, chain_spec.max_gas_limit); } #[test] @@ -682,7 +686,7 @@ mod tests { withdrawals_root: None, difficulty: U256::from(2), number: 1, - gas_limit: chain_spec.max_gas_limit.into(), + gas_limit: chain_spec.max_gas_limit, timestamp, base_fee_per_gas: None, blob_gas_used: Some(0), diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 16c726c7a1a2..e4873615f1d0 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -1,12 +1,11 @@ use crate::{mode::MiningMode, Storage}; +use alloy_rpc_types_engine::ForkchoiceState; use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_engine_primitives::EngineTypes; use reth_evm::execute::BlockExecutorProvider; -use reth_primitives::IntoRecoveredTransaction; use reth_provider::{CanonChainTracker, StateProviderFactory}; -use reth_rpc_types::engine::ForkchoiceState; use reth_stages_api::PipelineEvent; use reth_tokio_util::EventStream; use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; @@ -87,7 +86,7 @@ where Pool: TransactionPool + Unpin + 'static, Engine: EngineTypes, Executor: BlockExecutorProvider, - ChainSpec: EthChainSpec + EthereumHardforks, + ChainSpec: EthChainSpec + EthereumHardforks + 'static, { type Output = (); diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 6d3fc798cbeb..f62c6fbf2a91 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -18,7 +18,6 @@ reth-primitives.workspace = true reth-stages-api.workspace = true reth-errors.workspace = true reth-provider.workspace = true -reth-rpc-types.workspace = true reth-tasks.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true @@ -32,6 +31,7 @@ reth-node-types.workspace = true # ethereum alloy-primitives.workspace = true +alloy-rpc-types-engine.workspace = true # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs index 21c6974737d6..5fc6df2b884d 100644 --- a/crates/consensus/beacon/src/engine/error.rs +++ b/crates/consensus/beacon/src/engine/error.rs @@ -1,6 +1,6 @@ use crate::engine::hooks::EngineHookError; +use alloy_rpc_types_engine::ForkchoiceUpdateError; use reth_errors::{DatabaseError, RethError}; -use reth_rpc_types::engine::ForkchoiceUpdateError; use reth_stages_api::PipelineError; /// Beacon engine result. @@ -57,12 +57,12 @@ pub enum BeaconForkChoiceUpdateError { EngineUnavailable, /// An internal error occurred, not necessarily related to the update. #[error(transparent)] - Internal(Box), + Internal(Box), } impl BeaconForkChoiceUpdateError { /// Create a new internal error. - pub fn internal(e: E) -> Self { + pub fn internal(e: E) -> Self { Self::Internal(Box::new(e)) } } @@ -89,12 +89,12 @@ pub enum BeaconOnNewPayloadError { EngineUnavailable, /// An internal error occurred, not necessarily related to the payload. #[error(transparent)] - Internal(Box), + Internal(Box), } impl BeaconOnNewPayloadError { /// Create a new internal error. 
- pub fn internal(e: E) -> Self { + pub fn internal(e: E) -> Self { Self::Internal(Box::new(e)) } } diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index d617ee4f23c4..975085a32f35 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -1,7 +1,7 @@ use crate::engine::forkchoice::ForkchoiceStatus; use alloy_primitives::B256; +use alloy_rpc_types_engine::ForkchoiceState; use reth_primitives::{SealedBlock, SealedHeader}; -use reth_rpc_types::engine::ForkchoiceState; use std::{ fmt::{Display, Formatter, Result}, sync::Arc, diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index f02b1200338b..975c2ee3bc45 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -1,5 +1,5 @@ use alloy_primitives::B256; -use reth_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum}; +use alloy_rpc_types_engine::{ForkchoiceState, PayloadStatusEnum}; /// The struct that keeps track of the received forkchoice state and their status. #[derive(Debug, Clone, Default)] diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index aee554f8241a..65b7c38df918 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -4,12 +4,12 @@ use crate::{ engine::message::OnForkChoiceUpdated, BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, }; +use alloy_rpc_types_engine::{ + CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, +}; use futures::TryFutureExt; use reth_engine_primitives::EngineTypes; use reth_errors::RethResult; -use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, -}; use reth_tokio_util::{EventSender, EventStream}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs b/crates/consensus/beacon/src/engine/hooks/mod.rs index b5e6ea61e38a..828a6f968500 100644 --- a/crates/consensus/beacon/src/engine/hooks/mod.rs +++ b/crates/consensus/beacon/src/engine/hooks/mod.rs @@ -104,7 +104,7 @@ pub enum EngineHookError { Common(#[from] RethError), /// An internal error occurred. #[error(transparent)] - Internal(#[from] Box), + Internal(#[from] Box), } /// Level of database access the hook needs for execution. 
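Aside: several hunks in this patch flip between `ok_or` and `ok_or_else` (e.g. `HeaderNotFound` earlier, `UnknownBlockHash` below). The usual rule, enforced by clippy's `or_fun_call` lint, is to defer with a closure when building the error does real work (such as the `.into()` conversion on the block number) and to pass the value eagerly when it is already cheap to move. A minimal illustration with a hypothetical `LookupError`:

    #[derive(Debug)]
    enum LookupError {
        NotFound(u64),
    }

    // Lazy: the closure only runs on the `None` path, so any conversion or
    // allocation needed to build the error is deferred until it is needed.
    fn require_lazy(value: Option<u64>, key: u64) -> Result<u64, LookupError> {
        value.ok_or_else(|| LookupError::NotFound(key))
    }

    // Eager: fine when the error value is already constructed and moving it
    // into place costs nothing.
    fn require_eager(value: Option<u64>, err: LookupError) -> Result<u64, LookupError> {
        value.ok_or(err)
    }

    fn main() {
        assert_eq!(require_lazy(Some(7), 1).unwrap(), 7);
        assert!(require_eager(None, LookupError::NotFound(1)).is_err());
    }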
diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 435f8a313d85..fdaad0cc4b0d 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -1,12 +1,12 @@ use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; +use alloy_rpc_types_engine::{ + CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, + ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, +}; use futures::{future::Either, FutureExt}; use reth_engine_primitives::EngineTypes; use reth_errors::RethResult; use reth_payload_primitives::PayloadBuilderError; -use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, - ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, -}; use std::{ fmt::Display, future::Future, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 74515f6c3707..5774d4da26b8 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,4 +1,8 @@ use alloy_primitives::{BlockNumber, B256}; +use alloy_rpc_types_engine::{ + CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + PayloadValidationError, +}; use futures::{stream::BoxStream, Future, StreamExt}; use itertools::Either; use reth_blockchain_tree_api::{ @@ -22,10 +26,6 @@ use reth_provider::{ providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, }; -use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, - PayloadValidationError, -}; use reth_stages_api::{ControlFlow, Pipeline, PipelineTarget, StageId}; use reth_tasks::TaskSpawner; use reth_tokio_util::EventSender; @@ -945,7 +945,7 @@ where let safe = self .blockchain .find_block_by_hash(safe_block_hash, BlockSource::Any)? - .ok_or_else(|| ProviderError::UnknownBlockHash(safe_block_hash))?; + .ok_or(ProviderError::UnknownBlockHash(safe_block_hash))?; self.blockchain.set_safe(SealedHeader::new(safe.header, safe_block_hash)); } Ok(()) @@ -965,7 +965,7 @@ where let finalized = self .blockchain .find_block_by_hash(finalized_block_hash, BlockSource::Any)? 
- .ok_or_else(|| ProviderError::UnknownBlockHash(finalized_block_hash))?; + .ok_or(ProviderError::UnknownBlockHash(finalized_block_hash))?; self.blockchain.finalize_block(finalized.number)?; self.blockchain .set_finalized(SealedHeader::new(finalized.header, finalized_block_hash)); @@ -1984,10 +1984,10 @@ mod tests { test_utils::{spawn_consensus_engine, TestConsensusEngineBuilder}, BeaconForkChoiceUpdateError, }; + use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use assert_matches::assert_matches; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_provider::{BlockWriter, ProviderFactory}; - use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; use reth_stages_api::StageCheckpoint; @@ -2180,11 +2180,11 @@ mod tests { mod fork_choice_updated { use super::*; use alloy_primitives::U256; + use alloy_rpc_types_engine::ForkchoiceUpdateError; use generators::BlockParams; use reth_db::{tables, test_utils::create_test_static_files_dir, Database}; use reth_db_api::transaction::DbTxMut; use reth_provider::{providers::StaticFileProvider, test_utils::MockNodeTypesWithDB}; - use reth_rpc_types::engine::ForkchoiceUpdateError; use reth_testing_utils::generators::random_block; #[tokio::test] diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 6e010a59df16..9426ca19712f 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -619,7 +619,7 @@ mod tests { let client = TestFullBlockClient::default(); let sealed = Header { base_fee_per_gas: Some(7), - gas_limit: chain_spec.max_gas_limit.into(), + gas_limit: chain_spec.max_gas_limit, ..Default::default() } .seal_slow(); diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 44d1d0f05c1d..4dfd9c87d321 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -5,6 +5,9 @@ use crate::{ BeaconOnNewPayloadError, EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, }; use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_rpc_types_engine::{ + CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, +}; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; @@ -30,9 +33,6 @@ use reth_provider::{ }; use reth_prune::Pruner; use reth_prune_types::PruneModes; -use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, -}; use reth_stages::{sets::DefaultStages, test_utils::TestStages, ExecOutput, Pipeline, StageError}; use reth_static_file::StaticFileProducer; use reth_tasks::TokioTaskExecutor; diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index c9fea9789b86..66a92270dba8 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -23,4 +23,6 @@ revm-primitives.workspace = true [dev-dependencies] reth-storage-api.workspace = true rand.workspace = true -mockall = "0.12" +mockall = "0.13" + +alloy-consensus.workspace = true diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 66a953a50870..88a4cabe96c5 100644 --- a/crates/consensus/common/src/validation.rs +++ 
b/crates/consensus/common/src/validation.rs @@ -16,8 +16,8 @@ use revm_primitives::calc_excess_blob_gas; pub const fn validate_header_gas(header: &Header) -> Result<(), ConsensusError> { if header.gas_used > header.gas_limit { return Err(ConsensusError::HeaderGasUsedExceedsGasLimit { - gas_used: header.gas_used as u64, - gas_limit: header.gas_limit as u64, + gas_used: header.gas_used, + gas_limit: header.gas_limit, }) } Ok(()) @@ -66,8 +66,7 @@ pub fn validate_shanghai_withdrawals(block: &SealedBlock) -> Result<(), Consensu pub fn validate_cancun_gas(block: &SealedBlock) -> Result<(), ConsensusError> { // Check that the blob gas used in the header matches the sum of the blob gas used by each // blob tx - let header_blob_gas_used = - block.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)? as u64; + let header_blob_gas_used = block.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?; let total_blob_gas = block.blob_gas_used(); if total_blob_gas != header_blob_gas_used { return Err(ConsensusError::BlobGasUsedDiff(GotExpected { @@ -151,25 +150,25 @@ pub fn validate_4844_header_standalone(header: &Header) -> Result<(), ConsensusE return Err(ConsensusError::ParentBeaconBlockRootMissing) } - if blob_gas_used as u64 > MAX_DATA_GAS_PER_BLOCK { + if blob_gas_used > MAX_DATA_GAS_PER_BLOCK { return Err(ConsensusError::BlobGasUsedExceedsMaxBlobGasPerBlock { - blob_gas_used: blob_gas_used as u64, + blob_gas_used, max_blob_gas_per_block: MAX_DATA_GAS_PER_BLOCK, }) } - if blob_gas_used as u64 % DATA_GAS_PER_BLOB != 0 { + if blob_gas_used % DATA_GAS_PER_BLOB != 0 { return Err(ConsensusError::BlobGasUsedNotMultipleOfBlobGasPerBlob { - blob_gas_used: blob_gas_used as u64, + blob_gas_used, blob_gas_per_blob: DATA_GAS_PER_BLOB, }) } // `excess_blob_gas` must also be a multiple of `DATA_GAS_PER_BLOB`. This will be checked later // (via `calc_excess_blob_gas`), but it doesn't hurt to catch the problem sooner. - if excess_blob_gas as u64 % DATA_GAS_PER_BLOB != 0 { + if excess_blob_gas % DATA_GAS_PER_BLOB != 0 { return Err(ConsensusError::ExcessBlobGasNotMultipleOfBlobGasPerBlob { - excess_blob_gas: excess_blob_gas as u64, + excess_blob_gas, blob_gas_per_blob: DATA_GAS_PER_BLOB, }) } @@ -225,7 +224,7 @@ pub fn validate_against_parent_eip1559_base_fee Result<(), ConsensusError> { if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number) { - let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)? as u64; + let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)?; let expected_base_fee = if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { @@ -235,7 +234,7 @@ pub fn validate_against_parent_eip1559_base_fee are evaluated as 0. // // This means in the first post-fork block, calc_excess_blob_gas will return 0. - let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0) as u64; - let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0) as u64; + let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0); + let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0); if header.blob_gas_used.is_none() { return Err(ConsensusError::BlobGasUsedMissing) } - let excess_blob_gas = - header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)? 
as u64; + let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?; let expected_excess_blob_gas = calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used); @@ -302,6 +300,7 @@ pub fn validate_against_parent_4844( #[cfg(test)] mod tests { use super::*; + use alloy_consensus::TxEip4844; use alloy_primitives::{ hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, Parity, Sealable, U256, }; @@ -310,7 +309,7 @@ mod tests { use reth_chainspec::ChainSpecBuilder; use reth_primitives::{ proofs, Account, BlockBody, BlockHashOrNumber, Signature, Transaction, TransactionSigned, - TxEip4844, Withdrawal, Withdrawals, + Withdrawal, Withdrawals, }; use reth_storage_api::{ errors::provider::ProviderResult, AccountReader, HeaderProvider, WithdrawalsProvider, @@ -539,7 +538,7 @@ mod tests { let transaction = mock_blob_tx(1, 10); let sealed = Header { - base_fee_per_gas: Some(1337u128), + base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), blob_gas_used: Some(1), transactions_root: proofs::calculate_transaction_root(&[transaction.clone()]), diff --git a/crates/consensus/debug-client/Cargo.toml b/crates/consensus/debug-client/Cargo.toml index 8a03c052768d..c37beef10742 100644 --- a/crates/consensus/debug-client/Cargo.toml +++ b/crates/consensus/debug-client/Cargo.toml @@ -13,9 +13,7 @@ workspace = true [dependencies] # reth reth-node-api.workspace = true -reth-node-core.workspace = true reth-rpc-api.workspace = true -reth-rpc-types.workspace = true reth-rpc-builder.workspace = true reth-tracing.workspace = true @@ -24,6 +22,8 @@ alloy-consensus = { workspace = true, features = ["serde"] } alloy-eips.workspace = true alloy-provider = { workspace = true, features = ["ws"] } alloy-rpc-types.workspace = true +alloy-rpc-types-engine.workspace = true +alloy-primitives.workspace = true auto_impl.workspace = true futures.workspace = true diff --git a/crates/consensus/debug-client/src/client.rs b/crates/consensus/debug-client/src/client.rs index eec1b5b99e9e..a6a59a6a380b 100644 --- a/crates/consensus/debug-client/src/client.rs +++ b/crates/consensus/debug-client/src/client.rs @@ -1,13 +1,10 @@ use alloy_consensus::TxEnvelope; use alloy_eips::eip2718::Encodable2718; +use alloy_primitives::B256; use alloy_rpc_types::{Block, BlockTransactions}; +use alloy_rpc_types_engine::{ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3}; use reth_node_api::EngineTypes; -use reth_node_core::{ - primitives::B256, - rpc::types::{ExecutionPayloadV2, ExecutionPayloadV3}, -}; use reth_rpc_builder::auth::AuthServerHandle; -use reth_rpc_types::ExecutionPayloadV1; use reth_tracing::tracing::warn; use ringbuffer::{AllocRingBuffer, RingBuffer}; use std::future::Future; @@ -133,7 +130,7 @@ impl DebugConsensusClient
{ continue; } }; - let state = reth_rpc_types::engine::ForkchoiceState { + let state = alloy_rpc_types_engine::ForkchoiceState { head_block_hash: block_hash, safe_block_hash, finalized_block_hash, @@ -200,8 +197,8 @@ pub fn block_to_execution_payload_v3(block: Block) -> ExecutionNewPayload { logs_bloom: block.header.logs_bloom, prev_randao: block.header.mix_hash.unwrap(), block_number: block.header.number, - gas_limit: block.header.gas_limit.try_into().unwrap(), - gas_used: block.header.gas_used.try_into().unwrap(), + gas_limit: block.header.gas_limit, + gas_used: block.header.gas_used, timestamp: block.header.timestamp, extra_data: block.header.extra_data.clone(), base_fee_per_gas: block.header.base_fee_per_gas.unwrap().try_into().unwrap(), @@ -218,8 +215,8 @@ pub fn block_to_execution_payload_v3(block: Block) -> ExecutionNewPayload { }, withdrawals: block.withdrawals.clone().unwrap_or_default(), }, - blob_gas_used: block.header.blob_gas_used.unwrap().try_into().unwrap(), - excess_blob_gas: block.header.excess_blob_gas.unwrap().try_into().unwrap(), + blob_gas_used: block.header.blob_gas_used.unwrap(), + excess_blob_gas: block.header.excess_blob_gas.unwrap(), }; ExecutionNewPayload { diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 3d2961cf8cd9..998b48e70431 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -10,7 +10,7 @@ use reth::{ rpc::api::eth::{helpers::AddDevSigners, FullEthApiServer}, tasks::TaskManager, }; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_node_builder::{ components::NodeComponentsBuilder, rpc::EthApiBuilderProvider, FullNodeTypesAdapter, Node, @@ -47,11 +47,11 @@ mod traits; /// Creates the initial setup with `num_nodes` started and interconnected. 
pub async fn setup( num_nodes: usize, - chain_spec: Arc, + chain_spec: Arc, is_dev: bool, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + Node> + NodeTypesWithEngine, + N: Default + Node> + NodeTypesWithEngine, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, @@ -73,8 +73,7 @@ where let mut nodes: Vec> = Vec::with_capacity(num_nodes); for idx in 0..num_nodes { - let node_config = NodeConfig::test() - .with_chain(chain_spec.clone()) + let node_config = NodeConfig::new(chain_spec.clone()) .with_network(network_config.clone()) .with_unused_ports() .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()) diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 391a070df7dd..2ea39348f5de 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -17,7 +17,7 @@ use reth::{ types::engine::PayloadStatusEnum, }, }; -use reth_chainspec::ChainSpec; +use reth_chainspec::EthereumHardforks; use reth_node_builder::{NodeAddOns, NodeTypesWithEngine}; use reth_stages_types::StageId; use tokio_stream::StreamExt; @@ -50,7 +50,7 @@ impl NodeTestContext where Engine: EngineTypes, Node: FullNodeComponents, - Node::Types: NodeTypesWithEngine, + Node::Types: NodeTypesWithEngine, Node::Network: PeersHandleProvider, AddOns: NodeAddOns, { diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 3ff378a08304..b8cbe4d77add 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -8,7 +8,7 @@ use reth::{ DebugApiServer, }, }; -use reth_chainspec::ChainSpec; +use reth_chainspec::EthereumHardforks; use reth_node_builder::{EthApiTypes, NodeTypes}; #[allow(missing_debug_implementations)] @@ -18,7 +18,7 @@ pub struct RpcTestContext { impl RpcTestContext where - Node: FullNodeComponents>, + Node: FullNodeComponents>, EthApi: EthApiSpec + EthTransactions + TraceExt, { /// Injects a raw transaction into the node tx pool via RPC server diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index b3d01faedc9e..04960304442d 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -29,7 +29,7 @@ impl TransactionTestContext { /// Creates a deployment transaction and signs it, returning an envelope. pub async fn deploy_tx( chain_id: u64, - gas: u128, + gas: u64, init_code: Bytes, wallet: PrivateKeySigner, ) -> TxEnvelope { @@ -40,7 +40,7 @@ impl TransactionTestContext { /// Creates a deployment transaction and signs it, returning bytes. 
pub async fn deploy_tx_bytes( chain_id: u64, - gas: u128, + gas: u64, init_code: Bytes, wallet: PrivateKeySigner, ) -> Bytes { @@ -145,7 +145,7 @@ impl TransactionTestContext { /// Creates a type 2 transaction fn tx( chain_id: u64, - gas: u128, + gas: u64, data: Option, delegate_to: Option, nonce: u64, diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 4b15213191db..37d5bb08293d 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -4,12 +4,9 @@ use alloy_primitives::{keccak256, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use eyre::OptionExt; use pretty_assertions::Comparison; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_engine_primitives::InvalidBlockHook; -use reth_evm::{ - system_calls::{apply_beacon_root_contract_call, apply_blockhashes_contract_call}, - ConfigureEvm, -}; +use reth_evm::{system_calls::SystemCaller, ConfigureEvm}; use reth_primitives::{Header, Receipt, SealedBlockWithSenders, SealedHeader}; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ @@ -52,7 +49,11 @@ impl InvalidBlockWitnessHook { impl InvalidBlockWitnessHook where - P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static, + P: StateProviderFactory + + ChainSpecProvider + + Send + + Sync + + 'static, EvmConfig: ConfigureEvm
, { fn on_invalid_block( @@ -83,23 +84,10 @@ where EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()), ); + let mut system_caller = SystemCaller::new(&self.evm_config, self.provider.chain_spec()); + // Apply pre-block system contract calls. - apply_beacon_root_contract_call( - &self.evm_config, - self.provider.chain_spec().as_ref(), - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut evm, - )?; - apply_blockhashes_contract_call( - &self.evm_config, - &self.provider.chain_spec(), - block.timestamp, - block.number, - block.parent_hash, - &mut evm, - )?; + system_caller.apply_pre_execution_changes(&block.clone().unseal(), &mut evm)?; // Re-execute all of the transactions in the block to load all touched accounts into // the cache DB. @@ -130,10 +118,10 @@ where db.merge_transitions(BundleRetention::Reverts); // Take the bundle state - let bundle_state = db.take_bundle(); + let mut bundle_state = db.take_bundle(); // Initialize a map of preimages. - let mut state_preimages = HashMap::new(); + let mut state_preimages = HashMap::default(); // Grab all account proofs for the data accessed during block execution. // @@ -170,7 +158,11 @@ where let state = state_provider.witness(Default::default(), hashed_state.clone())?; // Write the witness to the output directory. - let response = ExecutionWitness { state, keys: Some(state_preimages) }; + let response = ExecutionWitness { + state: HashMap::from_iter(state), + codes: Default::default(), + keys: Some(state_preimages), + }; let re_executed_witness_path = self.save_file( format!("{}_{}.witness.re_executed.json", block.number, block.hash()), &response, @@ -206,6 +198,21 @@ where } // The bundle state after re-execution should match the original one. + // + // NOTE: This should not be needed if `Reverts` had a comparison method that sorted first, + // or otherwise did not care about order. + // + // See: https://github.com/bluealloy/revm/issues/1813 + let mut output = output.clone(); + for reverts in output.state.reverts.iter_mut() { + reverts.sort_by(|left, right| left.0.cmp(&right.0)); + } + + // We also have to sort the `bundle_state` reverts + for reverts in bundle_state.reverts.iter_mut() { + reverts.sort_by(|left, right| left.0.cmp(&right.0)); + } + if bundle_state != output.state { let original_path = self.save_file( format!("{}_{}.bundle_state.original.json", block.number, block.hash()), @@ -292,7 +299,11 @@ where impl InvalidBlockHook for InvalidBlockWitnessHook where - P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static, + P: StateProviderFactory + + ChainSpecProvider + + Send + + Sync + + 'static, EvmConfig: ConfigureEvm
, { fn on_invalid_block( diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index c43a97abd850..d7a5d05091f1 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -11,7 +11,9 @@ exclude.workspace = true [dependencies] # reth reth-beacon-consensus.workspace = true +reth-chain-state.workspace = true reth-engine-tree.workspace = true +reth-ethereum-engine-primitives.workspace = true reth-node-types.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true @@ -23,6 +25,7 @@ reth-stages-api.workspace = true # alloy alloy-primitives.workspace = true +alloy-rpc-types-engine.workspace = true # async tokio.workspace = true @@ -42,7 +45,6 @@ reth-ethereum-engine-primitives.workspace = true reth-exex-test-utils.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } -reth-rpc-types.workspace = true reth-tracing.workspace = true [lints] diff --git a/crates/engine/local/src/lib.rs b/crates/engine/local/src/lib.rs index cf6ff3069b22..1b84c8a113f4 100644 --- a/crates/engine/local/src/lib.rs +++ b/crates/engine/local/src/lib.rs @@ -1,3 +1,4 @@ //! A local engine service that can be used to drive a dev chain. pub mod miner; +pub mod payload; pub mod service; diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs new file mode 100644 index 000000000000..4fd49f53fb49 --- /dev/null +++ b/crates/engine/local/src/payload.rs @@ -0,0 +1,30 @@ +//! The implementation of the [`PayloadAttributesBuilder`] for the +//! [`LocalEngineService`](super::service::LocalEngineService). + +use alloy_primitives::{Address, B256}; +use reth_ethereum_engine_primitives::EthPayloadAttributes; +use reth_payload_primitives::PayloadAttributesBuilder; +use std::{convert::Infallible, time::UNIX_EPOCH}; + +/// The attributes builder for local Ethereum payload. +#[derive(Debug)] +pub struct EthLocalPayloadAttributesBuilder; + +impl PayloadAttributesBuilder for EthLocalPayloadAttributesBuilder { + type PayloadAttributes = EthPayloadAttributes; + type Error = Infallible; + + fn build(&self) -> Result { + let ts = std::time::SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("cannot be earlier than UNIX_EPOCH"); + + Ok(EthPayloadAttributes { + timestamp: ts.as_secs(), + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: None, + parent_beacon_block_root: None, + }) + } +} diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index f9d2bd2aacb0..c9794ecfabb0 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -7,8 +7,9 @@ //! building at a fixed interval. use crate::miner::MiningMode; -use alloy_primitives::B256; +use eyre::eyre; use reth_beacon_consensus::EngineNodeTypes; +use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock, NewCanonicalChain}; use reth_engine_tree::persistence::PersistenceHandle; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{ @@ -17,12 +18,12 @@ use reth_payload_primitives::{ use reth_provider::ProviderFactory; use reth_prune::PrunerWithFactory; use reth_stages_api::MetricEventsSender; -use std::fmt::Formatter; use tokio::sync::oneshot; use tracing::debug; /// Provides a local dev service engine that can be used to drive the /// chain forward. 
+#[derive(Debug)] pub struct LocalEngineService where N: EngineNodeTypes, @@ -32,30 +33,14 @@ where payload_builder: PayloadBuilderHandle, /// The payload attribute builder for the engine payload_attributes_builder: B, + /// Keep track of the Canonical chain state that isn't persisted on disk yet + canonical_in_memory_state: CanonicalInMemoryState, /// A handle to the persistence layer persistence_handle: PersistenceHandle, - /// The hash of the current head - head: B256, /// The mining mode for the engine mode: MiningMode, } -impl std::fmt::Debug for LocalEngineService -where - N: EngineNodeTypes, - B: PayloadAttributesBuilder::PayloadAttributes>, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("LocalEngineService") - .field("payload_builder", &self.payload_builder) - .field("payload_attributes_builder", &self.payload_attributes_builder) - .field("persistence_handle", &self.persistence_handle) - .field("head", &self.head) - .field("mode", &self.mode) - .finish() - } -} - impl LocalEngineService where N: EngineNodeTypes, @@ -67,14 +52,20 @@ where payload_attributes_builder: B, provider: ProviderFactory, pruner: PrunerWithFactory>, + canonical_in_memory_state: CanonicalInMemoryState, sync_metrics_tx: MetricEventsSender, - head: B256, mode: MiningMode, ) -> Self { let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - Self { payload_builder, payload_attributes_builder, persistence_handle, head, mode } + Self { + payload_builder, + payload_attributes_builder, + canonical_in_memory_state, + persistence_handle, + mode, + } } /// Spawn the [`LocalEngineService`] on a tokio green thread. The service will poll the payload @@ -86,8 +77,8 @@ where payload_attributes_builder: B, provider: ProviderFactory, pruner: PrunerWithFactory>, + canonical_in_memory_state: CanonicalInMemoryState, sync_metrics_tx: MetricEventsSender, - head: B256, mode: MiningMode, ) { let engine = Self::new( @@ -95,8 +86,8 @@ where payload_attributes_builder, provider, pruner, + canonical_in_memory_state, sync_metrics_tx, - head, mode, ); @@ -112,26 +103,29 @@ where (&mut self.mode).await; // Start a new payload building job - let new_head = self.build_and_save_payload().await; + let executed_block = self.build_and_save_payload().await; - if new_head.is_err() { - debug!(target: "local_engine", err = ?new_head.unwrap_err(), "failed payload building"); + if executed_block.is_err() { + debug!(target: "local_engine", err = ?executed_block.unwrap_err(), "failed payload building"); continue } + let block = executed_block.expect("not error"); - // Update the head - self.head = new_head.expect("not error"); + let res = self.update_canonical_in_memory_state(block); + if res.is_err() { + debug!(target: "local_engine", err = ?res.unwrap_err(), "failed canonical state update"); + } } } /// Builds a payload by initiating a new payload job via the [`PayloadBuilderHandle`], - /// saving the execution outcome to persistence and returning the current head of the - /// chain. - async fn build_and_save_payload(&self) -> eyre::Result { + /// saving the execution outcome to persistence and returning the executed block. 
+ async fn build_and_save_payload(&self) -> eyre::Result { let payload_attributes = self.payload_attributes_builder.build()?; + let parent = self.canonical_in_memory_state.get_canonical_head().hash(); let payload_builder_attributes = ::PayloadBuilderAttributes::try_new( - self.head, + parent, payload_attributes, ) .map_err(|_| eyre::eyre!("failed to fetch payload attributes"))?; @@ -142,22 +136,38 @@ where .await? .await?; - let block = payload.executed_block().map(|block| vec![block]).unwrap_or_default(); + let executed_block = + payload.executed_block().ok_or_else(|| eyre!("missing executed block"))?; let (tx, rx) = oneshot::channel(); - let _ = self.persistence_handle.save_blocks(block, tx); + let _ = self.persistence_handle.save_blocks(vec![executed_block.clone()], tx); // Wait for the persistence_handle to complete - let new_head = rx.await?.ok_or_else(|| eyre::eyre!("missing new head"))?; + let _ = rx.await?.ok_or_else(|| eyre!("missing new head"))?; + + Ok(executed_block) + } + + /// Update the canonical in memory state and send notification for a new canon state to + /// all the listeners. + fn update_canonical_in_memory_state(&self, executed_block: ExecutedBlock) -> eyre::Result<()> { + let chain = NewCanonicalChain::Commit { new: vec![executed_block] }; + let tip = chain.tip().header.clone(); + let notification = chain.to_chain_notification(); - Ok(new_head.hash) + // Update the tracked in-memory state with the new chain + self.canonical_in_memory_state.update_chain(chain); + self.canonical_in_memory_state.set_canonical_head(tip); + + // Sends an event to all active listeners about the new canonical chain + self.canonical_in_memory_state.notify_canon_state(notification); + Ok(()) } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::B256; use reth_chainspec::MAINNET; use reth_config::PruneConfig; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; @@ -178,11 +188,11 @@ mod tests { struct TestPayloadAttributesBuilder; impl PayloadAttributesBuilder for TestPayloadAttributesBuilder { - type PayloadAttributes = reth_rpc_types::engine::PayloadAttributes; + type PayloadAttributes = alloy_rpc_types_engine::PayloadAttributes; type Error = Infallible; fn build(&self) -> Result { - Ok(reth_rpc_types::engine::PayloadAttributes { + Ok(alloy_rpc_types_engine::PayloadAttributes { timestamp: 0, prev_randao: Default::default(), suggested_fee_recipient: Default::default(), @@ -201,20 +211,20 @@ mod tests { let provider = ProviderFactory::>::new( create_test_rw_db(), MAINNET.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), + StaticFileProvider::read_write(static_dir_path)?, ); let pruner = PrunerBuilder::new(PruneConfig::default()) .build_with_provider_factory(provider.clone()); + // Create an empty canonical in memory state + let canonical_in_memory_state = CanonicalInMemoryState::empty(); + // Start the payload builder service let payload_handle = spawn_test_payload_service::(); // Sync metric channel let (sync_metrics_tx, _) = unbounded_channel(); - // Get the attributes for start of block building - let genesis_hash = B256::random(); - // Launch the LocalEngineService in interval mode let period = Duration::from_secs(1); LocalEngineService::spawn_new( @@ -222,13 +232,17 @@ mod tests { TestPayloadAttributesBuilder, provider.clone(), pruner, + canonical_in_memory_state, sync_metrics_tx, - genesis_hash, MiningMode::interval(period), ); + // Check that we have no block for now + let block = provider.block_by_number(0)?; + 
assert!(block.is_none()); + // Wait 2 intervals - tokio::time::sleep(4 * period).await; + tokio::time::sleep(2 * period).await; // Assert a block has been built let block = provider.block_by_number(0)?; @@ -246,11 +260,14 @@ mod tests { let provider = ProviderFactory::>::new( create_test_rw_db(), MAINNET.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), + StaticFileProvider::read_write(static_dir_path)?, ); let pruner = PrunerBuilder::new(PruneConfig::default()) .build_with_provider_factory(provider.clone()); + // Create an empty canonical in memory state + let canonical_in_memory_state = CanonicalInMemoryState::empty(); + // Start the payload builder service let payload_handle = spawn_test_payload_service::(); @@ -260,17 +277,14 @@ mod tests { // Sync metric channel let (sync_metrics_tx, _) = unbounded_channel(); - // Get the attributes for start of block building - let genesis_hash = B256::random(); - // Launch the LocalEngineService in instant mode LocalEngineService::spawn_new( payload_handle, TestPayloadAttributesBuilder, provider.clone(), pruner, + canonical_in_memory_state, sync_metrics_tx, - genesis_hash, MiningMode::instant(pool.clone()), ); @@ -295,4 +309,54 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_canonical_chain_subscription() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + // Start the provider and the pruner + let (_, static_dir_path) = create_test_static_files_dir(); + let provider = ProviderFactory::>::new( + create_test_rw_db(), + MAINNET.clone(), + StaticFileProvider::read_write(static_dir_path)?, + ); + let pruner = PrunerBuilder::new(PruneConfig::default()) + .build_with_provider_factory(provider.clone()); + + // Create an empty canonical in memory state + let canonical_in_memory_state = CanonicalInMemoryState::empty(); + let mut notifications = canonical_in_memory_state.subscribe_canon_state(); + + // Start the payload builder service + let payload_handle = spawn_test_payload_service::(); + + // Start a transaction pool + let pool = testing_pool(); + + // Sync metric channel + let (sync_metrics_tx, _) = unbounded_channel(); + + // Launch the LocalEngineService in instant mode + LocalEngineService::spawn_new( + payload_handle, + TestPayloadAttributesBuilder, + provider.clone(), + pruner, + canonical_in_memory_state, + sync_metrics_tx, + MiningMode::instant(pool.clone()), + ); + + // Add a transaction to the pool + let transaction = MockTransaction::legacy().with_gas_price(10); + pool.add_transaction(Default::default(), transaction).await?; + + // Check a notification is received for block 0 + let res = notifications.recv().await?; + + assert_eq!(res.tip().number, 0); + + Ok(()) + } } diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 4697e7fb87a5..91c9cd5422d0 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -29,7 +29,6 @@ reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-revm.workspace = true -reth-rpc-types.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true reth-trie.workspace = true @@ -38,6 +37,7 @@ reth-trie-parallel.workspace = true # alloy alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-rpc-types-engine.workspace = true # common futures.workspace = true diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index 440e86693c77..78e21a7b5efc 100644 --- a/crates/engine/tree/src/backfill.rs +++ b/crates/engine/tree/src/backfill.rs
@@ -230,12 +230,12 @@ impl PipelineState { mod tests { use super::*; use crate::test_utils::{insert_headers_into_client, TestPipelineBuilder}; - use alloy_primitives::{BlockNumber, B256}; + use alloy_primitives::{BlockNumber, Sealable, B256}; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::{alloy_primitives::Sealable, Header, SealedHeader}; + use reth_primitives::{Header, SealedHeader}; use reth_provider::test_utils::MockNodeTypesWithDB; use reth_stages::ExecOutput; use reth_stages_api::StageCheckpoint; @@ -269,7 +269,7 @@ mod tests { let client = TestFullBlockClient::default(); let sealed = Header { base_fee_per_gas: Some(7), - gas_limit: chain_spec.max_gas_limit.into(), + gas_limit: chain_spec.max_gas_limit, ..Default::default() } .seal_slow(); diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index aff99c884065..9ecec70ae369 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -305,11 +305,12 @@ impl BlockDownloader for NoopBlockDownloader { mod tests { use super::*; use crate::test_utils::insert_headers_into_client; + use alloy_primitives::Sealable; use assert_matches::assert_matches; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::{alloy_primitives::Sealable, Header, SealedHeader}; + use reth_primitives::{Header, SealedHeader}; use std::{future::poll_fn, sync::Arc}; struct TestHarness { @@ -330,7 +331,7 @@ mod tests { let client = TestFullBlockClient::default(); let sealed = Header { base_fee_per_gas: Some(7), - gas_limit: chain_spec.max_gas_limit.into(), + gas_limit: chain_spec.max_gas_limit, ..Default::default() } .seal_slow(); diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs index 3d04e2db765f..f17766a43ed7 100644 --- a/crates/engine/tree/src/test_utils.rs +++ b/crates/engine/tree/src/test_utils.rs @@ -1,7 +1,7 @@ -use alloy_primitives::B256; +use alloy_primitives::{Sealable, B256}; use reth_chainspec::ChainSpec; use reth_network_p2p::test_utils::TestFullBlockClient; -use reth_primitives::{alloy_primitives::Sealable, BlockBody, SealedHeader}; +use reth_primitives::{BlockBody, SealedHeader}; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, ExecutionOutcome, diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 2df1fbdac700..52dbf34173df 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -1,3 +1,4 @@ +use reth_blockchain_tree::metrics::TreeMetrics; use reth_evm::metrics::ExecutorMetrics; use reth_metrics::{ metrics::{Counter, Gauge, Histogram}, @@ -13,6 +14,8 @@ pub(crate) struct EngineApiMetrics { pub(crate) executor: ExecutorMetrics, /// Metrics for block validation pub(crate) block_validation: BlockValidationMetrics, + /// A copy of legacy blockchain tree metrics, to be replaced when we replace the old tree + pub(crate) tree: TreeMetrics, } /// Metrics for the `EngineApi`. @@ -21,6 +24,8 @@ pub(crate) struct EngineApiMetrics { pub(crate) struct EngineMetrics { /// How many executed blocks are currently stored. pub(crate) executed_blocks: Gauge, + /// How many already executed blocks were directly inserted into the tree. 
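The `EngineApiMetrics` additions above pair a gauge (how many executed blocks the tree currently holds) with a counter (how many pre-executed blocks were inserted directly). A rough illustration of that gauge-versus-counter split, using plain atomics instead of reth's metrics types (all names here are stand-ins):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Plain-atomic stand-ins for the metrics above: a gauge is set to the
// current value and may go down, a counter only ever increments.
#[derive(Default)]
struct EngineMetricsSketch {
    executed_blocks: AtomicU64,                  // gauge-like
    inserted_already_executed_blocks: AtomicU64, // counter-like
}

impl EngineMetricsSketch {
    fn on_insert_executed(&self, blocks_in_tree: u64) {
        self.inserted_already_executed_blocks.fetch_add(1, Ordering::Relaxed);
        self.executed_blocks.store(blocks_in_tree, Ordering::Relaxed);
    }
}

fn main() {
    let metrics = EngineMetricsSketch::default();
    metrics.on_insert_executed(1);
    metrics.on_insert_executed(2);
    assert_eq!(metrics.inserted_already_executed_blocks.load(Ordering::Relaxed), 2);
    assert_eq!(metrics.executed_blocks.load(Ordering::Relaxed), 2);
}
```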
+ pub(crate) inserted_already_executed_blocks: Counter, /// The number of times the pipeline was run. pub(crate) pipeline_runs: Counter, /// The total count of forkchoice updated messages received. diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 3b7cf8ae2b84..e01b75288220 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -5,7 +5,14 @@ use crate::{ persistence::PersistenceHandle, }; use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockNumber, B256, U256}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + BlockNumber, B256, U256, +}; +use alloy_rpc_types_engine::{ + CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + PayloadValidationError, +}; use reth_beacon_consensus::{ BeaconConsensusEngineEvent, BeaconEngineMessage, ForkchoiceStateTracker, InvalidHeaderCache, OnForkChoiceUpdated, MIN_BLOCKS_FOR_PIPELINE_RUN, @@ -34,19 +41,12 @@ use reth_provider::{ TransactionVariant, }; use reth_revm::database::StateProviderDatabase; -use reth_rpc_types::{ - engine::{ - CancunPayloadFields, ForkchoiceState, PayloadStatus, PayloadStatusEnum, - PayloadValidationError, - }, - ExecutionPayload, -}; use reth_stages_api::ControlFlow; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; -use reth_trie_parallel::parallel_root::ParallelStateRoot; +use reth_trie_parallel::parallel_root::{ParallelStateRoot, ParallelStateRootError}; use std::{ cmp::Ordering, - collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet, VecDeque}, + collections::{btree_map, hash_map, BTreeMap, VecDeque}, fmt::Debug, ops::Bound, sync::{ @@ -104,11 +104,11 @@ impl TreeState { /// Returns a new, empty tree state that points to the given canonical head. fn new(current_canonical_head: BlockNumHash) -> Self { Self { - blocks_by_hash: HashMap::new(), + blocks_by_hash: HashMap::default(), blocks_by_number: BTreeMap::new(), current_canonical_head, - parent_to_child: HashMap::new(), - persisted_trie_updates: HashMap::new(), + parent_to_child: HashMap::default(), + persisted_trie_updates: HashMap::default(), } } @@ -453,8 +453,6 @@ pub enum TreeAction { MakeCanonical { /// The sync target head hash sync_target_head: B256, - /// The sync target finalized hash - sync_target_finalized: Option, }, } @@ -549,6 +547,7 @@ where config: TreeConfig, ) -> Self { let (incoming_tx, incoming) = std::sync::mpsc::channel(); + Self { provider, executor_provider, @@ -813,22 +812,9 @@ where let mut outcome = TreeOutcome::new(status); if outcome.outcome.is_valid() && self.is_sync_target_head(block_hash) { - // NOTE: if we are in this branch, `is_sync_target_head` has returned true, - // meaning a sync target state exists, so we can safely unwrap - let sync_target = self - .state - .forkchoice_state_tracker - .sync_target_state() - .expect("sync target must exist"); - - // if the hash is zero then we should act like there is no finalized hash - let sync_target_finalized = (!sync_target.finalized_block_hash.is_zero()) - .then_some(sync_target.finalized_block_hash); - // if the block is valid and it is the sync target head, make it canonical outcome = outcome.with_event(TreeEvent::TreeAction(TreeAction::MakeCanonical { sync_target_head: block_hash, - sync_target_finalized, })); } @@ -841,18 +827,14 @@ where /// /// Note: This does not update the tracked state and instead returns the new chain based on the /// given head. 
- fn on_new_head( - &self, - new_head: B256, - finalized_block: Option, - ) -> ProviderResult> { + fn on_new_head(&self, new_head: B256) -> ProviderResult> { // get the executed new head block let Some(new_head_block) = self.state.tree_state.blocks_by_hash.get(&new_head) else { return Ok(None) }; let new_head_number = new_head_block.block.number; - let current_canonical_number = self.state.tree_state.current_canonical_head.number; + let mut current_canonical_number = self.state.tree_state.current_canonical_head.number; let mut new_chain = vec![new_head_block.clone()]; let mut current_hash = new_head_block.block.parent_hash; @@ -864,9 +846,9 @@ where // that are _above_ the current canonical head. while current_number > current_canonical_number { if let Some(block) = self.executed_block_by_hash(current_hash)? { - new_chain.push(block.clone()); current_hash = block.block.parent_hash; current_number -= 1; + new_chain.push(block); } else { warn!(target: "engine::tree", current_hash=?current_hash, "Sidechain block not found in TreeState"); // This should never happen as we're walking back a chain that should connect to @@ -888,10 +870,29 @@ where let mut old_chain = Vec::new(); let mut old_hash = self.state.tree_state.current_canonical_head.hash; - while old_hash != current_hash { + // If the canonical chain is ahead of the new chain, + // gather all blocks until new head number. + while current_canonical_number > current_number { if let Some(block) = self.executed_block_by_hash(old_hash)? { old_chain.push(block.clone()); old_hash = block.block.header.parent_hash; + current_canonical_number -= 1; + } else { + // This shouldn't happen as we're walking back the canonical chain + warn!(target: "engine::tree", current_hash=?old_hash, "Canonical block not found in TreeState"); + return Ok(None); + } + } + + // Both new and old chain pointers are now at the same height. + debug_assert_eq!(current_number, current_canonical_number); + + // Walk both chains from specified hashes at same height until + // a common ancestor (fork block) is reached. + while old_hash != current_hash { + if let Some(block) = self.executed_block_by_hash(old_hash)? { + old_hash = block.block.header.parent_hash; + old_chain.push(block); } else { // This shouldn't happen as we're walking back the canonical chain warn!(target: "engine::tree", current_hash=?old_hash, "Canonical block not found in TreeState"); @@ -904,9 +905,9 @@ where } if let Some(block) = self.executed_block_by_hash(current_hash)? { - if self.is_fork(block.block.hash(), finalized_block)? { - new_chain.push(block.clone()); + if self.is_fork(block.block.hash())? { current_hash = block.block.parent_hash; + new_chain.push(block); } } else { // This shouldn't happen as we've already walked this path @@ -926,28 +927,31 @@ where /// extension of the canonical chain. /// * walking back from the current head to verify that the target hash is not already part of /// the canonical chain. - fn is_fork(&self, target_hash: B256, finalized_hash: Option) -> ProviderResult { + fn is_fork(&self, target_hash: B256) -> ProviderResult { // verify that the given hash is not part of an extension of the canon chain. + let canonical_head = self.state.tree_state.canonical_head(); let mut current_hash = target_hash; while let Some(current_block) = self.sealed_header_by_hash(current_hash)? 
{ - if current_block.hash() == self.state.tree_state.canonical_block_hash() { + if current_block.hash() == canonical_head.hash { return Ok(false) } + // We already passed the canonical head + if current_block.number <= canonical_head.number { + break + } current_hash = current_block.parent_hash; } - // verify that the given hash is not already part of the canon chain - current_hash = self.state.tree_state.canonical_block_hash(); - while let Some(current_block) = self.sealed_header_by_hash(current_hash)? { - if Some(current_hash) == finalized_hash { - return Ok(true) - } + // verify that the given hash is not already part of canonical chain stored in memory + if self.canonical_in_memory_state.header_by_hash(target_hash).is_some() { + return Ok(false) + } - if current_block.hash() == target_hash { - return Ok(false) - } - current_hash = current_block.parent_hash; + // verify that the given hash is not already part of persisted canonical chain + if self.provider.block_number(target_hash)?.is_some() { + return Ok(false) } + Ok(true) } @@ -1021,11 +1025,8 @@ where return Ok(valid_outcome(state.head_block_hash)) } - let finalized_block_opt = - (!state.finalized_block_hash.is_zero()).then_some(state.finalized_block_hash); - // 2. ensure we can apply a new chain update for the head block - if let Some(chain_update) = self.on_new_head(state.head_block_hash, finalized_block_opt)? { + if let Some(chain_update) = self.on_new_head(state.head_block_hash)? { let tip = chain_update.tip().header.clone(); self.on_canonical_chain_update(chain_update); @@ -1190,7 +1191,9 @@ where FromEngine::Request(request) => { match request { EngineApiRequest::InsertExecutedBlock(block) => { + debug!(target: "engine::tree", block=?block.block().num_hash(), "inserting already executed block"); self.state.tree_state.insert_executed(block); + self.metrics.engine.inserted_already_executed_blocks.increment(1); } EngineApiRequest::Beacon(request) => { match request { @@ -1216,7 +1219,7 @@ where if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(Into::into)) { - error!("Failed to send event: {err:?}"); + error!(target: "engine::tree", "Failed to send event: {err:?}"); } } BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { @@ -1226,7 +1229,7 @@ where Box::new(e), ) })) { - error!("Failed to send event: {err:?}"); + error!(target: "engine::tree", "Failed to send event: {err:?}"); } } BeaconEngineMessage::TransitionConfigurationExchanged => { @@ -1293,6 +1296,7 @@ where backfill_num_hash, ); self.metrics.engine.executed_blocks.set(self.state.tree_state.block_count() as f64); + self.metrics.tree.canonical_chain_height.set(backfill_height as f64); // remove all buffered blocks below the backfill height self.state.buffer.remove_old_blocks(backfill_height); @@ -1353,8 +1357,8 @@ where /// Attempts to make the given target canonical. /// /// This will update the tracked canonical in memory state and do the necessary housekeeping. - fn make_canonical(&mut self, target: B256, finalized: Option) -> ProviderResult<()> { - if let Some(chain_update) = self.on_new_head(target, finalized)? { + fn make_canonical(&mut self, target: B256) -> ProviderResult<()> { + if let Some(chain_update) = self.on_new_head(target)? 
{ self.on_canonical_chain_update(chain_update); } @@ -1374,8 +1378,8 @@ where fn on_tree_event(&mut self, event: TreeEvent) -> ProviderResult<()> { match event { TreeEvent::TreeAction(action) => match action { - TreeAction::MakeCanonical { sync_target_head, sync_target_finalized } => { - self.make_canonical(sync_target_head, sync_target_finalized)?; + TreeAction::MakeCanonical { sync_target_head } => { + self.make_canonical(sync_target_head)?; } }, TreeEvent::BackfillAction(action) => { @@ -1412,10 +1416,9 @@ where debug!(target: "engine::tree", "emitting backfill action event"); } - let _ = self - .outgoing - .send(event) - .inspect_err(|err| error!("Failed to send internal event: {err:?}")); + let _ = self.outgoing.send(event).inspect_err( + |err| error!(target: "engine::tree", "Failed to send internal event: {err:?}"), + ); } /// Returns true if the canonical chain length minus the last persisted @@ -1523,19 +1526,13 @@ where /// Return sealed block from database or in-memory state by hash. fn sealed_header_by_hash(&self, hash: B256) -> ProviderResult> { // check memory first - let block = self - .state - .tree_state - .block_by_hash(hash) - // TODO: clone for compatibility. should we return an Arc here? - .map(|block| block.as_ref().clone().header); + let block = + self.state.tree_state.block_by_hash(hash).map(|block| block.as_ref().clone().header); if block.is_some() { Ok(block) - } else if let Some(block_num) = self.provider.block_number(hash)? { - Ok(self.provider.sealed_header(block_num)?) } else { - Ok(None) + self.provider.sealed_header_by_hash(hash) } } @@ -1702,6 +1699,7 @@ where fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) { error!( + target: "engine::tree", ?block, "Failed to validate total difficulty for block {}: {e}", block.header.hash() @@ -1710,12 +1708,12 @@ where } if let Err(e) = self.consensus.validate_header(block) { - error!(?block, "Failed to validate header {}: {e}", block.header.hash()); + error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.header.hash()); return Err(e) } if let Err(e) = self.consensus.validate_block_pre_execution(block) { - error!(?block, "Failed to validate block {}: {e}", block.header.hash()); + error!(target: "engine::tree", ?block, "Failed to validate block {}: {e}", block.header.hash()); return Err(e) } @@ -1745,12 +1743,7 @@ where if self.is_sync_target_head(child_num_hash.hash) && matches!(res, InsertPayloadOk2::Inserted(BlockStatus2::Valid)) { - // we are using the sync target here because we're trying to make the sync - // target canonical - let sync_target_finalized = - self.state.forkchoice_state_tracker.sync_target_finalized(); - - self.make_canonical(child_num_hash.hash, sync_target_finalized)?; + self.make_canonical(child_num_hash.hash)?; } } Err(err) => { @@ -1950,6 +1943,7 @@ where let old_first = old.first().map(|first| first.block.num_hash()); trace!(target: "engine::tree", ?new_first, ?old_first, "Reorg detected, new and old first blocks"); + self.update_reorg_metrics(old.len()); self.reinsert_reorged_blocks(new.clone()); self.reinsert_reorged_blocks(old.clone()); } @@ -1958,6 +1952,9 @@ where self.canonical_in_memory_state.update_chain(chain_update); self.canonical_in_memory_state.set_canonical_head(tip.clone()); + // Update metrics based on new tip + self.metrics.tree.canonical_chain_height.set(tip.number as f64); + // sends an event to all active listeners about 
the new canonical chain self.canonical_in_memory_state.notify_canon_state(notification); @@ -1968,6 +1965,12 @@ where )); } + /// This updates metrics based on the given reorg length. + fn update_reorg_metrics(&self, old_chain_length: usize) { + self.metrics.tree.reorgs.increment(1); + self.metrics.tree.latest_reorg_depth.set(old_chain_length as f64); + } + /// This reinserts any blocks in the new chain that do not already exist in the tree fn reinsert_reorged_blocks(&mut self, new_chain: Vec) { for block in new_chain { @@ -2048,14 +2051,11 @@ where Ok(InsertPayloadOk2::Inserted(BlockStatus2::Valid)) => { if self.is_sync_target_head(block_num_hash.hash) { trace!(target: "engine::tree", "appended downloaded sync target block"); - let sync_target_finalized = - self.state.forkchoice_state_tracker.sync_target_finalized(); // we just inserted the current sync target block, we can try to make it // canonical return Ok(Some(TreeEvent::TreeAction(TreeAction::MakeCanonical { sync_target_head: block_num_hash.hash, - sync_target_finalized, }))) } trace!(target: "engine::tree", "appended downloaded block"); @@ -2146,7 +2146,7 @@ where )) })?; if let Err(e) = self.consensus.validate_header_against_parent(&block, &parent_block) { - warn!(?block, "Failed to validate header {} against parent: {e}", block.header.hash()); + warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.header.hash()); return Err(e.into()) } @@ -2193,14 +2193,14 @@ where let persistence_in_progress = self.persistence_state.in_progress(); if !persistence_in_progress { state_root_result = match self - .compute_state_root_in_parallel(block.parent_hash, &hashed_state) + .compute_state_root_parallel(block.parent_hash, &hashed_state) { Ok((state_root, trie_output)) => Some((state_root, trie_output)), - Err(ProviderError::ConsistentView(error)) => { - debug!(target: "engine::tree", %error, "Parallel state root computation failed consistency check, falling back"); + Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { + debug!(target: "engine", %error, "Parallel state root computation failed consistency check, falling back"); None } - Err(error) => return Err(error.into()), + Err(error) => return Err(InsertBlockErrorKindTwo::Other(Box::new(error))), }; } @@ -2246,13 +2246,9 @@ where self.state.tree_state.insert_executed(executed); self.metrics.engine.executed_blocks.set(self.state.tree_state.block_count() as f64); - // we are checking that this is a fork block compared to the current `SYNCING` forkchoice - // state. - let finalized = self.state.forkchoice_state_tracker.sync_target_finalized(); - // emit insert event let elapsed = start.elapsed(); - let engine_event = if self.is_fork(block_hash, finalized)? { + let engine_event = if self.is_fork(block_hash)? { BeaconConsensusEngineEvent::ForkBlockAdded(sealed_block, elapsed) } else { BeaconConsensusEngineEvent::CanonicalBlockAdded(sealed_block, elapsed) @@ -2271,11 +2267,11 @@ where /// Returns `Err(_)` if error was encountered during computation. /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation /// should be used instead. 
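The signature change below makes the fallback contract explicit: only a consistent-view failure from the parallel state root computation is recoverable, and everything else propagates to the caller. A minimal sketch of that pattern, with made-up error types standing in for `ParallelStateRootError` and `ProviderError`:

```rust
// Sketch of the documented fallback contract: a consistent-view failure
// triggers the sequential path, any other error bubbles up.
#[derive(Debug)]
enum ParallelRootError {
    ConsistentView(String),
    Other(String),
}

fn parallel_root() -> Result<[u8; 32], ParallelRootError> {
    // Pretend the database view moved underneath us mid-computation.
    Err(ParallelRootError::ConsistentView("tip advanced".into()))
}

fn sequential_root() -> [u8; 32] {
    [0xaa; 32]
}

fn state_root() -> Result<[u8; 32], ParallelRootError> {
    match parallel_root() {
        Ok(root) => Ok(root),
        Err(ParallelRootError::ConsistentView(reason)) => {
            eprintln!("parallel root failed consistency check ({reason}), falling back");
            Ok(sequential_root())
        }
        Err(other) => Err(other),
    }
}

fn main() {
    assert_eq!(state_root().unwrap(), [0xaa; 32]);
}
```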
- fn compute_state_root_in_parallel( + fn compute_state_root_parallel( &self, parent_hash: B256, hashed_state: &HashedPostState, - ) -> ProviderResult<(B256, TrieUpdates)> { + ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; let mut input = TrieInput::default(); @@ -2297,7 +2293,7 @@ where // Extend with block we are validating root for. input.append_ref(hashed_state); - Ok(ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates()?) + ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates() } /// Handles an error that occurred while inserting a block. @@ -2546,14 +2542,14 @@ pub enum AdvancePersistenceError { mod tests { use super::*; use crate::persistence::PersistenceAction; - use alloy_primitives::Bytes; + use alloy_primitives::{Bytes, Sealable}; use alloy_rlp::Decodable; + use assert_matches::assert_matches; use reth_beacon_consensus::{EthBeaconConsensus, ForkchoiceStatus}; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::test_utils::MockExecutorProvider; - use reth_primitives::alloy_primitives::Sealable; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::{block_to_payload_v1, payload::block_to_payload_v3}; use reth_trie::updates::TrieUpdates; @@ -2695,12 +2691,11 @@ mod tests { } fn with_blocks(mut self, blocks: Vec) -> Self { - let mut blocks_by_hash = HashMap::with_capacity(blocks.len()); + let mut blocks_by_hash = HashMap::default(); let mut blocks_by_number = BTreeMap::new(); - let mut state_by_hash = HashMap::with_capacity(blocks.len()); + let mut state_by_hash = HashMap::default(); let mut hash_by_number = BTreeMap::new(); - let mut parent_to_child: HashMap> = - HashMap::with_capacity(blocks.len()); + let mut parent_to_child: HashMap> = HashMap::default(); let mut parent_hash = B256::ZERO; for block in &blocks { @@ -3156,7 +3151,7 @@ mod tests { assert_eq!( tree_state.parent_to_child.get(&blocks[0].block.hash()), - Some(&HashSet::from([blocks[1].block.hash()])) + Some(&HashSet::from_iter([blocks[1].block.hash()])) ); assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); @@ -3165,7 +3160,7 @@ mod tests { assert_eq!( tree_state.parent_to_child.get(&blocks[1].block.hash()), - Some(&HashSet::from([blocks[2].block.hash()])) + Some(&HashSet::from_iter([blocks[2].block.hash()])) ); assert!(tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); @@ -3253,11 +3248,11 @@ mod tests { assert_eq!( tree_state.parent_to_child.get(&blocks[2].block.hash()), - Some(&HashSet::from([blocks[3].block.hash()])) + Some(&HashSet::from_iter([blocks[3].block.hash()])) ); assert_eq!( tree_state.parent_to_child.get(&blocks[3].block.hash()), - Some(&HashSet::from([blocks[4].block.hash()])) + Some(&HashSet::from_iter([blocks[4].block.hash()])) ); } @@ -3303,11 +3298,11 @@ mod tests { assert_eq!( tree_state.parent_to_child.get(&blocks[2].block.hash()), - Some(&HashSet::from([blocks[3].block.hash()])) + Some(&HashSet::from_iter([blocks[3].block.hash()])) ); assert_eq!( tree_state.parent_to_child.get(&blocks[3].block.hash()), - Some(&HashSet::from([blocks[4].block.hash()])) + Some(&HashSet::from_iter([blocks[4].block.hash()])) ); } @@ -3353,11 +3348,11 @@ mod tests { assert_eq!( tree_state.parent_to_child.get(&blocks[2].block.hash()), - 
Some(&HashSet::from([blocks[3].block.hash()])) + Some(&HashSet::from_iter([blocks[3].block.hash()])) ); assert_eq!( tree_state.parent_to_child.get(&blocks[3].block.hash()), - Some(&HashSet::from([blocks[4].block.hash()])) + Some(&HashSet::from_iter([blocks[4].block.hash()])) ); } @@ -3389,7 +3384,7 @@ mod tests { test_harness.tree.state.tree_state.insert_executed(fork_block_5.clone()); // normal (non-reorg) case - let result = test_harness.tree.on_new_head(blocks[4].block.hash(), None).unwrap(); + let result = test_harness.tree.on_new_head(blocks[4].block.hash()).unwrap(); assert!(matches!(result, Some(NewCanonicalChain::Commit { .. }))); if let Some(NewCanonicalChain::Commit { new }) = result { assert_eq!(new.len(), 2); @@ -3398,7 +3393,7 @@ mod tests { } // reorg case - let result = test_harness.tree.on_new_head(fork_block_5.block.hash(), None).unwrap(); + let result = test_harness.tree.on_new_head(fork_block_5.block.hash()).unwrap(); assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. }))); if let Some(NewCanonicalChain::Reorg { new, old }) = result { assert_eq!(new.len(), 3); @@ -3455,18 +3450,28 @@ mod tests { }); } - // reorg case - let result = - test_harness.tree.on_new_head(chain_b.first().unwrap().block.hash(), None).unwrap(); - assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. }))); - if let Some(NewCanonicalChain::Reorg { new, old }) = result { - assert_eq!(new.len(), 1); - assert_eq!(new[0].block.hash(), chain_b[0].block.hash()); + // for each block in chain_b, reorg to it and then back to canonical + let mut expected_new = Vec::new(); + for block in &chain_b { + // reorg to chain from block b + let result = test_harness.tree.on_new_head(block.block.hash()).unwrap(); + assert_matches!(result, Some(NewCanonicalChain::Reorg { .. 
})); + + expected_new.push(block); + if let Some(NewCanonicalChain::Reorg { new, old }) = result { + assert_eq!(new.len(), expected_new.len()); + for (index, block) in expected_new.iter().enumerate() { + assert_eq!(new[index].block.hash(), block.block.hash()); + } - assert_eq!(old.len(), chain_a.len()); - for (index, block) in chain_a.iter().enumerate() { - assert_eq!(old[index].block.hash(), block.block.hash()); + assert_eq!(old.len(), chain_a.len()); + for (index, block) in chain_a.iter().enumerate() { + assert_eq!(old[index].block.hash(), block.block.hash()); + } } + + // set last block of chain a as canonical head + test_harness.tree.on_new_head(chain_a.last().unwrap().hash()).unwrap(); } } @@ -3544,7 +3549,7 @@ mod tests { let event = test_harness.from_tree_rx.recv().await.unwrap(); match event { EngineApiEvent::Download(DownloadRequest::BlockSet(actual_block_set)) => { - let expected_block_set = HashSet::from([missing_block.hash()]); + let expected_block_set = HashSet::from_iter([missing_block.hash()]); assert_eq!(actual_block_set, expected_block_set); } _ => panic!("Unexpected event: {:#?}", event), @@ -3639,7 +3644,7 @@ mod tests { let event = test_harness.from_tree_rx.recv().await.unwrap(); match event { EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => { - assert_eq!(hash_set, HashSet::from([main_chain_last_hash])); + assert_eq!(hash_set, HashSet::from_iter([main_chain_last_hash])); } _ => panic!("Unexpected event: {:#?}", event), } @@ -3702,7 +3707,7 @@ mod tests { let event = test_harness.from_tree_rx.recv().await.unwrap(); match event { EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => { - assert_eq!(hash_set, HashSet::from([main_chain_backfill_target_hash])); + assert_eq!(hash_set, HashSet::from_iter([main_chain_backfill_target_hash])); } _ => panic!("Unexpected event: {:#?}", event), } @@ -3747,7 +3752,7 @@ mod tests { let event = test_harness.from_tree_rx.recv().await.unwrap(); match event { EngineApiEvent::Download(DownloadRequest::BlockSet(target_hash)) => { - assert_eq!(target_hash, HashSet::from([main_chain_last_hash])); + assert_eq!(target_hash, HashSet::from_iter([main_chain_last_hash])); } _ => panic!("Unexpected event: {:#?}", event), } @@ -3891,7 +3896,7 @@ mod tests { test_harness.check_canon_head(chain_b_tip_hash); // verify that chain A is now considered a fork - assert!(test_harness.tree.is_fork(chain_a.last().unwrap().hash(), None).unwrap()); + assert!(test_harness.tree.is_fork(chain_a.last().unwrap().hash()).unwrap()); } #[tokio::test] diff --git a/crates/engine/tree/src/tree/persistence_state.rs b/crates/engine/tree/src/tree/persistence_state.rs index fca51291bb5d..b00b7175ff53 100644 --- a/crates/engine/tree/src/tree/persistence_state.rs +++ b/crates/engine/tree/src/tree/persistence_state.rs @@ -1,5 +1,5 @@ use alloy_eips::BlockNumHash; -use reth_primitives::B256; +use alloy_primitives::B256; use std::{collections::VecDeque, time::Instant}; use tokio::sync::oneshot; use tracing::{debug, trace}; diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 76817694238e..20a0acb8d428 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -15,7 +15,6 @@ workspace = true reth-primitives.workspace = true reth-errors.workspace = true reth-fs-util.workspace = true -reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true reth-beacon-consensus.workspace = true @@ -29,6 +28,7 @@ reth-trie.workspace = true # alloy 
alloy-primitives.workspace = true +alloy-rpc-types-engine.workspace = true # async tokio = { workspace = true, default-features = false } diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs index a897be741ca8..1f3445199611 100644 --- a/crates/engine/util/src/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -1,13 +1,10 @@ //! Stores engine API messages to disk for later inspection and replay. +use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayload, ForkchoiceState}; use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconEngineMessage; use reth_engine_primitives::EngineTypes; use reth_fs_util as fs; -use reth_rpc_types::{ - engine::{CancunPayloadFields, ForkchoiceState}, - ExecutionPayload, -}; use serde::{Deserialize, Serialize}; use std::{ collections::BTreeMap, diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 8b7b2c036277..bd7b1b95642a 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -1,13 +1,16 @@ //! Stream wrapper that simulates reorgs. use alloy_primitives::U256; +use alloy_rpc_types_engine::{ + CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, +}; use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use itertools::Either; use reth_beacon_consensus::{BeaconEngineMessage, BeaconOnNewPayloadError, OnForkChoiceUpdated}; use reth_engine_primitives::EngineTypes; use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult}; use reth_ethereum_forks::EthereumHardforks; -use reth_evm::{system_calls::apply_beacon_root_contract_call, ConfigureEvm}; +use reth_evm::{system_calls::SystemCaller, ConfigureEvm}; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{proofs, Block, BlockBody, Header, Receipt, Receipts}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; @@ -17,10 +20,6 @@ use reth_revm::{ state_change::post_block_withdrawals_balance_increments, DatabaseCommit, }; -use reth_rpc_types::{ - engine::{CancunPayloadFields, ForkchoiceState, PayloadStatus}, - ExecutionPayload, -}; use reth_rpc_types_compat::engine::payload::block_to_payload; use reth_trie::HashedPostState; use revm_primitives::{ @@ -287,9 +286,9 @@ where let mut evm = evm_config.evm_with_env(&mut state, env); // apply eip-4788 pre block contract call - apply_beacon_root_contract_call( - evm_config, - chain_spec, + let mut system_caller = SystemCaller::new(evm_config, chain_spec); + + system_caller.apply_beacon_root_contract_call( reorg_target.timestamp, reorg_target.number, reorg_target.parent_beacon_block_root, @@ -303,7 +302,7 @@ where let mut versioned_hashes = Vec::new(); for tx in candidate_transactions { // ensure we still have capacity for this transaction - if cumulative_gas_used + tx.gas_limit() > reorg_target.gas_limit as u64 { + if cumulative_gas_used + tx.gas_limit() > reorg_target.gas_limit { continue } @@ -372,8 +371,8 @@ where ( Some(sum_blob_gas_used), Some(calc_excess_blob_gas( - reorg_target_parent.excess_blob_gas.unwrap_or_default() as u64, - reorg_target_parent.blob_gas_used.unwrap_or_default() as u64, + reorg_target_parent.excess_blob_gas.unwrap_or_default(), + reorg_target_parent.blob_gas_used.unwrap_or_default(), )), ) } else { @@ -402,7 +401,7 @@ where receipts_root: outcome.receipts_root_slow(reorg_target.header.number).unwrap(), logs_bloom: outcome.block_logs_bloom(reorg_target.header.number).unwrap(), requests_root: None, // 
TODO(prague) - gas_used: cumulative_gas_used.into(), + gas_used: cumulative_gas_used, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), state_root: state_provider.state_root(hashed_state)?, diff --git a/crates/engine/util/src/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs index f4ed8a543ab4..d2450711ecfc 100644 --- a/crates/engine/util/src/skip_new_payload.rs +++ b/crates/engine/util/src/skip_new_payload.rs @@ -1,9 +1,9 @@ //! Stream wrapper that skips specified number of new payload messages. +use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum}; use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconEngineMessage; use reth_engine_primitives::EngineTypes; -use reth_rpc_types::engine::{PayloadStatus, PayloadStatusEnum}; use std::{ pin::Pin, task::{ready, Context, Poll}, diff --git a/crates/errors/src/error.rs b/crates/errors/src/error.rs index e74d582759b7..869d5732746d 100644 --- a/crates/errors/src/error.rs +++ b/crates/errors/src/error.rs @@ -37,14 +37,14 @@ pub enum RethError { /// Any other error. #[error(transparent)] - Other(Box), + Other(Box), } impl RethError { /// Create a new `RethError` from a given error. pub fn other(error: E) -> Self where - E: std::error::Error + Send + Sync + 'static, + E: core::error::Error + Send + Sync + 'static, { Self::Other(Box::new(error)) } diff --git a/crates/ethereum-forks/src/display.rs b/crates/ethereum-forks/src/display.rs index 98372ea30dba..fc606854caa2 100644 --- a/crates/ethereum-forks/src/display.rs +++ b/crates/ethereum-forks/src/display.rs @@ -58,7 +58,6 @@ impl core::fmt::Display for DisplayFork { } } -// Todo: This will result in dep cycle so currently commented out // # Examples // // ``` @@ -90,6 +89,7 @@ impl core::fmt::Display for DisplayFork { // - Paris @58750000000000000000000 (network is known to be merged) // Post-merge hard forks (timestamp based): // - Shanghai @1681338455 +// - Cancun @1710338135" /// ``` #[derive(Debug)] pub struct DisplayHardforks { diff --git a/crates/ethereum-forks/src/hardforks/mod.rs b/crates/ethereum-forks/src/hardforks/mod.rs index 11851c738962..78db5464cb2b 100644 --- a/crates/ethereum-forks/src/hardforks/mod.rs +++ b/crates/ethereum-forks/src/hardforks/mod.rs @@ -2,7 +2,7 @@ mod ethereum; pub use ethereum::EthereumHardforks; -use crate::{ForkCondition, Hardfork}; +use crate::{ForkCondition, ForkFilter, ForkId, Hardfork, Head}; #[cfg(feature = "std")] use rustc_hash::FxHashMap; #[cfg(feature = "std")] @@ -31,6 +31,15 @@ pub trait Hardforks: Clone { fn is_fork_active_at_block(&self, fork: H, block_number: u64) -> bool { self.fork(fork).active_at_block(block_number) } + + /// Compute the [`ForkId`] for the given [`Head`] following eip-6122 spec + fn fork_id(&self, head: &Head) -> ForkId; + + /// Returns the [`ForkId`] for the last fork. + fn latest_fork_id(&self) -> ForkId; + + /// Creates a [`ForkFilter`] for the block described by [Head]. + fn fork_filter(&self, head: Head) -> ForkFilter; } /// Ordered list of a chain hardforks that implement [`Hardfork`]. 
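Moving `fork_id`, `latest_fork_id`, and `fork_filter` onto the `Hardforks` trait (and, in the hunk that follows, dropping the blanket `ChainHardforks` impl) lets each chain type decide how fork identifiers are derived. A toy rendering of that shape with illustrative types only; a real EIP-2124 implementation derives the id from a checksum of the genesis hash plus activation points, with timestamp forks appended per EIP-6122:

```rust
// Illustrative types only, not reth's: the fork id here is just the count
// of conditions active at the head.
#[derive(Debug, Clone, Copy)]
struct Head {
    number: u64,
    timestamp: u64,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ForkId(u32);

#[derive(Debug, Clone, Copy)]
enum ForkCondition {
    Block(u64),
    Timestamp(u64),
}

impl ForkCondition {
    fn active_at(&self, head: &Head) -> bool {
        match *self {
            Self::Block(n) => head.number >= n,
            Self::Timestamp(t) => head.timestamp >= t,
        }
    }
}

trait Hardforks {
    fn conditions(&self) -> &[ForkCondition];

    /// Fork id for the given head: here, how many forks are active.
    fn fork_id(&self, head: &Head) -> ForkId {
        ForkId(self.conditions().iter().filter(|c| c.active_at(head)).count() as u32)
    }

    /// Fork id once every scheduled fork has activated.
    fn latest_fork_id(&self) -> ForkId {
        ForkId(self.conditions().len() as u32)
    }
}

struct ChainHardforks(Vec<ForkCondition>);

impl Hardforks for ChainHardforks {
    fn conditions(&self) -> &[ForkCondition] {
        &self.0
    }
}

fn main() {
    let chain = ChainHardforks(vec![
        ForkCondition::Block(0),
        ForkCondition::Block(1_000),
        ForkCondition::Timestamp(1_700_000_000),
    ]);
    assert_eq!(chain.fork_id(&Head { number: 1_500, timestamp: 0 }), ForkId(2));
    assert_eq!(chain.latest_fork_id(), ForkId(3));
}
```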
@@ -129,16 +138,6 @@ impl ChainHardforks { } } -impl Hardforks for ChainHardforks { - fn fork(&self, fork: H) -> ForkCondition { - self.fork(fork) - } - - fn forks_iter(&self) -> impl Iterator { - self.forks_iter() - } -} - impl core::fmt::Debug for ChainHardforks { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("ChainHardforks") diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 896975bb41dd..e74f3498fa5f 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -56,40 +56,35 @@ impl EthBeaconConsensus // Determine the parent gas limit, considering elasticity multiplier on the London fork. let parent_gas_limit = if self.chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { - parent.gas_limit as u64 * + parent.gas_limit * self.chain_spec .base_fee_params_at_timestamp(header.timestamp) .elasticity_multiplier as u64 } else { - parent.gas_limit as u64 + parent.gas_limit }; // Check for an increase in gas limit beyond the allowed threshold. - if header.gas_limit as u64 > parent_gas_limit { - if header.gas_limit as u64 - parent_gas_limit >= - parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR - { + if header.gas_limit > parent_gas_limit { + if header.gas_limit - parent_gas_limit >= parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR { return Err(ConsensusError::GasLimitInvalidIncrease { parent_gas_limit, - child_gas_limit: header.gas_limit as u64, + child_gas_limit: header.gas_limit, }) } } // Check for a decrease in gas limit beyond the allowed threshold. - else if parent_gas_limit - header.gas_limit as u64 >= - parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR + else if parent_gas_limit - header.gas_limit >= parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR { return Err(ConsensusError::GasLimitInvalidDecrease { parent_gas_limit, - child_gas_limit: header.gas_limit as u64, + child_gas_limit: header.gas_limit, }) } // Check if the self gas limit is below the minimum required limit. 
- else if header.gas_limit < MINIMUM_GAS_LIMIT.into() { - return Err(ConsensusError::GasLimitInvalidMinimum { - child_gas_limit: header.gas_limit as u64, - }) + else if header.gas_limit < MINIMUM_GAS_LIMIT { + return Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: header.gas_limit }) } Ok(()) @@ -238,7 +233,7 @@ mod tests { use reth_primitives::proofs; fn header_with_gas_limit(gas_limit: u64) -> SealedHeader { - let header = Header { gas_limit: gas_limit.into(), ..Default::default() }; + let header = Header { gas_limit, ..Default::default() }; SealedHeader::new(header, B256::ZERO) } @@ -270,15 +265,15 @@ mod tests { fn test_invalid_gas_limit_increase_exceeding_limit() { let parent = header_with_gas_limit(GAS_LIMIT_BOUND_DIVISOR * 10); let child = header_with_gas_limit( - (parent.gas_limit + parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR as u128 + 1) as u64, + parent.gas_limit + parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR + 1, ); assert_eq!( EthBeaconConsensus::new(Arc::new(ChainSpec::default())) .validate_against_parent_gas_limit(&child, &parent), Err(ConsensusError::GasLimitInvalidIncrease { - parent_gas_limit: parent.gas_limit as u64, - child_gas_limit: child.gas_limit as u64, + parent_gas_limit: parent.gas_limit, + child_gas_limit: child.gas_limit, }) ); } @@ -286,7 +281,7 @@ mod tests { #[test] fn test_valid_gas_limit_decrease_within_limit() { let parent = header_with_gas_limit(GAS_LIMIT_BOUND_DIVISOR * 10); - let child = header_with_gas_limit(parent.gas_limit as u64 - 5); + let child = header_with_gas_limit(parent.gas_limit - 5); assert_eq!( EthBeaconConsensus::new(Arc::new(ChainSpec::default())) @@ -299,15 +294,15 @@ mod tests { fn test_invalid_gas_limit_decrease_exceeding_limit() { let parent = header_with_gas_limit(GAS_LIMIT_BOUND_DIVISOR * 10); let child = header_with_gas_limit( - (parent.gas_limit - parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR as u128 - 1) as u64, + parent.gas_limit - parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR - 1, ); assert_eq!( EthBeaconConsensus::new(Arc::new(ChainSpec::default())) .validate_against_parent_gas_limit(&child, &parent), Err(ConsensusError::GasLimitInvalidDecrease { - parent_gas_limit: parent.gas_limit as u64, - child_gas_limit: child.gas_limit as u64, + parent_gas_limit: parent.gas_limit, + child_gas_limit: child.gas_limit, }) ); } @@ -319,7 +314,7 @@ mod tests { let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); let sealed = Header { - base_fee_per_gas: Some(1337u128), + base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), ..Default::default() } diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index 1da648cb3ca6..e510a91ab964 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -16,9 +16,9 @@ pub fn validate_block_post_execution( // Check if gas used matches the value set in header. 
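The gas-limit hunks above all enforce one rule: a child header may move its gas limit by strictly less than `parent_gas_limit / 1024` in either direction, and never below the protocol minimum of 5000. A standalone version of the check exercised by these tests, with plain string errors in place of `ConsensusError`:

```rust
// Mainnet's usual constants; the branch structure mirrors the hunk above.
const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024;
const MINIMUM_GAS_LIMIT: u64 = 5_000;

fn validate_gas_limit(parent: u64, child: u64) -> Result<(), String> {
    if child > parent {
        // Increase must stay strictly below the bound.
        if child - parent >= parent / GAS_LIMIT_BOUND_DIVISOR {
            return Err(format!("invalid increase: parent {parent}, child {child}"));
        }
    } else if parent - child >= parent / GAS_LIMIT_BOUND_DIVISOR {
        // Decrease must stay strictly below the bound.
        return Err(format!("invalid decrease: parent {parent}, child {child}"));
    } else if child < MINIMUM_GAS_LIMIT {
        return Err(format!("below minimum: child {child}"));
    }
    Ok(())
}

fn main() {
    let parent = GAS_LIMIT_BOUND_DIVISOR * 10; // 10_240, so the bound is 10
    assert!(validate_gas_limit(parent, parent + 5).is_ok());
    assert!(validate_gas_limit(parent, parent + 10).is_err());
    assert!(validate_gas_limit(parent, parent - 5).is_ok());
    assert!(validate_gas_limit(parent, parent - 10).is_err());
}
```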
let cumulative_gas_used = receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0); - if block.gas_used as u64 != cumulative_gas_used { + if block.gas_used != cumulative_gas_used { return Err(ConsensusError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used as u64 }, + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, gas_spent_by_tx: gas_spent_by_transactions(receipts), }) } @@ -101,7 +101,7 @@ fn compare_receipts_root_and_logs_bloom( #[cfg(test)] mod tests { - use reth_primitives::hex; + use alloy_primitives::hex; use super::*; diff --git a/crates/ethereum/engine-primitives/Cargo.toml b/crates/ethereum/engine-primitives/Cargo.toml index cfeac285328e..e9bcd4256865 100644 --- a/crates/ethereum/engine-primitives/Cargo.toml +++ b/crates/ethereum/engine-primitives/Cargo.toml @@ -16,7 +16,6 @@ reth-chainspec.workspace = true reth-primitives.workspace = true reth-engine-primitives.workspace = true reth-payload-primitives.workspace = true -reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true alloy-rlp.workspace = true reth-chain-state.workspace = true @@ -24,6 +23,7 @@ reth-chain-state.workspace = true # alloy alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-rpc-types-engine.workspace = true # misc serde.workspace = true diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 92243d72976e..69d73a021747 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -11,6 +11,10 @@ mod payload; use std::sync::Arc; +pub use alloy_rpc_types_engine::{ + ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, + ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, +}; pub use payload::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_chainspec::ChainSpec; use reth_engine_primitives::{EngineTypes, EngineValidator}; @@ -18,13 +22,6 @@ use reth_payload_primitives::{ validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, }; -pub use reth_rpc_types::{ - engine::{ - ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, - PayloadAttributes as EthPayloadAttributes, - }, - ExecutionPayloadV1, -}; /// The types used in the default mainnet ethereum beacon consensus engine. 
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index e22012b6c3d3..dd0b7b405e9f 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -3,13 +3,13 @@ use alloy_eips::eip4844::BlobTransactionSidecar; use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Encodable; -use reth_chain_state::ExecutedBlock; -use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives::{SealedBlock, Withdrawals}; -use reth_rpc_types::engine::{ +use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, PayloadAttributes, PayloadId, }; +use reth_chain_state::ExecutedBlock; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; +use reth_primitives::{SealedBlock, Withdrawals}; use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, convert_block_to_payload_field_v2, diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 8a8e23a4cd11..61ce0a23b904 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -36,6 +36,7 @@ reth-primitives = { workspace = true, features = ["secp256k1"] } secp256k1.workspace = true serde_json.workspace = true alloy-genesis.workspace = true +alloy-consensus.workspace = true [features] default = ["std"] diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 3e574147f272..f1d7de115d57 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -14,10 +14,7 @@ use reth_evm::{ BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, - system_calls::{ - apply_beacon_root_contract_call, apply_blockhashes_contract_call, - apply_consolidation_requests_contract_call, apply_withdrawal_requests_contract_call, - }, + system_calls::SystemCaller, ConfigureEvm, }; use reth_execution_types::ExecutionOutcome; @@ -142,23 +139,9 @@ where DB: Database, DB::Error: Into + Display, { - // apply pre execution changes - apply_beacon_root_contract_call( - &self.evm_config, - &self.chain_spec, - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut evm, - )?; - apply_blockhashes_contract_call( - &self.evm_config, - &self.chain_spec, - block.timestamp, - block.number, - block.parent_hash, - &mut evm, - )?; + let mut system_caller = SystemCaller::new(&self.evm_config, &self.chain_spec); + + system_caller.apply_pre_execution_changes(block, &mut evm)?; // execute transactions let mut cumulative_gas_used = 0; @@ -166,7 +149,7 @@ where for (sender, transaction) in block.transactions_with_sender() { // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, // must be no greater than the block’s gasLimit. 
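The comment above states the invariant; the hunk that follows switches the accounting from `u128` casts to plain `u64`. A distilled sketch of that loop: each transaction must fit in the gas remaining in the block, and every receipt records the cumulative gas used so far (bare structs, and a fixed stand-in cost instead of real EVM execution):

```rust
// Tx and Receipt are minimal stand-ins; "execution" charges half the
// limit purely for illustration.
struct Tx {
    gas_limit: u64,
}

#[derive(Debug)]
struct Receipt {
    success: bool,
    cumulative_gas_used: u64,
}

fn execute_block(block_gas_limit: u64, txs: &[Tx]) -> Result<Vec<Receipt>, String> {
    let mut cumulative_gas_used = 0u64;
    let mut receipts = Vec::with_capacity(txs.len());
    for tx in txs {
        // Tg plus gas used so far must not exceed the block gas limit.
        let available = block_gas_limit - cumulative_gas_used;
        if tx.gas_limit > available {
            return Err(format!(
                "tx gas limit {} exceeds available block gas {available}",
                tx.gas_limit
            ));
        }
        let gas_used = tx.gas_limit / 2; // stand-in for actual EVM execution
        cumulative_gas_used += gas_used;
        receipts.push(Receipt { success: true, cumulative_gas_used });
    }
    Ok(receipts)
}

fn main() {
    let receipts =
        execute_block(30_000_000, &[Tx { gas_limit: 21_000 }, Tx { gas_limit: 100_000 }])
            .unwrap();
    assert_eq!(receipts[0].cumulative_gas_used, 10_500);
    assert_eq!(receipts[1].cumulative_gas_used, 60_500);
}
```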
- let block_available_gas = (block.header.gas_limit - cumulative_gas_used) as u64; + let block_available_gas = block.header.gas_limit - cumulative_gas_used; if transaction.gas_limit() > block_available_gas { return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { transaction_gas_limit: transaction.gas_limit(), @@ -189,7 +172,7 @@ where evm.db_mut().commit(state); // append gas used - cumulative_gas_used += result.gas_used() as u128; + cumulative_gas_used += result.gas_used(); // Push transaction changeset and calculate header bloom filter for receipt. receipts.push( @@ -199,7 +182,7 @@ where // Success flag was added in `EIP-658: Embedding transaction status code in // receipts`. success: result.is_success(), - cumulative_gas_used: cumulative_gas_used as u64, + cumulative_gas_used, // convert to reth log logs: result.into_logs(), ..Default::default() @@ -212,20 +195,14 @@ where let deposit_requests = crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, &receipts)?; - // Collect all EIP-7685 requests - let withdrawal_requests = - apply_withdrawal_requests_contract_call(&self.evm_config, &mut evm)?; - - // Collect all EIP-7251 requests - let consolidation_requests = - apply_consolidation_requests_contract_call(&self.evm_config, &mut evm)?; + let post_execution_requests = system_caller.apply_post_execution_changes(&mut evm)?; - [deposit_requests, withdrawal_requests, consolidation_requests].concat() + [deposit_requests, post_execution_requests].concat() } else { vec![] }; - Ok(EthExecuteOutput { receipts, requests, gas_used: cumulative_gas_used as u64 }) + Ok(EthExecuteOutput { receipts, requests, gas_used: cumulative_gas_used }) } } @@ -472,6 +449,7 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_consensus::TxLegacy; use alloy_eips::{ eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, @@ -481,7 +459,7 @@ mod tests { use reth_chainspec::{ChainSpecBuilder, ForkCondition}; use reth_primitives::{ constants::{EMPTY_ROOT_HASH, ETH_TO_WEI}, - public_key_to_address, Account, Block, BlockBody, Transaction, TxLegacy, + public_key_to_address, Account, Block, BlockBody, Transaction, }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, @@ -504,7 +482,7 @@ mod tests { BEACON_ROOTS_ADDRESS, beacon_root_contract_account, Some(BEACON_ROOTS_CODE.clone()), - HashMap::new(), + HashMap::default(), ); db @@ -523,7 +501,7 @@ mod tests { WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, withdrawal_requests_contract_account, Some(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone()), - HashMap::new(), + HashMap::default(), ); db @@ -684,7 +662,7 @@ mod tests { let mut db = create_state_provider_with_beacon_root_contract(); // insert an empty SYSTEM_ADDRESS - db.insert_account(SYSTEM_ADDRESS, Account::default(), None, HashMap::new()); + db.insert_account(SYSTEM_ADDRESS, Account::default(), None, HashMap::default()); let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) @@ -807,7 +785,7 @@ mod tests { timestamp: 1, number: 1, parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - base_fee_per_gas: Some(u64::MAX.into()), + base_fee_per_gas: Some(u64::MAX), excess_blob_gas: Some(0), ..Header::default() }; @@ -882,7 +860,7 @@ mod tests { HISTORY_STORAGE_ADDRESS, blockhashes_contract_account, Some(HISTORY_STORAGE_CODE.clone()), - HashMap::new(), + HashMap::default(), ); db @@ -1229,7 +1207,7 @@ mod tests { sender_address, Account { nonce: 1, balance: 
U256::from(ETH_TO_WEI), bytecode_hash: None }, None, - HashMap::new(), + HashMap::default(), ); // https://github.com/lightclient/7002asm/blob/e0d68e04d15f25057af7b6d180423d94b6b3bdb3/test/Contract.t.sol.in#L49-L64 @@ -1249,7 +1227,7 @@ mod tests { Transaction::Legacy(TxLegacy { chain_id: Some(chain_spec.chain.id()), nonce: 1, - gas_price: header.base_fee_per_gas.unwrap(), + gas_price: header.base_fee_per_gas.unwrap().into(), gas_limit: 134_807, to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), // `MIN_WITHDRAWAL_REQUEST_FEE` @@ -1312,7 +1290,7 @@ mod tests { sender_address, Account { nonce: 1, balance: U256::from(ETH_TO_WEI), bytecode_hash: None }, None, - HashMap::new(), + HashMap::default(), ); // Define the validator public key and withdrawal amount as fixed bytes @@ -1336,7 +1314,7 @@ mod tests { Transaction::Legacy(TxLegacy { chain_id: Some(chain_spec.chain.id()), nonce: 1, - gas_price: header.base_fee_per_gas.unwrap(), + gas_price: header.base_fee_per_gas.unwrap().into(), gas_limit: 2_500_000, // higher than block gas limit to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), value: U256::from(1), diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 1c8b1b6a2619..a71f26f703e4 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -145,7 +145,7 @@ impl ConfigureEvmEnv for EthEvmConfig { None } }) - .map(|excess_blob_gas| BlobExcessGasAndPrice::new(excess_blob_gas as u64)); + .map(BlobExcessGasAndPrice::new); let mut basefee = parent.next_block_base_fee( self.chain_spec.base_fee_params_at_timestamp(attributes.timestamp), @@ -165,7 +165,7 @@ impl ConfigureEvmEnv for EthEvmConfig { gas_limit *= U256::from(elasticity_multiplier); // set the base fee to the initial base fee from the EIP-1559 spec - basefee = Some(EIP1559_INITIAL_BASE_FEE.into()) + basefee = Some(EIP1559_INITIAL_BASE_FEE) } let block_env = BlockEnv { diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index ecce558f1a6f..5d2fcde2f7e5 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -17,14 +17,7 @@ use reth_basic_payload_builder::{ use reth_chain_state::ExecutedBlock; use reth_chainspec::ChainSpec; use reth_errors::RethError; -use reth_evm::{ - system_calls::{ - post_block_consolidation_requests_contract_call, - post_block_withdrawal_requests_contract_call, pre_block_beacon_root_contract_call, - pre_block_blockhashes_contract_call, - }, - ConfigureEvm, NextBlockEnvAttributes, -}; +use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes}; use reth_evm_ethereum::{eip6110::parse_deposits_from_receipts, EthEvmConfig}; use reth_execution_types::ExecutionOutcome; use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; @@ -33,8 +26,7 @@ use reth_primitives::{ constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, proofs::{self, calculate_requests_root}, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, EthereumHardforks, Header, IntoRecoveredTransaction, Receipt, - EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, EthereumHardforks, Header, Receipt, EMPTY_OMMER_ROOT_HASH, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; @@ -43,9 +35,9 @@ use reth_transaction_pool::{ }; use reth_trie::HashedPostState; use revm::{ - db::states::bundle_state::BundleRetention, + db::{states::bundle_state::BundleRetention, State}, primitives::{EVMError, EnvWithHandlerCfg, 
InvalidTransaction, ResultAndState}, - DatabaseCommit, State, + DatabaseCommit, }; use revm_primitives::calc_excess_blob_gas; use std::sync::Arc; @@ -168,35 +160,34 @@ where let block_number = initialized_block_env.number.to::<u64>(); + let mut system_caller = SystemCaller::new(&evm_config, chain_spec.clone()); + // apply eip-4788 pre block contract call - pre_block_beacon_root_contract_call( - &mut db, - &evm_config, - &chain_spec, - &initialized_cfg, - &initialized_block_env, - attributes.parent_beacon_block_root, - ) - .map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to apply beacon root contract call for empty payload" - ); - PayloadBuilderError::Internal(err.into()) - })?; + system_caller + .pre_block_beacon_root_contract_call( + &mut db, + &initialized_cfg, + &initialized_block_env, + attributes.parent_beacon_block_root, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for payload" + ); + PayloadBuilderError::Internal(err.into()) + })?; // apply eip-2935 blockhashes update - pre_block_blockhashes_contract_call( + system_caller.pre_block_blockhashes_contract_call( &mut db, - &evm_config, - &chain_spec, &initialized_cfg, &initialized_block_env, parent_block.hash(), ) .map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to update blockhashes for empty payload"); + warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to update blockhashes for payload"); PayloadBuilderError::Internal(err.into()) })?; @@ -321,20 +312,20 @@ where { let deposit_requests = parse_deposits_from_receipts(&chain_spec, receipts.iter().flatten()) .map_err(|err| PayloadBuilderError::Internal(RethError::Execution(err.into())))?; - let withdrawal_requests = post_block_withdrawal_requests_contract_call( - &evm_config, - &mut db, - &initialized_cfg, - &initialized_block_env, - ) - .map_err(|err| PayloadBuilderError::Internal(err.into()))?; - let consolidation_requests = post_block_consolidation_requests_contract_call( - &evm_config, - &mut db, - &initialized_cfg, - &initialized_block_env, - ) - .map_err(|err| PayloadBuilderError::Internal(err.into()))?; + let withdrawal_requests = system_caller + .post_block_withdrawal_requests_contract_call( + &mut db, + &initialized_cfg, + &initialized_block_env, + ) + .map_err(|err| PayloadBuilderError::Internal(err.into()))?; + let consolidation_requests = system_caller + .post_block_consolidation_requests_contract_call( + &mut db, + &initialized_cfg, + &initialized_block_env, + ) + .map_err(|err| PayloadBuilderError::Internal(err.into()))?; + let requests = [deposit_requests, withdrawal_requests, consolidation_requests].concat(); let requests_root = calculate_requests_root(&requests); @@ -368,7 +359,7 @@ where warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, - "failed to calculate state root for empty payload" + "failed to calculate state root for payload" ); })?
}; @@ -391,7 +382,7 @@ where excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); - Some(calc_excess_blob_gas(parent_excess_blob_gas as u64, parent_blob_gas_used as u64)) + Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) } else { // for the first post-fork block, both parent.blob_gas_used and // parent.excess_blob_gas are evaluated as 0 @@ -413,11 +404,11 @@ where timestamp: attributes.timestamp, mix_hash: attributes.prev_randao, nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(base_fee.into()), + base_fee_per_gas: Some(base_fee), number: parent_block.number + 1, - gas_limit: block_gas_limit.into(), + gas_limit: block_gas_limit, difficulty: U256::ZERO, - gas_used: cumulative_gas_used.into(), + gas_used: cumulative_gas_used, extra_data, parent_beacon_block_root: attributes.parent_beacon_block_root, blob_gas_used: blob_gas_used.map(Into::into), diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index f520c75eeaa9..20070d421e97 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -23,7 +23,11 @@ reth-storage-errors.workspace = true reth-execution-types.workspace = true revm.workspace = true + +# alloy +alloy-primitives.workspace = true alloy-eips.workspace = true + auto_impl.workspace = true futures-util.workspace = true metrics = { workspace = true, optional = true } diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index 1113cc83d2ea..4dbbfb7abdce 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -125,12 +125,11 @@ impl From for BlockValidationError { } } -#[cfg(feature = "std")] -impl std::error::Error for BlockValidationError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for BlockValidationError { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { - Self::EVM { error, .. } => std::error::Error::source(error), - Self::StateRoot(source) => std::error::Error::source(source), + Self::EVM { error, ..
} => core::error::Error::source(error), + Self::StateRoot(source) => core::error::Error::source(source), _ => Option::None, } } @@ -153,7 +152,7 @@ impl BlockExecutionError { #[cfg(feature = "std")] pub fn other<E>(error: E) -> Self where - E: std::error::Error + Send + Sync + 'static, + E: core::error::Error + Send + Sync + 'static, { Self::Internal(InternalBlockExecutionError::other(error)) } @@ -185,13 +184,12 @@ impl From for BlockExecutionError { } } -#[cfg(feature = "std")] -impl std::error::Error for BlockExecutionError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for BlockExecutionError { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { - Self::Validation(source) => std::error::Error::source(source), - Self::Consensus(source) => std::error::Error::source(source), - Self::Internal(source) => std::error::Error::source(source), + Self::Validation(source) => core::error::Error::source(source), + Self::Consensus(source) => core::error::Error::source(source), + Self::Internal(source) => core::error::Error::source(source), } } } @@ -216,8 +214,7 @@ pub enum InternalBlockExecutionError { #[from] LatestBlock(ProviderError), /// Arbitrary Block Executor Errors - #[cfg(feature = "std")] - Other(Box<dyn std::error::Error + Send + Sync>), + Other(Box<dyn core::error::Error + Send + Sync>), } impl InternalBlockExecutionError { @@ -225,7 +222,7 @@ #[cfg(feature = "std")] pub fn other<E>(error: E) -> Self where - E: std::error::Error + Send + Sync + 'static, + E: core::error::Error + Send + Sync + 'static, { Self::Other(Box::new(error)) } @@ -237,12 +234,11 @@ impl InternalBlockExecutionError { } } -#[cfg(feature = "std")] -impl std::error::Error for InternalBlockExecutionError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for InternalBlockExecutionError { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { - Self::Pruning(source) => std::error::Error::source(source), - Self::LatestBlock(source) => std::error::Error::source(source), + Self::Pruning(source) => core::error::Error::source(source), + Self::LatestBlock(source) => core::error::Error::source(source), _ => Option::None, } } diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index 306cd6750a9a..9e4b16d8d0c2 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -15,12 +15,11 @@ pub enum StateRootError { StorageRootError(StorageRootError), } -#[cfg(feature = "std")] -impl std::error::Error for StateRootError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for StateRootError { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { - Self::Database(source) => std::error::Error::source(source), - Self::StorageRootError(source) => std::error::Error::source(source), + Self::Database(source) => core::error::Error::source(source), + Self::StorageRootError(source) => core::error::Error::source(source), } } } @@ -49,11 +48,10 @@ impl From<StorageRootError> for DatabaseError { } } -#[cfg(feature = "std")] -impl std::error::Error for StorageRootError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for StorageRootError { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { - Self::Database(source) => std::error::Error::source(source), + Self::Database(source) => core::error::Error::source(source), } } } @@ -76,12 +74,11 @@ impl
From<StateProofError> for ProviderError { } } -#[cfg(feature = "std")] -impl std::error::Error for StateProofError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for StateProofError { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { - Self::Database(source) => std::error::Error::source(source), - Self::Rlp(source) => std::error::Error::source(source), + Self::Database(source) => core::error::Error::source(source), + Self::Rlp(source) => core::error::Error::source(source), } } } @@ -101,6 +98,9 @@ pub enum TrieWitnessError { /// Missing target node. #[display("target node missing from proof {_0:?}")] MissingTargetNode(Nibbles), + /// Unexpected empty root. + #[display("unexpected empty root: {_0:?}")] + UnexpectedEmptyRoot(Nibbles), } impl From<TrieWitnessError> for ProviderError { @@ -109,12 +109,11 @@ impl From<TrieWitnessError> for ProviderError { } } -#[cfg(feature = "std")] -impl std::error::Error for TrieWitnessError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for TrieWitnessError { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { - Self::Proof(source) => std::error::Error::source(source), - Self::Rlp(source) => std::error::Error::source(source), + Self::Proof(source) => core::error::Error::source(source), + Self::Rlp(source) => core::error::Error::source(source), _ => Option::None, } } diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index cf50b47a03b2..9bd6537326b1 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -12,21 +12,28 @@ workspace = true [dependencies] reth-primitives.workspace = true -reth-chainspec = { workspace = true, optional = true } reth-execution-errors.workspace = true reth-trie.workspace = true revm.workspace = true +# alloy +alloy-primitives.workspace = true +alloy-eips.workspace = true + serde = { workspace = true, optional = true } +serde_with = { workspace = true, optional = true } [dev-dependencies] -reth-primitives = { workspace = true, features = ["test-utils"] } -alloy-primitives.workspace = true alloy-eips.workspace = true +arbitrary.workspace = true +bincode.workspace = true +rand.workspace = true +reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } [features] default = ["std"] -optimism = ["dep:reth-chainspec"] +optimism = ["reth-primitives/optimism", "revm/optimism"] serde = ["dep:serde", "reth-trie/serde", "revm/serde"] +serde-bincode-compat = ["reth-primitives/serde-bincode-compat", "reth-trie/serde-bincode-compat", "serde_with"] std = [] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 4d0dc694a05f..5db5495de59f 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -2,11 +2,13 @@ use crate::ExecutionOutcome; use alloc::{borrow::Cow, collections::BTreeMap}; +use alloy_eips::{eip1898::ForkBlock, BlockNumHash}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ - Address, BlockHash, BlockNumHash, BlockNumber, ForkBlock, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, TxHash, + Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionSigned, + TransactionSignedEcRecovered, }; use
reth_trie::updates::TrieUpdates; use revm::db::BundleState; @@ -505,10 +507,132 @@ pub enum ChainSplit { }, } +/// Bincode-compatible [`Chain`] serde implementation. +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub(super) mod serde_bincode_compat { + use std::collections::BTreeMap; + + use alloc::borrow::Cow; + use alloy_primitives::BlockNumber; + use reth_primitives::serde_bincode_compat::SealedBlockWithSenders; + use reth_trie::serde_bincode_compat::updates::TrieUpdates; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{DeserializeAs, SerializeAs}; + + use crate::ExecutionOutcome; + + /// Bincode-compatible [`super::Chain`] serde implementation. + /// + /// Intended to be used with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_execution_types::{serde_bincode_compat, Chain}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + /// #[serde_as(as = "serde_bincode_compat::Chain")] + /// chain: Chain, + /// } + /// ``` + #[derive(Debug, Serialize, Deserialize)] + pub struct Chain<'a> { + blocks: BTreeMap<BlockNumber, SealedBlockWithSenders<'a>>, + execution_outcome: Cow<'a, ExecutionOutcome>, + trie_updates: Option<TrieUpdates<'a>>, + } + + impl<'a> From<&'a super::Chain> for Chain<'a> { + fn from(value: &'a super::Chain) -> Self { + Self { + blocks: value + .blocks + .iter() + .map(|(block_number, block)| (*block_number, block.into())) + .collect(), + execution_outcome: Cow::Borrowed(&value.execution_outcome), + trie_updates: value.trie_updates.as_ref().map(Into::into), + } + } + } + + impl<'a> From<Chain<'a>> for super::Chain { + fn from(value: Chain<'a>) -> Self { + Self { + blocks: value + .blocks + .into_iter() + .map(|(block_number, block)| (block_number, block.into())) + .collect(), + execution_outcome: value.execution_outcome.into_owned(), + trie_updates: value.trie_updates.map(Into::into), + } + } + } + + impl<'a> SerializeAs<super::Chain> for Chain<'a> { + fn serialize_as<S>(source: &super::Chain, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + Chain::from(source).serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::Chain> for Chain<'de> { + fn deserialize_as<D>(deserializer: D) -> Result<super::Chain, D::Error> + where + D: Deserializer<'de>, + { + Chain::deserialize(deserializer).map(Into::into) + } + } + + #[cfg(test)] + mod tests { + use arbitrary::Arbitrary; + use rand::Rng; + use reth_primitives::SealedBlockWithSenders; + use serde::{Deserialize, Serialize}; + use serde_with::serde_as; + + use super::super::{serde_bincode_compat, Chain}; + + #[test] + fn test_chain_bincode_roundtrip() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + #[serde_as(as = "serde_bincode_compat::Chain")] + chain: Chain, + } + + let mut bytes = [0u8; 1024]; + rand::thread_rng().fill(bytes.as_mut_slice()); + let data = Data { + chain: Chain::new( + vec![SealedBlockWithSenders::arbitrary(&mut arbitrary::Unstructured::new( + &bytes, + )) + .unwrap()], + Default::default(), + None, + ), + }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + } +} + #[cfg(test)] mod tests { use super::*; - use reth_primitives::{Receipt, Receipts, TxType, B256}; + use alloy_primitives::B256; + use reth_primitives::{Receipt, Receipts, TxType}; use revm::primitives::{AccountInfo, HashMap}; #[test] diff --git a/crates/evm/execution-types/src/execute.rs
b/crates/evm/execution-types/src/execute.rs index 2933fd59815f..0cf5d7050793 100644 --- a/crates/evm/execution-types/src/execute.rs +++ b/crates/evm/execution-types/src/execute.rs @@ -1,4 +1,5 @@ -use reth_primitives::{Request, U256}; +use alloy_primitives::U256; +use reth_primitives::Request; use revm::db::BundleState; /// A helper type for ethereum block inputs that consists of a block and the total difficulty. diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 8996ac9959e3..08ddf9e4167b 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,8 +1,6 @@ use crate::BlockExecutionOutput; -use reth_primitives::{ - logs_bloom, Account, Address, BlockNumber, Bloom, Bytecode, Log, Receipt, Receipts, Requests, - StorageEntry, B256, U256, -}; +use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; +use reth_primitives::{logs_bloom, Account, Bytecode, Receipt, Receipts, Requests, StorageEntry}; use reth_trie::HashedPostState; use revm::{ db::{states::BundleState, BundleAccount}, @@ -198,24 +196,21 @@ impl ExecutionOutcome { #[cfg(feature = "optimism")] panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); #[cfg(not(feature = "optimism"))] - self.receipts.root_slow(self.block_number_to_index(_block_number)?) + self.receipts.root_slow( + self.block_number_to_index(_block_number)?, + reth_primitives::proofs::calculate_receipt_root_no_memo, + ) } /// Returns the receipt root for all recorded receipts. /// Note: this function calculates Bloom filters for every receipt and creates merkle trees /// of receipts. This is an expensive operation. - #[cfg(feature = "optimism")] - pub fn optimism_receipts_root_slow( + pub fn generic_receipts_root_slow( &self, block_number: BlockNumber, - chain_spec: impl reth_chainspec::Hardforks, - timestamp: u64, + f: impl FnOnce(&[&Receipt]) -> B256, ) -> Option<B256> { - self.receipts.optimism_root_slow( - self.block_number_to_index(block_number)?, - chain_spec, - timestamp, - ) + self.receipts.root_slow(self.block_number_to_index(block_number)?, f) } /// Returns reference to receipts.
@@ -371,8 +366,8 @@ impl From<(BlockExecutionOutput<Receipt>, BlockNumber)> for ExecutionOutcome { mod tests { use super::*; use alloy_eips::{eip6110::DepositRequest, eip7002::WithdrawalRequest}; - use alloy_primitives::{FixedBytes, LogData}; - use reth_primitives::{Address, Receipts, Request, Requests, TxType, B256}; + use alloy_primitives::{Address, FixedBytes, LogData, B256}; + use reth_primitives::{Receipts, Request, Requests, TxType}; use std::collections::HashMap; #[test] @@ -441,16 +436,16 @@ mod tests { ); // Create a BundleStateInit object and insert initial data - let mut state_init: BundleStateInit = HashMap::new(); + let mut state_init: BundleStateInit = HashMap::default(); state_init .insert(Address::new([2; 20]), (None, Some(Account::default()), HashMap::default())); // Create a HashMap for account reverts and insert initial data - let mut revert_inner: HashMap<Address, AccountRevertInit> = HashMap::new(); + let mut revert_inner: HashMap<Address, AccountRevertInit> = HashMap::default(); revert_inner.insert(Address::new([2; 20]), (None, vec![])); // Create a RevertsInit object and insert the revert_inner data - let mut revert_init: RevertsInit = HashMap::new(); + let mut revert_init: RevertsInit = HashMap::default(); revert_init.insert(123, revert_inner); // Assert that creating a new ExecutionOutcome using the new_init method matches diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index 8965f04d7c96..f98ebfe73a5f 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -18,3 +18,15 @@ pub use execute::*; mod execution_outcome; pub use execution_outcome::*; + +/// Bincode-compatible serde implementations for commonly used types for (EVM) block execution. +/// +/// `bincode` crate doesn't work with optionally serializable serde fields, but some of the +/// execution types require optional serialization for RPC compatibility. This module makes it so that +/// all fields are serialized.
+/// +/// Read more: <https://github.com/bincode-org/bincode/issues/326> +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub mod serde_bincode_compat { + pub use super::chain::serde_bincode_compat::*; +} diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index fde316da9f5b..a3fca50ec7ee 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -3,9 +3,10 @@ use core::fmt::Display; use crate::execute::{BatchExecutor, BlockExecutorProvider, Executor}; +use alloy_primitives::BlockNumber; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 60a29d4de9fc..ffc08469dc8d 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -5,8 +5,9 @@ pub use reth_execution_errors::{BlockExecutionError, BlockValidationError}; pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; pub use reth_storage_errors::provider::ProviderError; +use alloy_primitives::BlockNumber; use core::fmt::Display; -use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use revm::State; use revm_primitives::db::Database; @@ -151,8 +152,8 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { #[cfg(test)] mod tests { use super::*; + use alloy_primitives::U256; use revm::db::{CacheDB, EmptyDBTyped}; - use revm_primitives::U256; use std::marker::PhantomData; #[derive(Clone, Default)] diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 569491a1b02c..6fcb3d9f8c3d 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -14,12 +14,11 @@ extern crate alloc; use core::ops::Deref; use crate::builder::RethEvmBuilder; -use reth_primitives::{Address, TransactionSigned, TransactionSignedEcRecovered, B256, U256}; +use alloy_primitives::{Address, Bytes, B256, U256}; +use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; -use revm_primitives::{ - BlockEnv, Bytes, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv, -}; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv}; pub mod builder; pub mod either; diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index 88874828685f..d6ffe0d79c69 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -30,7 +30,7 @@ impl ExecutorMetrics { where F: FnOnce(BlockExecutionInput<'_, BlockWithSenders>) -> R, { - let gas_used = input.block.gas_used as u64; + let gas_used = input.block.gas_used; // Execute the block and record the elapsed time. let execute_start = Instant::now(); diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index ae6171a506a9..392bfd0bd722 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -1,9 +1,10 @@ //! A no operation block executor implementation.
+use alloy_primitives::BlockNumber; use core::fmt::Display; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index fc3d4ff94f79..8db828ec4a00 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -1,7 +1,8 @@ //! Provider trait for populating the EVM environment. use crate::ConfigureEvmEnv; -use reth_primitives::{BlockHashOrNumber, Header}; +use alloy_eips::eip1898::BlockHashOrNumber; +use reth_primitives::Header; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; diff --git a/crates/evm/src/system_calls/eip2935.rs b/crates/evm/src/system_calls/eip2935.rs index 4a1ec14a467e..edb71c8b4e06 100644 --- a/crates/evm/src/system_calls/eip2935.rs +++ b/crates/evm/src/system_calls/eip2935.rs @@ -4,52 +4,12 @@ use alloc::{boxed::Box, string::ToString}; use alloy_eips::eip2935::HISTORY_STORAGE_ADDRESS; use crate::ConfigureEvm; -use core::fmt::Display; +use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_primitives::Header; -use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; -
-/// Apply the [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) pre block contract call. -/// -/// This constructs a new [`Evm`] with the given database and environment ([`CfgEnvWithHandlerCfg`] -/// and [`BlockEnv`]) to execute the pre block contract call. -/// -/// This uses [`apply_blockhashes_contract_call`] to ultimately apply the -/// blockhash contract state change. -pub fn pre_block_blockhashes_contract_call<EvmConfig, DB>( - db: &mut DB, - evm_config: &EvmConfig, - chain_spec: impl EthereumHardforks, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, - parent_block_hash: B256, -) -> Result<(), BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: Display, - EvmConfig: ConfigureEvm<Header = Header>
, -{ - // Apply the pre-block EIP-2935 contract call - let mut evm_pre_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - apply_blockhashes_contract_call( - evm_config, - chain_spec, - initialized_block_env.timestamp.to(), - initialized_block_env.number.to(), - parent_block_hash, - &mut evm_pre_block, - ) -} +use revm::{interpreter::Host, Database, Evm}; +use revm_primitives::ResultAndState; /// Applies the pre-block call to the [EIP-2935] blockhashes contract, using the given block, /// chain specification, and EVM. @@ -64,7 +24,7 @@ where /// /// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 #[inline] -pub fn transact_blockhashes_contract_call<EvmConfig, EXT, DB>( +pub(crate) fn transact_blockhashes_contract_call<EvmConfig, EXT, DB>( evm_config: &EvmConfig, chain_spec: impl EthereumHardforks, block_timestamp: u64, @@ -73,7 +33,7 @@ pub fn transact_blockhashes_contract_call( evm: &mut Evm<'_, EXT, DB>, ) -> Result<Option<ResultAndState>, BlockExecutionError> where - DB: Database + DatabaseCommit, + DB: Database, DB::Error: core::fmt::Display, EvmConfig: ConfigureEvm<Header = Header>
, { @@ -114,38 +74,3 @@ where Ok(Some(res)) } -
-/// Applies the pre-block call to the [EIP-2935] blockhashes contract, using the given block, -/// chain specification, and EVM and commits the relevant state changes. -/// -/// If Prague is not activated, or the block is the genesis block, then this is a no-op, and no -/// state changes are made. -/// -/// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 -#[inline] -pub fn apply_blockhashes_contract_call<EvmConfig, EXT, DB>( - evm_config: &EvmConfig, - chain_spec: impl EthereumHardforks, - block_timestamp: u64, - block_number: u64, - parent_block_hash: B256, - evm: &mut Evm<'_, EXT, DB>, -) -> Result<(), BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm<Header = Header>
, -{ - if let Some(res) = transact_blockhashes_contract_call( - evm_config, - chain_spec, - block_timestamp, - block_number, - parent_block_hash, - evm, - )? { - evm.context.evm.db.commit(res.state); - } - - Ok(()) -} diff --git a/crates/evm/src/system_calls/eip4788.rs b/crates/evm/src/system_calls/eip4788.rs index d1148f6cdc74..bc535809680f 100644 --- a/crates/evm/src/system_calls/eip4788.rs +++ b/crates/evm/src/system_calls/eip4788.rs @@ -3,52 +3,12 @@ use alloc::{boxed::Box, string::ToString}; use crate::ConfigureEvm; use alloy_eips::eip4788::BEACON_ROOTS_ADDRESS; +use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_primitives::Header; -use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; -
-/// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. -/// -/// This constructs a new [`Evm`] with the given DB, and environment -/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the pre block contract call. -/// -/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state -/// change. -pub fn pre_block_beacon_root_contract_call<EvmConfig, DB>( - db: &mut DB, - evm_config: &EvmConfig, - chain_spec: impl EthereumHardforks, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, - parent_beacon_block_root: Option<B256>, -) -> Result<(), BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm<Header = Header>
, -{ - // apply pre-block EIP-4788 contract call - let mut evm_pre_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the pre block call needs the block itself - apply_beacon_root_contract_call( - evm_config, - chain_spec, - initialized_block_env.timestamp.to(), - initialized_block_env.number.to(), - parent_beacon_block_root, - &mut evm_pre_block, - ) -} +use revm::{interpreter::Host, Database, Evm}; +use revm_primitives::ResultAndState; /// Applies the pre-block call to the [EIP-4788] beacon block root contract, using the given block, /// chain spec, EVM. @@ -60,7 +20,7 @@ where /// /// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 #[inline] -pub fn transact_beacon_root_contract_call<EvmConfig, EXT, DB, Spec>( +pub(crate) fn transact_beacon_root_contract_call<EvmConfig, EXT, DB, Spec>( evm_config: &EvmConfig, chain_spec: &Spec, block_timestamp: u64, @@ -69,7 +29,7 @@ pub fn transact_beacon_root_contract_call( evm: &mut Evm<'_, EXT, DB>, ) -> Result<Option<ResultAndState>, BlockExecutionError> where - DB: Database + DatabaseCommit, + DB: Database, DB::Error: core::fmt::Display, EvmConfig: ConfigureEvm<Header = Header>
, Spec: EthereumHardforks, @@ -124,38 +84,3 @@ where Ok(Some(res)) } -
-/// Applies the pre-block call to the [EIP-4788] beacon block root contract, using the given block, -/// chain spec, EVM. -/// -/// If Cancun is not activated or the block is the genesis block, then this is a no-op, and no -/// state changes are made. -/// -/// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 -#[inline] -pub fn apply_beacon_root_contract_call<EvmConfig, EXT, DB>( - evm_config: &EvmConfig, - chain_spec: impl EthereumHardforks, - block_timestamp: u64, - block_number: u64, - parent_beacon_block_root: Option<B256>, - evm: &mut Evm<'_, EXT, DB>, -) -> Result<(), BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm<Header = Header>
, -{ - if let Some(res) = transact_beacon_root_contract_call( - evm_config, - &chain_spec, - block_timestamp, - block_number, - parent_beacon_block_root, - evm, - )? { - evm.context.evm.db.commit(res.state); - } - - Ok(()) -} diff --git a/crates/evm/src/system_calls/eip7002.rs b/crates/evm/src/system_calls/eip7002.rs index 9b770cfceb55..9af944e42a53 100644 --- a/crates/evm/src/system_calls/eip7002.rs +++ b/crates/evm/src/system_calls/eip7002.rs @@ -1,48 +1,12 @@ //! [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) system call implementation. -use alloc::{boxed::Box, format, string::ToString, vec::Vec}; -use core::fmt::Display; - use crate::ConfigureEvm; +use alloc::{boxed::Box, format, string::ToString, vec::Vec}; use alloy_eips::eip7002::{WithdrawalRequest, WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS}; +use alloy_primitives::{bytes::Buf, Address, Bytes, FixedBytes}; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{Buf, Header, Request}; -use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; -use revm_primitives::{ - Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, FixedBytes, - ResultAndState, -}; -
-/// Apply the [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) post block contract call. -/// -/// This constructs a new [Evm] with the given DB, and environment -/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. -/// -/// This uses [`apply_withdrawal_requests_contract_call`] to ultimately calculate the -/// [requests](Request). -pub fn post_block_withdrawal_requests_contract_call<EvmConfig, DB>( - evm_config: &EvmConfig, - db: &mut DB, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, -) -> Result<Vec<Request>, BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: Display, - EvmConfig: ConfigureEvm<Header = Header>
, -{ - // apply post-block EIP-7002 contract call - let mut evm_post_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the post block call needs the block itself - apply_withdrawal_requests_contract_call(evm_config, &mut evm_post_block) -} +use reth_primitives::{Header, Request}; +use revm::{interpreter::Host, Database, Evm}; +use revm_primitives::{ExecutionResult, ResultAndState}; /// Applies the post-block call to the EIP-7002 withdrawal requests contract. /// @@ -50,12 +14,12 @@ where /// /// Note: this does not commit the state changes to the database, it only transacts the call. #[inline] -pub fn transact_withdrawal_requests_contract_call<EvmConfig, EXT, DB>( +pub(crate) fn transact_withdrawal_requests_contract_call<EvmConfig, EXT, DB>( evm_config: &EvmConfig, evm: &mut Evm<'_, EXT, DB>, ) -> Result<ResultAndState, BlockExecutionError> where - DB: Database + DatabaseCommit, + DB: Database, DB::Error: core::fmt::Display, EvmConfig: ConfigureEvm<Header = Header>
, { @@ -98,26 +62,9 @@ where Ok(res) } -/// Applies the post-block call to the EIP-7002 withdrawal requests contract. -/// -/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is -/// returned. Otherwise, the withdrawal requests are returned. +/// Parses the withdrawal requests from the execution output. #[inline] -pub fn apply_withdrawal_requests_contract_call<EvmConfig, EXT, DB>( - evm_config: &EvmConfig, - evm: &mut Evm<'_, EXT, DB>, -) -> Result<Vec<Request>, BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm<Header = Header>
, -{ - let ResultAndState { result, state } = - transact_withdrawal_requests_contract_call(evm_config, evm)?; - - // commit the state - evm.context.evm.db.commit(state); - +pub(crate) fn post_commit(result: ExecutionResult) -> Result<Vec<Request>, BlockExecutionError> { let mut data = match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), ExecutionResult::Revert { output, .. } => { diff --git a/crates/evm/src/system_calls/eip7251.rs b/crates/evm/src/system_calls/eip7251.rs index 8a7049671028..f09d4be81afc 100644 --- a/crates/evm/src/system_calls/eip7251.rs +++ b/crates/evm/src/system_calls/eip7251.rs @@ -1,48 +1,12 @@ //! [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) system call implementation. -use alloc::{boxed::Box, format, string::ToString, vec::Vec}; -use core::fmt::Display; - use crate::ConfigureEvm; +use alloc::{boxed::Box, format, string::ToString, vec::Vec}; use alloy_eips::eip7251::{ConsolidationRequest, CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS}; +use alloy_primitives::{bytes::Buf, Address, Bytes, FixedBytes}; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{Buf, Header, Request}; -use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; -use revm_primitives::{ - Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, FixedBytes, - ResultAndState, -}; -
-/// Apply the [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) post block contract call. -/// -/// This constructs a new [Evm] with the given DB, and environment -/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. -/// -/// This uses [`apply_consolidation_requests_contract_call`] to ultimately calculate the -/// [requests](Request). -pub fn post_block_consolidation_requests_contract_call<EvmConfig, DB>( - evm_config: &EvmConfig, - db: &mut DB, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, -) -> Result<Vec<Request>, BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: Display, - EvmConfig: ConfigureEvm<Header = Header>
, -{ - // apply post-block EIP-7251 contract call - let mut evm_post_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the post block call needs the block itself - apply_consolidation_requests_contract_call(evm_config, &mut evm_post_block) -} +use reth_primitives::{Header, Request}; +use revm::{interpreter::Host, Database, Evm}; +use revm_primitives::{ExecutionResult, ResultAndState}; /// Applies the post-block call to the EIP-7251 consolidation requests contract. /// @@ -51,12 +15,12 @@ where /// /// Note: this does not commit the state changes to the database, it only transacts the call. #[inline] -pub fn transact_consolidation_requests_contract_call<EvmConfig, EXT, DB>( +pub(crate) fn transact_consolidation_requests_contract_call<EvmConfig, EXT, DB>( evm_config: &EvmConfig, evm: &mut Evm<'_, EXT, DB>, ) -> Result<ResultAndState, BlockExecutionError> where - DB: Database + DatabaseCommit, + DB: Database, DB::Error: core::fmt::Display, EvmConfig: ConfigureEvm<Header = Header>
, { @@ -100,26 +64,9 @@ where Ok(res) } -/// Applies the post-block call to the EIP-7251 consolidation requests contract. -/// -/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is -/// returned. Otherwise, the consolidation requests are returned. +/// Parses the consolidation requests from the execution output. #[inline] -pub fn apply_consolidation_requests_contract_call<EvmConfig, EXT, DB>( - evm_config: &EvmConfig, - evm: &mut Evm<'_, EXT, DB>, -) -> Result<Vec<Request>, BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm<Header = Header>
, -{ - let ResultAndState { result, state } = - transact_consolidation_requests_contract_call(evm_config, evm)?; - - // commit the state - evm.context.evm.db.commit(state); - +pub(crate) fn post_commit(result: ExecutionResult) -> Result<Vec<Request>, BlockExecutionError> { let mut data = match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), ExecutionResult::Revert { output, .. } => { diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 50d5c4c857ff..ce5fec42184c 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -1,13 +1,324 @@ //! System contract call functions. -mod eip2935; -pub use eip2935::*; +use crate::ConfigureEvm; +use alloc::vec::Vec; +use core::fmt::Display; +use reth_chainspec::EthereumHardforks; +use reth_execution_errors::BlockExecutionError; +use reth_primitives::{Block, Header, Request}; +use revm::{Database, DatabaseCommit, Evm}; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; +mod eip2935; mod eip4788; -pub use eip4788::*; - mod eip7002; -pub use eip7002::*; - mod eip7251; -pub use eip7251::*; + +/// A hook that is called after each state change. +pub trait OnStateHook { + /// Invoked with the result and state after each system call. + fn on_state(&mut self, state: &ResultAndState); +} + +impl OnStateHook for F +where + F: FnMut(&ResultAndState), +{ + fn on_state(&mut self, state: &ResultAndState) { + self(state) + } +} + +/// An [`OnStateHook`] that does nothing. +#[derive(Default, Debug, Clone)] +#[non_exhaustive] +pub struct NoopHook; + +impl OnStateHook for NoopHook { + fn on_state(&mut self, _state: &ResultAndState) {} +} + +/// An ephemeral helper type for executing system calls. +/// +/// This can be used to chain system transaction calls. +#[allow(missing_debug_implementations)] +pub struct SystemCaller<'a, EvmConfig, Chainspec, Hook = NoopHook> { + evm_config: &'a EvmConfig, + chain_spec: Chainspec, + /// Optional hook to be called after each state change. + hook: Option<Hook>, +} + +impl<'a, EvmConfig, Chainspec> SystemCaller<'a, EvmConfig, Chainspec> { + /// Create a new system caller with the given EVM config, database, and chain spec, and creates + /// the EVM with the given initialized config and block environment. + pub const fn new(evm_config: &'a EvmConfig, chain_spec: Chainspec) -> Self { + Self { evm_config, chain_spec, hook: None } + } +} + +impl<'a, EvmConfig, Chainspec, Hook> SystemCaller<'a, EvmConfig, Chainspec, Hook> { + /// Installs a custom hook to be called after each state change. + pub fn with_state_hook<H: OnStateHook>( + self, + hook: H, + ) -> SystemCaller<'a, EvmConfig, Chainspec, H> { + let Self { evm_config, chain_spec, .. } = self; + SystemCaller { evm_config, chain_spec, hook: Some(hook) } + } + /// Convenience method to consume the type and drop borrowed fields + pub fn finish(self) {} +} + +fn initialize_evm<'a, DB>( + db: &'a mut DB, + initialized_cfg: &'a CfgEnvWithHandlerCfg, + initialized_block_env: &'a BlockEnv, +) -> Evm<'a, (), &'a mut DB> +where + DB: Database, +{ + Evm::builder() + .with_db(db) + .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + Default::default(), + )) + .build() +} + +impl<'a, EvmConfig, Chainspec, Hook> SystemCaller<'a, EvmConfig, Chainspec, Hook> +where + EvmConfig: ConfigureEvm<Header = Header>
, + Chainspec: EthereumHardforks, + Hook: OnStateHook, +{ + /// Apply pre execution changes. + pub fn apply_pre_execution_changes<DB, Ext>( + &mut self, + block: &Block, + evm: &mut Evm<'_, Ext, DB>, + ) -> Result<(), BlockExecutionError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + self.apply_blockhashes_contract_call( + block.timestamp, + block.number, + block.parent_hash, + evm, + )?; + self.apply_beacon_root_contract_call( + block.timestamp, + block.number, + block.parent_beacon_block_root, + evm, + )?; + + Ok(()) + } + + /// Apply post execution changes. + pub fn apply_post_execution_changes<DB, Ext>( + &mut self, + evm: &mut Evm<'_, Ext, DB>, + ) -> Result<Vec<Request>, BlockExecutionError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + // Collect all EIP-7685 requests + let withdrawal_requests = self.apply_withdrawal_requests_contract_call(evm)?; + + // Collect all EIP-7251 requests + let consolidation_requests = self.apply_consolidation_requests_contract_call(evm)?; + Ok([withdrawal_requests, consolidation_requests].concat()) + } + + /// Applies the pre-block call to the EIP-2935 blockhashes contract. + pub fn pre_block_blockhashes_contract_call<DB>( + &mut self, + db: &mut DB, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, + parent_block_hash: B256, + ) -> Result<(), BlockExecutionError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + let mut evm = initialize_evm(db, initialized_cfg, initialized_block_env); + self.apply_blockhashes_contract_call( + initialized_block_env.timestamp.to(), + initialized_block_env.number.to(), + parent_block_hash, + &mut evm, + )?; + + Ok(()) + } + + /// Applies the pre-block call to the EIP-2935 blockhashes contract. + pub fn apply_blockhashes_contract_call<DB, Ext>( + &mut self, + timestamp: u64, + block_number: u64, + parent_block_hash: B256, + evm: &mut Evm<'_, Ext, DB>, + ) -> Result<(), BlockExecutionError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + let result_and_state = eip2935::transact_blockhashes_contract_call( + &self.evm_config.clone(), + &self.chain_spec, + timestamp, + block_number, + parent_block_hash, + evm, + )?; + + if let Some(res) = result_and_state { + if let Some(ref mut hook) = self.hook { + hook.on_state(&res); + } + evm.context.evm.db.commit(res.state); + } + + Ok(()) + } + + /// Applies the pre-block call to the EIP-4788 beacon root contract. + pub fn pre_block_beacon_root_contract_call<DB>( + &mut self, + db: &mut DB, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, + parent_beacon_block_root: Option<B256>, + ) -> Result<(), BlockExecutionError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + let mut evm = initialize_evm(db, initialized_cfg, initialized_block_env); + + self.apply_beacon_root_contract_call( + initialized_block_env.timestamp.to(), + initialized_block_env.number.to(), + parent_beacon_block_root, + &mut evm, + )?; + + Ok(()) + } + + /// Applies the pre-block call to the EIP-4788 beacon root contract. 
+ pub fn apply_beacon_root_contract_call<DB, Ext>( + &mut self, + timestamp: u64, + block_number: u64, + parent_block_hash: Option<B256>, + evm: &mut Evm<'_, Ext, DB>, + ) -> Result<(), BlockExecutionError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + let result_and_state = eip4788::transact_beacon_root_contract_call( + &self.evm_config.clone(), + &self.chain_spec, + timestamp, + block_number, + parent_block_hash, + evm, + )?; + + if let Some(res) = result_and_state { + if let Some(ref mut hook) = self.hook { + hook.on_state(&res); + } + evm.context.evm.db.commit(res.state); + } + + Ok(()) + } + + /// Applies the post-block call to the EIP-7002 withdrawal request contract. + pub fn post_block_withdrawal_requests_contract_call<DB>( + &mut self, + db: &mut DB, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, + ) -> Result<Vec<Request>, BlockExecutionError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + let mut evm = initialize_evm(db, initialized_cfg, initialized_block_env); + + let result = self.apply_withdrawal_requests_contract_call(&mut evm)?; + + Ok(result) + } + + /// Applies the post-block call to the EIP-7002 withdrawal request contract. + pub fn apply_withdrawal_requests_contract_call<DB, Ext>( + &mut self, + evm: &mut Evm<'_, Ext, DB>, + ) -> Result<Vec<Request>, BlockExecutionError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + let result_and_state = + eip7002::transact_withdrawal_requests_contract_call(&self.evm_config.clone(), evm)?; + + if let Some(ref mut hook) = self.hook { + hook.on_state(&result_and_state); + } + evm.context.evm.db.commit(result_and_state.state); + + eip7002::post_commit(result_and_state.result) + } + + /// Applies the post-block call to the EIP-7251 consolidation requests contract. + pub fn post_block_consolidation_requests_contract_call<DB>( + &mut self, + db: &mut DB, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, + ) -> Result<Vec<Request>, BlockExecutionError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + let mut evm = initialize_evm(db, initialized_cfg, initialized_block_env); + + let res = self.apply_consolidation_requests_contract_call(&mut evm)?; + + Ok(res) + } + + /// Applies the post-block call to the EIP-7251 consolidation requests contract. 
+ pub fn apply_consolidation_requests_contract_call<DB, Ext>( + &mut self, + evm: &mut Evm<'_, Ext, DB>, + ) -> Result<Vec<Request>, BlockExecutionError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + let result_and_state = + eip7251::transact_consolidation_requests_contract_call(&self.evm_config.clone(), evm)?; + + if let Some(ref mut hook) = self.hook { + hook.on_state(&result_and_state); + } + evm.context.evm.db.commit(result_and_state.state); + + eip7251::post_commit(result_and_state.result) + } +} diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index f30262c281e0..cf45930aece9 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -3,10 +3,11 @@ use crate::execute::{ BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }; +use alloy_primitives::BlockNumber; use parking_lot::Mutex; use reth_execution_errors::BlockExecutionError; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 9b6146d220c8..6a3815e4045b 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -13,10 +13,11 @@ workspace = true [dependencies] ## reth +reth-chain-state.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-evm.workspace = true -reth-exex-types = { workspace = true, features = ["serde"] } +reth-exex-types = { workspace = true, features = ["serde", "serde-bincode-compat"] } reth-fs-util.workspace = true reth-metrics.workspace = true reth-node-api.workspace = true @@ -33,6 +34,7 @@ reth-tracing.workspace = true # alloy alloy-primitives.workspace = true +alloy-eips.workspace = true ## async futures.workspace = true @@ -41,8 +43,10 @@ tokio.workspace = true ## misc eyre.workspace = true +itertools.workspace = true metrics.workspace = true -serde_json.workspace = true +parking_lot.workspace = true +rmp-serde = "1.3" tracing.workspace = true [dev-dependencies] @@ -56,7 +60,9 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true alloy-genesis.workspace = true +alloy-consensus.workspace = true +rand.workspace = true secp256k1.workspace = true tempfile.workspace = true diff --git a/crates/exex/exex/src/backfill/factory.rs b/crates/exex/exex/src/backfill/factory.rs index c210eda477aa..026df982275f 100644 --- a/crates/exex/exex/src/backfill/factory.rs +++ b/crates/exex/exex/src/backfill/factory.rs @@ -1,5 +1,5 @@ use crate::BackfillJob; -use std::ops::RangeInclusive; +use std::{ops::RangeInclusive, time::Duration}; use alloy_primitives::BlockNumber; use reth_node_api::FullNodeComponents; @@ -25,7 +25,15 @@ impl<E> BackfillJobFactory<E> { executor, provider, prune_modes: PruneModes::none(), - thresholds: ExecutionStageThresholds::default(), + thresholds: ExecutionStageThresholds { + // Default duration for a database transaction to be considered long-lived is + // 60 seconds, so we limit the backfill job to half of it to be sure we finish + // before the warning is logged. + // + // See `reth_db::implementation::mdbx::tx::LONG_TRANSACTION_DURATION`. 
+ max_duration: Some(Duration::from_secs(30)), + ..Default::default() + }, stream_parallelism: DEFAULT_PARALLELISM, } } diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 6e7307f1f86b..7642edbac30e 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -127,7 +127,7 @@ where if self.thresholds.is_end_of_batch( block_number - *self.range.start(), bundle_size_hint, - cumulative_gas as u64, + cumulative_gas, batch_start.elapsed(), ) { break @@ -140,7 +140,7 @@ where range = ?*self.range.start()..=last_block_number, block_fetch = ?fetch_block_duration, execution = ?execution_duration, - throughput = format_gas_throughput(cumulative_gas as u64, execution_duration), + throughput = format_gas_throughput(cumulative_gas, execution_duration), "Finished executing block range" ); self.range = last_block_number + 1..=*self.range.end(); diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 8fd6071e8fe5..1c793975c755 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use alloy_consensus::TxEip2930; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{b256, Address, TxKind, U256}; use eyre::OptionExt; @@ -10,7 +11,7 @@ use reth_evm::execute::{ use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::{ constants::ETH_TO_WEI, Block, BlockBody, BlockWithSenders, Header, Receipt, Requests, - SealedBlockWithSenders, Transaction, TxEip2930, + SealedBlockWithSenders, Transaction, }; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, @@ -100,8 +101,8 @@ fn blocks( ), difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), number: 1, - gas_limit: MIN_TRANSACTION_GAS.into(), - gas_used: MIN_TRANSACTION_GAS.into(), + gas_limit: MIN_TRANSACTION_GAS, + gas_used: MIN_TRANSACTION_GAS, ..Default::default() }, body: BlockBody { @@ -110,7 +111,7 @@ fn blocks( Transaction::Eip2930(TxEip2930 { chain_id: chain_spec.chain.id(), nonce: 0, - gas_limit: MIN_TRANSACTION_GAS as u128, + gas_limit: MIN_TRANSACTION_GAS, gas_price: 1_500_000_000, to: TxKind::Call(Address::ZERO), value: U256::from(0.1 * ETH_TO_WEI as f64), @@ -132,8 +133,8 @@ fn blocks( ), difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), number: 2, - gas_limit: MIN_TRANSACTION_GAS.into(), - gas_used: MIN_TRANSACTION_GAS.into(), + gas_limit: MIN_TRANSACTION_GAS, + gas_used: MIN_TRANSACTION_GAS, ..Default::default() }, body: BlockBody { @@ -142,7 +143,7 @@ fn blocks( Transaction::Eip2930(TxEip2930 { chain_id: chain_spec.chain.id(), nonce: 1, - gas_limit: MIN_TRANSACTION_GAS as u128, + gas_limit: MIN_TRANSACTION_GAS, gas_price: 1_500_000_000, to: TxKind::Call(Address::ZERO), value: U256::from(0.1 * ETH_TO_WEI as f64), diff --git a/crates/exex/exex/src/event.rs b/crates/exex/exex/src/event.rs index c26c1c5344b2..1215ea2a502a 100644 --- a/crates/exex/exex/src/event.rs +++ b/crates/exex/exex/src/event.rs @@ -1,4 +1,4 @@ -use alloy_primitives::BlockNumber; +use reth_primitives::BlockNumHash; /// Events emitted by an `ExEx`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -9,5 +9,5 @@ pub enum ExExEvent { /// meaning that Reth is allowed to prune them. /// /// On reorgs, it's possible for the height to go down. 
- FinishedHeight(BlockNumber), + FinishedHeight(BlockNumHash), } diff --git a/crates/exex/exex/src/lib.rs b/crates/exex/exex/src/lib.rs index d54bc3d9f3cf..edc9e40d449d 100644 --- a/crates/exex/exex/src/lib.rs +++ b/crates/exex/exex/src/lib.rs @@ -46,7 +46,11 @@ pub use event::*; mod manager; pub use manager::*; +mod notifications; +pub use notifications::*; + mod wal; +pub use wal::*; // Re-export exex types #[doc(inline)] diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index f4c1687be6ae..e8e24c09db02 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1,20 +1,20 @@ use crate::{ - BackfillJobFactory, ExExEvent, ExExNotification, FinishedExExHeight, StreamBackfillJob, + wal::Wal, ExExEvent, ExExNotification, ExExNotifications, FinishedExExHeight, WalHandle, }; -use alloy_primitives::{BlockNumber, U256}; -use eyre::OptionExt; -use futures::{Stream, StreamExt}; +use futures::StreamExt; +use itertools::Itertools; use metrics::Gauge; +use reth_chain_state::ForkChoiceStream; use reth_chainspec::Head; -use reth_evm::execute::BlockExecutorProvider; -use reth_exex_types::ExExHead; use reth_metrics::{metrics::Counter, Metrics}; -use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory}; +use reth_primitives::{BlockNumHash, SealedHeader}; +use reth_provider::HeaderProvider; use reth_tracing::tracing::debug; use std::{ collections::VecDeque, fmt::Debug, future::{poll_fn, Future}, + ops::Not, pin::Pin, sync::{ atomic::{AtomicUsize, Ordering}, @@ -23,11 +23,17 @@ use std::{ task::{ready, Context, Poll}, }; use tokio::sync::{ - mpsc::{self, error::SendError, Receiver, UnboundedReceiver, UnboundedSender}, + mpsc::{self, error::SendError, UnboundedReceiver, UnboundedSender}, watch, }; use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; +/// Default max size of the internal state notifications buffer. +/// +/// 1024 notifications in the buffer is 3.5 hours of mainnet blocks, +/// or 17 minutes of 1-second blocks. +pub const DEFAULT_EXEX_MANAGER_CAPACITY: usize = 1024; + /// Metrics for an `ExEx`. #[derive(Metrics)] #[metrics(scope = "exex")] @@ -55,27 +61,28 @@ pub struct ExExHandle { receiver: UnboundedReceiver, /// The ID of the next notification to send to this `ExEx`. next_notification_id: usize, - /// The finished block number of the `ExEx`. + /// The finished block of the `ExEx`. /// /// If this is `None`, the `ExEx` has not emitted a `FinishedHeight` event. - finished_height: Option, + finished_height: Option, } impl ExExHandle { /// Create a new handle for the given `ExEx`. /// /// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a - /// [`Receiver`] for [`ExExNotification`]s that should be given to the `ExEx`. + /// [`mpsc::Receiver`] for [`ExExNotification`]s that should be given to the `ExEx`. pub fn new( id: String, node_head: Head, provider: P, executor: E, + wal_handle: WalHandle, ) -> (Self, UnboundedSender, ExExNotifications) { let (notification_tx, notification_rx) = mpsc::channel(1); let (event_tx, event_rx) = mpsc::unbounded_channel(); let notifications = - ExExNotifications { node_head, provider, executor, notifications: notification_rx }; + ExExNotifications::new(node_head, provider, executor, notification_rx, wal_handle); ( Self { @@ -106,11 +113,12 @@ impl ExExHandle { // Skip the chain commit notification if the finished height of the ExEx is // higher than or equal to the tip of the new notification. 
// I.e., the ExEx has already processed the notification. - if finished_height >= new.tip().number { + if finished_height.number >= new.tip().number { debug!( + target: "exex::manager", exex_id = %self.id, %notification_id, - %finished_height, + ?finished_height, new_tip = %new.tip().number, "Skipping notification" ); @@ -128,6 +136,7 @@ impl ExExHandle { } debug!( + target: "exex::manager", exex_id = %self.id, %notification_id, "Reserving slot for notification" @@ -138,6 +147,7 @@ impl ExExHandle { } debug!( + target: "exex::manager", exex_id = %self.id, %notification_id, "Sending notification" @@ -153,331 +163,9 @@ impl ExExHandle { } } -/// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. -pub struct ExExNotifications { - node_head: Head, - provider: P, - executor: E, - notifications: Receiver, -} - -impl Debug for ExExNotifications { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ExExNotifications") - .field("provider", &self.provider) - .field("executor", &self.executor) - .field("notifications", &self.notifications) - .finish() - } -} - -impl ExExNotifications { - /// Creates a new instance of [`ExExNotifications`]. - pub const fn new( - node_head: Head, - provider: P, - executor: E, - notifications: Receiver, - ) -> Self { - Self { node_head, provider, executor, notifications } - } - - /// Receives the next value for this receiver. - /// - /// This method returns `None` if the channel has been closed and there are - /// no remaining messages in the channel's buffer. This indicates that no - /// further values can ever be received from this `Receiver`. The channel is - /// closed when all senders have been dropped, or when [`Receiver::close`] is called. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `recv` is used as the event in a - /// [`tokio::select!`] statement and some other branch - /// completes first, it is guaranteed that no messages were received on this - /// channel. - /// - /// For full documentation, see [`Receiver::recv`]. - #[deprecated(note = "use `ExExNotifications::next` and its `Stream` implementation instead")] - pub async fn recv(&mut self) -> Option { - self.notifications.recv().await - } - - /// Polls to receive the next message on this channel. - /// - /// This method returns: - /// - /// * `Poll::Pending` if no messages are available but the channel is not closed, or if a - /// spurious failure happens. - /// * `Poll::Ready(Some(message))` if a message is available. - /// * `Poll::Ready(None)` if the channel has been closed and all messages sent before it was - /// closed have been received. - /// - /// When the method returns `Poll::Pending`, the `Waker` in the provided - /// `Context` is scheduled to receive a wakeup when a message is sent on any - /// receiver, or when the channel is closed. Note that on multiple calls to - /// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context` - /// passed to the most recent call is scheduled to receive a wakeup. - /// - /// If this method returns `Poll::Pending` due to a spurious failure, then - /// the `Waker` will be notified when the situation causing the spurious - /// failure has been resolved. Note that receiving such a wakeup does not - /// guarantee that the next call will succeed — it could fail with another - /// spurious failure. - /// - /// For full documentation, see [`Receiver::poll_recv`]. 
- #[deprecated( - note = "use `ExExNotifications::poll_next` and its `Stream` implementation instead" - )] - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.notifications.poll_recv(cx) - } -} - -impl ExExNotifications -where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, -{ - /// Subscribe to notifications with the given head. - /// - /// Notifications will be sent starting from the head, not inclusive. For example, if - /// `head.number == 10`, then the first notification will be with `block.number == 11`. - pub fn with_head(self, head: ExExHead) -> ExExNotificationsWithHead { - ExExNotificationsWithHead::new( - self.node_head, - self.provider, - self.executor, - self.notifications, - head, - ) - } -} - -impl Stream for ExExNotifications { - type Item = ExExNotification; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().notifications.poll_recv(cx) - } -} - -/// A stream of [`ExExNotification`]s. The stream will only emit notifications for blocks that are -/// committed or reverted after the given head. -#[derive(Debug)] -pub struct ExExNotificationsWithHead { - node_head: Head, - provider: P, - executor: E, - notifications: Receiver, - exex_head: ExExHead, - pending_sync: bool, - /// The backfill job to run before consuming any notifications. - backfill_job: Option>, - /// Whether we're currently waiting for the node head to catch up to the same height as the - /// ExEx head. - node_head_catchup_in_progress: bool, -} - -impl ExExNotificationsWithHead -where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, -{ - /// Creates a new [`ExExNotificationsWithHead`]. - pub const fn new( - node_head: Head, - provider: P, - executor: E, - notifications: Receiver, - exex_head: ExExHead, - ) -> Self { - Self { - node_head, - provider, - executor, - notifications, - exex_head, - pending_sync: true, - backfill_job: None, - node_head_catchup_in_progress: false, - } - } - - /// Compares the node head against the ExEx head, and synchronizes them in case of a mismatch. - /// - /// Possible situations are: - /// - ExEx is behind the node head (`node_head.number < exex_head.number`). - /// - ExEx is on the canonical chain (`exex_head.hash` is found in the node database). - /// Backfill from the node database. - /// - ExEx is not on the canonical chain (`exex_head.hash` is not found in the node database). - /// Unwind the ExEx to the first block matching between the ExEx and the node, and then - /// bacfkill from the node database. - /// - ExEx is at the same block number (`node_head.number == exex_head.number`). - /// - ExEx is on the canonical chain (`exex_head.hash` is found in the node database). Nothing - /// to do. - /// - ExEx is not on the canonical chain (`exex_head.hash` is not found in the node database). - /// Unwind the ExEx to the first block matching between the ExEx and the node, and then - /// backfill from the node database. - /// - ExEx is ahead of the node head (`node_head.number > exex_head.number`). Wait until the - /// node head catches up to the ExEx head, and then repeat the synchronization process. 
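// A minimal sketch of the decision table described above, assuming plain `u64`
// heights and eliding the canonical-chain checks (names are illustrative only):
//
//     match exex_head_number.cmp(&node_head_number) {
//         // ExEx lags the node: backfill the gap from the node database.
//         std::cmp::Ordering::Less => backfill(exex_head_number + 1..=node_head_number),
//         // Heads match: nothing to do.
//         std::cmp::Ordering::Equal => {}
//         // ExEx is ahead: wait for the node to catch up, then re-run the comparison.
//         std::cmp::Ordering::Greater => wait_for_catchup(),
//     }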
- fn synchronize(&mut self) -> eyre::Result<()> { - debug!(target: "exex::manager", "Synchronizing ExEx head"); - - let backfill_job_factory = - BackfillJobFactory::new(self.executor.clone(), self.provider.clone()); - match self.exex_head.block.number.cmp(&self.node_head.number) { - std::cmp::Ordering::Less => { - // ExEx is behind the node head - - if let Some(exex_header) = self.provider.header(&self.exex_head.block.hash)? { - // ExEx is on the canonical chain - debug!(target: "exex::manager", "ExEx is behind the node head and on the canonical chain"); - - if exex_header.number != self.exex_head.block.number { - eyre::bail!("ExEx head number does not match the hash") - } - - // ExEx is on the canonical chain, start backfill - let backfill = backfill_job_factory - .backfill(self.exex_head.block.number + 1..=self.node_head.number) - .into_stream(); - self.backfill_job = Some(backfill); - } else { - debug!(target: "exex::manager", "ExEx is behind the node head and not on the canonical chain"); - // ExEx is not on the canonical chain, first unwind it and then backfill - - // TODO(alexey): unwind and backfill - self.backfill_job = None; - } - } - #[allow(clippy::branches_sharing_code)] - std::cmp::Ordering::Equal => { - // ExEx is at the same block height as the node head - - if let Some(exex_header) = self.provider.header(&self.exex_head.block.hash)? { - // ExEx is on the canonical chain - debug!(target: "exex::manager", "ExEx is at the same block height as the node head and on the canonical chain"); - - if exex_header.number != self.exex_head.block.number { - eyre::bail!("ExEx head number does not match the hash") - } - - // ExEx is on the canonical chain and the same as the node head, no need to - // backfill - self.backfill_job = None; - } else { - // ExEx is not on the canonical chain, first unwind it and then backfill - debug!(target: "exex::manager", "ExEx is at the same block height as the node head but not on the canonical chain"); - - // TODO(alexey): unwind and backfill - self.backfill_job = None; - } - } - std::cmp::Ordering::Greater => { - debug!(target: "exex::manager", "ExEx is ahead of the node head"); - - // ExEx is ahead of the node head - - // TODO(alexey): wait until the node head is at the same height as the ExEx head - // and then repeat the process above - self.node_head_catchup_in_progress = true; - } - }; - - Ok(()) - } -} - -impl Stream for ExExNotificationsWithHead -where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, -{ - type Item = eyre::Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - if this.pending_sync { - this.synchronize()?; - this.pending_sync = false; - } - - if let Some(backfill_job) = &mut this.backfill_job { - if let Some(chain) = ready!(backfill_job.poll_next_unpin(cx)) { - return Poll::Ready(Some(Ok(ExExNotification::ChainCommitted { - new: Arc::new(chain?), - }))) - } - - // Backfill job is done, remove it - this.backfill_job = None; - } - - loop { - let Some(notification) = ready!(this.notifications.poll_recv(cx)) else { - return Poll::Ready(None) - }; - - // 1. Either committed or reverted chain from the notification. - // 2. Block number of the tip of the canonical chain: - // - For committed chain, it's the tip block number. - // - For reverted chain, it's the block number preceding the first block in the chain. 
- let (chain, tip) = notification - .committed_chain() - .map(|chain| (chain.clone(), chain.tip().number)) - .or_else(|| { - notification - .reverted_chain() - .map(|chain| (chain.clone(), chain.first().number - 1)) - }) - .unzip(); - - if this.node_head_catchup_in_progress { - // If we are waiting for the node head to catch up to the same height as the ExEx - // head, then we need to check if the ExEx is on the canonical chain. - - // Query the chain from the new notification for the ExEx head block number. - let exex_head_block = chain - .as_ref() - .and_then(|chain| chain.blocks().get(&this.exex_head.block.number)); - - // Compare the hash of the block from the new notification to the ExEx head - // hash. - if let Some((block, tip)) = exex_head_block.zip(tip) { - if block.hash() == this.exex_head.block.hash { - // ExEx is on the canonical chain, proceed with the notification - this.node_head_catchup_in_progress = false; - } else { - // ExEx is not on the canonical chain, synchronize - let tip = - this.provider.sealed_header(tip)?.ok_or_eyre("node head not found")?; - this.node_head = Head::new( - tip.number, - tip.hash(), - tip.difficulty, - U256::MAX, - tip.timestamp, - ); - this.synchronize()?; - } - } - } - - if notification - .committed_chain() - .or_else(|| notification.reverted_chain()) - .map_or(false, |chain| chain.first().number > this.exex_head.block.number) - { - return Poll::Ready(Some(Ok(notification))) - } - } - } -} - /// Metrics for the `ExEx` manager. #[derive(Metrics)] -#[metrics(scope = "exex_manager")] +#[metrics(scope = "exex.manager")] pub struct ExExManagerMetrics { /// Max size of the internal state notifications buffer. max_capacity: Gauge, @@ -501,7 +189,10 @@ pub struct ExExManagerMetrics { /// - Error handling /// - Monitoring #[derive(Debug)] -pub struct ExExManager { +pub struct ExExManager
<P>
{
+    /// Provider for querying headers.
+    provider: P,
+
     /// Handles to communicate with the `ExEx`'s.
     exex_handles: Vec<ExExHandle>,
@@ -530,13 +221,18 @@ pub struct ExExManager {
     /// The finished height of all `ExEx`'s.
     finished_height: watch::Sender<FinishedExExHeight>,
 
+    /// Write-Ahead Log for the [`ExExNotification`]s.
+    wal: Wal,
+    /// A stream of finalized headers.
+    finalized_header_stream: ForkChoiceStream<SealedHeader>,
+
     /// A handle to the `ExEx` manager.
     handle: ExExManagerHandle,
     /// Metrics for the `ExEx` manager.
     metrics: ExExManagerMetrics,
 }
 
-impl ExExManager {
+impl
<P>
ExExManager
<P>
{ /// Create a new [`ExExManager`]. /// /// You must provide an [`ExExHandle`] for each `ExEx` and the maximum capacity of the @@ -544,7 +240,13 @@ impl ExExManager { /// /// When the capacity is exceeded (which can happen if an `ExEx` is slow) no one can send /// notifications over [`ExExManagerHandle`]s until there is capacity again. - pub fn new(handles: Vec, max_capacity: usize) -> Self { + pub fn new( + provider: P, + handles: Vec, + max_capacity: usize, + wal: Wal, + finalized_header_stream: ForkChoiceStream, + ) -> Self { let num_exexs = handles.len(); let (handle_tx, handle_rx) = mpsc::unbounded_channel(); @@ -562,6 +264,8 @@ impl ExExManager { metrics.num_exexs.set(num_exexs as f64); Self { + provider, + exex_handles: handles, handle_rx, @@ -575,6 +279,9 @@ impl ExExManager { is_ready: is_ready_tx, finished_height: finished_height_tx, + wal, + finalized_header_stream, + handle: ExExManagerHandle { exex_tx: handle_tx, num_exexs, @@ -614,73 +321,166 @@ impl ExExManager { } } -impl Future for ExExManager { +impl
<P>
ExExManager
<P>
+where
+    P: HeaderProvider,
+{
+    /// Finalizes the WAL according to the passed finalized header.
+    ///
+    /// This function checks if all ExExes are on the canonical chain and finalizes the WAL if
+    /// necessary.
+    fn finalize_wal(&self, finalized_header: SealedHeader) -> eyre::Result<()> {
+        debug!(target: "exex::manager", header = ?finalized_header.num_hash(), "Received finalized header");
+
+        // Check if all ExExes are on the canonical chain
+        let exex_finished_heights = self
+            .exex_handles
+            .iter()
+            // Get ID and finished height for each ExEx
+            .map(|exex_handle| (&exex_handle.id, exex_handle.finished_height))
+            // Deduplicate all hashes
+            .unique_by(|(_, num_hash)| num_hash.map(|num_hash| num_hash.hash))
+            // Check if hashes are canonical
+            .map(|(exex_id, num_hash)| {
+                num_hash.map_or(Ok((exex_id, num_hash, false)), |num_hash| {
+                    self.provider
+                        .is_known(&num_hash.hash)
+                        // Save the ExEx ID, finished height, and whether the hash is canonical
+                        .map(|is_canonical| (exex_id, Some(num_hash), is_canonical))
+                })
+            })
+            // We collect here to be able to log the unfinalized ExExes below
+            .collect::<Result<Vec<_>, _>>()?;
+        if exex_finished_heights.iter().all(|(_, _, is_canonical)| *is_canonical) {
+            // If there is a finalized header and all ExExes are on the canonical chain, finalize
+            // the WAL with either the lowest finished height among all ExExes, or the finalized
+            // header – whichever is lower.
+            let lowest_finished_height = exex_finished_heights
+                .iter()
+                .copied()
+                .filter_map(|(_, num_hash, _)| num_hash)
+                .chain([(finalized_header.num_hash())])
+                .min_by_key(|num_hash| num_hash.number)
+                .unwrap();
+
+            self.wal.finalize(lowest_finished_height)?;
+        } else {
+            let unfinalized_exexes = exex_finished_heights
+                .into_iter()
+                .filter_map(|(exex_id, num_hash, is_canonical)| {
+                    is_canonical.not().then_some((exex_id, num_hash))
+                })
+                .format_with(", ", |(exex_id, num_hash), f| {
+                    f(&format_args!("{exex_id} = {num_hash:?}"))
+                })
+                // We need this because `debug!` uses the argument twice when formatting the final
+                // log message, but the result of `format_with` can only be used once
+                .to_string();
+            debug!(
+                target: "exex::manager",
+                %unfinalized_exexes,
+                "Not all ExExes are on the canonical chain, can't finalize the WAL"
+            );
+        }
+
+        Ok(())
+    }
+}
+
+impl
<P>
Future for ExExManager
<P>
+where
+    P: HeaderProvider + Unpin + 'static,
+{
     type Output = eyre::Result<()>;
 
-    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
-        // drain handle notifications
-        while self.buffer.len() < self.max_capacity {
-            if let Poll::Ready(Some(notification)) = self.handle_rx.poll_recv(cx) {
+    /// Main loop of the [`ExExManager`]. The order of operations is as follows:
+    /// 1. Handle incoming ExEx events. We do it before finalizing the WAL, because it depends on
+    ///    the latest state of [`ExExEvent::FinishedHeight`] events.
+    /// 2. Finalize the WAL with the finalized header, if necessary.
+    /// 3. Drain [`ExExManagerHandle`] notifications, push them to the internal buffer and update
+    ///    the internal buffer capacity.
+    /// 4. Send notifications from the internal buffer to those ExExes that are ready to receive
+    ///    new notifications.
+    /// 5. Remove notifications from the internal buffer that have been sent to **all** ExExes and
+    ///    update the internal buffer capacity.
+    /// 6. Update the channel with the lowest [`FinishedExExHeight`] among all ExExes.
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let this = self.get_mut();
+
+        // Handle incoming ExEx events
+        for exex in &mut this.exex_handles {
+            while let Poll::Ready(Some(event)) = exex.receiver.poll_recv(cx) {
+                debug!(target: "exex::manager", exex_id = %exex.id, ?event, "Received event from ExEx");
+                exex.metrics.events_sent_total.increment(1);
+                match event {
+                    ExExEvent::FinishedHeight(height) => exex.finished_height = Some(height),
+                }
+            }
+        }
+
+        // Drain the finalized header stream and finalize the WAL with the last header
+        let mut last_finalized_header = None;
+        while let Poll::Ready(finalized_header) = this.finalized_header_stream.poll_next_unpin(cx) {
+            last_finalized_header = finalized_header;
+        }
+        if let Some(header) = last_finalized_header {
+            this.finalize_wal(header)?;
+        }
+
+        // Drain handle notifications
+        while this.buffer.len() < this.max_capacity {
+            if let Poll::Ready(Some(notification)) = this.handle_rx.poll_recv(cx) {
                 debug!(
+                    target: "exex::manager",
                     committed_tip = ?notification.committed_chain().map(|chain| chain.tip().number),
                     reverted_tip = ?notification.reverted_chain().map(|chain| chain.tip().number),
                     "Received new notification"
                 );
-                self.push_notification(notification);
+                this.wal.commit(&notification)?;
+                this.push_notification(notification);
                 continue
             }
             break
         }
 
-        // update capacity
-        self.update_capacity();
+        // Update capacity
+        this.update_capacity();
 
-        // advance all poll senders
+        // Advance all poll senders
         let mut min_id = usize::MAX;
-        for idx in (0..self.exex_handles.len()).rev() {
-            let mut exex = self.exex_handles.swap_remove(idx);
+        for idx in (0..this.exex_handles.len()).rev() {
+            let mut exex = this.exex_handles.swap_remove(idx);
 
-            // it is a logic error for this to ever underflow since the manager manages the
+            // It is a logic error for this to ever underflow since the manager manages the
             // notification IDs
             let notification_index = exex
                 .next_notification_id
-                .checked_sub(self.min_id)
+                .checked_sub(this.min_id)
                 .expect("exex expected notification ID outside the manager's range");
 
-            if let Some(notification) = self.buffer.get(notification_index) {
+            if let Some(notification) = this.buffer.get(notification_index) {
                 if let Poll::Ready(Err(err)) = exex.send(cx, notification) {
-                    // the channel was closed, which is irrecoverable for the manager
+                    // The channel was closed, which is irrecoverable for the manager
                     return Poll::Ready(Err(err.into()))
                 }
             }
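            // The smallest `next_notification_id` across all handles marks the oldest
            // notification still owed to some ExEx; everything below that id has been
            // delivered to every ExEx and can be dropped from the buffer.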
min_id = min_id.min(exex.next_notification_id); - self.exex_handles.push(exex); + this.exex_handles.push(exex); } - // remove processed buffered notifications - debug!(%min_id, "Updating lowest notification id in buffer"); - self.buffer.retain(|&(id, _)| id >= min_id); - self.min_id = min_id; + // Remove processed buffered notifications + debug!(target: "exex::manager", %min_id, "Updating lowest notification id in buffer"); + this.buffer.retain(|&(id, _)| id >= min_id); + this.min_id = min_id; - // update capacity - self.update_capacity(); - - // handle incoming exex events - for exex in &mut self.exex_handles { - while let Poll::Ready(Some(event)) = exex.receiver.poll_recv(cx) { - debug!(exex_id = %exex.id, ?event, "Received event from exex"); - exex.metrics.events_sent_total.increment(1); - match event { - ExExEvent::FinishedHeight(height) => exex.finished_height = Some(height), - } - } - } + // Update capacity + this.update_capacity(); - // update watch channel block number - let finished_height = self.exex_handles.iter_mut().try_fold(u64::MAX, |curr, exex| { - exex.finished_height.map_or(Err(()), |height| Ok(height.min(curr))) + // Update watch channel block number + let finished_height = this.exex_handles.iter_mut().try_fold(u64::MAX, |curr, exex| { + exex.finished_height.map_or(Err(()), |height| Ok(height.number.min(curr))) }); if let Ok(finished_height) = finished_height { - let _ = self.finished_height.send(FinishedExExHeight::Height(finished_height)); + let _ = this.finished_height.send(FinishedExExHeight::Height(finished_height)); } Poll::Pending @@ -810,53 +610,86 @@ impl Clone for ExExManagerHandle { mod tests { use super::*; use alloy_primitives::B256; - use futures::StreamExt; - use reth_db_common::init::init_genesis; - use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_primitives::{Block, BlockNumHash, Header, SealedBlockWithSenders}; - use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockReader, - BlockWriter, Chain, - }; - use reth_testing_utils::generators::{self, random_block, BlockParams}; + use eyre::OptionExt; + use futures::{FutureExt, StreamExt}; + use rand::Rng; + use reth_primitives::SealedBlockWithSenders; + use reth_provider::{test_utils::create_test_provider_factory, BlockWriter, Chain}; + use reth_testing_utils::generators::{self, random_block}; + + fn empty_finalized_header_stream() -> ForkChoiceStream { + let (tx, rx) = watch::channel(None); + // Do not drop the sender, otherwise the receiver will always return an error + std::mem::forget(tx); + ForkChoiceStream::new(rx) + } #[tokio::test] async fn test_delivers_events() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (mut exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Send an event and check that it's delivered correctly - event_tx.send(ExExEvent::FinishedHeight(42)).unwrap(); + let event = ExExEvent::FinishedHeight(BlockNumHash::new(42, B256::random())); + event_tx.send(event).unwrap(); let received_event = exex_handle.receiver.recv().await.unwrap(); - assert_eq!(received_event, ExExEvent::FinishedHeight(42)); + assert_eq!(received_event, event); } #[tokio::test] async fn test_has_exexs() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (exex_handle_1, _, _) = - 
ExExHandle::new("test_exex_1".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); - assert!(!ExExManager::new(vec![], 0).handle.has_exexs()); + assert!(!ExExManager::new((), vec![], 0, wal.clone(), empty_finalized_header_stream()) + .handle + .has_exexs()); - assert!(ExExManager::new(vec![exex_handle_1], 0).handle.has_exexs()); + assert!(ExExManager::new((), vec![exex_handle_1], 0, wal, empty_finalized_header_stream()) + .handle + .has_exexs()); } #[tokio::test] async fn test_has_capacity() { - let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), ()); - - assert!(!ExExManager::new(vec![], 0).handle.has_capacity()); + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); - assert!(ExExManager::new(vec![exex_handle_1], 10).handle.has_capacity()); + let (exex_handle_1, _, _) = + ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); + + assert!(!ExExManager::new((), vec![], 0, wal.clone(), empty_finalized_header_stream()) + .handle + .has_capacity()); + + assert!(ExExManager::new( + (), + vec![exex_handle_1], + 10, + wal, + empty_finalized_header_stream() + ) + .handle + .has_capacity()); } #[test] fn test_push_notification() { - let (exex_handle, _, _) = ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let (exex_handle, _, _) = + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Create a mock ExExManager and add the exex_handle to it - let mut exex_manager = ExExManager::new(vec![exex_handle], 10); + let mut exex_manager = + ExExManager::new((), vec![exex_handle], 10, wal, empty_finalized_header_stream()); // Define the notification for testing let mut block1 = SealedBlockWithSenders::default(); @@ -898,11 +731,21 @@ mod tests { #[test] fn test_update_capacity() { - let (exex_handle, _, _) = ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let (exex_handle, _, _) = + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Create a mock ExExManager and add the exex_handle to it let max_capacity = 5; - let mut exex_manager = ExExManager::new(vec![exex_handle], max_capacity); + let mut exex_manager = ExExManager::new( + (), + vec![exex_handle], + max_capacity, + wal, + empty_finalized_header_stream(), + ); // Push some notifications to fill part of the buffer let mut block1 = SealedBlockWithSenders::default(); @@ -932,17 +775,29 @@ mod tests { #[tokio::test] async fn test_updates_block_height() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let provider_factory = create_test_provider_factory(); + let (exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Check initial block height assert!(exex_handle.finished_height.is_none()); // Update the block height via an event - event_tx.send(ExExEvent::FinishedHeight(42)).unwrap(); + let block = BlockNumHash::new(42, B256::random()); + event_tx.send(ExExEvent::FinishedHeight(block)).unwrap(); // Create a mock ExExManager and add the exex_handle to it - let exex_manager = 
ExExManager::new(vec![exex_handle], 10); + let exex_manager = ExExManager::new( + provider_factory, + vec![exex_handle], + 10, + Wal::new(temp_dir.path()).unwrap(), + empty_finalized_header_stream(), + ); let mut cx = Context::from_waker(futures::task::noop_waker_ref()); @@ -952,7 +807,7 @@ mod tests { // Check that the block height was updated let updated_exex_handle = &pinned_manager.exex_handles[0]; - assert_eq!(updated_exex_handle.finished_height, Some(42)); + assert_eq!(updated_exex_handle.finished_height, Some(block)); // Get the receiver for the finished height let mut receiver = pinned_manager.handle.finished_height(); @@ -969,17 +824,31 @@ mod tests { #[tokio::test] async fn test_updates_block_height_lower() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let provider_factory = create_test_provider_factory(); + // Create two `ExExHandle` instances let (exex_handle1, event_tx1, _) = - ExExHandle::new("test_exex1".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex1".to_string(), Head::default(), (), (), wal.handle()); let (exex_handle2, event_tx2, _) = - ExExHandle::new("test_exex2".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex2".to_string(), Head::default(), (), (), wal.handle()); - // Send events to update the block heights of the two handles, with the second being lower - event_tx1.send(ExExEvent::FinishedHeight(42)).unwrap(); - event_tx2.send(ExExEvent::FinishedHeight(10)).unwrap(); + let block1 = BlockNumHash::new(42, B256::random()); + let block2 = BlockNumHash::new(10, B256::random()); - let exex_manager = ExExManager::new(vec![exex_handle1, exex_handle2], 10); + // Send events to update the block heights of the two handles, with the second being lower + event_tx1.send(ExExEvent::FinishedHeight(block1)).unwrap(); + event_tx2.send(ExExEvent::FinishedHeight(block2)).unwrap(); + + let exex_manager = ExExManager::new( + provider_factory, + vec![exex_handle1, exex_handle2], + 10, + Wal::new(temp_dir.path()).unwrap(), + empty_finalized_header_stream(), + ); let mut cx = Context::from_waker(futures::task::noop_waker_ref()); @@ -1002,20 +871,34 @@ mod tests { #[tokio::test] async fn test_updates_block_height_greater() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let provider_factory = create_test_provider_factory(); + // Create two `ExExHandle` instances let (exex_handle1, event_tx1, _) = - ExExHandle::new("test_exex1".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex1".to_string(), Head::default(), (), (), wal.handle()); let (exex_handle2, event_tx2, _) = - ExExHandle::new("test_exex2".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex2".to_string(), Head::default(), (), (), wal.handle()); // Assert that the initial block height is `None` for the first `ExExHandle`. assert!(exex_handle1.finished_height.is_none()); - // Send events to update the block heights of the two handles, with the second being higher. - event_tx1.send(ExExEvent::FinishedHeight(42)).unwrap(); - event_tx2.send(ExExEvent::FinishedHeight(100)).unwrap(); + let block1 = BlockNumHash::new(42, B256::random()); + let block2 = BlockNumHash::new(100, B256::random()); - let exex_manager = ExExManager::new(vec![exex_handle1, exex_handle2], 10); + // Send events to update the block heights of the two handles, with the second being higher. 
+ event_tx1.send(ExExEvent::FinishedHeight(block1)).unwrap(); + event_tx2.send(ExExEvent::FinishedHeight(block2)).unwrap(); + + let exex_manager = ExExManager::new( + provider_factory, + vec![exex_handle1, exex_handle2], + 10, + Wal::new(temp_dir.path()).unwrap(), + empty_finalized_header_stream(), + ); let mut cx = Context::from_waker(futures::task::noop_waker_ref()); @@ -1042,12 +925,23 @@ mod tests { #[tokio::test] async fn test_exex_manager_capacity() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let provider_factory = create_test_provider_factory(); + let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); // Create an ExExManager with a small max capacity let max_capacity = 2; - let mut exex_manager = ExExManager::new(vec![exex_handle_1], max_capacity); + let mut exex_manager = ExExManager::new( + provider_factory, + vec![exex_handle_1], + max_capacity, + Wal::new(temp_dir.path()).unwrap(), + empty_finalized_header_stream(), + ); let mut cx = Context::from_waker(futures::task::noop_waker_ref()); @@ -1081,8 +975,11 @@ mod tests { #[tokio::test] async fn exex_handle_new() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Check initial state assert_eq!(exex_handle.id, "test_exex"); @@ -1124,11 +1021,14 @@ mod tests { #[tokio::test] async fn test_notification_if_finished_height_gt_chain_tip() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Set finished_height to a value higher than the block tip - exex_handle.finished_height = Some(15); + exex_handle.finished_height = Some(BlockNumHash::new(15, B256::random())); let mut block1 = SealedBlockWithSenders::default(); block1.block.header.set_hash(B256::new([0x01; 32])); @@ -1166,8 +1066,11 @@ mod tests { #[tokio::test] async fn test_sends_chain_reorged_notification() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); let notification = ExExNotification::ChainReorged { old: Arc::new(Chain::default()), @@ -1176,7 +1079,7 @@ mod tests { // Even if the finished height is higher than the tip of the new chain, the reorg // notification should be received - exex_handle.finished_height = Some(u64::MAX); + exex_handle.finished_height = Some(BlockNumHash::new(u64::MAX, B256::random())); let mut cx = Context::from_waker(futures::task::noop_waker_ref()); @@ -1197,14 +1100,17 @@ mod tests { #[tokio::test] async fn test_sends_chain_reverted_notification() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), 
wal.handle()); let notification = ExExNotification::ChainReverted { old: Arc::new(Chain::default()) }; // Even if the finished height is higher than the tip of the new chain, the reorg // notification should be received - exex_handle.finished_height = Some(u64::MAX); + exex_handle.finished_height = Some(BlockNumHash::new(u64::MAX, B256::random())); let mut cx = Context::from_waker(futures::task::noop_waker_ref()); @@ -1224,217 +1130,84 @@ mod tests { } #[tokio::test] - async fn exex_notifications_behind_head_canonical() -> eyre::Result<()> { + async fn test_exex_wal() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let mut rng = generators::rng(); - let provider_factory = create_test_provider_factory(); - let genesis_hash = init_genesis(&provider_factory)?; - let genesis_block = provider_factory - .block(genesis_hash.into())? - .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); - let provider = BlockchainProvider2::new(provider_factory.clone())?; + let provider_factory = create_test_provider_factory(); - let node_head_block = random_block( - &mut rng, - genesis_block.number + 1, - BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, - ); + let block = random_block(&mut rng, 0, Default::default()) + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?; let provider_rw = provider_factory.provider_rw()?; - provider_rw.insert_block( - node_head_block.clone().seal_with_senders().ok_or_eyre("failed to recover senders")?, - )?; + provider_rw.insert_block(block.clone())?; provider_rw.commit()?; - let node_head = Head { - number: node_head_block.number, - hash: node_head_block.hash(), - ..Default::default() - }; - let exex_head = - ExExHead { block: BlockNumHash { number: genesis_block.number, hash: genesis_hash } }; - let notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![random_block( - &mut rng, - node_head.number + 1, - BlockParams { parent: Some(node_head.hash), ..Default::default() }, - ) - .seal_with_senders() - .ok_or_eyre("failed to recover senders")?], - Default::default(), - None, - )), + new: Arc::new(Chain::new(vec![block.clone()], Default::default(), None)), }; - let (notifications_tx, notifications_rx) = mpsc::channel(1); - - notifications_tx.send(notification.clone()).await?; - - let mut notifications = ExExNotifications::new( - node_head, - provider, - EthExecutorProvider::mainnet(), - notifications_rx, - ) - .with_head(exex_head); - - // First notification is the backfill of missing blocks from the canonical chain - assert_eq!( - notifications.next().await.transpose()?, - Some(ExExNotification::ChainCommitted { - new: Arc::new( - BackfillJobFactory::new( - notifications.executor.clone(), - notifications.provider.clone() - ) - .backfill(1..=1) - .next() - .ok_or_eyre("failed to backfill")?? - ) - }) - ); - - // Second notification is the actual notification that we sent before - assert_eq!(notifications.next().await.transpose()?, Some(notification)); - - Ok(()) - } - - #[ignore] - #[tokio::test] - async fn exex_notifications_behind_head_non_canonical() -> eyre::Result<()> { - Ok(()) - } - - #[tokio::test] - async fn exex_notifications_same_head_canonical() -> eyre::Result<()> { - let provider_factory = create_test_provider_factory(); - let genesis_hash = init_genesis(&provider_factory)?; - let genesis_block = provider_factory - .block(genesis_hash.into())? 
- .ok_or_else(|| eyre::eyre!("genesis block not found"))?; - - let provider = BlockchainProvider2::new(provider_factory)?; - - let node_head = - Head { number: genesis_block.number, hash: genesis_hash, ..Default::default() }; - let exex_head = - ExExHead { block: BlockNumHash { number: node_head.number, hash: node_head.hash } }; - - let notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![Block { - header: Header { - parent_hash: node_head.hash, - number: node_head.number + 1, - ..Default::default() - }, - ..Default::default() - } - .seal_slow() - .seal_with_senders() - .ok_or_eyre("failed to recover senders")?], - Default::default(), - None, - )), - }; - - let (notifications_tx, notifications_rx) = mpsc::channel(1); - - notifications_tx.send(notification.clone()).await?; + let (finalized_headers_tx, rx) = watch::channel(None); + let finalized_header_stream = ForkChoiceStream::new(rx); - let mut notifications = ExExNotifications::new( - node_head, - provider, - EthExecutorProvider::mainnet(), - notifications_rx, - ) - .with_head(exex_head); - - let new_notification = notifications.next().await.transpose()?; - assert_eq!(new_notification, Some(notification)); - - Ok(()) - } - - #[ignore] - #[tokio::test] - async fn exex_notifications_same_head_non_canonical() -> eyre::Result<()> { - Ok(()) - } + let (exex_handle, events_tx, mut notifications) = + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); - #[tokio::test] - async fn test_notifications_ahead_of_head() -> eyre::Result<()> { - let mut rng = generators::rng(); + let mut exex_manager = std::pin::pin!(ExExManager::new( + provider_factory, + vec![exex_handle], + 1, + wal, + finalized_header_stream + )); - let provider_factory = create_test_provider_factory(); - let genesis_hash = init_genesis(&provider_factory)?; - let genesis_block = provider_factory - .block(genesis_hash.into())? 
- .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + let mut cx = Context::from_waker(futures::task::noop_waker_ref()); - let provider = BlockchainProvider2::new(provider_factory)?; + exex_manager.handle().send(notification.clone())?; - let exex_head_block = random_block( - &mut rng, - genesis_block.number + 1, - BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, + assert!(exex_manager.as_mut().poll(&mut cx)?.is_pending()); + assert_eq!( + notifications.next().poll_unpin(&mut cx), + Poll::Ready(Some(notification.clone())) + ); + assert_eq!( + exex_manager.wal.iter_notifications()?.collect::>>()?, + [notification.clone()] ); - let node_head = - Head { number: genesis_block.number, hash: genesis_hash, ..Default::default() }; - let exex_head = ExExHead { - block: BlockNumHash { number: exex_head_block.number, hash: exex_head_block.hash() }, - }; - - let (notifications_tx, notifications_rx) = mpsc::channel(1); - - notifications_tx - .send(ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![exex_head_block - .clone() - .seal_with_senders() - .ok_or_eyre("failed to recover senders")?], - Default::default(), - None, - )), - }) - .await?; + finalized_headers_tx.send(Some(block.header.clone()))?; + assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); + // WAL isn't finalized because the ExEx didn't emit the `FinishedHeight` event + assert_eq!( + exex_manager.wal.iter_notifications()?.collect::>>()?, + [notification.clone()] + ); - let mut notifications = ExExNotifications::new( - node_head, - provider, - EthExecutorProvider::mainnet(), - notifications_rx, - ) - .with_head(exex_head); + // Send a `FinishedHeight` event with a non-canonical block + events_tx + .send(ExExEvent::FinishedHeight((rng.gen::(), rng.gen::()).into())) + .unwrap(); - // First notification is skipped because the node is catching up with the ExEx - let new_notification = poll_fn(|cx| Poll::Ready(notifications.poll_next_unpin(cx))).await; - assert!(new_notification.is_pending()); + finalized_headers_tx.send(Some(block.header.clone()))?; + assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); + // WAL isn't finalized because the ExEx emitted a `FinishedHeight` event with a + // non-canonical block + assert_eq!( + exex_manager.wal.iter_notifications()?.collect::>>()?, + [notification] + ); - // Imitate the node catching up with the ExEx by sending a notification for the missing - // block - let notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![random_block( - &mut rng, - exex_head_block.number + 1, - BlockParams { parent: Some(exex_head_block.hash()), ..Default::default() }, - ) - .seal_with_senders() - .ok_or_eyre("failed to recover senders")?], - Default::default(), - None, - )), - }; - notifications_tx.send(notification.clone()).await?; + // Send a `FinishedHeight` event with a canonical block + events_tx.send(ExExEvent::FinishedHeight(block.num_hash())).unwrap(); - // Second notification is received because the node caught up with the ExEx - assert_eq!(notifications.next().await.transpose()?, Some(notification)); + finalized_headers_tx.send(Some(block.header.clone()))?; + assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); + // WAL is finalized + assert!(exex_manager.wal.iter_notifications()?.next().is_none()); Ok(()) } diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs new file mode 100644 index 000000000000..116dac95422b --- /dev/null +++ 
b/crates/exex/exex/src/notifications.rs @@ -0,0 +1,633 @@ +use crate::{BackfillJobFactory, ExExNotification, StreamBackfillJob, WalHandle}; +use futures::{Stream, StreamExt}; +use reth_chainspec::Head; +use reth_evm::execute::BlockExecutorProvider; +use reth_exex_types::ExExHead; +use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory}; +use reth_tracing::tracing::debug; +use std::{ + fmt::Debug, + pin::Pin, + sync::Arc, + task::{ready, Context, Poll}, +}; +use tokio::sync::mpsc::Receiver; + +/// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. +pub struct ExExNotifications { + node_head: Head, + provider: P, + executor: E, + notifications: Receiver, + wal_handle: WalHandle, +} + +impl Debug for ExExNotifications { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ExExNotifications") + .field("provider", &self.provider) + .field("executor", &self.executor) + .field("notifications", &self.notifications) + .finish() + } +} + +impl ExExNotifications { + /// Creates a new instance of [`ExExNotifications`]. + pub const fn new( + node_head: Head, + provider: P, + executor: E, + notifications: Receiver, + wal_handle: WalHandle, + ) -> Self { + Self { node_head, provider, executor, notifications, wal_handle } + } + + /// Receives the next value for this receiver. + /// + /// This method returns `None` if the channel has been closed and there are + /// no remaining messages in the channel's buffer. This indicates that no + /// further values can ever be received from this `Receiver`. The channel is + /// closed when all senders have been dropped, or when [`Receiver::close`] is called. + /// + /// # Cancel safety + /// + /// This method is cancel safe. If `recv` is used as the event in a + /// [`tokio::select!`] statement and some other branch + /// completes first, it is guaranteed that no messages were received on this + /// channel. + /// + /// For full documentation, see [`Receiver::recv`]. + #[deprecated(note = "use `ExExNotifications::next` and its `Stream` implementation instead")] + pub async fn recv(&mut self) -> Option { + self.notifications.recv().await + } + + /// Polls to receive the next message on this channel. + /// + /// This method returns: + /// + /// * `Poll::Pending` if no messages are available but the channel is not closed, or if a + /// spurious failure happens. + /// * `Poll::Ready(Some(message))` if a message is available. + /// * `Poll::Ready(None)` if the channel has been closed and all messages sent before it was + /// closed have been received. + /// + /// When the method returns `Poll::Pending`, the `Waker` in the provided + /// `Context` is scheduled to receive a wakeup when a message is sent on any + /// receiver, or when the channel is closed. Note that on multiple calls to + /// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context` + /// passed to the most recent call is scheduled to receive a wakeup. + /// + /// If this method returns `Poll::Pending` due to a spurious failure, then + /// the `Waker` will be notified when the situation causing the spurious + /// failure has been resolved. Note that receiving such a wakeup does not + /// guarantee that the next call will succeed — it could fail with another + /// spurious failure. + /// + /// For full documentation, see [`Receiver::poll_recv`]. 
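// With `recv`/`poll_recv` deprecated, consumption is expected to go through the
// `Stream` implementation below, e.g. (a sketch, assuming `futures::StreamExt` is in scope):
//
//     while let Some(notification) = notifications.next().await {
//         // handle the `ExExNotification`
//     }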
+ #[deprecated( + note = "use `ExExNotifications::poll_next` and its `Stream` implementation instead" + )] + pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { + self.notifications.poll_recv(cx) + } +} + +impl ExExNotifications +where + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider + Clone + Unpin + 'static, +{ + /// Subscribe to notifications with the given head. This head is the ExEx's + /// latest view of the host chain. + /// + /// Notifications will be sent starting from the head, not inclusive. For + /// example, if `head.number == 10`, then the first notification will be + /// with `block.number == 11`. A `head.number` of 10 indicates that the ExEx + /// has processed up to block 10, and is ready to process block 11. + pub fn with_head(self, head: ExExHead) -> ExExNotificationsWithHead { + ExExNotificationsWithHead::new( + self.node_head, + self.provider, + self.executor, + self.notifications, + self.wal_handle, + head, + ) + } +} + +impl Stream for ExExNotifications { + type Item = ExExNotification; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.get_mut().notifications.poll_recv(cx) + } +} + +/// A stream of [`ExExNotification`]s. The stream will only emit notifications for blocks that are +/// committed or reverted after the given head. +#[derive(Debug)] +pub struct ExExNotificationsWithHead { + node_head: Head, + provider: P, + executor: E, + notifications: Receiver, + wal_handle: WalHandle, + exex_head: ExExHead, + /// If true, then we need to check if the ExEx head is on the canonical chain and if not, + /// revert its head. + pending_check_canonical: bool, + /// If true, then we need to check if the ExEx head is behind the node head and if so, backfill + /// the missing blocks. + pending_check_backfill: bool, + /// The backfill job to run before consuming any notifications. + backfill_job: Option>, +} + +impl ExExNotificationsWithHead +where + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider + Clone + Unpin + 'static, +{ + /// Creates a new [`ExExNotificationsWithHead`]. + pub const fn new( + node_head: Head, + provider: P, + executor: E, + notifications: Receiver, + wal_handle: WalHandle, + exex_head: ExExHead, + ) -> Self { + Self { + node_head, + provider, + executor, + notifications, + wal_handle, + exex_head, + pending_check_canonical: true, + pending_check_backfill: true, + backfill_job: None, + } + } + + /// Checks if the ExEx head is on the canonical chain. + /// + /// If the head block is not found in the database or it's ahead of the node head, it means + /// we're not on the canonical chain and we need to revert the notification with the ExEx + /// head block. + fn check_canonical(&mut self) -> eyre::Result> { + if self.provider.is_known(&self.exex_head.block.hash)? && + self.exex_head.block.number <= self.node_head.number + { + debug!(target: "exex::notifications", "ExEx head is on the canonical chain"); + return Ok(None) + } + + // If the head block is not found in the database, it means we're not on the canonical + // chain. + + // Get the committed notification for the head block from the WAL. + let Some(notification) = + self.wal_handle.get_committed_notification_by_block_hash(&self.exex_head.block.hash)? 
+        else {
+            return Err(eyre::eyre!(
+                "Could not find notification for block hash {:?} in the WAL",
+                self.exex_head.block.hash
+            ))
+        };
+
+        // Update the head block hash to the parent hash of the first committed block.
+        let committed_chain = notification.committed_chain().unwrap();
+        let new_exex_head =
+            (committed_chain.first().parent_hash, committed_chain.first().number - 1).into();
+        debug!(target: "exex::notifications", old_exex_head = ?self.exex_head.block, new_exex_head = ?new_exex_head, "ExEx head updated");
+        self.exex_head.block = new_exex_head;
+
+        // Return an inverted notification. See the documentation for
+        // `ExExNotification::into_inverted`.
+        Ok(Some(notification.into_inverted()))
+    }
+
+    /// Compares the node head against the ExEx head, and backfills if needed.
+    ///
+    /// CAUTION: This method assumes that the ExEx head is <= the node head, and that it's on the
+    /// canonical chain.
+    ///
+    /// Possible situations are:
+    /// - ExEx is behind the node head (`exex_head.number < node_head.number`). Backfill from the
+    ///   node database.
+    /// - ExEx is at the same block number as the node head (`node_head.number ==
+    ///   exex_head.number`). Nothing to do.
+    fn check_backfill(&mut self) -> eyre::Result<()> {
+        let backfill_job_factory =
+            BackfillJobFactory::new(self.executor.clone(), self.provider.clone());
+        match self.exex_head.block.number.cmp(&self.node_head.number) {
+            std::cmp::Ordering::Less => {
+                // ExEx is behind the node head, start backfill
+                debug!(target: "exex::notifications", "ExEx is behind the node head and on the canonical chain, starting backfill");
+                let backfill = backfill_job_factory
+                    .backfill(self.exex_head.block.number + 1..=self.node_head.number)
+                    .into_stream();
+                self.backfill_job = Some(backfill);
+            }
+            std::cmp::Ordering::Equal => {
+                debug!(target: "exex::notifications", "ExEx is at the node head");
+            }
+            std::cmp::Ordering::Greater => {
+                return Err(eyre::eyre!("ExEx is ahead of the node head"))
+            }
+        };
+
+        Ok(())
+    }
+}
+
+impl<P, E> Stream for ExExNotificationsWithHead<P, E>
+where
+    P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static,
+    E: BlockExecutorProvider + Clone + Unpin + 'static,
+{
+    type Item = eyre::Result<ExExNotification>;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let this = self.get_mut();
+
+        if this.pending_check_canonical {
+            if let Some(canonical_notification) = this.check_canonical()?
{ + return Poll::Ready(Some(Ok(canonical_notification))) + } + + // ExEx head is on the canonical chain, we no longer need to check it + this.pending_check_canonical = false; + } + + if this.pending_check_backfill { + this.check_backfill()?; + this.pending_check_backfill = false; + } + + if let Some(backfill_job) = &mut this.backfill_job { + if let Some(chain) = ready!(backfill_job.poll_next_unpin(cx)) { + return Poll::Ready(Some(Ok(ExExNotification::ChainCommitted { + new: Arc::new(chain?), + }))) + } + + // Backfill job is done, remove it + this.backfill_job = None; + } + + let Some(notification) = ready!(this.notifications.poll_recv(cx)) else { + return Poll::Ready(None) + }; + + if let Some(committed_chain) = notification.committed_chain() { + this.exex_head.block = committed_chain.tip().num_hash(); + } else if let Some(reverted_chain) = notification.reverted_chain() { + let first_block = reverted_chain.first(); + this.exex_head.block = (first_block.parent_hash, first_block.number - 1).into(); + } + + Poll::Ready(Some(Ok(notification))) + } +} + +#[cfg(test)] +mod tests { + use crate::Wal; + + use super::*; + use alloy_consensus::Header; + use alloy_eips::BlockNumHash; + use eyre::OptionExt; + use futures::StreamExt; + use reth_db_common::init::init_genesis; + use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_primitives::Block; + use reth_provider::{ + providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockWriter, + Chain, DatabaseProviderFactory, + }; + use reth_testing_utils::generators::{self, random_block, BlockParams}; + use tokio::sync::mpsc; + + #[tokio::test] + async fn exex_notifications_behind_head_canonical() -> eyre::Result<()> { + let mut rng = generators::rng(); + + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let provider_factory = create_test_provider_factory(); + let genesis_hash = init_genesis(&provider_factory)?; + let genesis_block = provider_factory + .block(genesis_hash.into())? 
+ .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + + let provider = BlockchainProvider2::new(provider_factory.clone())?; + + let node_head_block = random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, + ); + let provider_rw = provider_factory.provider_rw()?; + provider_rw.insert_block( + node_head_block.clone().seal_with_senders().ok_or_eyre("failed to recover senders")?, + )?; + provider_rw.commit()?; + + let node_head = Head { + number: node_head_block.number, + hash: node_head_block.hash(), + ..Default::default() + }; + let exex_head = + ExExHead { block: BlockNumHash { number: genesis_block.number, hash: genesis_hash } }; + + let notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![random_block( + &mut rng, + node_head.number + 1, + BlockParams { parent: Some(node_head.hash), ..Default::default() }, + ) + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + + let (notifications_tx, notifications_rx) = mpsc::channel(1); + + notifications_tx.send(notification.clone()).await?; + + let mut notifications = ExExNotifications::new( + node_head, + provider, + EthExecutorProvider::mainnet(), + notifications_rx, + wal.handle(), + ) + .with_head(exex_head); + + // First notification is the backfill of missing blocks from the canonical chain + assert_eq!( + notifications.next().await.transpose()?, + Some(ExExNotification::ChainCommitted { + new: Arc::new( + BackfillJobFactory::new( + notifications.executor.clone(), + notifications.provider.clone() + ) + .backfill(1..=1) + .next() + .ok_or_eyre("failed to backfill")?? + ) + }) + ); + + // Second notification is the actual notification that we sent before + assert_eq!(notifications.next().await.transpose()?, Some(notification)); + + Ok(()) + } + + #[tokio::test] + async fn exex_notifications_same_head_canonical() -> eyre::Result<()> { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let provider_factory = create_test_provider_factory(); + let genesis_hash = init_genesis(&provider_factory)?; + let genesis_block = provider_factory + .block(genesis_hash.into())? 
+ .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + + let provider = BlockchainProvider2::new(provider_factory)?; + + let node_head = + Head { number: genesis_block.number, hash: genesis_hash, ..Default::default() }; + let exex_head = + ExExHead { block: BlockNumHash { number: node_head.number, hash: node_head.hash } }; + + let notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![Block { + header: Header { + parent_hash: node_head.hash, + number: node_head.number + 1, + ..Default::default() + }, + ..Default::default() + } + .seal_slow() + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + + let (notifications_tx, notifications_rx) = mpsc::channel(1); + + notifications_tx.send(notification.clone()).await?; + + let mut notifications = ExExNotifications::new( + node_head, + provider, + EthExecutorProvider::mainnet(), + notifications_rx, + wal.handle(), + ) + .with_head(exex_head); + + let new_notification = notifications.next().await.transpose()?; + assert_eq!(new_notification, Some(notification)); + + Ok(()) + } + + #[tokio::test] + async fn exex_notifications_same_head_non_canonical() -> eyre::Result<()> { + let mut rng = generators::rng(); + + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let provider_factory = create_test_provider_factory(); + let genesis_hash = init_genesis(&provider_factory)?; + let genesis_block = provider_factory + .block(genesis_hash.into())? + .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + + let provider = BlockchainProvider2::new(provider_factory)?; + + let node_head_block = random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, + ) + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?; + let node_head = Head { + number: node_head_block.number, + hash: node_head_block.hash(), + ..Default::default() + }; + let provider_rw = provider.database_provider_rw()?; + provider_rw.insert_block(node_head_block)?; + provider_rw.commit()?; + let node_head_notification = ExExNotification::ChainCommitted { + new: Arc::new( + BackfillJobFactory::new(EthExecutorProvider::mainnet(), provider.clone()) + .backfill(node_head.number..=node_head.number) + .next() + .ok_or_else(|| eyre::eyre!("failed to backfill"))??, + ), + }; + + let exex_head_block = random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, + ); + let exex_head = ExExHead { block: exex_head_block.num_hash() }; + let exex_head_notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![exex_head_block + .clone() + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + wal.commit(&exex_head_notification)?; + + let new_notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![random_block( + &mut rng, + node_head.number + 1, + BlockParams { parent: Some(node_head.hash), ..Default::default() }, + ) + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + + let (notifications_tx, notifications_rx) = mpsc::channel(1); + + notifications_tx.send(new_notification.clone()).await?; + + let mut notifications = ExExNotifications::new( + node_head, + provider, + EthExecutorProvider::mainnet(), + notifications_rx, + wal.handle(), + ) + 
.with_head(exex_head); + + // First notification is the revert of the ExEx head block to get back to the canonical + // chain + assert_eq!( + notifications.next().await.transpose()?, + Some(exex_head_notification.into_inverted()) + ); + // Second notification is the backfilled block from the canonical chain to get back to the + // canonical tip + assert_eq!(notifications.next().await.transpose()?, Some(node_head_notification)); + // Third notification is the actual notification that we sent before + assert_eq!(notifications.next().await.transpose()?, Some(new_notification)); + + Ok(()) + } + + #[tokio::test] + async fn test_notifications_ahead_of_head() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let mut rng = generators::rng(); + + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let provider_factory = create_test_provider_factory(); + let genesis_hash = init_genesis(&provider_factory)?; + let genesis_block = provider_factory + .block(genesis_hash.into())? + .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + + let provider = BlockchainProvider2::new(provider_factory)?; + + let exex_head_block = random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, + ); + let exex_head_notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![exex_head_block + .clone() + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + wal.commit(&exex_head_notification)?; + + let node_head = + Head { number: genesis_block.number, hash: genesis_hash, ..Default::default() }; + let exex_head = ExExHead { + block: BlockNumHash { number: exex_head_block.number, hash: exex_head_block.hash() }, + }; + + let new_notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), ..Default::default() }, + ) + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + + let (notifications_tx, notifications_rx) = mpsc::channel(1); + + notifications_tx.send(new_notification.clone()).await?; + + let mut notifications = ExExNotifications::new( + node_head, + provider, + EthExecutorProvider::mainnet(), + notifications_rx, + wal.handle(), + ) + .with_head(exex_head); + + // First notification is the revert of the ExEx head block to get back to the canonical + // chain + assert_eq!( + notifications.next().await.transpose()?, + Some(exex_head_notification.into_inverted()) + ); + + // Second notification is the actual notification that we sent before + assert_eq!(notifications.next().await.transpose()?, Some(new_notification)); + + Ok(()) + } +} diff --git a/crates/exex/exex/src/wal/cache.rs b/crates/exex/exex/src/wal/cache.rs index 3e26fdcf4ca2..882b65e15892 100644 --- a/crates/exex/exex/src/wal/cache.rs +++ b/crates/exex/exex/src/wal/cache.rs @@ -1,137 +1,144 @@ -use std::collections::{BTreeMap, VecDeque}; +use std::{ + cmp::Reverse, + collections::{BinaryHeap, HashSet}, +}; +use alloy_eips::BlockNumHash; +use alloy_primitives::{map::FbHashMap, BlockNumber, B256}; use reth_exex_types::ExExNotification; -use reth_primitives::BlockNumHash; -/// The block cache of the WAL. Acts as a mapping of `File ID -> List of Blocks`. 
-/// -/// For each notification written to the WAL, there will be an entry per block written to -/// the cache with the same file ID. I.e. for each notification, there may be multiple blocks in the -/// cache. +/// The block cache of the WAL. /// /// This cache is needed to avoid walking the WAL directory every time we want to find a -/// notification corresponding to a block. -#[derive(Debug)] -pub(super) struct BlockCache(BTreeMap<u64, VecDeque<CachedBlock>>); +/// notification corresponding to a block or a block corresponding to a hash. +#[derive(Debug, Default)] +pub struct BlockCache { + /// A min heap of `(Block Number, File ID)` tuples. + /// + /// Contains the highest block of each notification. For a notification with both a committed + /// and a reverted chain, the highest block is chosen across the two chains. + pub(super) notification_max_blocks: BinaryHeap<Reverse<(BlockNumber, u32)>>, + /// A mapping of committed blocks `Block Hash -> Block`. + /// + /// For each [`ExExNotification::ChainCommitted`] notification, there will be an entry per + /// block. + pub(super) committed_blocks: FbHashMap<32, (u32, CachedBlock)>, + /// Block height of the lowest committed block currently in the cache. + pub(super) lowest_committed_block_height: Option<BlockNumber>, + /// Block height of the highest committed block currently in the cache. + pub(super) highest_committed_block_height: Option<BlockNumber>, +} impl BlockCache { - /// Creates a new instance of [`BlockCache`]. - pub(super) const fn new() -> Self { - Self(BTreeMap::new()) - } - /// Returns `true` if the cache is empty. pub(super) fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns a front-to-back iterator. - pub(super) fn iter(&self) -> impl Iterator<Item = (u64, CachedBlock)> + '_ { - self.0.iter().flat_map(|(k, v)| v.iter().map(move |b| (*k, *b))) - } - - /// Provides a reference to the first block from the cache, or `None` if the cache is - /// empty. - pub(super) fn front(&self) -> Option<(u64, CachedBlock)> { - self.0.first_key_value().and_then(|(k, v)| v.front().map(|b| (*k, *b))) + self.notification_max_blocks.is_empty() } - /// Provides a reference to the last block from the cache, or `None` if the cache is - /// empty. - pub(super) fn back(&self) -> Option<(u64, CachedBlock)> { - self.0.last_key_value().and_then(|(k, v)| v.back().map(|b| (*k, *b))) - } - - /// Removes the notification with the given file ID. - pub(super) fn remove_notification(&mut self, key: u64) -> Option<VecDeque<CachedBlock>> { - self.0.remove(&key) - } - - /// Pops the first block from the cache. If it resulted in the whole file entry being empty, - /// it will also remove the file entry. - pub(super) fn pop_front(&mut self) -> Option<(u64, CachedBlock)> { - let first_entry = self.0.first_entry()?; - let key = *first_entry.key(); - let blocks = first_entry.into_mut(); - let first_block = blocks.pop_front().unwrap(); - if blocks.is_empty() { - self.0.remove(&key); + /// Removes all files from the cache that have notifications with a tip block less than or equal + /// to the given block number. + /// + /// # Returns + /// + /// A set of file IDs that were removed.
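A note on the heap ordering before `remove_before` below: wrapping the `(block number, file ID)` tuples in `Reverse` turns `BinaryHeap`'s max-heap into a min-heap, so the entry with the lowest tip always sits at the top and finalized entries can be popped off cheaply. A standalone sketch of that pop-while-finalized loop (plain `u64`/`u32` keys rather than the reth types):

```rust
use std::{
    cmp::Reverse,
    collections::{BinaryHeap, HashSet},
};

fn main() {
    // One entry per notification: (highest block in the notification, file ID).
    let mut heap: BinaryHeap<Reverse<(u64, u32)>> = BinaryHeap::new();
    heap.push(Reverse((10, 0)));
    heap.push(Reverse((12, 1)));
    heap.push(Reverse((15, 2)));

    // Finalize everything whose tip is <= 12: peek at the smallest entry and
    // pop while it is below the threshold, collecting the file IDs to delete.
    let finalized_height = 12;
    let mut removed_file_ids = HashSet::new();
    while let Some(&Reverse((tip, file_id))) = heap.peek() {
        if tip <= finalized_height {
            heap.pop();
            removed_file_ids.insert(file_id);
        } else {
            break;
        }
    }

    assert_eq!(removed_file_ids, HashSet::from([0, 1]));
    // The unfinalized notification is untouched.
    assert_eq!(heap.peek(), Some(&Reverse((15, 2))));
}
```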
+ pub(super) fn remove_before(&mut self, block_number: BlockNumber) -> HashSet<u32> { + let mut file_ids = HashSet::default(); + + while let Some(block @ Reverse((max_block, file_id))) = + self.notification_max_blocks.peek().copied() + { + if max_block <= block_number { + let popped_block = self.notification_max_blocks.pop().unwrap(); + debug_assert_eq!(popped_block, block); + file_ids.insert(file_id); + } else { + break + } } - Some((key, first_block)) - } + let (mut lowest_committed_block_height, mut highest_committed_block_height) = (None, None); + self.committed_blocks.retain(|_, (file_id, block)| { + let retain = !file_ids.contains(file_id); - /// Pops the last block from the cache. If it resulted in the whole file entry being empty, - /// it will also remove the file entry. - pub(super) fn pop_back(&mut self) -> Option<(u64, CachedBlock)> { - let last_entry = self.0.last_entry()?; - let key = *last_entry.key(); - let blocks = last_entry.into_mut(); - let last_block = blocks.pop_back().unwrap(); - if blocks.is_empty() { - self.0.remove(&key); - } + if retain { + lowest_committed_block_height = Some( + lowest_committed_block_height + .map_or(block.block.number, |lowest| block.block.number.min(lowest)), + ); + highest_committed_block_height = Some( + highest_committed_block_height + .map_or(block.block.number, |highest| block.block.number.max(highest)), + ); + } + + retain + }); + self.lowest_committed_block_height = lowest_committed_block_height; + self.highest_committed_block_height = highest_committed_block_height; - Some((key, last_block)) + file_ids } - /// Appends a block to the back of the specified file entry. - pub(super) fn insert(&mut self, file_id: u64, block: CachedBlock) { - self.0.entry(file_id).or_default().push_back(block); + /// Returns the file ID for the notification containing the given committed block hash, if it + /// exists. + pub(super) fn get_file_id_by_committed_block_hash(&self, block_hash: &B256) -> Option<u32> { + self.committed_blocks.get(block_hash).map(|entry| entry.0) } /// Inserts the blocks from the notification into the cache with the given file ID. - /// - /// First, inserts the reverted blocks (if any), then the committed blocks (if any).
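`insert_notification_blocks_with_file_id` below picks the notification's heap key as the maximum tip across the optional reverted and committed chains, leaning on the fact that an `Option` iterates as zero or one items. The idiom in isolation (bare tip numbers standing in for the chains):

```rust
fn main() {
    // Stand-ins for `Option<Chain>`: just each chain's tip block number.
    let reverted_tip: Option<u64> = Some(11);
    let committed_tip: Option<u64> = Some(12);

    // `Option::iter()` yields zero or one items, so chaining the two options
    // gives an iterator of up to two tips, and `max()` returns `None` only
    // when both chains are absent.
    let max_block = reverted_tip.iter().chain(&committed_tip).copied().max();
    assert_eq!(max_block, Some(12));

    let no_revert: Option<u64> = None;
    let no_commit: Option<u64> = None;
    assert_eq!(no_revert.iter().chain(&no_commit).copied().max(), None);
}
```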
pub(super) fn insert_notification_blocks_with_file_id( &mut self, - file_id: u64, + file_id: u32, notification: &ExExNotification, ) { let reverted_chain = notification.reverted_chain(); let committed_chain = notification.committed_chain(); - if let Some(reverted_chain) = reverted_chain { - for block in reverted_chain.blocks().values() { - self.insert( - file_id, - CachedBlock { - action: CachedBlockAction::Revert, - block: (block.number, block.hash()).into(), - }, - ); - } + let max_block = + reverted_chain.iter().chain(&committed_chain).map(|chain| chain.tip().number).max(); + if let Some(max_block) = max_block { + self.notification_max_blocks.push(Reverse((max_block, file_id))); } - if let Some(committed_chain) = committed_chain { + if let Some(committed_chain) = &committed_chain { for block in committed_chain.blocks().values() { - self.insert( - file_id, - CachedBlock { - action: CachedBlockAction::Commit, - block: (block.number, block.hash()).into(), - }, - ); + let cached_block = CachedBlock { + block: (block.number, block.hash()).into(), + parent_hash: block.parent_hash, + }; + self.committed_blocks.insert(block.hash(), (file_id, cached_block)); } + + self.highest_committed_block_height = Some(committed_chain.tip().number); } } + + #[cfg(test)] + pub(super) fn blocks_sorted(&self) -> Vec<(BlockNumber, u32)> { + self.notification_max_blocks + .clone() + .into_sorted_vec() + .into_iter() + .map(|entry| entry.0) + .collect() + } + + #[cfg(test)] + pub(super) fn committed_blocks_sorted(&self) -> Vec<(B256, u32, CachedBlock)> { + use itertools::Itertools; + + self.committed_blocks + .iter() + .map(|(hash, (file_id, block))| (*hash, *file_id, *block)) + .sorted_by_key(|(_, _, block)| (block.block.number, block.block.hash)) + .collect() + } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(super) struct CachedBlock { - pub(super) action: CachedBlockAction, /// The block number and hash of the block. pub(super) block: BlockNumHash, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub(super) enum CachedBlockAction { - Commit, - Revert, -} - -impl CachedBlockAction { - pub(super) const fn is_commit(&self) -> bool { - matches!(self, Self::Commit) - } + /// The hash of the parent block. 
+ pub(super) parent_hash: B256, } diff --git a/crates/exex/exex/src/wal/metrics.rs b/crates/exex/exex/src/wal/metrics.rs new file mode 100644 index 000000000000..7726fc978d47 --- /dev/null +++ b/crates/exex/exex/src/wal/metrics.rs @@ -0,0 +1,18 @@ +use metrics::Gauge; +use reth_metrics::Metrics; + +/// Metrics for the [WAL](`super::Wal`) +#[derive(Metrics)] +#[metrics(scope = "exex.wal")] +pub(super) struct Metrics { + /// Size of all notifications in WAL in bytes + pub size_bytes: Gauge, + /// Total number of notifications in WAL + pub notifications_total: Gauge, + /// Total number of committed blocks in WAL + pub committed_blocks_total: Gauge, + /// Lowest committed block height in WAL + pub lowest_committed_block_height: Gauge, + /// Highest committed block height in WAL + pub highest_committed_block_height: Gauge, +} diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index a5e0188ca593..2341b56d1044 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -1,53 +1,113 @@ #![allow(dead_code)] mod cache; +pub use cache::BlockCache; mod storage; - -use std::path::Path; - -use cache::BlockCache; +pub use storage::Storage; +mod metrics; +use metrics::Metrics; + +use std::{ + path::Path, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, +}; + +use alloy_eips::BlockNumHash; +use alloy_primitives::B256; +use parking_lot::{RwLock, RwLockReadGuard}; use reth_exex_types::ExExNotification; -use reth_primitives::BlockNumHash; use reth_tracing::tracing::{debug, instrument}; -use storage::Storage; -/// WAL is a write-ahead log (WAL) that stores the notifications sent to a particular ExEx. +/// WAL is a write-ahead log that stores the notifications sent to ExExes. /// /// WAL is backed by a directory of binary files represented by [`Storage`] and a block cache /// represented by [`BlockCache`]. The role of the block cache is to avoid walking the WAL directory -/// and decoding notifications every time we want to rollback/finalize the WAL. +/// and decoding notifications every time we want to iterate or finalize the WAL. /// /// The expected mode of operation is as follows: /// 1. On every new canonical chain notification, call [`Wal::commit`]. -/// 2. When ExEx is on a wrong fork, rollback the WAL using [`Wal::rollback`]. The caller is -/// expected to create reverts from the removed notifications and backfill the blocks between the -/// returned block and the given rollback block. After that, commit new notifications as usual -/// with [`Wal::commit`]. -/// 3. When the chain is finalized, call [`Wal::finalize`] to prevent the infinite growth of the +/// 2. When the chain is finalized, call [`Wal::finalize`] to prevent the infinite growth of the /// WAL. +#[derive(Debug, Clone)] +pub struct Wal { + inner: Arc<WalInner>, +} + +impl Wal { + /// Creates a new instance of [`Wal`]. + pub fn new(directory: impl AsRef<Path>) -> eyre::Result<Self> { + Ok(Self { inner: Arc::new(WalInner::new(directory)?) }) + } + + /// Returns a read-only handle to the WAL. + pub fn handle(&self) -> WalHandle { + WalHandle { wal: self.inner.clone() } + } + + /// Commits the notification to WAL. + pub fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { + self.inner.commit(notification) + } + + /// Finalizes the WAL up to the given canonical block, inclusive. + /// + /// The caller should check that all ExExes are on the canonical chain and will not need any + /// blocks from the WAL below the provided block, inclusive.
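Structurally, `Wal` is now a cheaply cloneable wrapper around `Arc<WalInner>`, with `WalHandle` as the read-only view handed out to ExExes; the `finalize` signature that the doc comment above describes continues right after this aside. A minimal standalone sketch of that owner/handle split (hypothetical names, `std::sync::RwLock` standing in for the `parking_lot` lock used in the diff):

```rust
use std::sync::{Arc, RwLock};

#[derive(Debug, Default)]
struct Inner {
    // Stand-in for the WAL's storage + block cache.
    entries: RwLock<Vec<u32>>,
}

// The owner is `Clone`, like `Wal`: clones share the same `Inner`.
#[derive(Clone)]
struct Owner {
    inner: Arc<Inner>,
}

// Read-only view, like `WalHandle`.
struct Handle {
    inner: Arc<Inner>,
}

impl Owner {
    fn handle(&self) -> Handle {
        Handle { inner: Arc::clone(&self.inner) }
    }

    fn commit(&self, id: u32) {
        // Writers take the lock briefly, as `WalInner::commit` does.
        self.inner.entries.write().unwrap().push(id);
    }
}

impl Handle {
    fn contains(&self, id: u32) -> bool {
        self.inner.entries.read().unwrap().contains(&id)
    }
}

fn main() {
    let owner = Owner { inner: Arc::new(Inner::default()) };
    let handle = owner.handle();
    owner.commit(7);
    assert!(handle.contains(7));
}
```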
+ pub fn finalize(&self, to_block: BlockNumHash) -> eyre::Result<()> { + self.inner.finalize(to_block) + } + + /// Returns an iterator over all notifications in the WAL. + pub fn iter_notifications( + &self, + ) -> eyre::Result<Box<dyn Iterator<Item = eyre::Result<ExExNotification>> + '_>> { + self.inner.iter_notifications() + } +} + +/// Inner type for the WAL. #[derive(Debug)] -pub(crate) struct Wal { +struct WalInner { + next_file_id: AtomicU32, /// The underlying WAL storage backed by a file. storage: Storage, /// WAL block cache. See [`cache::BlockCache`] docs for more details. - block_cache: BlockCache, + block_cache: RwLock<BlockCache>, + metrics: Metrics, } -impl Wal { - /// Creates a new instance of [`Wal`]. - pub(crate) fn new(directory: impl AsRef<Path>) -> eyre::Result<Self> { - let mut wal = Self { storage: Storage::new(directory)?, block_cache: BlockCache::new() }; +impl WalInner { + fn new(directory: impl AsRef<Path>) -> eyre::Result<Self> { + let mut wal = Self { + next_file_id: AtomicU32::new(0), + storage: Storage::new(directory)?, + block_cache: RwLock::new(BlockCache::default()), + metrics: Metrics::default(), + }; wal.fill_block_cache()?; Ok(wal) } + fn block_cache(&self) -> RwLockReadGuard<'_, BlockCache> { + self.block_cache.read() + } + /// Fills the block cache with the notifications from the storage. - #[instrument(target = "exex::wal", skip(self))] + #[instrument(skip(self))] fn fill_block_cache(&mut self) -> eyre::Result<()> { let Some(files_range) = self.storage.files_range()? else { return Ok(()) }; + self.next_file_id.store(files_range.end() + 1, Ordering::Relaxed); + + let mut block_cache = self.block_cache.write(); + let mut notifications_size = 0; for entry in self.storage.iter_notifications(files_range) { - let (file_id, notification) = entry?; + let (file_id, size, notification) = entry?; + + notifications_size += size; let committed_chain = notification.committed_chain(); let reverted_chain = notification.reverted_chain(); @@ -60,171 +120,98 @@ impl Wal { "Inserting block cache entries" ); - self.block_cache.insert_notification_blocks_with_file_id(file_id, &notification); + block_cache.insert_notification_blocks_with_file_id(file_id, &notification); } + self.update_metrics(&block_cache, notifications_size as i64); + Ok(()) } - /// Commits the notification to WAL. - #[instrument(target = "exex::wal", skip_all, fields( + #[instrument(skip_all, fields( reverted_block_range = ?notification.reverted_chain().as_ref().map(|chain| chain.range()), committed_block_range = ?notification.committed_chain().as_ref().map(|chain| chain.range()) ))] - pub(crate) fn commit(&mut self, notification: &ExExNotification) -> eyre::Result<()> { - debug!("Writing notification to WAL"); - let file_id = self.block_cache.back().map_or(0, |block| block.0 + 1); - self.storage.write_notification(file_id, notification)?; + fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { + let mut block_cache = self.block_cache.write(); + + let file_id = self.next_file_id.fetch_add(1, Ordering::Relaxed); + let size = self.storage.write_notification(file_id, notification)?; + + debug!(target: "exex::wal", ?file_id, "Inserting notification blocks into the block cache"); + block_cache.insert_notification_blocks_with_file_id(file_id, notification); - debug!(?file_id, "Inserting notification blocks into the block cache"); - self.block_cache.insert_notification_blocks_with_file_id(file_id, notification); + self.update_metrics(&block_cache, size as i64); Ok(()) } - /// Rollbacks the WAL to the given block, inclusive. - /// - /// 1.
Walks the WAL from the end and searches for the first notification where committed chain - /// contains a block with the same number and hash as `to_block`. - /// 2. If the notification is found, truncates the WAL. It means that if the found notification - /// contains both given block and blocks before it, the whole notification will be truncated. - /// - /// # Returns - /// - /// 1. The block number and hash of the lowest removed block. - /// 2. The notifications that were removed. - #[instrument(target = "exex::wal", skip(self))] - pub(crate) fn rollback( - &mut self, - to_block: BlockNumHash, - ) -> eyre::Result)>> { - // First, pop items from the back of the cache until we find the notification with the - // specified block. When found, save the file ID of that notification. - let mut remove_from_file_id = None; - let mut remove_to_file_id = None; - let mut lowest_removed_block = None; - while let Some((file_id, block)) = self.block_cache.pop_back() { - debug!(?file_id, ?block, "Popped back block from the block cache"); - if block.action.is_commit() && block.block.number == to_block.number { - debug!( - ?file_id, - ?block, - ?remove_from_file_id, - ?lowest_removed_block, - "Found the requested block" - ); - - if block.block.hash != to_block.hash { - eyre::bail!("block hash mismatch in WAL") - } - - remove_from_file_id = Some(file_id); - - let notification = self.storage.read_notification(file_id)?; - lowest_removed_block = notification - .committed_chain() - .as_ref() - .map(|chain| chain.first()) - .map(|block| (block.number, block.hash()).into()); - - break - } - - remove_from_file_id = Some(file_id); - remove_to_file_id.get_or_insert(file_id); - } + #[instrument(skip(self))] + fn finalize(&self, to_block: BlockNumHash) -> eyre::Result<()> { + let mut block_cache = self.block_cache.write(); + let file_ids = block_cache.remove_before(to_block.number); - // If the specified block is still not found, we can't do anything and just return. The - // cache was empty. - let Some((remove_from_file_id, remove_to_file_id)) = - remove_from_file_id.zip(remove_to_file_id) - else { - debug!("No blocks were rolled back"); - return Ok(None) - }; + // Remove notifications from the storage. + if file_ids.is_empty() { + debug!(target: "exex::wal", "No notifications were finalized from the storage"); + return Ok(()) + } - // Remove the rest of the block cache entries for the file ID that we found. - self.block_cache.remove_notification(remove_from_file_id); - debug!(?remove_from_file_id, "Block cache was rolled back"); + let (removed_notifications, removed_size) = self.storage.remove_notifications(file_ids)?; + debug!(target: "exex::wal", ?removed_notifications, ?removed_size, "Storage was finalized"); - // Remove notifications from the storage. - let removed_notifications = - self.storage.take_notifications(remove_from_file_id..=remove_to_file_id)?; - debug!(removed_notifications = ?removed_notifications.len(), "Storage was rolled back"); + self.update_metrics(&block_cache, -(removed_size as i64)); - Ok(Some((lowest_removed_block.expect("qed"), removed_notifications))) + Ok(()) } - /// Finalizes the WAL to the given block, inclusive. - /// - /// 1. Finds a notification with first unfinalized block (first notification containing a - /// committed block higher than `to_block`). - /// 2. Removes the notifications from the beginning of WAL until the found notification. If this - /// notification includes both finalized and non-finalized blocks, it will not be removed. 
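Worth isolating from the new `commit` path above: file IDs now come from a relaxed atomic counter instead of being re-derived from the last cache entry, which is part of what makes the write path safe behind a shared reference. A standalone sketch of the allocator:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    // Monotonic file-ID allocation in the style of `WalInner::commit`.
    // `fetch_add` returns the previous value, so each caller gets a unique ID
    // even under concurrency; `Relaxed` suffices because the IDs only need to
    // be unique, not ordered against other memory operations.
    let next_file_id = AtomicU32::new(0);

    let a = next_file_id.fetch_add(1, Ordering::Relaxed);
    let b = next_file_id.fetch_add(1, Ordering::Relaxed);
    assert_eq!((a, b), (0, 1));

    // On startup, `fill_block_cache` seeds the counter past existing files.
    let highest_existing_file_id = 41;
    next_file_id.store(highest_existing_file_id + 1, Ordering::Relaxed);
    assert_eq!(next_file_id.fetch_add(1, Ordering::Relaxed), 42);
}
```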
-#[instrument(target = "exex::wal", skip(self))] - pub(crate) fn finalize(&mut self, to_block: BlockNumHash) -> eyre::Result<()> { - // First, walk cache to find the file ID of the notification with the finalized block and - // save the file ID with the last unfinalized block. Do not remove any notifications - // yet. - let mut unfinalized_from_file_id = None; - { - let mut block_cache = self.block_cache.iter().peekable(); - while let Some((file_id, block)) = block_cache.next() { - debug!(?file_id, ?block, "Iterating over the block cache"); - if block.action.is_commit() && - block.block.number == to_block.number && - block.block.hash == to_block.hash - { - let notification = self.storage.read_notification(file_id)?; - if notification.committed_chain().unwrap().blocks().len() == 1 { - unfinalized_from_file_id = block_cache.peek().map(|(file_id, _)| *file_id); - } else { - unfinalized_from_file_id = Some(file_id); - } - - debug!( - ?file_id, - ?block, - ?unfinalized_from_file_id, - "Found the finalized block in the block cache" - ); - break - } - - unfinalized_from_file_id = Some(file_id); - } + fn update_metrics(&self, block_cache: &BlockCache, size_delta: i64) { + self.metrics.size_bytes.increment(size_delta as f64); + self.metrics.notifications_total.set(block_cache.notification_max_blocks.len() as f64); + self.metrics.committed_blocks_total.set(block_cache.committed_blocks.len() as f64); + + if let Some(lowest_committed_block_height) = block_cache.lowest_committed_block_height { + self.metrics.lowest_committed_block_height.set(lowest_committed_block_height as f64); } - // If the finalized block is still not found, we can't do anything and just return. - let Some(remove_to_file_id) = unfinalized_from_file_id else { - debug!("Could not find the finalized block in WAL"); - return Ok(()) + if let Some(highest_committed_block_height) = block_cache.highest_committed_block_height { + self.metrics.highest_committed_block_height.set(highest_committed_block_height as f64); + } + } + + /// Returns an iterator over all notifications in the WAL. + fn iter_notifications( + &self, + ) -> eyre::Result<Box<dyn Iterator<Item = eyre::Result<ExExNotification>> + '_>> { + let Some(range) = self.storage.files_range()? else { + return Ok(Box::new(std::iter::empty())) }; - // Remove notifications from the storage from the beginning up to the unfinalized block, not - // inclusive. - let (mut file_range_start, mut file_range_end) = (None, None); - while let Some((file_id, _)) = self.block_cache.front() { - if file_id == remove_to_file_id { - break - } - self.block_cache.pop_front(); - - file_range_start.get_or_insert(file_id); - file_range_end = Some(file_id); - } - debug!(?remove_to_file_id, "Block cache was finalized"); + Ok(Box::new(self.storage.iter_notifications(range).map(|entry| Ok(entry?.2)))) + } +} - // Remove notifications from the storage. - if let Some((file_range_start, file_range_end)) = file_range_start.zip(file_range_end) { - let removed_notifications = - self.storage.remove_notifications(file_range_start..=file_range_end)?; - debug!(?removed_notifications, "Storage was finalized"); - } else { - debug!("No notifications were finalized from the storage"); - } +/// A read-only handle to the WAL that can be shared. +#[derive(Debug)] +pub struct WalHandle { + wal: Arc<WalInner>, +} - Ok(()) +impl WalHandle { + /// Returns the notification for the given committed block hash if it exists.
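`WalHandle::get_committed_notification_by_block_hash`, whose body follows, is a two-step lookup: a cache hit on the committed-blocks map first, and only then a file read. A simplified sketch of that shape (string hashes and a closure standing in for `Storage::read_notification`):

```rust
use std::collections::HashMap;

fn main() {
    // Stand-in for `BlockCache::committed_blocks`: block hash -> file ID.
    let mut committed_blocks: HashMap<&str, u32> = HashMap::new();
    committed_blocks.insert("0xabc", 3);

    // Stand-in for `Storage::read_notification(file_id)`.
    let storage = |file_id: u32| -> Option<String> {
        Some(format!("notification from {file_id}.wal"))
    };

    let lookup = |hash: &str| -> Option<String> {
        // Cache miss short-circuits before any file I/O happens.
        let file_id = *committed_blocks.get(hash)?;
        storage(file_id)
    };

    assert_eq!(lookup("0xabc").as_deref(), Some("notification from 3.wal"));
    assert_eq!(lookup("0xdef"), None);
}
```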
+ pub fn get_committed_notification_by_block_hash( + &self, + block_hash: &B256, + ) -> eyre::Result> { + let Some(file_id) = self.wal.block_cache().get_file_id_by_committed_block_hash(block_hash) + else { + return Ok(None) + }; + + self.wal + .storage + .read_notification(file_id) + .map(|entry| entry.map(|(notification, _)| notification)) } } @@ -232,27 +219,36 @@ impl Wal { mod tests { use std::sync::Arc; + use alloy_primitives::B256; use eyre::OptionExt; + use itertools::Itertools; use reth_exex_types::ExExNotification; use reth_provider::Chain; use reth_testing_utils::generators::{ self, random_block, random_block_range, BlockParams, BlockRangeParams, }; - use crate::wal::{ - cache::{CachedBlock, CachedBlockAction}, - Wal, - }; + use crate::wal::{cache::CachedBlock, Wal}; fn read_notifications(wal: &Wal) -> eyre::Result> { - let Some(files_range) = wal.storage.files_range()? else { return Ok(Vec::new()) }; + let Some(files_range) = wal.inner.storage.files_range()? else { return Ok(Vec::new()) }; - wal.storage + wal.inner + .storage .iter_notifications(files_range) - .map(|entry| Ok(entry?.1)) + .map(|entry| Ok(entry?.2)) .collect::>() } + fn sort_committed_blocks( + committed_blocks: Vec<(B256, u32, CachedBlock)>, + ) -> Vec<(B256, u32, CachedBlock)> { + committed_blocks + .into_iter() + .sorted_by_key(|(_, _, block)| (block.block.number, block.block.hash)) + .collect() + } + #[test] fn test_wal() -> eyre::Result<()> { reth_tracing::init_test_tracing(); @@ -261,8 +257,8 @@ mod tests { // Create an instance of the WAL in a temporary directory let temp_dir = tempfile::tempdir()?; - let mut wal = Wal::new(&temp_dir)?; - assert!(wal.block_cache.is_empty()); + let wal = Wal::new(&temp_dir)?; + assert!(wal.inner.block_cache().is_empty()); // Create 4 canonical blocks and one reorged block with number 2 let blocks = random_block_range(&mut rng, 0..=3, BlockRangeParams::default()) @@ -321,72 +317,47 @@ mod tests { // First notification (commit block 0, 1) let file_id = 0; - let committed_notification_1_cache = vec![ + let committed_notification_1_cache_blocks = (blocks[1].number, file_id); + let committed_notification_1_cache_committed_blocks = vec![ ( + blocks[0].hash(), file_id, CachedBlock { - action: CachedBlockAction::Commit, block: (blocks[0].number, blocks[0].hash()).into(), + parent_hash: blocks[0].parent_hash, }, ), ( + blocks[1].hash(), file_id, CachedBlock { - action: CachedBlockAction::Commit, block: (blocks[1].number, blocks[1].hash()).into(), + parent_hash: blocks[1].parent_hash, }, ), ]; wal.commit(&committed_notification_1)?; - assert_eq!(wal.block_cache.iter().collect::>(), committed_notification_1_cache); - assert_eq!(read_notifications(&wal)?, vec![committed_notification_1.clone()]); - - // Second notification (revert block 1) - wal.commit(&reverted_notification)?; - let file_id = 1; - let reverted_notification_cache = vec![( - file_id, - CachedBlock { - action: CachedBlockAction::Revert, - block: (blocks[1].number, blocks[1].hash()).into(), - }, - )]; assert_eq!( - wal.block_cache.iter().collect::>(), - [committed_notification_1_cache.clone(), reverted_notification_cache.clone()].concat() + wal.inner.block_cache().blocks_sorted(), + [committed_notification_1_cache_blocks] ); assert_eq!( - read_notifications(&wal)?, - vec![committed_notification_1.clone(), reverted_notification.clone()] - ); - - // Now, rollback to block 1 and verify that both the block cache and the storage are - // empty. 
We expect the rollback to delete the first notification (commit block 0, 1), - // because we can't delete blocks partly from the notification, and also the second - // notification (revert block 1). Additionally, check that the block that the rolled - // back to is the block with number 0. - let rollback_result = wal.rollback((blocks[1].number, blocks[1].hash()).into())?; - assert_eq!(wal.block_cache.iter().collect::>(), vec![]); - assert_eq!(read_notifications(&wal)?, vec![]); - assert_eq!( - rollback_result, - Some(( - (blocks[0].number, blocks[0].hash()).into(), - vec![committed_notification_1.clone(), reverted_notification.clone()] - )) + wal.inner.block_cache().committed_blocks_sorted(), + committed_notification_1_cache_committed_blocks ); + assert_eq!(read_notifications(&wal)?, vec![committed_notification_1.clone()]); - // Commit notifications 1 and 2 again - wal.commit(&committed_notification_1)?; + // Second notification (revert block 1) + wal.commit(&reverted_notification)?; + let file_id = 1; + let reverted_notification_cache_blocks = (blocks[1].number, file_id); assert_eq!( - wal.block_cache.iter().collect::>(), - [committed_notification_1_cache.clone()].concat() + wal.inner.block_cache().blocks_sorted(), + [reverted_notification_cache_blocks, committed_notification_1_cache_blocks] ); - assert_eq!(read_notifications(&wal)?, vec![committed_notification_1.clone()]); - wal.commit(&reverted_notification)?; assert_eq!( - wal.block_cache.iter().collect::>(), - [committed_notification_1_cache.clone(), reverted_notification_cache.clone()].concat() + wal.inner.block_cache().committed_blocks_sorted(), + committed_notification_1_cache_committed_blocks ); assert_eq!( read_notifications(&wal)?, @@ -396,30 +367,42 @@ mod tests { // Third notification (commit block 1, 2) wal.commit(&committed_notification_2)?; let file_id = 2; - let committed_notification_2_cache = vec![ + let committed_notification_2_cache_blocks = (blocks[2].number, file_id); + let committed_notification_2_cache_committed_blocks = vec![ ( + block_1_reorged.hash(), file_id, CachedBlock { - action: CachedBlockAction::Commit, block: (block_1_reorged.number, block_1_reorged.hash()).into(), + parent_hash: block_1_reorged.parent_hash, }, ), ( + blocks[2].hash(), file_id, CachedBlock { - action: CachedBlockAction::Commit, block: (blocks[2].number, blocks[2].hash()).into(), + parent_hash: blocks[2].parent_hash, }, ), ]; assert_eq!( - wal.block_cache.iter().collect::>(), + wal.inner.block_cache().blocks_sorted(), [ - committed_notification_1_cache.clone(), - reverted_notification_cache.clone(), - committed_notification_2_cache.clone() + committed_notification_2_cache_blocks, + reverted_notification_cache_blocks, + committed_notification_1_cache_blocks, ] - .concat() + ); + assert_eq!( + wal.inner.block_cache().committed_blocks_sorted(), + sort_committed_blocks( + [ + committed_notification_1_cache_committed_blocks.clone(), + committed_notification_2_cache_committed_blocks.clone() + ] + .concat() + ) ); assert_eq!( read_notifications(&wal)?, @@ -433,44 +416,50 @@ mod tests { // Fourth notification (revert block 2, commit block 2, 3) wal.commit(&reorged_notification)?; let file_id = 3; - let reorged_notification_cache = vec![ + let reorged_notification_cache_blocks = (blocks[3].number, file_id); + let reorged_notification_cache_committed_blocks = vec![ ( + block_2_reorged.hash(), file_id, CachedBlock { - action: CachedBlockAction::Revert, - block: (blocks[2].number, blocks[2].hash()).into(), - }, - ), - ( - file_id, - CachedBlock { - 
action: CachedBlockAction::Commit, block: (block_2_reorged.number, block_2_reorged.hash()).into(), + parent_hash: block_2_reorged.parent_hash, }, ), ( + blocks[3].hash(), file_id, CachedBlock { - action: CachedBlockAction::Commit, block: (blocks[3].number, blocks[3].hash()).into(), + parent_hash: blocks[3].parent_hash, }, ), ]; assert_eq!( - wal.block_cache.iter().collect::>(), + wal.inner.block_cache().blocks_sorted(), [ - committed_notification_1_cache, - reverted_notification_cache, - committed_notification_2_cache.clone(), - reorged_notification_cache.clone() + reorged_notification_cache_blocks, + committed_notification_2_cache_blocks, + reverted_notification_cache_blocks, + committed_notification_1_cache_blocks, ] - .concat() + ); + assert_eq!( + wal.inner.block_cache().committed_blocks_sorted(), + sort_committed_blocks( + [ + committed_notification_1_cache_committed_blocks, + committed_notification_2_cache_committed_blocks.clone(), + reorged_notification_cache_committed_blocks.clone() + ] + .concat() + ) ); assert_eq!( read_notifications(&wal)?, vec![ - committed_notification_1.clone(), - reverted_notification.clone(), + committed_notification_1, + reverted_notification, committed_notification_2.clone(), reorged_notification.clone() ] @@ -482,8 +471,39 @@ mod tests { // the notifications before it. wal.finalize((block_1_reorged.number, block_1_reorged.hash()).into())?; assert_eq!( - wal.block_cache.iter().collect::>(), - [committed_notification_2_cache, reorged_notification_cache].concat() + wal.inner.block_cache().blocks_sorted(), + [reorged_notification_cache_blocks, committed_notification_2_cache_blocks] + ); + assert_eq!( + wal.inner.block_cache().committed_blocks_sorted(), + sort_committed_blocks( + [ + committed_notification_2_cache_committed_blocks.clone(), + reorged_notification_cache_committed_blocks.clone() + ] + .concat() + ) + ); + assert_eq!( + read_notifications(&wal)?, + vec![committed_notification_2.clone(), reorged_notification.clone()] + ); + + // Re-open the WAL and verify that the cache population works correctly + let wal = Wal::new(&temp_dir)?; + assert_eq!( + wal.inner.block_cache().blocks_sorted(), + [reorged_notification_cache_blocks, committed_notification_2_cache_blocks] + ); + assert_eq!( + wal.inner.block_cache().committed_blocks_sorted(), + sort_committed_blocks( + [ + committed_notification_2_cache_committed_blocks, + reorged_notification_cache_committed_blocks + ] + .concat() + ) ); assert_eq!(read_notifications(&wal)?, vec![committed_notification_2, reorged_notification]); diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs index 283b303a346f..af3a590e5860 100644 --- a/crates/exex/exex/src/wal/storage.rs +++ b/crates/exex/exex/src/wal/storage.rs @@ -1,6 +1,5 @@ use std::{ fs::File, - io::{Read, Write}, ops::RangeInclusive, path::{Path, PathBuf}, }; @@ -14,8 +13,8 @@ use tracing::instrument; /// /// Each notification is represented by a single file that contains a MessagePack-encoded /// notification. -#[derive(Debug)] -pub(super) struct Storage { +#[derive(Debug, Clone)] +pub struct Storage { /// The path to the WAL file. 
path: PathBuf, } @@ -29,11 +28,11 @@ impl Storage { Ok(Self { path: path.as_ref().to_path_buf() }) } - fn file_path(&self, id: u64) -> PathBuf { + fn file_path(&self, id: u32) -> PathBuf { self.path.join(format!("{id}.wal")) } - fn parse_filename(filename: &str) -> eyre::Result<u64> { + fn parse_filename(filename: &str) -> eyre::Result<u32> { filename .strip_suffix(".wal") .and_then(|s| s.parse().ok()) @@ -41,18 +40,31 @@ impl Storage { } /// Removes the notification for the given file ID from the storage. - #[instrument(target = "exex::wal::storage", skip(self))] - fn remove_notification(&self, file_id: u64) { + /// + /// # Returns + /// + /// The size of the file that was removed in bytes, if any. + #[instrument(skip(self))] + fn remove_notification(&self, file_id: u32) -> Option<u64> { + let path = self.file_path(file_id); + let size = path.metadata().ok()?.len(); + match reth_fs_util::remove_file(self.file_path(file_id)) { - Ok(()) => debug!("Notification was removed from the storage"), - Err(err) => debug!(?err, "Failed to remove notification from the storage"), + Ok(()) => { + debug!(target: "exex::wal::storage", "Notification was removed from the storage"); + Some(size) + } + Err(err) => { + debug!(target: "exex::wal::storage", ?err, "Failed to remove notification from the storage"); + None + } } } /// Returns the range of file IDs in the storage. /// /// If there are no files in the storage, returns `None`. - pub(super) fn files_range(&self) -> eyre::Result<Option<RangeInclusive<u64>>> { + pub(super) fn files_range(&self) -> eyre::Result<Option<RangeInclusive<u32>>> { let mut min_id = None; let mut max_id = None; @@ -61,88 +73,96 @@ impl Storage { let file_name = entry.file_name(); let file_id = Self::parse_filename(&file_name.to_string_lossy())?; - min_id = min_id.map_or(Some(file_id), |min_id: u64| Some(min_id.min(file_id))); - max_id = max_id.map_or(Some(file_id), |max_id: u64| Some(max_id.max(file_id))); + min_id = min_id.map_or(Some(file_id), |min_id: u32| Some(min_id.min(file_id))); + max_id = max_id.map_or(Some(file_id), |max_id: u32| Some(max_id.max(file_id))); } Ok(min_id.zip(max_id).map(|(min_id, max_id)| min_id..=max_id)) } - /// Removes notifications from the storage according to the given range. - /// - /// # Returns - /// - /// Number of removed notifications. - pub(super) fn remove_notifications(&self, range: RangeInclusive<u64>) -> eyre::Result<usize> { - for id in range.clone() { - self.remove_notification(id); - } - - Ok(range.count()) - } - - /// Removes notifications from the storage according to the given range. + /// Removes notifications from the storage according to the given list of file IDs. /// /// # Returns /// - /// Notifications that were removed. - pub(super) fn take_notifications( + /// Number of removed notifications and the total size of the removed files in bytes.
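Since finalization can now delete an arbitrary set of file IDs rather than a contiguous range, `remove_notifications` below takes an iterator of IDs. The `{id}.wal` naming scheme it all rests on is simple enough to sketch in isolation (hypothetical helpers mirroring `parse_filename`/`files_range` above; the real code propagates a parse error instead of skipping bad names):

```rust
use std::ops::RangeInclusive;

fn parse_filename(filename: &str) -> Option<u32> {
    // Each notification lives in a file named `{id}.wal`.
    filename.strip_suffix(".wal").and_then(|s| s.parse().ok())
}

fn files_range(names: &[&str]) -> Option<RangeInclusive<u32>> {
    // Recover the ID range by scanning every filename and tracking min/max.
    let ids: Vec<u32> = names.iter().filter_map(|name| parse_filename(name)).collect();
    let min = *ids.iter().min()?;
    let max = *ids.iter().max()?;
    Some(min..=max)
}

fn main() {
    assert_eq!(parse_filename("17.wal"), Some(17));
    assert_eq!(parse_filename("junk.txt"), None);
    assert_eq!(files_range(&["2.wal", "0.wal", "1.wal"]), Some(0..=2));
    assert_eq!(files_range(&[]), None);
}
```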
+ pub(super) fn remove_notifications( &self, - range: RangeInclusive<u64>, - ) -> eyre::Result<Vec<ExExNotification>> { - let notifications = self.iter_notifications(range).collect::<eyre::Result<Vec<_>>>()?; - - for (id, _) in &notifications { - self.remove_notification(*id); + file_ids: impl IntoIterator<Item = u32>, + ) -> eyre::Result<(usize, u64)> { + let mut deleted_total = 0; + let mut deleted_size = 0; + + for id in file_ids { + if let Some(size) = self.remove_notification(id) { + deleted_total += 1; + deleted_size += size; + } } - Ok(notifications.into_iter().map(|(_, notification)| notification).collect()) + Ok((deleted_total, deleted_size)) } pub(super) fn iter_notifications( &self, - range: RangeInclusive<u64>, - ) -> impl Iterator<Item = eyre::Result<(u64, ExExNotification)>> + '_ { - range.map(move |id| self.read_notification(id).map(|notification| (id, notification))) + range: RangeInclusive<u32>, + ) -> impl Iterator<Item = eyre::Result<(u32, u64, ExExNotification)>> + '_ { + range.map(move |id| { + let (notification, size) = + self.read_notification(id)?.ok_or_else(|| eyre::eyre!("notification {id} not found"))?; + + Ok((id, size, notification)) + }) } - /// Reads the notification from the file with the given id. - pub(super) fn read_notification(&self, file_id: u64) -> eyre::Result<ExExNotification> { - debug!(?file_id, "Reading notification from WAL"); - + /// Reads the notification from the file with the given ID. + #[instrument(skip(self))] + pub(super) fn read_notification( + &self, + file_id: u32, + ) -> eyre::Result<Option<(ExExNotification, u64)>> { let file_path = self.file_path(file_id); - let mut file = File::open(&file_path)?; - read_notification(&mut file) + debug!(target: "exex::wal::storage", ?file_path, "Reading notification from WAL"); + + let mut file = match File::open(&file_path) { + Ok(file) => file, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(None), + Err(err) => return Err(reth_fs_util::FsPathError::open(err, &file_path).into()), + }; + let size = file.metadata()?.len(); + + // Deserialize using the bincode- and msgpack-compatible serde wrapper + let notification: reth_exex_types::serde_bincode_compat::ExExNotification<'_> = + rmp_serde::decode::from_read(&mut file).map_err(|err| { + eyre::eyre!("failed to decode notification from {file_path:?}: {err:?}") + })?; + + Ok(Some((notification.into(), size))) } - /// Writes the notification to the file with the given id. + /// Writes the notification to the file with the given ID. + /// + /// # Returns + /// + /// The size of the file that was written in bytes. + #[instrument(skip(self, notification))] pub(super) fn write_notification( &self, - file_id: u64, + file_id: u32, notification: &ExExNotification, - ) -> eyre::Result<()> { - debug!(?file_id, "Writing notification to WAL"); - + ) -> eyre::Result<u64> { let file_path = self.file_path(file_id); - let mut file = File::create_new(&file_path)?; - write_notification(&mut file, notification)?; + debug!(target: "exex::wal::storage", ?file_path, "Writing notification to WAL"); - Ok(()) - } -} + // Serialize using the bincode- and msgpack-compatible serde wrapper + let notification = + reth_exex_types::serde_bincode_compat::ExExNotification::from(notification); -// TODO(alexey): use rmp-serde when Alloy and Reth serde issues are resolved + reth_fs_util::atomic_write_file(&file_path, |file| { + rmp_serde::encode::write(file, &notification) + })?; -fn write_notification(mut w: &mut impl Write, notification: &ExExNotification) -> eyre::Result<()> { - // rmp_serde::encode::write(w, notification)?; - serde_json::to_writer(&mut w, notification)?; - w.flush()?; - Ok(()) -} - -fn read_notification(r: &mut impl Read) -> eyre::Result<ExExNotification> { - // Ok(rmp_serde::from_read(r)?)
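With the switch from the `serde_json` fallback (deleted just below) to MessagePack, a round-trip looks like the following hedged sketch; `Notification` here is a hypothetical stand-in payload, whereas the real code goes through `reth_exex_types::serde_bincode_compat::ExExNotification` as shown above:

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Notification {
    block_number: u64,
    committed: bool,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let original = Notification { block_number: 42, committed: true };

    // Encode into an in-memory buffer, the way `write_notification` encodes
    // into the temp file handed to it by `atomic_write_file`.
    let mut buf = Vec::new();
    rmp_serde::encode::write(&mut buf, &original)?;

    // Decode from a reader, mirroring `read_notification`.
    let decoded: Notification = rmp_serde::decode::from_read(buf.as_slice())?;
    assert_eq!(decoded, original);
    Ok(())
}
```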
- Ok(serde_json::from_reader(r)?) + Ok(file_path.metadata()?.len()) + } } #[cfg(test)] @@ -179,7 +199,10 @@ mod tests { let file_id = 0; storage.write_notification(file_id, &notification)?; let deserialized_notification = storage.read_notification(file_id)?; - assert_eq!(deserialized_notification, notification); + assert_eq!( + deserialized_notification.map(|(notification, _)| notification), + Some(notification) + ); Ok(()) } diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index e3c95feadb08..8488cdb8b731 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -40,4 +40,5 @@ tokio.workspace = true ## misc eyre.workspace = true rand.workspace = true +tempfile.workspace = true thiserror.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 4117c0c73c9a..b8be08616b4a 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -8,6 +8,13 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use std::{ + fmt::Debug, + future::{poll_fn, Future}, + sync::Arc, + task::Poll, +}; + use futures_util::FutureExt; use reth_blockchain_tree::noop::NoopBlockchainTree; use reth_chainspec::{ChainSpec, MAINNET}; @@ -20,7 +27,7 @@ use reth_db_common::init::init_genesis; use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_evm::test_utils::MockExecutorProvider; use reth_execution_types::Chain; -use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications}; +use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal}; use reth_network::{config::SecretKey, NetworkConfigBuilder, NetworkManager}; use reth_node_api::{ FullNodeTypes, FullNodeTypesAdapter, NodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine, @@ -41,19 +48,15 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{Head, SealedBlockWithSenders}; +use reth_primitives::{BlockNumHash, Head, SealedBlockWithSenders}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, BlockReader, ProviderFactory, }; use reth_tasks::TaskManager; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; -use std::{ - fmt::Debug, - future::{poll_fn, Future}, - sync::Arc, - task::Poll, -}; + +use tempfile::TempDir; use thiserror::Error; use tokio::sync::mpsc::{Sender, UnboundedReceiver}; @@ -182,6 +185,8 @@ pub struct TestExExHandle { pub notifications_tx: Sender<ExExNotification>, /// Node task manager pub tasks: TaskManager, + /// WAL temp directory handle + _wal_directory: TempDir, } impl TestExExHandle { @@ -222,7 +227,7 @@ impl TestExExHandle { /// Asserts that the Execution Extension emitted a `FinishedHeight` event with the correct /// height.
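The new `_wal_directory: TempDir` field exists purely to tie the WAL directory's lifetime to the test handle: `tempfile::TempDir` deletes its directory on drop, so the handle must own it for as long as the WAL inside is in use. A standalone illustration of that behavior:

```rust
use std::path::PathBuf;

fn main() -> std::io::Result<()> {
    let dir = tempfile::tempdir()?;
    let wal_path: PathBuf = dir.path().join("0.wal");
    std::fs::write(&wal_path, b"notification bytes")?;
    assert!(wal_path.exists());

    // Dropping the `TempDir` removes the whole directory, WAL file included,
    // which is why `TestExExHandle` keeps the guard alive in a field.
    drop(dir);
    assert!(!wal_path.exists());
    Ok(())
}
```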
#[track_caller] - pub fn assert_event_finished_height(&mut self, height: u64) -> eyre::Result<()> { + pub fn assert_event_finished_height(&mut self, height: BlockNumHash) -> eyre::Result<()> { let event = self.events_rx.try_recv()?; assert_eq!(event, ExExEvent::FinishedHeight(height)); Ok(()) } @@ -303,6 +308,9 @@ pub async fn test_exex_context_with_chain_spec( total_difficulty: Default::default(), }; + let wal_directory = tempfile::tempdir()?; + let wal = Wal::new(wal_directory.path())?; + let (events_tx, events_rx) = tokio::sync::mpsc::unbounded_channel(); let (notifications_tx, notifications_rx) = tokio::sync::mpsc::channel(1); let notifications = ExExNotifications::new( @@ -310,6 +318,7 @@ components.provider.clone(), components.components.executor.clone(), notifications_rx, + wal.handle(), ); let ctx = ExExContext { @@ -321,7 +330,17 @@ components, }; - Ok((ctx, TestExExHandle { genesis, provider_factory, events_rx, notifications_tx, tasks })) + Ok(( + ctx, + TestExExHandle { + genesis, + provider_factory, + events_rx, + notifications_tx, + tasks, + _wal_directory: wal_directory, + }, + )) } /// Creates a new [`ExExContext`] with [mainnet](`MAINNET`) chain spec. @@ -366,3 +385,13 @@ impl<F: Future<Output = eyre::Result<()>> + Unpin + Send> PollOnce for F { .await } } + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn check_test_context_creation() { + let _ = test_exex_context().await.unwrap(); + } +} diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index 17b8c8634304..a146cbc22739 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -13,7 +13,8 @@ workspace = true [dependencies] # reth -reth-provider.workspace = true +reth-chain-state.workspace = true +reth-execution-types.workspace = true # reth alloy-primitives.workspace = true @@ -21,7 +22,16 @@ alloy-eips.workspace = true # misc serde = { workspace = true, optional = true } +serde_with = { workspace = true, optional = true } + +[dev-dependencies] +reth-primitives = { workspace = true, features = ["arbitrary"] } + +arbitrary.workspace = true +bincode.workspace = true +rand.workspace = true [features] default = [] -serde = ["dep:serde", "reth-provider/serde"] +serde = ["dep:serde", "reth-execution-types/serde"] +serde-bincode-compat = ["reth-execution-types/serde-bincode-compat", "serde_with"] diff --git a/crates/exex/types/src/head.rs b/crates/exex/types/src/head.rs index 730b5724b37e..8863ab327d06 100644 --- a/crates/exex/types/src/head.rs +++ b/crates/exex/types/src/head.rs @@ -1,6 +1,8 @@ use alloy_eips::BlockNumHash; -/// A head of the ExEx. It determines the highest block committed to the internal ExEx state. +/// A head of the ExEx. It contains the highest host block committed to the +/// internal ExEx state, i.e. the latest block that the ExEx has fully +/// processed. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct ExExHead { /// The head block. diff --git a/crates/exex/types/src/lib.rs b/crates/exex/types/src/lib.rs index 8e71fbc619b3..ffed819d6ec1 100644 --- a/crates/exex/types/src/lib.rs +++ b/crates/exex/types/src/lib.rs @@ -1,4 +1,4 @@ -//! Commonly used types for exex usage. +//! Commonly used ExEx types.
#![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -15,3 +15,15 @@ mod notification; pub use finished_height::FinishedExExHeight; pub use head::ExExHead; pub use notification::ExExNotification; + +/// Bincode-compatible serde implementations for commonly used ExEx types. +/// +/// The `bincode` crate doesn't work with optionally serializable serde fields, but some of the +/// ExEx types require optional serialization for RPC compatibility. This module makes it so that +/// all fields are always serialized. +/// +/// Read more: +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub mod serde_bincode_compat { + pub use super::notification::serde_bincode_compat::*; +} diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index 390d9dc665a7..53411250270d 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -1,6 +1,7 @@ use std::sync::Arc; -use reth_provider::{CanonStateNotification, Chain}; +use reth_chain_state::CanonStateNotification; +use reth_execution_types::Chain; /// Notifications sent to an `ExEx`. #[derive(Debug, Clone, PartialEq, Eq)] @@ -43,6 +44,20 @@ impl ExExNotification { Self::ChainCommitted { .. } => None, } } + + /// Converts the notification into a notification that is the inverse of the original one. + /// + /// - For [`Self::ChainCommitted`], it's [`Self::ChainReverted`]. + /// - For [`Self::ChainReverted`], it's [`Self::ChainCommitted`]. + /// - For [`Self::ChainReorged`], it's [`Self::ChainReorged`] with the new chain as the old + /// chain and the old chain as the new chain. + pub fn into_inverted(self) -> Self { + match self { + Self::ChainCommitted { new } => Self::ChainReverted { old: new }, + Self::ChainReverted { old } => Self::ChainCommitted { new: old }, + Self::ChainReorged { old, new } => Self::ChainReorged { old: new, new: old }, + } + } } impl From<CanonStateNotification> for ExExNotification { @@ -53,3 +68,143 @@ impl From<CanonStateNotification> for ExExNotification { } } } + +/// Bincode-compatible [`ExExNotification`] serde implementation. +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub(super) mod serde_bincode_compat { + use std::sync::Arc; + + use reth_execution_types::serde_bincode_compat::Chain; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{DeserializeAs, SerializeAs}; + + /// Bincode-compatible [`super::ExExNotification`] serde implementation.
+ /// + /// Intended to be used with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_exex_types::{serde_bincode_compat, ExExNotification}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + /// #[serde_as(as = "serde_bincode_compat::ExExNotification")] + /// notification: ExExNotification, + /// } + /// ``` + #[derive(Debug, Serialize, Deserialize)] + #[allow(missing_docs)] + pub enum ExExNotification<'a> { + ChainCommitted { new: Chain<'a> }, + ChainReorged { old: Chain<'a>, new: Chain<'a> }, + ChainReverted { old: Chain<'a> }, + } + + impl<'a> From<&'a super::ExExNotification> for ExExNotification<'a> { + fn from(value: &'a super::ExExNotification) -> Self { + match value { + super::ExExNotification::ChainCommitted { new } => { + ExExNotification::ChainCommitted { new: Chain::from(new.as_ref()) } + } + super::ExExNotification::ChainReorged { old, new } => { + ExExNotification::ChainReorged { + old: Chain::from(old.as_ref()), + new: Chain::from(new.as_ref()), + } + } + super::ExExNotification::ChainReverted { old } => { + ExExNotification::ChainReverted { old: Chain::from(old.as_ref()) } + } + } + } + } + + impl<'a> From<ExExNotification<'a>> for super::ExExNotification { + fn from(value: ExExNotification<'a>) -> Self { + match value { + ExExNotification::ChainCommitted { new } => { + Self::ChainCommitted { new: Arc::new(new.into()) } + } + ExExNotification::ChainReorged { old, new } => { + Self::ChainReorged { old: Arc::new(old.into()), new: Arc::new(new.into()) } + } + ExExNotification::ChainReverted { old } => { + Self::ChainReverted { old: Arc::new(old.into()) } + } + } + } + } + + impl<'a> SerializeAs<super::ExExNotification> for ExExNotification<'a> { + fn serialize_as<S>( + source: &super::ExExNotification, + serializer: S, + ) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + ExExNotification::from(source).serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::ExExNotification> for ExExNotification<'de> { + fn deserialize_as<D>(deserializer: D) -> Result<super::ExExNotification, D::Error> + where + D: Deserializer<'de>, + { + ExExNotification::deserialize(deserializer).map(Into::into) + } + } + + #[cfg(test)] + mod tests { + use std::sync::Arc; + + use arbitrary::Arbitrary; + use rand::Rng; + use reth_execution_types::Chain; + use reth_primitives::SealedBlockWithSenders; + use serde::{Deserialize, Serialize}; + use serde_with::serde_as; + + use super::super::{serde_bincode_compat, ExExNotification}; + + #[test] + fn test_exex_notification_bincode_roundtrip() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + #[serde_as(as = "serde_bincode_compat::ExExNotification")] + notification: ExExNotification, + } + + let mut bytes = [0u8; 1024]; + rand::thread_rng().fill(bytes.as_mut_slice()); + let data = Data { + notification: ExExNotification::ChainReorged { + old: Arc::new(Chain::new( + vec![SealedBlockWithSenders::arbitrary(&mut arbitrary::Unstructured::new( + &bytes, + )) + .unwrap()], + Default::default(), + None, + )), + new: Arc::new(Chain::new( + vec![SealedBlockWithSenders::arbitrary(&mut arbitrary::Unstructured::new( + &bytes, + )) + .unwrap()], + Default::default(), + None, + )), + }, + }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + } +} diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index 91e60c313f8e..0cfcf04539bd 100644 ---
a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -8,8 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use serde::{de::DeserializeOwned, Serialize}; use std::{ - fs::{self, File, ReadDir}, - io::{self, BufWriter, Write}, + fs::{self, File, OpenOptions, ReadDir}, + io::{self, BufWriter, Error, ErrorKind, Write}, path::{Path, PathBuf}, }; @@ -138,6 +138,14 @@ pub enum FsPathError { /// The path related to the operation. path: PathBuf, }, + /// Error variant for failed fsync operation with additional path context. + #[error("failed to sync path {path:?}: {source}")] + Fsync { + /// The source `io::Error`. + source: io::Error, + /// The path related to the operation. + path: PathBuf, + }, } impl FsPathError { @@ -195,6 +203,11 @@ impl FsPathError { pub fn metadata(source: io::Error, path: impl Into<PathBuf>) -> Self { Self::Metadata { source, path: path.into() } } + + /// Returns the complementary error variant for `fsync`. + pub fn fsync(source: io::Error, path: impl Into<PathBuf>) -> Self { + Self::Fsync { source, path: path.into() } + } } /// Wrapper for `std::fs::read_to_string` @@ -277,3 +290,61 @@ pub fn write_json_file<T: Serialize>(path: &Path, obj: &T) -> Result<()> { .map_err(|source| FsPathError::WriteJson { source, path: path.into() })?; writer.flush().map_err(|e| FsPathError::write(e, path)) } + +/// Atomically writes to a file. +/// +/// 1. Creates a temporary file with a `.tmp` extension in the same file directory. +/// 2. Writes content with `write_fn`. +/// 3. Fsyncs the temp file to disk. +/// 4. Renames the temp file to the target path. +/// 5. Fsyncs the file directory. +/// +/// Atomic writes are hard: +/// * +/// * +pub fn atomic_write_file<F, E>(file_path: &Path, write_fn: F) -> Result<()> +where + F: FnOnce(&mut File) -> std::result::Result<(), E>, + E: Into<Box<dyn std::error::Error + Send + Sync>>, +{ + let mut tmp_path = file_path.to_path_buf(); + tmp_path.set_extension("tmp"); + + // Write to the temporary file + let mut file = + File::create(&tmp_path).map_err(|err| FsPathError::create_file(err, &tmp_path))?; + + write_fn(&mut file).map_err(|err| FsPathError::Write { + source: Error::new(ErrorKind::Other, err.into()), + path: tmp_path.clone(), + })?; + + // fsync() file + file.sync_all().map_err(|err| FsPathError::fsync(err, &tmp_path))?; + + // Rename file, not move + rename(&tmp_path, file_path)?; + + // fsync() directory + if let Some(parent) = file_path.parent() { + #[cfg(windows)] + OpenOptions::new() + .read(true) + .write(true) + .custom_flags(0x02000000) // FILE_FLAG_BACKUP_SEMANTICS + .open(parent) + .map_err(|err| FsPathError::open(err, parent))? + .sync_all() + .map_err(|err| FsPathError::fsync(err, parent))?; + + #[cfg(not(windows))] + OpenOptions::new() + .read(true) + .open(parent) + .map_err(|err| FsPathError::open(err, parent))?
+ .sync_all() + .map_err(|err| FsPathError::fsync(err, parent))?; + } + + Ok(()) +} diff --git a/crates/metrics/Cargo.toml b/crates/metrics/Cargo.toml index 015f24d232f8..df3c7fa2161a 100644 --- a/crates/metrics/Cargo.toml +++ b/crates/metrics/Cargo.toml @@ -12,11 +12,9 @@ description = "reth metrics utilities" workspace = true [dependencies] -# reth -reth-metrics-derive.workspace = true - # metrics metrics.workspace = true +metrics-derive.workspace = true # async tokio = { workspace = true, features = ["full"], optional = true } diff --git a/crates/metrics/metrics-derive/Cargo.toml b/crates/metrics/metrics-derive/Cargo.toml deleted file mode 100644 index 509dec73057c..000000000000 --- a/crates/metrics/metrics-derive/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "reth-metrics-derive" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true - -[lints] -workspace = true - -[lib] -proc-macro = true - -[dependencies] -proc-macro2.workspace = true -syn = { workspace = true, features = ["extra-traits"] } -quote.workspace = true -regex = "1.6.0" - -[dev-dependencies] -metrics.workspace = true -serial_test.workspace = true -trybuild = "1.0" diff --git a/crates/metrics/metrics-derive/src/expand.rs b/crates/metrics/metrics-derive/src/expand.rs deleted file mode 100644 index 9f9148120e26..000000000000 --- a/crates/metrics/metrics-derive/src/expand.rs +++ /dev/null @@ -1,436 +0,0 @@ -use quote::{quote, ToTokens}; -use regex::Regex; -use std::sync::LazyLock; -use syn::{ - punctuated::Punctuated, Attribute, Data, DeriveInput, Error, Expr, Field, Lit, LitBool, LitStr, - Meta, MetaNameValue, Result, Token, -}; - -use crate::{metric::Metric, with_attrs::WithAttrs}; - -/// Metric name regex according to Prometheus data model -/// -/// See -static METRIC_NAME_RE: LazyLock = - LazyLock::new(|| Regex::new(r"^[a-zA-Z_:.][a-zA-Z0-9_:.]*$").unwrap()); - -/// Supported metrics separators -const SUPPORTED_SEPARATORS: &[&str] = &[".", "_", ":"]; - -enum MetricField<'a> { - Included(Metric<'a>), - Skipped(&'a Field), -} - -impl<'a> MetricField<'a> { - const fn field(&self) -> &'a Field { - match self { - MetricField::Included(Metric { field, .. }) | MetricField::Skipped(field) => field, - } - } -} - -pub(crate) fn derive(node: &DeriveInput) -> Result { - let ty = &node.ident; - let vis = &node.vis; - let ident_name = ty.to_string(); - - let metrics_attr = parse_metrics_attr(node)?; - let metric_fields = parse_metric_fields(node)?; - - let describe_doc = quote! { - /// Describe all exposed metrics. Internally calls `describe_*` macros from - /// the metrics crate according to the metric type. - /// - /// See - }; - let register_and_describe = match &metrics_attr.scope { - MetricsScope::Static(scope) => { - let (defaults, labeled_defaults, describes): (Vec<_>, Vec<_>, Vec<_>) = metric_fields - .iter() - .map(|metric| { - let field_name = &metric.field().ident; - match metric { - MetricField::Included(metric) => { - let metric_name = format!( - "{}{}{}", - scope.value(), - metrics_attr.separator(), - metric.name() - ); - let registrar = metric.register_stmt()?; - let describe = metric.describe_stmt()?; - let description = &metric.description; - Ok(( - quote! { - #field_name: #registrar(#metric_name), - }, - quote! { - #field_name: #registrar(#metric_name, labels.clone()), - }, - Some(quote! { - #describe(#metric_name, #description); - }), - )) - } - MetricField::Skipped(_) => Ok(( - quote! 
{ - #field_name: Default::default(), - }, - quote! { - #field_name: Default::default(), - }, - None, - )), - } - }) - .collect::>>()? - .into_iter() - .fold((vec![], vec![], vec![]), |mut acc, x| { - acc.0.push(x.0); - acc.1.push(x.1); - if let Some(describe) = x.2 { - acc.2.push(describe); - } - acc - }); - - quote! { - impl Default for #ty { - fn default() -> Self { - #ty::describe(); - - Self { - #(#defaults)* - } - } - } - - impl #ty { - /// Create new instance of metrics with provided labels. - #vis fn new_with_labels(labels: impl metrics::IntoLabels + Clone) -> Self { - Self { - #(#labeled_defaults)* - } - } - - #describe_doc - #vis fn describe() { - #(#describes)* - } - } - } - } - MetricsScope::Dynamic => { - let (defaults, labeled_defaults, describes): (Vec<_>, Vec<_>, Vec<_>) = metric_fields - .iter() - .map(|metric| { - let field_name = &metric.field().ident; - match metric { - MetricField::Included(metric) => { - let name = metric.name(); - let separator = metrics_attr.separator(); - let metric_name = quote! { - format!("{}{}{}", scope, #separator, #name) - }; - - let registrar = metric.register_stmt()?; - let describe = metric.describe_stmt()?; - let description = &metric.description; - - Ok(( - quote! { - #field_name: #registrar(#metric_name), - }, - quote! { - #field_name: #registrar(#metric_name, labels.clone()), - }, - Some(quote! { - #describe(#metric_name, #description); - }), - )) - } - MetricField::Skipped(_) => Ok(( - quote! { - #field_name: Default::default(), - }, - quote! { - #field_name: Default::default(), - }, - None, - )), - } - }) - .collect::>>()? - .into_iter() - .fold((vec![], vec![], vec![]), |mut acc, x| { - acc.0.push(x.0); - acc.1.push(x.1); - if let Some(describe) = x.2 { - acc.2.push(describe); - } - acc - }); - - quote! { - impl #ty { - /// Create new instance of metrics with provided scope. - #vis fn new(scope: &str) -> Self { - #ty::describe(scope); - - Self { - #(#defaults)* - } - } - - /// Create new instance of metrics with provided labels. - #vis fn new_with_labels(scope: &str, labels: impl metrics::IntoLabels + Clone) -> Self { - Self { - #(#labeled_defaults)* - } - } - - #describe_doc - #vis fn describe(scope: &str) { - #(#describes)* - } - } - } - } - }; - Ok(quote! 
{ - #register_and_describe - - impl std::fmt::Debug for #ty { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct(#ident_name).finish() - } - } - }) -} - -pub(crate) struct MetricsAttr { - pub(crate) scope: MetricsScope, - pub(crate) separator: Option, -} - -impl MetricsAttr { - const DEFAULT_SEPARATOR: &'static str = "."; - - fn separator(&self) -> String { - match &self.separator { - Some(sep) => sep.value(), - None => Self::DEFAULT_SEPARATOR.to_owned(), - } - } -} - -pub(crate) enum MetricsScope { - Static(LitStr), - Dynamic, -} - -fn parse_metrics_attr(node: &DeriveInput) -> Result { - let metrics_attr = parse_single_required_attr(node, "metrics")?; - let parsed = - metrics_attr.parse_args_with(Punctuated::::parse_terminated)?; - let (mut scope, mut separator, mut dynamic) = (None, None, None); - for kv in parsed { - let lit = match kv.value { - Expr::Lit(ref expr) => &expr.lit, - _ => continue, - }; - if kv.path.is_ident("scope") { - if scope.is_some() { - return Err(Error::new_spanned(kv, "Duplicate `scope` value provided.")) - } - let scope_lit = parse_str_lit(lit)?; - validate_metric_name(&scope_lit)?; - scope = Some(scope_lit); - } else if kv.path.is_ident("separator") { - if separator.is_some() { - return Err(Error::new_spanned(kv, "Duplicate `separator` value provided.")) - } - let separator_lit = parse_str_lit(lit)?; - if !SUPPORTED_SEPARATORS.contains(&&*separator_lit.value()) { - return Err(Error::new_spanned( - kv, - format!( - "Unsupported `separator` value. Supported: {}.", - SUPPORTED_SEPARATORS - .iter() - .map(|sep| format!("`{sep}`")) - .collect::>() - .join(", ") - ), - )) - } - separator = Some(separator_lit); - } else if kv.path.is_ident("dynamic") { - if dynamic.is_some() { - return Err(Error::new_spanned(kv, "Duplicate `dynamic` flag provided.")) - } - dynamic = Some(parse_bool_lit(lit)?.value); - } else { - return Err(Error::new_spanned(kv, "Unsupported attribute entry.")) - } - } - - let scope = match (scope, dynamic) { - (Some(scope), None | Some(false)) => MetricsScope::Static(scope), - (None, Some(true)) => MetricsScope::Dynamic, - (Some(_), Some(_)) => { - return Err(Error::new_spanned(node, "`scope = ..` conflicts with `dynamic = true`.")) - } - _ => { - return Err(Error::new_spanned( - node, - "Either `scope = ..` or `dynamic = true` must be set.", - )) - } - }; - - Ok(MetricsAttr { scope, separator }) -} - -fn parse_metric_fields(node: &DeriveInput) -> Result>> { - let Data::Struct(ref data) = node.data else { - return Err(Error::new_spanned(node, "Only structs are supported.")) - }; - - let mut metrics = Vec::with_capacity(data.fields.len()); - for field in &data.fields { - let (mut describe, mut rename, mut skip) = (None, None, false); - if let Some(metric_attr) = parse_single_attr(field, "metric")? 
{ - let parsed = - metric_attr.parse_args_with(Punctuated::::parse_terminated)?; - for meta in parsed { - match meta { - Meta::Path(path) if path.is_ident("skip") => skip = true, - Meta::NameValue(kv) => { - let lit = match kv.value { - Expr::Lit(ref expr) => &expr.lit, - _ => continue, - }; - if kv.path.is_ident("describe") { - if describe.is_some() { - return Err(Error::new_spanned( - kv, - "Duplicate `describe` value provided.", - )) - } - describe = Some(parse_str_lit(lit)?); - } else if kv.path.is_ident("rename") { - if rename.is_some() { - return Err(Error::new_spanned( - kv, - "Duplicate `rename` value provided.", - )) - } - let rename_lit = parse_str_lit(lit)?; - validate_metric_name(&rename_lit)?; - rename = Some(rename_lit) - } else { - return Err(Error::new_spanned(kv, "Unsupported attribute entry.")) - } - } - _ => return Err(Error::new_spanned(meta, "Unsupported attribute entry.")), - } - } - } - - if skip { - metrics.push(MetricField::Skipped(field)); - continue - } - - let description = match describe { - Some(lit_str) => lit_str.value(), - // Parse docs only if `describe` attribute was not provided - None => match parse_docs_to_string(field)? { - Some(docs_str) => docs_str, - None => { - return Err(Error::new_spanned( - field, - "Either doc comment or `describe = ..` must be set.", - )) - } - }, - }; - - metrics.push(MetricField::Included(Metric::new(field, description, rename))); - } - - Ok(metrics) -} - -fn validate_metric_name(name: &LitStr) -> Result<()> { - if METRIC_NAME_RE.is_match(&name.value()) { - Ok(()) - } else { - Err(Error::new_spanned(name, format!("Value must match regex {}", METRIC_NAME_RE.as_str()))) - } -} - -fn parse_single_attr<'a, T: WithAttrs + ToTokens>( - token: &'a T, - ident: &str, -) -> Result> { - let mut attr_iter = token.attrs().iter().filter(|a| a.path().is_ident(ident)); - if let Some(attr) = attr_iter.next() { - if let Some(next_attr) = attr_iter.next() { - Err(Error::new_spanned( - next_attr, - format!("Duplicate `#[{ident}(..)]` attribute provided."), - )) - } else { - Ok(Some(attr)) - } - } else { - Ok(None) - } -} - -fn parse_single_required_attr<'a, T: WithAttrs + ToTokens>( - token: &'a T, - ident: &str, -) -> Result<&'a Attribute> { - if let Some(attr) = parse_single_attr(token, ident)? { - Ok(attr) - } else { - Err(Error::new_spanned(token, format!("`#[{ident}(..)]` attribute must be provided."))) - } -} - -fn parse_docs_to_string(token: &T) -> Result> { - let mut doc_str = None; - for attr in token.attrs() { - if let syn::Meta::NameValue(ref meta) = attr.meta { - if let Expr::Lit(ref lit) = meta.value { - if let Lit::Str(ref doc) = lit.lit { - let doc_value = doc.value().trim().to_string(); - doc_str = Some( - doc_str - .map(|prev_doc_value| format!("{prev_doc_value} {doc_value}")) - .unwrap_or(doc_value), - ); - } - } - } - } - Ok(doc_str) -} - -fn parse_str_lit(lit: &Lit) -> Result { - match lit { - Lit::Str(lit_str) => Ok(lit_str.to_owned()), - _ => Err(Error::new_spanned(lit, "Value **must** be a string literal.")), - } -} - -fn parse_bool_lit(lit: &Lit) -> Result { - match lit { - Lit::Bool(lit_bool) => Ok(lit_bool.to_owned()), - _ => Err(Error::new_spanned(lit, "Value **must** be a string literal.")), - } -} diff --git a/crates/metrics/metrics-derive/src/lib.rs b/crates/metrics/metrics-derive/src/lib.rs deleted file mode 100644 index 48b1099f476e..000000000000 --- a/crates/metrics/metrics-derive/src/lib.rs +++ /dev/null @@ -1,139 +0,0 @@ -//! 
This crate provides [Metrics] derive macro - -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use proc_macro::TokenStream; -use syn::{parse_macro_input, DeriveInput}; - -mod expand; -mod metric; -mod with_attrs; - -/// The [Metrics] derive macro instruments all of the struct fields and -/// creates a [Default] implementation for the struct registering all of -/// the metrics. -/// -/// Additionally, it creates a `describe` method on the struct, which -/// internally calls the describe statements for all metric fields. -/// -/// Sample usage: -/// ``` -/// use metrics::{Counter, Gauge, Histogram}; -/// use reth_metrics_derive::Metrics; -/// -/// #[derive(Metrics)] -/// #[metrics(scope = "metrics_custom")] -/// pub struct CustomMetrics { -/// /// A gauge with doc comment description. -/// gauge: Gauge, -/// #[metric(rename = "second_gauge", describe = "A gauge with metric attribute description.")] -/// gauge2: Gauge, -/// /// Some doc comment -/// #[metric(describe = "Metric attribute description will be preferred over doc comment.")] -/// counter: Counter, -/// /// A renamed histogram. -/// #[metric(rename = "histogram")] -/// histo: Histogram, -/// } -/// ``` -/// -/// The example above will be expanded to: -/// ``` -/// pub struct CustomMetrics { -/// /// A gauge with doc comment description. -/// gauge: metrics::Gauge, -/// gauge2: metrics::Gauge, -/// /// Some doc comment -/// counter: metrics::Counter, -/// /// A renamed histogram. -/// histo: metrics::Histogram, -/// } -/// -/// impl Default for CustomMetrics { -/// fn default() -> Self { -/// Self { -/// gauge: metrics::gauge!("metrics_custom_gauge"), -/// gauge2: metrics::gauge!("metrics_custom_second_gauge"), -/// counter: metrics::counter!("metrics_custom_counter"), -/// histo: metrics::histogram!("metrics_custom_histogram"), -/// } -/// } -/// } -/// -/// impl CustomMetrics { -/// /// Describe all exposed metrics -/// pub fn describe() { -/// metrics::describe_gauge!( -/// "metrics_custom_gauge", -/// "A gauge with doc comment description." -/// ); -/// metrics::describe_gauge!( -/// "metrics_custom_second_gauge", -/// "A gauge with metric attribute description." -/// ); -/// metrics::describe_counter!( -/// "metrics_custom_counter", -/// "Metric attribute description will be preferred over doc comment." -/// ); -/// metrics::describe_histogram!("metrics_custom_histogram", "A renamed histogram."); -/// } -/// } -/// -/// impl std::fmt::Debug for CustomMetrics { -/// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -/// f.debug_struct("CustomMetrics").finish() -/// } -/// } -/// ``` -/// -/// Similarly, you can derive metrics with "dynamic" scope, -/// meaning their scope can be set at the time of instantiation. -/// For example: -/// ``` -/// use reth_metrics_derive::Metrics; -/// -/// #[derive(Metrics)] -/// #[metrics(dynamic = true)] -/// pub struct DynamicScopeMetrics { -/// /// A gauge with doc comment description. -/// gauge: metrics::Gauge, -/// } -/// ``` -/// -/// The example with dynamic scope will expand to -/// ``` -/// pub struct DynamicScopeMetrics { -/// /// A gauge with doc comment description. 
-/// gauge: metrics::Gauge, -/// } -/// -/// impl DynamicScopeMetrics { -/// pub fn new(scope: &str) -> Self { -/// Self { gauge: metrics::gauge!(format!("{}{}{}", scope, "_", "gauge")) } -/// } -/// -/// pub fn describe(scope: &str) { -/// metrics::describe_gauge!( -/// format!("{}{}{}", scope, "_", "gauge"), -/// "A gauge with doc comment description." -/// ); -/// } -/// } -/// -/// impl std::fmt::Debug for DynamicScopeMetrics { -/// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -/// f.debug_struct("DynamicScopeMetrics").finish() -/// } -/// } -/// ``` -#[proc_macro_derive(Metrics, attributes(metrics, metric))] -pub fn derive_metrics(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - expand::derive(&input).unwrap_or_else(|err| err.to_compile_error()).into() -} diff --git a/crates/metrics/metrics-derive/src/metric.rs b/crates/metrics/metrics-derive/src/metric.rs deleted file mode 100644 index e8dfb24847a8..000000000000 --- a/crates/metrics/metrics-derive/src/metric.rs +++ /dev/null @@ -1,59 +0,0 @@ -use quote::quote; -use syn::{Error, Field, LitStr, Result, Type}; - -const COUNTER_TY: &str = "Counter"; -const HISTOGRAM_TY: &str = "Histogram"; -const GAUGE_TY: &str = "Gauge"; - -pub(crate) struct Metric<'a> { - pub(crate) field: &'a Field, - pub(crate) description: String, - rename: Option, -} - -impl<'a> Metric<'a> { - pub(crate) const fn new(field: &'a Field, description: String, rename: Option) -> Self { - Self { field, description, rename } - } - - pub(crate) fn name(&self) -> String { - match self.rename.as_ref() { - Some(name) => name.value(), - None => self.field.ident.as_ref().map(ToString::to_string).unwrap_or_default(), - } - } - - pub(crate) fn register_stmt(&self) -> Result { - if let Type::Path(ref path_ty) = self.field.ty { - if let Some(last) = path_ty.path.segments.last() { - let registrar = match last.ident.to_string().as_str() { - COUNTER_TY => quote! { metrics::counter! }, - HISTOGRAM_TY => quote! { metrics::histogram! }, - GAUGE_TY => quote! { metrics::gauge! }, - _ => return Err(Error::new_spanned(path_ty, "Unsupported metric type")), - }; - - return Ok(quote! { #registrar }) - } - } - - Err(Error::new_spanned(&self.field.ty, "Unsupported metric type")) - } - - pub(crate) fn describe_stmt(&self) -> Result { - if let Type::Path(ref path_ty) = self.field.ty { - if let Some(last) = path_ty.path.segments.last() { - let descriptor = match last.ident.to_string().as_str() { - COUNTER_TY => quote! { metrics::describe_counter! }, - HISTOGRAM_TY => quote! { metrics::describe_histogram! }, - GAUGE_TY => quote! { metrics::describe_gauge! }, - _ => return Err(Error::new_spanned(path_ty, "Unsupported metric type")), - }; - - return Ok(quote! 
{ #descriptor }) - } - } - - Err(Error::new_spanned(&self.field.ty, "Unsupported metric type")) - } -} diff --git a/crates/metrics/metrics-derive/src/with_attrs.rs b/crates/metrics/metrics-derive/src/with_attrs.rs deleted file mode 100644 index 9095d99609f2..000000000000 --- a/crates/metrics/metrics-derive/src/with_attrs.rs +++ /dev/null @@ -1,17 +0,0 @@ -use syn::{Attribute, DeriveInput, Field}; - -pub(crate) trait WithAttrs { - fn attrs(&self) -> &[Attribute]; -} - -impl WithAttrs for DeriveInput { - fn attrs(&self) -> &[Attribute] { - &self.attrs - } -} - -impl WithAttrs for Field { - fn attrs(&self) -> &[Attribute] { - &self.attrs - } -} diff --git a/crates/metrics/metrics-derive/tests/compile-fail/metric_attr.rs b/crates/metrics/metrics-derive/tests/compile-fail/metric_attr.rs deleted file mode 100644 index 8a8b277baf0d..000000000000 --- a/crates/metrics/metrics-derive/tests/compile-fail/metric_attr.rs +++ /dev/null @@ -1,62 +0,0 @@ -extern crate metrics; -extern crate reth_metrics_derive; - -use metrics::Gauge; -use reth_metrics_derive::Metrics; - -fn main() {} - -#[derive(Metrics)] -#[metrics(scope = "some_scope")] -struct CustomMetrics { - gauge: Gauge, -} - -#[derive(Metrics)] -#[metrics(scope = "some_scope")] -struct CustomMetrics2 { - #[metric()] - gauge: Gauge, -} - -#[derive(Metrics)] -#[metrics(scope = "some_scope")] -struct CustomMetrics3 { - #[metric(random = "value")] - gauge: Gauge, -} - -#[derive(Metrics)] -#[metrics(scope = "some_scope")] -struct CustomMetrics4 { - #[metric(describe = 123)] - gauge: Gauge, -} - -#[derive(Metrics)] -#[metrics(scope = "some_scope")] -struct CustomMetrics5 { - #[metric(rename = 123)] - gauge: Gauge, -} - -#[derive(Metrics)] -#[metrics(scope = "some_scope")] -struct CustomMetrics6 { - #[metric(describe = "", describe = "")] - gauge: Gauge, -} - -#[derive(Metrics)] -#[metrics(scope = "some_scope")] -struct CustomMetrics7 { - #[metric(rename = "_gauge", rename = "_gauge")] - gauge: Gauge, -} - -#[derive(Metrics)] -#[metrics(scope = "some_scope")] -struct CustomMetrics8 { - #[metric(describe = "")] - gauge: String, -} diff --git a/crates/metrics/metrics-derive/tests/compile-fail/metric_attr.stderr b/crates/metrics/metrics-derive/tests/compile-fail/metric_attr.stderr deleted file mode 100644 index 96659e49f222..000000000000 --- a/crates/metrics/metrics-derive/tests/compile-fail/metric_attr.stderr +++ /dev/null @@ -1,48 +0,0 @@ -error: Either doc comment or `describe = ..` must be set. - --> tests/compile-fail/metric_attr.rs:12:5 - | -12 | gauge: Gauge, - | ^^^^^^^^^^^^ - -error: Either doc comment or `describe = ..` must be set. - --> tests/compile-fail/metric_attr.rs:18:5 - | -18 | / #[metric()] -19 | | gauge: Gauge, - | |________________^ - -error: Unsupported attribute entry. - --> tests/compile-fail/metric_attr.rs:25:14 - | -25 | #[metric(random = "value")] - | ^^^^^^^^^^^^^^^^ - -error: Value **must** be a string literal. - --> tests/compile-fail/metric_attr.rs:32:25 - | -32 | #[metric(describe = 123)] - | ^^^ - -error: Value **must** be a string literal. - --> tests/compile-fail/metric_attr.rs:39:23 - | -39 | #[metric(rename = 123)] - | ^^^ - -error: Duplicate `describe` value provided. - --> tests/compile-fail/metric_attr.rs:46:29 - | -46 | #[metric(describe = "", describe = "")] - | ^^^^^^^^^^^^^ - -error: Duplicate `rename` value provided. 
- --> tests/compile-fail/metric_attr.rs:53:33 - | -53 | #[metric(rename = "_gauge", rename = "_gauge")] - | ^^^^^^^^^^^^^^^^^ - -error: Unsupported metric type - --> tests/compile-fail/metric_attr.rs:61:12 - | -61 | gauge: String, - | ^^^^^^ diff --git a/crates/metrics/metrics-derive/tests/compile-fail/metrics_attr.rs b/crates/metrics/metrics-derive/tests/compile-fail/metrics_attr.rs deleted file mode 100644 index 6c8d3f129b10..000000000000 --- a/crates/metrics/metrics-derive/tests/compile-fail/metrics_attr.rs +++ /dev/null @@ -1,56 +0,0 @@ -extern crate reth_metrics_derive; -use reth_metrics_derive::Metrics; - -fn main() {} - -#[derive(Metrics)] -struct CustomMetrics; - -#[derive(Metrics)] -#[metrics()] -#[metrics()] -struct CustomMetrics2; - -#[derive(Metrics)] -#[metrics()] -struct CustomMetrics3; - -#[derive(Metrics)] -#[metrics(scope = value)] -struct CustomMetrics4; - -#[derive(Metrics)] -#[metrics(scope = 123)] -struct CustomMetrics5; - -#[derive(Metrics)] -#[metrics(scope = "some-scope")] -struct CustomMetrics6; - -#[derive(Metrics)] -#[metrics(scope = "some_scope", scope = "another_scope")] -struct CustomMetrics7; - -#[derive(Metrics)] -#[metrics(separator = value)] -struct CustomMetrics8; - -#[derive(Metrics)] -#[metrics(separator = 123)] -struct CustomMetrics9; - -#[derive(Metrics)] -#[metrics(separator = "x")] -struct CustomMetrics10; - -#[derive(Metrics)] -#[metrics(separator = "_", separator = ":")] -struct CustomMetrics11; - -#[derive(Metrics)] -#[metrics(random = "value")] -struct CustomMetrics12; - -#[derive(Metrics)] -#[metrics(scope = "scope", dynamic = true)] -struct CustomMetrics13; diff --git a/crates/metrics/metrics-derive/tests/compile-fail/metrics_attr.stderr b/crates/metrics/metrics-derive/tests/compile-fail/metrics_attr.stderr deleted file mode 100644 index 5121258d5cb7..000000000000 --- a/crates/metrics/metrics-derive/tests/compile-fail/metrics_attr.stderr +++ /dev/null @@ -1,81 +0,0 @@ -error: `#[metrics(..)]` attribute must be provided. - --> tests/compile-fail/metrics_attr.rs:7:1 - | -7 | struct CustomMetrics; - | ^^^^^^^^^^^^^^^^^^^^^ - -error: Duplicate `#[metrics(..)]` attribute provided. - --> tests/compile-fail/metrics_attr.rs:11:1 - | -11 | #[metrics()] - | ^^^^^^^^^^^^ - -error: Either `scope = ..` or `dynamic = true` must be set. - --> tests/compile-fail/metrics_attr.rs:15:1 - | -15 | / #[metrics()] -16 | | struct CustomMetrics3; - | |______________________^ - -error: Either `scope = ..` or `dynamic = true` must be set. - --> tests/compile-fail/metrics_attr.rs:19:1 - | -19 | / #[metrics(scope = value)] -20 | | struct CustomMetrics4; - | |______________________^ - -error: Value **must** be a string literal. - --> tests/compile-fail/metrics_attr.rs:23:19 - | -23 | #[metrics(scope = 123)] - | ^^^ - -error: Value must match regex ^[a-zA-Z_:.][a-zA-Z0-9_:.]*$ - --> tests/compile-fail/metrics_attr.rs:27:19 - | -27 | #[metrics(scope = "some-scope")] - | ^^^^^^^^^^^^ - -error: Duplicate `scope` value provided. - --> tests/compile-fail/metrics_attr.rs:31:33 - | -31 | #[metrics(scope = "some_scope", scope = "another_scope")] - | ^^^^^^^^^^^^^^^^^^^^^^^ - -error: Either `scope = ..` or `dynamic = true` must be set. - --> tests/compile-fail/metrics_attr.rs:35:1 - | -35 | / #[metrics(separator = value)] -36 | | struct CustomMetrics8; - | |______________________^ - -error: Value **must** be a string literal. - --> tests/compile-fail/metrics_attr.rs:39:23 - | -39 | #[metrics(separator = 123)] - | ^^^ - -error: Unsupported `separator` value. Supported: `.`, `_`, `:`. 
- --> tests/compile-fail/metrics_attr.rs:43:11 - | -43 | #[metrics(separator = "x")] - | ^^^^^^^^^^^^^^^ - -error: Duplicate `separator` value provided. - --> tests/compile-fail/metrics_attr.rs:47:28 - | -47 | #[metrics(separator = "_", separator = ":")] - | ^^^^^^^^^^^^^^^ - -error: Unsupported attribute entry. - --> tests/compile-fail/metrics_attr.rs:51:11 - | -51 | #[metrics(random = "value")] - | ^^^^^^^^^^^^^^^^ - -error: `scope = ..` conflicts with `dynamic = true`. - --> tests/compile-fail/metrics_attr.rs:55:1 - | -55 | / #[metrics(scope = "scope", dynamic = true)] -56 | | struct CustomMetrics13; - | |_______________________^ diff --git a/crates/metrics/metrics-derive/tests/metrics.rs b/crates/metrics/metrics-derive/tests/metrics.rs deleted file mode 100644 index a07ccc8a7ce1..000000000000 --- a/crates/metrics/metrics-derive/tests/metrics.rs +++ /dev/null @@ -1,351 +0,0 @@ -#![allow(missing_docs)] -use metrics::{ - Counter, Gauge, Histogram, Key, KeyName, Label, Metadata, Recorder, SharedString, Unit, -}; -use reth_metrics_derive::Metrics; -use serial_test::serial; -use std::{ - collections::HashMap, - sync::{LazyLock, Mutex}, -}; - -#[allow(dead_code)] -#[derive(Metrics)] -#[metrics(scope = "metrics_custom")] -struct CustomMetrics { - #[metric(skip)] - skipped_field_a: u8, - /// A gauge with doc comment description. - gauge: Gauge, - #[metric(rename = "second_gauge", describe = "A gauge with metric attribute description.")] - gauge2: Gauge, - #[metric(skip)] - skipped_field_b: u16, - /// Some doc comment - #[metric(describe = "Metric attribute description will be preferred over doc comment.")] - counter: Counter, - #[metric(skip)] - skipped_field_c: u32, - #[metric(skip)] - skipped_field_d: u64, - /// A renamed histogram. - #[metric(rename = "histogram")] - histo: Histogram, - #[metric(skip)] - skipped_field_e: u128, -} - -#[allow(dead_code)] -#[derive(Metrics)] -#[metrics(dynamic = true)] -struct DynamicScopeMetrics { - #[metric(skip)] - skipped_field_a: u8, - /// A gauge with doc comment description. - gauge: Gauge, - #[metric(rename = "second_gauge", describe = "A gauge with metric attribute description.")] - gauge2: Gauge, - #[metric(skip)] - skipped_field_b: u16, - /// Some doc comment - #[metric(describe = "Metric attribute description will be preferred over doc comment.")] - counter: Counter, - #[metric(skip)] - skipped_field_c: u32, - #[metric(skip)] - skipped_field_d: u64, - /// A renamed histogram. 
- #[metric(rename = "histogram")] - histo: Histogram, - #[metric(skip)] - skipped_field_e: u128, -} - -static RECORDER: LazyLock = LazyLock::new(TestRecorder::new); - -fn test_describe(scope: &str) { - assert_eq!(RECORDER.metrics_len(), 4); - - let gauge = RECORDER.get_metric(&format!("{scope}.gauge")); - assert!(gauge.is_some()); - assert_eq!( - gauge.unwrap(), - TestMetric { - ty: TestMetricTy::Gauge, - description: Some("A gauge with doc comment description.".to_owned()), - labels: None, - } - ); - - let second_gauge = RECORDER.get_metric(&format!("{scope}.second_gauge")); - assert!(second_gauge.is_some()); - assert_eq!( - second_gauge.unwrap(), - TestMetric { - ty: TestMetricTy::Gauge, - description: Some("A gauge with metric attribute description.".to_owned()), - labels: None, - } - ); - - let counter = RECORDER.get_metric(&format!("{scope}.counter")); - assert!(counter.is_some()); - assert_eq!( - counter.unwrap(), - TestMetric { - ty: TestMetricTy::Counter, - description: Some( - "Metric attribute description will be preferred over doc comment.".to_owned() - ), - labels: None, - } - ); - - let histogram = RECORDER.get_metric(&format!("{scope}.histogram")); - assert!(histogram.is_some()); - assert_eq!( - histogram.unwrap(), - TestMetric { - ty: TestMetricTy::Histogram, - description: Some("A renamed histogram.".to_owned()), - labels: None, - } - ); -} - -#[test] -#[serial] -fn describe_metrics() { - let _guard = RECORDER.enter(); - - CustomMetrics::describe(); - - test_describe("metrics_custom"); -} - -#[test] -#[serial] -fn describe_dynamic_metrics() { - let _guard = RECORDER.enter(); - - let scope = "local_scope"; - - DynamicScopeMetrics::describe(scope); - - test_describe(scope); -} - -fn test_register(scope: &str) { - assert_eq!(RECORDER.metrics_len(), 4); - - let gauge = RECORDER.get_metric(&format!("{scope}.gauge")); - assert!(gauge.is_some()); - assert_eq!( - gauge.unwrap(), - TestMetric { ty: TestMetricTy::Gauge, description: None, labels: None } - ); - - let second_gauge = RECORDER.get_metric(&format!("{scope}.second_gauge")); - assert!(second_gauge.is_some()); - assert_eq!( - second_gauge.unwrap(), - TestMetric { ty: TestMetricTy::Gauge, description: None, labels: None } - ); - - let counter = RECORDER.get_metric(&format!("{scope}.counter")); - assert!(counter.is_some()); - assert_eq!( - counter.unwrap(), - TestMetric { ty: TestMetricTy::Counter, description: None, labels: None } - ); - - let histogram = RECORDER.get_metric(&format!("{scope}.histogram")); - assert!(histogram.is_some()); - assert_eq!( - histogram.unwrap(), - TestMetric { ty: TestMetricTy::Histogram, description: None, labels: None } - ); -} - -#[test] -#[serial] -fn register_metrics() { - let _guard = RECORDER.enter(); - - let _metrics = CustomMetrics::default(); - - test_register("metrics_custom"); -} - -#[test] -#[serial] -fn register_dynamic_metrics() { - let _guard = RECORDER.enter(); - - let scope = "local_scope"; - - let _metrics = DynamicScopeMetrics::new(scope); - - test_register(scope); -} - -fn test_labels(scope: &str) { - let test_labels = vec![Label::new("key", "value")]; - - let gauge = RECORDER.get_metric(&format!("{scope}.gauge")); - assert!(gauge.is_some()); - let labels = gauge.unwrap().labels; - assert!(labels.is_some()); - assert_eq!(labels.unwrap(), test_labels,); - - let second_gauge = RECORDER.get_metric(&format!("{scope}.second_gauge")); - assert!(second_gauge.is_some()); - let labels = second_gauge.unwrap().labels; - assert!(labels.is_some()); - assert_eq!(labels.unwrap(), 
test_labels,); - - let counter = RECORDER.get_metric(&format!("{scope}.counter")); - assert!(counter.is_some()); - let labels = counter.unwrap().labels; - assert!(labels.is_some()); - assert_eq!(labels.unwrap(), test_labels,); - - let histogram = RECORDER.get_metric(&format!("{scope}.histogram")); - assert!(histogram.is_some()); - let labels = histogram.unwrap().labels; - assert!(labels.is_some()); - assert_eq!(labels.unwrap(), test_labels,); -} - -#[test] -#[serial] -fn label_metrics() { - let _guard = RECORDER.enter(); - - let _metrics = CustomMetrics::new_with_labels(&[("key", "value")]); - - test_labels("metrics_custom"); -} - -#[test] -#[serial] -fn dynamic_label_metrics() { - let _guard = RECORDER.enter(); - - let scope = "local_scope"; - - let _metrics = DynamicScopeMetrics::new_with_labels(scope, &[("key", "value")]); - - test_labels(scope); -} - -struct TestRecorder { - // Metrics map: key => Option - metrics: Mutex>, -} - -#[derive(PartialEq, Clone, Debug)] -enum TestMetricTy { - Counter, - Gauge, - Histogram, -} - -#[derive(PartialEq, Clone, Debug)] -struct TestMetric { - ty: TestMetricTy, - description: Option, - labels: Option>, -} - -impl TestRecorder { - fn new() -> Self { - Self { metrics: Mutex::new(HashMap::default()) } - } - - /// Sets this recorder as the global recorder for the duration of the returned guard. - #[must_use] - fn enter(&'static self) -> impl Drop { - struct Reset { - recorder: &'static TestRecorder, - } - impl Drop for Reset { - fn drop(&mut self) { - self.recorder.clear(); - } - } - - let _ = metrics::set_global_recorder(self); - Reset { recorder: self } - } - - fn metrics_len(&self) -> usize { - self.metrics.lock().expect("failed to lock metrics").len() - } - - fn get_metric(&self, key: &str) -> Option { - self.metrics.lock().expect("failed to lock metrics").get(key).cloned() - } - - fn record_metric( - &self, - key: &str, - ty: TestMetricTy, - description: Option, - labels: Option>, - ) { - self.metrics - .lock() - .expect("failed to lock metrics") - .insert(key.to_owned(), TestMetric { ty, description, labels }); - } - - fn clear(&self) { - self.metrics.lock().expect("failed to lock metrics").clear(); - } -} - -impl Recorder for &'static TestRecorder { - fn describe_counter(&self, key: KeyName, _unit: Option, description: SharedString) { - self.record_metric( - key.as_str(), - TestMetricTy::Counter, - Some(description.into_owned()), - None, - ) - } - - fn describe_gauge(&self, key: KeyName, _unit: Option, description: SharedString) { - self.record_metric(key.as_str(), TestMetricTy::Gauge, Some(description.into_owned()), None) - } - - fn describe_histogram(&self, key: KeyName, _unit: Option, description: SharedString) { - self.record_metric( - key.as_str(), - TestMetricTy::Histogram, - Some(description.into_owned()), - None, - ) - } - - fn register_counter(&self, key: &Key, _metadata: &Metadata<'_>) -> Counter { - let labels_vec: Vec

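Before the node changes below, a note on the metrics hunks above: the in-tree `reth-metrics-derive` proc-macro crate is deleted in favor of the published `metrics-derive` workspace dependency, so derive call sites only swap their import. A minimal sketch, assuming the external crate exposes the same `Metrics` derive and attribute grammar documented in the removed sources:

    use metrics::Gauge;
    use metrics_derive::Metrics; // previously: use reth_metrics_derive::Metrics;

    #[derive(Metrics)]
    #[metrics(scope = "example_scope")]
    struct ExampleMetrics {
        /// Height of the last processed block.
        height: Gauge,
    }

Per the removed expansion code, constructing this via the generated `Default` impl registers and describes `example_scope.height` with the global recorder (the default separator is `.`).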
, @@ -292,7 +287,7 @@ impl OptimismPayloadBuilder { impl<Node, Pool> PayloadServiceBuilder<Node, Pool> for OptimismPayloadBuilder where Node: FullNodeTypes< - Types: NodeTypesWithEngine<Engine = OptimismEngineTypes, ChainSpec = ChainSpec>, + Types: NodeTypesWithEngine<Engine = OptimismEngineTypes, ChainSpec = OpChainSpec>, >, Pool: TransactionPool + Unpin + 'static, { @@ -301,11 +296,7 @@ where ctx: &BuilderContext<Node>, pool: Pool, ) -> eyre::Result<PayloadBuilderHandle<OptimismEngineTypes>> { - self.spawn( - OptimismEvmConfig::new(Arc::new(OpChainSpec { inner: (*ctx.chain_spec()).clone() })), - ctx, - pool, - ) + self.spawn(OptimismEvmConfig::new(ctx.chain_spec()), ctx, pool) } } @@ -320,7 +311,7 @@ pub struct OptimismNetworkBuilder { impl<Node, Pool> NetworkBuilder<Node, Pool> for OptimismNetworkBuilder where - Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec>>, + Node: FullNodeTypes<Types: NodeTypes<ChainSpec = OpChainSpec>>, Pool: TransactionPool + Unpin + 'static, { async fn build_network( @@ -377,7 +368,7 @@ pub struct OptimismConsensusBuilder; impl<Node> ConsensusBuilder<Node> for OptimismConsensusBuilder where - Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec>>, + Node: FullNodeTypes<Types: NodeTypes<ChainSpec = OpChainSpec>>, { type Consensus = Arc<dyn reth_consensus::Consensus>; @@ -397,7 +388,7 @@ pub struct OptimismEngineValidatorBuilder; impl<Node, Types> EngineValidatorBuilder<Node> for OptimismEngineValidatorBuilder where - Types: NodeTypesWithEngine<Engine = OptimismEngineTypes, ChainSpec = ChainSpec>, + Types: NodeTypesWithEngine<Engine = OptimismEngineTypes, ChainSpec = OpChainSpec>, Node: FullNodeTypes<Types = Types>, OptimismEngineValidator: EngineValidator<Types>, { diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 79d8c0314deb..811c37e91cb5 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -1,7 +1,8 @@ //! OP transaction pool types +use alloy_eips::eip2718::Encodable2718; use parking_lot::RwLock; use reth_chainspec::ChainSpec; -use reth_evm_optimism::RethL1BlockInfo; +use reth_optimism_evm::RethL1BlockInfo; use reth_primitives::{Block, GotExpected, InvalidTransactionError, SealedBlock}; use reth_provider::{BlockReaderIdExt, StateProviderFactory}; use reth_revm::L1BlockInfo; @@ -98,7 +99,7 @@ where /// Update the L1 block info.
fn update_l1_block_info(&self, block: &Block) { self.block_info.timestamp.store(block.timestamp, Ordering::Relaxed); - if let Ok(cost_addition) = reth_evm_optimism::extract_l1_info(block) { + if let Ok(cost_addition) = reth_optimism_evm::extract_l1_info(block) { *self.block_info.l1_block_info.write() = cost_addition; } } @@ -139,7 +140,7 @@ where let l1_block_info = self.block_info.l1_block_info.read().clone(); let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); - valid_tx.transaction().clone().into_consensus().encode_enveloped(&mut encoded); + valid_tx.transaction().clone().into_consensus().encode_2718(&mut encoded); let cost_addition = match l1_block_info.l1_tx_data_fee( &self.chain_spec(), @@ -229,18 +230,17 @@ pub struct OpL1BlockInfo { #[cfg(test)] mod tests { use crate::txpool::OpTransactionValidator; + use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{TxKind, U256}; + use op_alloy_consensus::TxDeposit; use reth::primitives::Signature; use reth_chainspec::MAINNET; - use reth_primitives::{ - Transaction, TransactionSigned, TransactionSignedEcRecovered, TxDeposit, - }; + use reth_primitives::{Transaction, TransactionSigned, TransactionSignedEcRecovered}; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, EthPooledTransaction, TransactionOrigin, TransactionValidationOutcome, }; - #[test] fn validate_optimism_transaction() { let client = MockEthProvider::default(); @@ -266,7 +266,7 @@ mod tests { let signed_tx = TransactionSigned::from_transaction_and_signature(deposit_tx, signature); let signed_recovered = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, signer); - let len = signed_recovered.length_without_header(); + let len = signed_recovered.encode_2718_len(); let pooled_tx = EthPooledTransaction::new(signed_recovered, len); let outcome = validator.validate_one(origin, pooled_tx); diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index e4993e8f3138..ebd35cc8a5c8 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,6 +1,6 @@ use crate::utils::{advance_chain, setup}; +use alloy_rpc_types_engine::PayloadStatusEnum; use reth::blockchain_tree::error::BlockchainTreeError; -use reth_rpc_types::engine::PayloadStatusEnum; use std::sync::Arc; use tokio::sync::Mutex; diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 6b8e07a42e38..1e9ffa652f1c 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -5,10 +5,10 @@ use alloy_primitives::{Address, B256}; use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; use reth_chainspec::ChainSpecBuilder; use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; -use reth_node_optimism::{ +use reth_optimism_chainspec::{OpChainSpec, BASE_MAINNET}; +use reth_optimism_node::{ node::OptimismAddOns, OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes, }; -use reth_optimism_chainspec::BASE_MAINNET; use reth_payload_builder::EthPayloadBuilderAttributes; use tokio::sync::Mutex; @@ -19,13 +19,13 @@ pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskMa let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); reth_e2e_test_utils::setup( num_nodes, - Arc::new( + Arc::new(OpChainSpec::new( 
ChainSpecBuilder::default() .chain(BASE_MAINNET.chain) .genesis(genesis) .ecotone_activated() .build(), - ), + )), false, ) .await diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index cc9c772c027b..20363828e861 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -3,12 +3,13 @@ use reth_db::test_utils::create_test_rw_db; use reth_node_api::FullNodeComponents; use reth_node_builder::{NodeBuilder, NodeConfig}; -use reth_node_optimism::node::{OptimismAddOns, OptimismNode}; +use reth_optimism_chainspec::BASE_MAINNET; +use reth_optimism_node::{node::OptimismAddOns, OptimismNode}; #[test] fn test_basic_setup() { // parse CLI -> config - let config = NodeConfig::test(); + let config = NodeConfig::new(BASE_MAINNET.clone()); let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 8fba06f228b0..117f63201a48 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -18,7 +18,6 @@ reth-primitives.workspace = true reth-revm.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true -reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-evm.workspace = true reth-execution-types.workspace = true @@ -29,15 +28,19 @@ reth-trie.workspace = true reth-chain-state.workspace = true # op-reth -reth-evm-optimism.workspace = true +reth-optimism-chainspec.workspace = true +reth-optimism-consensus.workspace = true +reth-optimism-evm.workspace = true reth-optimism-forks.workspace = true # ethereum revm.workspace = true +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true revm-primitives.workspace = true +alloy-rpc-types-engine.workspace = true # misc tracing.workspace = true @@ -50,6 +53,6 @@ optimism = [ "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", - "reth-evm-optimism/optimism", + "reth-optimism-evm/optimism", "reth-revm/optimism", ] diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 5e443a4f5a08..5e9c1b5d18be 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -5,19 +5,18 @@ use std::sync::Arc; use alloy_primitives::U256; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; -use reth_chainspec::{ChainSpec, ChainSpecProvider, EthereumHardforks}; -use reth_evm::{ - system_calls::pre_block_beacon_root_contract_call, ConfigureEvm, ConfigureEvmEnv, - NextBlockEnvAttributes, -}; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; use reth_execution_types::ExecutionOutcome; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; use reth_optimism_forks::OptimismHardfork; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ constants::BEACON_NONCE, proofs, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Header, IntoRecoveredTransaction, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, Header, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, }; use reth_provider::StateProviderFactory; use reth_revm::database::StateProviderDatabase; @@ -26,9 +25,9 @@ 
use reth_transaction_pool::{ }; use reth_trie::HashedPostState; use revm::{ - db::states::bundle_state::BundleRetention, + db::{states::bundle_state::BundleRetention, State}, primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, - DatabaseCommit, State, + DatabaseCommit, }; use revm_primitives::calc_excess_blob_gas; use tracing::{debug, trace, warn}; @@ -93,7 +92,7 @@ where /// Implementation of the [`PayloadBuilder`] trait for [`OptimismPayloadBuilder`]. impl<Pool, Client, EvmConfig> PayloadBuilder<Pool, Client> for OptimismPayloadBuilder<EvmConfig> where - Client: StateProviderFactory + ChainSpecProvider<ChainSpec = ChainSpec>, + Client: StateProviderFactory + ChainSpecProvider<ChainSpec = OpChainSpec>, Pool: TransactionPool, EvmConfig: ConfigureEvm<Header = Header>
, { @@ -164,7 +163,7 @@ pub(crate) fn optimism_payload( ) -> Result<BuildOutcome<OptimismBuiltPayload>, PayloadBuilderError> where EvmConfig: ConfigureEvm<Header = Header>
, - Client: StateProviderFactory + ChainSpecProvider<ChainSpec = ChainSpec>, + Client: StateProviderFactory + ChainSpecProvider<ChainSpec = OpChainSpec>, Pool: TransactionPool, { let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; @@ -202,28 +201,29 @@ where ); // apply eip-4788 pre block contract call - pre_block_beacon_root_contract_call( - &mut db, - &evm_config, - &chain_spec, - &initialized_cfg, - &initialized_block_env, - attributes.payload_attributes.parent_beacon_block_root, - ) - .map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to apply beacon root contract call for empty payload" - ); - PayloadBuilderError::Internal(err.into()) - })?; + let mut system_caller = SystemCaller::new(&evm_config, &chain_spec); + + system_caller + .pre_block_beacon_root_contract_call( + &mut db, + &initialized_cfg, + &initialized_block_env, + attributes.payload_attributes.parent_beacon_block_root, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for payload" + ); + PayloadBuilderError::Internal(err.into()) + })?; // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. - reth_evm_optimism::ensure_create2_deployer( + reth_optimism_evm::ensure_create2_deployer( chain_spec.clone(), attributes.payload_attributes.timestamp, &mut db, @@ -443,11 +443,9 @@ where Vec::new(), ); let receipts_root = execution_outcome - .optimism_receipts_root_slow( - block_number, - &chain_spec, - attributes.payload_attributes.timestamp, - ) + .generic_receipts_root_slow(block_number, |receipts| { + calculate_receipt_root_no_memo_optimism(receipts, &chain_spec, attributes.timestamp()) + }) .expect("Number is in range"); let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); @@ -459,7 +457,7 @@ where warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, - "failed to calculate state root for empty payload" + "failed to calculate state root for payload" ); })?
}; @@ -477,7 +475,7 @@ where excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); - Some(calc_excess_blob_gas(parent_excess_blob_gas as u64, parent_blob_gas_used as u64)) + Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) } else { // for the first post-fork block, both parent.blob_gas_used and // parent.excess_blob_gas are evaluated as 0 @@ -499,11 +497,11 @@ where timestamp: attributes.payload_attributes.timestamp, mix_hash: attributes.payload_attributes.prev_randao, nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(base_fee.into()), + base_fee_per_gas: Some(base_fee), number: parent_block.number + 1, - gas_limit: block_gas_limit.into(), + gas_limit: block_gas_limit, difficulty: U256::ZERO, - gas_used: cumulative_gas_used.into(), + gas_used: cumulative_gas_used, extra_data, parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, blob_gas_used, diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 28618026fc3d..cb3b939136f3 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -2,21 +2,23 @@ //! Optimism builder support +use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Encodable; +use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; /// Re-export for use in downstream arguments. pub use op_alloy_rpc_types_engine::OptimismPayloadAttributes; use op_alloy_rpc_types_engine::{ OptimismExecutionPayloadEnvelopeV3, OptimismExecutionPayloadEnvelopeV4, }; use reth_chain_state::ExecutedBlock; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_chainspec::EthereumHardforks; +use reth_optimism_chainspec::OpChainSpec; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{ transaction::WithEncoded, BlobTransactionSidecar, SealedBlock, TransactionSigned, Withdrawals, }; -use reth_rpc_types::engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, convert_block_to_payload_field_v2, @@ -52,8 +54,15 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { .unwrap_or_default() .into_iter() .map(|data| { - TransactionSigned::decode_enveloped(&mut data.as_ref()) - .map(|tx| WithEncoded::new(data, tx)) + let mut buf = data.as_ref(); + let tx = + TransactionSigned::decode_2718(&mut buf).map_err(alloy_rlp::Error::from)?; + + if !buf.is_empty() { + return Err(alloy_rlp::Error::UnexpectedLength); + } + + Ok(WithEncoded::new(data, tx)) }) .collect::<Result<_, _>>()?; @@ -119,7 +128,7 @@ pub struct OptimismBuiltPayload { /// empty. pub(crate) sidecars: Vec<BlobTransactionSidecar>, /// The rollup's chainspec. - pub(crate) chain_spec: Arc<ChainSpec>, + pub(crate) chain_spec: Arc<OpChainSpec>, /// The payload attributes.
pub(crate) attributes: OptimismPayloadBuilderAttributes, } @@ -132,7 +141,7 @@ impl OptimismBuiltPayload { id: PayloadId, block: SealedBlock, fees: U256, - chain_spec: Arc<ChainSpec>, + chain_spec: Arc<OpChainSpec>, attributes: OptimismPayloadBuilderAttributes, executed_block: Option<ExecutedBlock>, ) -> Self { diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index 00c8093ddef1..4ece12ad679e 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ b/crates/optimism/primitives/src/bedrock.rs @@ -1,7 +1,7 @@ //! OP mainnet bedrock related data. -use alloy_primitives::{b256, bloom, bytes, B256, B64, U256}; -use reth_primitives::{address, Header}; +use alloy_primitives::{address, b256, bloom, bytes, B256, B64, U256}; +use reth_primitives::Header; use reth_primitives_traits::constants::EMPTY_OMMER_ROOT_HASH; /// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 443830cc5321..24c3eb02d247 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -28,10 +28,13 @@ reth-node-builder.workspace = true reth-chainspec.workspace = true # op-reth -reth-evm-optimism.workspace = true +reth-optimism-chainspec.workspace = true +reth-optimism-consensus.workspace = true +reth-optimism-evm.workspace = true reth-optimism-forks.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types.workspace = true @@ -59,7 +62,7 @@ reth-optimism-chainspec.workspace = true [features] optimism = [ - "reth-evm-optimism/optimism", + "reth-optimism-evm/optimism", "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-eth-api/optimism", diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 35bc147986a9..b4d349e1cc45 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -2,7 +2,7 @@ use alloy_rpc_types::error::EthRpcErrorCode; use jsonrpsee_types::error::INTERNAL_ERROR_CODE; -use reth_evm_optimism::OptimismBlockExecutionError; +use reth_optimism_evm::OptimismBlockExecutionError; use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; use reth_rpc_eth_api::AsEthApiError; use reth_rpc_eth_types::EthApiError; diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 41267a1e7823..d5066be0c620 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -3,8 +3,9 @@ use alloy_rpc_types::BlockId; use op_alloy_network::Network; use op_alloy_rpc_types::OpTransactionReceipt; -use reth_chainspec::{ChainSpec, ChainSpecProvider}; +use reth_chainspec::ChainSpecProvider; use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_optimism_chainspec::OpChainSpec; use reth_primitives::TransactionMeta; use reth_provider::{BlockReaderIdExt, HeaderProvider}; use reth_rpc_eth_api::{ @@ -21,7 +22,7 @@ where Error = OpEthApiError, NetworkTypes: Network, >, - N: FullNodeComponents<Types: NodeTypes<ChainSpec = ChainSpec>>, + N: FullNodeComponents<Types: NodeTypes<ChainSpec = OpChainSpec>>, { #[inline] fn provider(&self) -> impl HeaderProvider { @@ -44,7 +45,7 @@ where let block = block.unseal(); let l1_block_info = - reth_evm_optimism::extract_l1_info(&block).map_err(OpEthApiError::from)?; + reth_optimism_evm::extract_l1_info(&block).map_err(OpEthApiError::from)?; return block .body @@ -58,9 +59,8 @@ where index: idx as u64, block_hash, block_number, - base_fee: base_fee.map(|base_fee| base_fee as u64), -
excess_blob_gas: excess_blob_gas .map(|excess_blob_gas| excess_blob_gas as u64), + base_fee, + excess_blob_gas, timestamp, }; diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index ded4c656b063..f1c10e6f1726 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,6 +1,6 @@ use alloy_primitives::{Bytes, TxKind, U256}; use alloy_rpc_types_eth::transaction::TransactionRequest; -use reth_chainspec::ChainSpec; +use reth_chainspec::EthereumHardforks; use reth_evm::ConfigureEvm; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_primitives::{ @@ -18,7 +18,7 @@ use crate::{OpEthApi, OpEthApiError}; impl EthCall for OpEthApi where Self: Call, - N: FullNodeComponents<Types: NodeTypes<ChainSpec = ChainSpec>>, + N: FullNodeComponents<Types: NodeTypes<ChainSpec: EthereumHardforks>>, { } @@ -86,10 +86,7 @@ where #[allow(clippy::needless_update)] let env = TxEnv { - gas_limit: gas_limit - .try_into() - .map_err(|_| RpcInvalidTransactionError::GasUintOverflow) - .map_err(Self::Error::from_eth_err)?, + gas_limit, nonce, caller: from.unwrap_or_default(), gas_price, diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 403e3b8a73b8..57ce44100f26 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -14,7 +14,7 @@ use std::{fmt, sync::Arc}; use alloy_primitives::U256; use derive_more::Deref; use op_alloy_network::Optimism; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_chainspec::EthereumHardforks; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; use reth_node_api::{BuilderProvider, FullNodeComponents, FullNodeTypes, NodeTypes}; @@ -67,7 +67,7 @@ pub struct OpEthApi { inner: Arc>, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network.
- sequencer_client: OnceCell<SequencerClient>, + sequencer_client: Arc<OnceCell<SequencerClient>>, } impl OpEthApi { @@ -93,7 +93,7 @@ impl OpEthApi { ctx.config.proof_permits, ); - Self { inner: Arc::new(inner), sequencer_client: OnceCell::new() } + Self { inner: Arc::new(inner), sequencer_client: Arc::new(OnceCell::new()) } } } @@ -239,7 +239,7 @@ where impl AddDevSigners for OpEthApi where - N: FullNodeComponents<Types: NodeTypes<ChainSpec = ChainSpec>>, + N: FullNodeComponents<Types: NodeTypes<ChainSpec: EthereumHardforks>>, { fn with_dev_accounts(&self) { *self.signers().write() = DevSigner::random_signers(20) diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index c96dff40b091..5b716f39320a 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -4,6 +4,7 @@ use alloy_primitives::{BlockNumber, B256}; use reth_chainspec::EthereumHardforks; use reth_evm::ConfigureEvm; use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; use reth_primitives::{ revm_primitives::BlockEnv, BlockNumberOrTag, Header, Receipt, SealedBlockWithSenders, }; @@ -79,16 +80,18 @@ where fn receipts_root( &self, - _block_env: &BlockEnv, + block_env: &BlockEnv, execution_outcome: &ExecutionOutcome, block_number: BlockNumber, ) -> B256 { execution_outcome - .optimism_receipts_root_slow( - block_number, - self.provider().chain_spec().as_ref(), - _block_env.timestamp.to::<u64>(), - ) + .generic_receipts_root_slow(block_number, |receipts| { + calculate_receipt_root_no_memo_optimism( + receipts, + self.provider().chain_spec().as_ref(), + block_env.timestamp.to::<u64>(), + ) + }) .expect("Block is present") } } diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index a98ee68d6f14..76b03e845a09 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,13 +1,15 @@ //! Loads and formats OP receipt RPC response. +use alloy_eips::eip2718::Encodable2718; use alloy_rpc_types::{AnyReceiptEnvelope, Log, TransactionReceipt}; use op_alloy_consensus::{OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope}; use op_alloy_rpc_types::{ receipt::L1BlockInfo, OpTransactionReceipt, OptimismTransactionReceiptFields, }; use reth_chainspec::ChainSpec; -use reth_evm_optimism::RethL1BlockInfo; use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OptimismHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use reth_provider::ChainSpecProvider; @@ -19,7 +21,7 @@ use crate::{OpEthApi, OpEthApiError}; impl LoadReceipt for OpEthApi where Self: Send + Sync, - N: FullNodeComponents<Types: NodeTypes<ChainSpec = ChainSpec>>, + N: FullNodeComponents<Types: NodeTypes<ChainSpec = OpChainSpec>>, { #[inline] fn cache(&self) -> &EthStateCache { @@ -42,7 +44,7 @@ where let block = block.unseal(); let l1_block_info = - reth_evm_optimism::extract_l1_info(&block).map_err(OpEthApiError::from)?; + reth_optimism_evm::extract_l1_info(&block).map_err(OpEthApiError::from)?; Ok(OpReceiptBuilder::new( &self.inner.provider().chain_spec(), @@ -118,7 +120,7 @@ impl OpReceiptFieldsBuilder { tx: &TransactionSigned, l1_block_info: revm::L1BlockInfo, ) -> Result { - let raw_tx = tx.envelope_encoded(); + let raw_tx = tx.encoded_2718(); let timestamp = self.l1_block_timestamp; self.l1_fee = Some( @@ -205,7 +207,7 @@ pub struct OpReceiptBuilder { impl OpReceiptBuilder { /// Returns a new builder.
pub fn new( - chain_spec: &ChainSpec, + chain_spec: &OpChainSpec, transaction: &TransactionSigned, meta: TransactionMeta, receipt: &Receipt, @@ -299,6 +301,7 @@ impl OpReceiptBuilder { #[cfg(test)] mod test { use alloy_primitives::hex; + use op_alloy_network::eip2718::Decodable2718; use reth_optimism_chainspec::OP_MAINNET; use reth_primitives::{Block, BlockBody}; @@ -340,14 +343,13 @@ mod test { #[test] fn op_receipt_fields_from_block_and_tx() { // rig - let tx_0 = TransactionSigned::decode_enveloped( + let tx_0 = TransactionSigned::decode_2718( &mut TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056.as_slice(), ) .unwrap(); - let tx_1 = - TransactionSigned::decode_enveloped(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) - .unwrap(); + let tx_1 = TransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) + .unwrap(); let block = Block { body: BlockBody { transactions: [tx_0, tx_1.clone()].to_vec(), ..Default::default() }, @@ -355,7 +357,7 @@ mod test { }; let l1_block_info = - reth_evm_optimism::extract_l1_info(&block).expect("should extract l1 info"); + reth_optimism_evm::extract_l1_info(&block).expect("should extract l1 info"); // test assert!(OP_MAINNET.is_fjord_active_at_timestamp(BLOCK_124665056_TIMESTAMP)); diff --git a/crates/optimism/storage/Cargo.toml b/crates/optimism/storage/Cargo.toml new file mode 100644 index 000000000000..107b64db3de0 --- /dev/null +++ b/crates/optimism/storage/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "reth-optimism-storage" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-primitives.workspace = true + +[dev-dependencies] +reth-codecs.workspace = true +reth-db-api.workspace = true +reth-prune-types.workspace = true +reth-stages-types.workspace = true + +[features] +optimism = ["reth-primitives/optimism"] \ No newline at end of file diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs new file mode 100644 index 000000000000..d435ed1d884a --- /dev/null +++ b/crates/optimism/storage/src/lib.rs @@ -0,0 +1,79 @@ +//! Standalone crate for Optimism-Storage Reth. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. 
+#![cfg(feature = "optimism")] + +#[cfg(test)] +mod tests { + use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; + use reth_db_api::models::{ + CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, + StoredBlockWithdrawals, + }; + use reth_primitives::{Account, Receipt, ReceiptWithBloom, Requests, Withdrawals}; + use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; + use reth_stages_types::{ + AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, + HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, StageUnitCheckpoint, + StorageHashingCheckpoint, + }; + + #[test] + fn test_ensure_backwards_compatibility() { + assert_eq!(Account::bitflag_encoded_bytes(), 2); + assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 2); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); + + // In case of failure, refer to the documentation of the + // [`validate_bitflag_backwards_compat`] macro for detailed instructions on handling + // it. 
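A hypothetical illustration (not reth source) of what the `bitflag_encoded_bytes` assertions above and the `validate_bitflag_backwards_compat!` checks below are pinning down:

// A Compact-style codec packs one presence bit per `Option` field (plus
// variable-length-integer length bits) into leading "bitflag" bytes.
struct AccountLike {
    nonce: u64,                      // length bits live in the bitflag byte(s)
    balance: Option<u128>,           // one presence bit
    bytecode_hash: Option<[u8; 32]>, // one presence bit
}
// If this shape needs 1 bitflag byte today, adding enough new flag bits to
// spill into a 2nd byte silently shifts every stored field on disk -- exactly
// the regression these backwards-compatibility assertions are meant to catch.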
+ validate_bitflag_backwards_compat!(Account, UnusedBits::NotZero); + validate_bitflag_backwards_compat!(AccountHashingCheckpoint, UnusedBits::NotZero); + validate_bitflag_backwards_compat!(CheckpointBlockRange, UnusedBits::Zero); + validate_bitflag_backwards_compat!(CompactClientVersion, UnusedBits::Zero); + validate_bitflag_backwards_compat!(CompactU256, UnusedBits::NotZero); + validate_bitflag_backwards_compat!(CompactU64, UnusedBits::NotZero); + validate_bitflag_backwards_compat!(EntitiesCheckpoint, UnusedBits::Zero); + validate_bitflag_backwards_compat!(ExecutionCheckpoint, UnusedBits::Zero); + validate_bitflag_backwards_compat!(HeadersCheckpoint, UnusedBits::Zero); + validate_bitflag_backwards_compat!(IndexHistoryCheckpoint, UnusedBits::Zero); + validate_bitflag_backwards_compat!(PruneCheckpoint, UnusedBits::NotZero); + validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); + validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); + validate_bitflag_backwards_compat!(Receipt, UnusedBits::NotZero); + validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); + validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); + validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); + validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); + validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); + validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); + validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); + validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); + validate_bitflag_backwards_compat!(Requests, UnusedBits::Zero); + } +} diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 7d1f3fb9eada..f9487ec784ca 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -351,7 +351,7 @@ where { /// Spawns a new payload build task. fn spawn_build_job(&mut self) { - trace!(target: "payload_builder", "spawn new payload build task"); + trace!(target: "payload_builder", id = %self.config.payload_id(), "spawn new payload build task"); let (tx, rx) = oneshot::channel(); let client = self.client.clone(); let pool = self.pool.clone(); diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index 6abaed342408..ea1ae0854348 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -158,6 +158,6 @@ struct CachedAccount { impl CachedAccount { fn new(info: Option<AccountInfo>) -> Self { - Self { info, storage: HashMap::new() } + Self { info, storage: HashMap::default() } } } diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 51f9efd301ef..70b4296da4e6 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -27,8 +27,9 @@ //! use std::future::Future; //! use std::pin::Pin; //! use std::task::{Context, Poll}; +//! use alloy_primitives::U256; //! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator}; -//! use reth_primitives::{Block, Header, U256}; +//! use reth_primitives::{Block, Header}; //! //! /// The generator type that creates new jobs that build empty blocks. //! 
pub struct EmptyBlockPayloadJobGenerator; diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 7de256c7b4ba..1ebf6770c991 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -303,7 +303,7 @@ where let (fut, keep_alive) = self.payload_jobs[job].0.resolve(); if keep_alive == KeepPayloadJobAlive::No { - let (_, id) = self.payload_jobs.remove(job); + let (_, id) = self.payload_jobs.swap_remove(job); trace!(%id, "terminated resolved job"); } diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index 0b8113f67dc2..00df9e8d290f 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -35,14 +35,14 @@ pub enum PayloadBuilderError { WithdrawalsBeforeShanghai, /// Any other payload building errors. #[error(transparent)] - Other(Box<dyn std::error::Error + Send + Sync>), + Other(Box<dyn core::error::Error + Send + Sync>), } impl PayloadBuilderError { /// Create a new error from a boxed error. pub fn other<E>(error: E) -> Self where - E: std::error::Error + Send + Sync + 'static, + E: core::error::Error + Send + Sync + 'static, { Self::Other(Box::new(error)) } @@ -84,7 +84,7 @@ pub enum EngineObjectValidationError { UnsupportedFork, /// Another type of error that is not covered by the above variants. #[error("Invalid params: {0}")] - InvalidParams(#[from] Box<dyn std::error::Error + Send + Sync>), + InvalidParams(#[from] Box<dyn core::error::Error + Send + Sync>), } /// Thrown when validating an execution payload OR payload attributes fails due to: @@ -117,7 +117,7 @@ impl EngineObjectValidationError { /// Creates an instance of the `InvalidParams` variant with the given error. pub fn invalid_params<E>(error: E) -> Self where - E: std::error::Error + Send + Sync + 'static, + E: core::error::Error + Send + Sync + 'static, { Self::InvalidParams(Box::new(error)) } diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 9551b75a7774..b0647691f760 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -80,7 +80,7 @@ pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug { /// [`PayloadBuilderAttributes::try_new`]. type RpcPayloadAttributes; /// The error type used in [`PayloadBuilderAttributes::try_new`]. - type Error: std::error::Error; + type Error: core::error::Error; /// Creates a new payload builder for the given parent block and the attributes. /// @@ -164,7 +164,7 @@ pub trait PayloadAttributesBuilder: std::fmt::Debug + Send + Sync + 'static { /// The payload attributes type returned by the builder. type PayloadAttributes: PayloadAttributes; /// The error type returned by [`PayloadAttributesBuilder::build`]. - type Error: std::error::Error + Send + Sync; + type Error: core::error::Error + Send + Sync; /// Return a new payload attribute from the builder.
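The switch from `std::error::Error` to `core::error::Error` throughout the hunks above relies on the trait's stabilization in `core` (Rust 1.81) and keeps these bounds usable in `no_std` builds. A minimal sketch of a custom error satisfying the new `PayloadBuilderError::other` bound (illustrative):

use core::fmt;

// A no_std-friendly error type: only `core` is needed, yet it still satisfies
// `core::error::Error + Send + Sync + 'static`.
#[derive(Debug)]
struct MyBuilderError;

impl fmt::Display for MyBuilderError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("payload building failed")
    }
}

impl core::error::Error for MyBuilderError {}

// Usage sketch: let err = PayloadBuilderError::other(MyBuilderError);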
fn build(&self) -> Result<Self::PayloadAttributes, Self::Error>; diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index ca57a2e57576..2fec75666568 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -20,16 +20,17 @@ alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true -derive_more.workspace = true revm-primitives = { workspace = true, features = ["serde"] } # misc -roaring = "0.10.2" byteorder = "1" +derive_more.workspace = true +roaring = "0.10.2" +serde_with = { workspace = true, optional = true } # required by reth-codecs -modular-bitfield.workspace = true bytes.workspace = true +modular-bitfield.workspace = true serde.workspace = true # arbitrary utils @@ -38,14 +39,18 @@ proptest = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] +reth-testing-utils.workspace = true + alloy-primitives = { workspace = true, features = ["arbitrary"] } alloy-consensus = { workspace = true, features = ["arbitrary"] } + arbitrary = { workspace = true, features = ["derive"] } -proptest.workspace = true +bincode.workspace = true proptest-arbitrary-interop.workspace = true -test-fuzz.workspace = true +proptest.workspace = true rand.workspace = true serde_json.workspace = true +test-fuzz.workspace = true [features] default = ["std"] @@ -59,3 +64,4 @@ arbitrary = [ "dep:proptest", "dep:proptest-arbitrary-interop", ] +serde-bincode-compat = ["serde_with", "alloy-consensus/serde-bincode-compat"] diff --git a/crates/primitives-traits/src/error.rs b/crates/primitives-traits/src/error.rs index ef088a920e15..97b33bd618ae 100644 --- a/crates/primitives-traits/src/error.rs +++ b/crates/primitives-traits/src/error.rs @@ -19,8 +19,7 @@ impl<T: fmt::Display> fmt::Display for GotExpected<T> { } } -#[cfg(feature = "std")] -impl<T: fmt::Debug + fmt::Display> std::error::Error for GotExpected<T> {} +impl<T: fmt::Debug + fmt::Display> core::error::Error for GotExpected<T> {} impl<T> From<(T, T)> for GotExpected<T> { #[inline] @@ -57,8 +56,7 @@ impl<T: fmt::Display> fmt::Display for GotExpectedBoxed<T> { } } -#[cfg(feature = "std")] -impl<T: fmt::Debug + fmt::Display> std::error::Error for GotExpectedBoxed<T> {} +impl<T: fmt::Debug + fmt::Display> core::error::Error for GotExpectedBoxed<T> {} impl<T> Deref for GotExpectedBoxed<T> { type Target = GotExpected<T>; diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index e4905c865ce7..fa9c33245359 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -11,6 +11,12 @@ pub use alloy_consensus::Header; use alloy_primitives::{Address, BlockNumber, B256, U256}; +/// Bincode-compatible header type serde implementations.
+#[cfg(feature = "serde-bincode-compat")] +pub mod serde_bincode_compat { + pub use super::sealed::serde_bincode_compat::SealedHeader; +} + /// Trait for extracting specific Ethereum block data from a header pub trait BlockHeader { /// Retrieves the beneficiary (miner) of the block @@ -52,7 +58,7 @@ impl BlockHeader for Header { } fn gas_limit(&self) -> u64 { - self.gas_limit as u64 + self.gas_limit } fn timestamp(&self) -> u64 { @@ -64,10 +70,10 @@ impl BlockHeader for Header { } fn base_fee_per_gas(&self) -> Option<u64> { - self.base_fee_per_gas.map(|base_fee| base_fee as u64) + self.base_fee_per_gas } fn excess_blob_gas(&self) -> Option<u64> { - self.excess_blob_gas.map(|excess_blob_gas| excess_blob_gas as u64) + self.excess_blob_gas } } diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 80a5414df988..f2047e079c8d 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -7,13 +7,13 @@ use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; use core::mem; use derive_more::{AsRef, Deref}; -use reth_codecs::{add_arbitrary_tests, Compact}; +use reth_codecs::add_arbitrary_tests; use serde::{Deserialize, Serialize}; /// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want /// to modify the header. -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize, Compact)] -#[add_arbitrary_tests(rlp, compact)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] +#[add_arbitrary_tests(rlp)] pub struct SealedHeader { /// Locked Header hash. hash: BlockHash, @@ -133,18 +133,102 @@ impl SealedHeader { #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> { - let mut header = Header::arbitrary(u)?; - header.gas_limit = (header.gas_limit as u64).into(); - header.gas_used = (header.gas_used as u64).into(); - header.base_fee_per_gas = - header.base_fee_per_gas.map(|base_fee_per_gas| (base_fee_per_gas as u64).into()); - header.blob_gas_used = - header.blob_gas_used.map(|blob_gas_used| (blob_gas_used as u64).into()); - header.excess_blob_gas = - header.excess_blob_gas.map(|excess_blob_gas| (excess_blob_gas as u64).into()); + let header = Header::arbitrary(u)?; let sealed = header.seal_slow(); let (header, seal) = sealed.into_parts(); Ok(Self::new(header, seal)) } } + +/// Bincode-compatible [`SealedHeader`] serde implementation. +#[cfg(feature = "serde-bincode-compat")] +pub(super) mod serde_bincode_compat { + use alloy_consensus::serde_bincode_compat::Header; + use alloy_primitives::BlockHash; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{DeserializeAs, SerializeAs}; + + /// Bincode-compatible [`super::SealedHeader`] serde implementation.
+ /// + /// Intended for use with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_primitives_traits::{serde_bincode_compat, SealedHeader}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + ///     #[serde_as(as = "serde_bincode_compat::SealedHeader")] + ///     header: SealedHeader, + /// } + /// ``` + #[derive(Debug, Serialize, Deserialize)] + pub struct SealedHeader<'a> { + hash: BlockHash, + header: Header<'a>, + } + + impl<'a> From<&'a super::SealedHeader> for SealedHeader<'a> { + fn from(value: &'a super::SealedHeader) -> Self { + Self { hash: value.hash, header: Header::from(&value.header) } + } + } + + impl<'a> From<SealedHeader<'a>> for super::SealedHeader { + fn from(value: SealedHeader<'a>) -> Self { + Self { hash: value.hash, header: value.header.into() } + } + } + + impl<'a> SerializeAs<super::SealedHeader> for SealedHeader<'a> { + fn serialize_as<S>(source: &super::SealedHeader, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + SealedHeader::from(source).serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::SealedHeader> for SealedHeader<'de> { + fn deserialize_as<D>(deserializer: D) -> Result<super::SealedHeader, D::Error> + where + D: Deserializer<'de>, + { + SealedHeader::deserialize(deserializer).map(Into::into) + } + } + + #[cfg(test)] + mod tests { + use super::super::{serde_bincode_compat, SealedHeader}; + + use arbitrary::Arbitrary; + use rand::Rng; + use reth_testing_utils::generators; + use serde::{Deserialize, Serialize}; + use serde_with::serde_as; + + #[test] + fn test_sealed_header_bincode_roundtrip() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + #[serde_as(as = "serde_bincode_compat::SealedHeader")] + transaction: SealedHeader, + } + + let mut bytes = [0u8; 1024]; + generators::rng().fill(bytes.as_mut_slice()); + let data = Data { + transaction: SealedHeader::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) + .unwrap(), + }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + } +} diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index a6f37e220063..ef5c0d025360 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -27,8 +27,8 @@ pub const fn generate_valid_header( // Set fields based on EIP-4844 being active if eip_4844_active { - header.blob_gas_used = Some(blob_gas_used as u128); - header.excess_blob_gas = Some(excess_blob_gas as u128); + header.blob_gas_used = Some(blob_gas_used); + header.excess_blob_gas = Some(excess_blob_gas); header.parent_beacon_block_root = Some(parent_beacon_block_root); } else { header.blob_gas_used = None; diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs index 767fb3ec30a1..570c96c9fdaf 100644 --- a/crates/primitives-traits/src/integer_list.rs +++ b/crates/primitives-traits/src/integer_list.rs @@ -4,7 +4,7 @@ use core::fmt; use derive_more::Deref; use roaring::RoaringTreemap; use serde::{ - de::{SeqAccess, Unexpected, Visitor}, + de::{SeqAccess, Visitor}, ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer, }; @@ -16,34 +16,54 @@ pub struct IntegerList(pub RoaringTreemap); impl fmt::Debug for IntegerList { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let vec: Vec<u64> = self.0.iter().collect(); - write!(f, "IntegerList {vec:?}") + f.write_str("IntegerList")?; + f.debug_list().entries(self.0.iter()).finish() } } impl IntegerList { + /// Creates a new empty `IntegerList`. + pub fn empty() -> Self { + Self(RoaringTreemap::new()) + } + /// Creates an `IntegerList` from a list of integers. /// - /// # Returns - /// - /// Returns an error if the list is empty or not pre-sorted. - pub fn new<T: AsRef<[u64]>>(list: T) -> Result<Self, RoaringBitmapError> { - Ok(Self( - RoaringTreemap::from_sorted_iter(list.as_ref().iter().copied()) - .map_err(|_| RoaringBitmapError::InvalidInput)?, - )) + /// Returns an error if the list is not pre-sorted. + pub fn new(list: impl IntoIterator<Item = u64>) -> Result<Self, IntegerListError> { + RoaringTreemap::from_sorted_iter(list) + .map(Self) + .map_err(|_| IntegerListError::UnsortedInput) } /// Creates an `IntegerList` from a pre-sorted list of integers. /// /// # Panics /// - /// Panics if the list is empty or not pre-sorted. - pub fn new_pre_sorted<T: AsRef<[u64]>>(list: T) -> Self { - Self( - RoaringTreemap::from_sorted_iter(list.as_ref().iter().copied()) - .expect("IntegerList must be pre-sorted and non-empty"), - ) + /// Panics if the list is not pre-sorted. + #[inline] + #[track_caller] + pub fn new_pre_sorted(list: impl IntoIterator<Item = u64>) -> Self { + Self::new(list).expect("IntegerList must be pre-sorted") + } + + /// Appends a list of integers to the current list. + pub fn append(&mut self, list: impl IntoIterator<Item = u64>) -> Result<u64, IntegerListError> { + self.0.append(list).map_err(|_| IntegerListError::UnsortedInput) + } + + /// Pushes a new integer to the list. + pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> { + if self.0.push(value) { + Ok(()) + } else { + Err(IntegerListError::UnsortedInput) + } + } + + /// Clears the list. + pub fn clear(&mut self) { + self.0.clear(); } /// Serializes an [`IntegerList`] into a sequence of bytes. @@ -59,36 +79,21 @@ impl IntegerList { } /// Deserializes a sequence of bytes into a proper [`IntegerList`]. - pub fn from_bytes(data: &[u8]) -> Result<Self, RoaringBitmapError> { + pub fn from_bytes(data: &[u8]) -> Result<Self, IntegerListError> { Ok(Self( RoaringTreemap::deserialize_from(data) - .map_err(|_| RoaringBitmapError::FailedToDeserialize)?, + .map_err(|_| IntegerListError::FailedToDeserialize)?, )) } } -macro_rules! impl_uint { - ($($w:tt),+) => { - $( - impl From<Vec<$w>> for IntegerList { - fn from(v: Vec<$w>) -> Self { - Self::new_pre_sorted(v.iter().map(|v| *v as u64).collect::<Vec<_>>()) - } - } - )+ - }; -} - -impl_uint!(usize, u64, u32, u8, u16); - impl Serialize for IntegerList { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { - let vec = self.0.iter().collect::<Vec<u64>>(); let mut seq = serializer.serialize_seq(Some(self.len() as usize))?; - for e in vec { + for e in &self.0 { seq.serialize_element(&e)?; } seq.end() } } @@ -107,12 +112,11 @@ impl<'de> Visitor<'de> for IntegerListVisitor { where E: SeqAccess<'de>, { - let mut list = Vec::new(); + let mut list = IntegerList::empty(); while let Some(item) = seq.next_element()? { - list.push(item); + list.push(item).map_err(serde::de::Error::custom)?; } - - IntegerList::new(list).map_err(|_| serde::de::Error::invalid_value(Unexpected::Seq, &self)) + Ok(list) } } @@ -132,17 +136,17 @@ use arbitrary::{Arbitrary, Unstructured}; impl<'a> Arbitrary<'a> for IntegerList { fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result<Self> { let mut nums: Vec<u64> = Vec::arbitrary(u)?; - nums.sort(); + nums.sort_unstable(); Self::new(nums).map_err(|_| arbitrary::Error::IncorrectFormat) } } /// Primitives error type.
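A short usage sketch of the reworked `IntegerList` API above (illustrative; assumes the type and error enum are in scope as re-exported further below):

use reth_primitives_traits::{IntegerList, IntegerListError};

fn demo() -> Result<(), IntegerListError> {
    // Empty lists are now representable instead of panicking at construction.
    let mut list = IntegerList::empty();
    list.push(1)?;
    list.push(5)?;
    list.append([7, 9])?; // sorted, strictly increasing input is accepted
    assert!(list.push(3).is_err()); // out-of-order pushes yield `UnsortedInput`
    Ok(())
}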
#[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum RoaringBitmapError { - /// The provided input is invalid. - #[display("the provided input is invalid")] - InvalidInput, +pub enum IntegerListError { + /// The provided input is unsorted. + #[display("the provided input is unsorted")] + UnsortedInput, /// Failed to deserialize data into type. #[display("failed to deserialize data into type")] FailedToDeserialize, @@ -152,6 +156,12 @@ mod tests { use super::*; + #[test] + fn empty_list() { + assert_eq!(IntegerList::empty().len(), 0); + assert_eq!(IntegerList::new_pre_sorted(std::iter::empty()).len(), 0); + } + #[test] fn test_integer_list() { let original_list = [1, 2, 3]; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index e5c57de74b9c..ccc3ea13baf6 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -1,4 +1,4 @@ -//! Common abstracted types in reth. +//! Common abstracted types in Reth. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -21,7 +21,7 @@ pub mod account; pub use account::{Account, Bytecode}; mod integer_list; -pub use integer_list::{IntegerList, RoaringBitmapError}; +pub use integer_list::{IntegerList, IntegerListError}; pub mod request; pub use request::{Request, Requests}; @@ -43,3 +43,15 @@ pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; pub use header::{BlockHeader, Header, HeaderError, SealedHeader}; + +/// Bincode-compatible serde implementations for common abstracted types in Reth. +/// +/// The `bincode` crate doesn't work with optionally serializable serde fields, but some of the +/// Reth types require optional serialization for RPC compatibility. This module makes it so that +/// all fields are serialized.
+/// +/// Read more: +#[cfg(feature = "serde-bincode-compat")] +pub mod serde_bincode_compat { + pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; +} diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 995e60292c6e..f6b0607e7f03 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -133,7 +133,7 @@ mod tests { // #[test] fn test_withdrawal_serde_roundtrip() { - let input = r#"[{"index":"0x0","validatorIndex":"0x0","address":"0x0000000000000000000000000000000000001000","amount":"0x1"},{"index":"0x1","validatorIndex":"0x1","address":"0x0000000000000000000000000000000000001001","amount":"0x1"},{"index":"0x2","validatorIndex":"0x2","address":"0x0000000000000000000000000000000000001002","amount":"0x1"},{"index":"0x3","validatorIndex":"0x3","address":"0x0000000000000000000000000000000000001003","amount":"0x1"},{"index":"0x4","validatorIndex":"0x4","address":"0x0000000000000000000000000000000000001004","amount":"0x1"},{"index":"0x5","validatorIndex":"0x5","address":"0x0000000000000000000000000000000000001005","amount":"0x1"},{"index":"0x6","validatorIndex":"0x6","address":"0x0000000000000000000000000000000000001006","amount":"0x1"},{"index":"0x7","validatorIndex":"0x7","address":"0x0000000000000000000000000000000000001007","amount":"0x1"},{"index":"0x8","validatorIndex":"0x8","address":"0x0000000000000000000000000000000000001008","amount":"0x1"},{"index":"0x9","validatorIndex":"0x9","address":"0x0000000000000000000000000000000000001009","amount":"0x1"},{"index":"0xa","validatorIndex":"0xa","address":"0x000000000000000000000000000000000000100a","amount":"0x1"},{"index":"0xb","validatorIndex":"0xb","address":"0x000000000000000000000000000000000000100b","amount":"0x1"},{"index":"0xc","validatorIndex":"0xc","address":"0x000000000000000000000000000000000000100c","amount":"0x1"},{"index":"0xd","validatorIndex":"0xd","address":"0x000000000000000000000000000000000000100d","amount":"0x1"},{"index":"0xe","validatorIndex":"0xe","address":"0x000000000000000000000000000000000000100e","amount":"0x1"},{"index":"0xf","validatorIndex":"0xf","address":"0x000000000000000000000000000000000000100f","amount":"0x1"}]"#; + let input = 
r#"[{"index":"0x0","validatorIndex":"0x0","address":"0x0000000000000000000000000000000000001000","amount":"0x1"},{"index":"0x1","validatorIndex":"0x1","address":"0x0000000000000000000000000000000000001001","amount":"0x1"},{"index":"0x2","validatorIndex":"0x2","address":"0x0000000000000000000000000000000000001002","amount":"0x1"},{"index":"0x3","validatorIndex":"0x3","address":"0x0000000000000000000000000000000000001003","amount":"0x1"},{"index":"0x4","validatorIndex":"0x4","address":"0x0000000000000000000000000000000000001004","amount":"0x1"},{"index":"0x5","validatorIndex":"0x5","address":"0x0000000000000000000000000000000000001005","amount":"0x1"},{"index":"0x6","validatorIndex":"0x6","address":"0x0000000000000000000000000000000000001006","amount":"0x1"},{"index":"0x7","validatorIndex":"0x7","address":"0x0000000000000000000000000000000000001007","amount":"0x1"},{"index":"0x8","validatorIndex":"0x8","address":"0x0000000000000000000000000000000000001008","amount":"0x1"},{"index":"0x9","validatorIndex":"0x9","address":"0x0000000000000000000000000000000000001009","amount":"0x1"},{"index":"0xa","validatorIndex":"0xa","address":"0x000000000000000000000000000000000000100A","amount":"0x1"},{"index":"0xb","validatorIndex":"0xb","address":"0x000000000000000000000000000000000000100b","amount":"0x1"},{"index":"0xc","validatorIndex":"0xc","address":"0x000000000000000000000000000000000000100C","amount":"0x1"},{"index":"0xd","validatorIndex":"0xd","address":"0x000000000000000000000000000000000000100D","amount":"0x1"},{"index":"0xe","validatorIndex":"0xe","address":"0x000000000000000000000000000000000000100e","amount":"0x1"},{"index":"0xf","validatorIndex":"0xf","address":"0x000000000000000000000000000000000000100f","amount":"0x1"}]"#; let withdrawals: Vec = serde_json::from_str(input).unwrap(); let s = serde_json::to_string(&withdrawals).unwrap(); diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 72de34e3f38e..8596f8d766c5 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -18,12 +18,10 @@ reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true reth-trie-common.workspace = true revm-primitives = { workspace = true, features = ["serde"] } -reth-chainspec = { workspace = true, optional = true } reth-codecs = { workspace = true, optional = true } # op-reth reth-optimism-chainspec = { workspace = true, optional = true } -reth-optimism-forks = { workspace = true, optional = true } # ethereum alloy-consensus.workspace = true @@ -53,9 +51,11 @@ c-kzg = { workspace = true, features = ["serde"], optional = true } bytes.workspace = true derive_more.workspace = true modular-bitfield = { workspace = true, optional = true } +once_cell.workspace = true +rand = { workspace = true, optional = true } rayon.workspace = true serde.workspace = true -once_cell.workspace = true +serde_with = { workspace = true, optional = true } zstd = { workspace = true, features = ["experimental"], optional = true } # arbitrary utils @@ -64,22 +64,24 @@ proptest = { workspace = true, optional = true } [dev-dependencies] # eth -reth-primitives-traits = { workspace = true, features = ["arbitrary"] } -revm-primitives = { workspace = true, features = ["arbitrary"] } reth-chainspec.workspace = true reth-codecs.workspace = true +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } +reth-testing-utils.workspace = true +revm-primitives = { workspace = true, features = ["arbitrary"] } + alloy-eips = { workspace = true, features = ["arbitrary"] } 
alloy-genesis.workspace = true -assert_matches.workspace = true arbitrary = { workspace = true, features = ["derive"] } -proptest.workspace = true +assert_matches.workspace = true +bincode.workspace = true +modular-bitfield.workspace = true proptest-arbitrary-interop.workspace = true +proptest.workspace = true rand.workspace = true serde_json.workspace = true test-fuzz.workspace = true -modular-bitfield.workspace = true - criterion.workspace = true pprof = { workspace = true, features = [ @@ -94,29 +96,28 @@ std = ["reth-primitives-traits/std"] reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] asm-keccak = ["alloy-primitives/asm-keccak"] arbitrary = [ - "reth-primitives-traits/arbitrary", - "revm-primitives/arbitrary", - "reth-chainspec?/arbitrary", - "reth-ethereum-forks/arbitrary", - "alloy-eips/arbitrary", "dep:arbitrary", "dep:proptest", + "alloy-eips/arbitrary", + "rand", "reth-codec", + "reth-ethereum-forks/arbitrary", + "reth-primitives-traits/arbitrary", + "revm-primitives/arbitrary", + "secp256k1", ] secp256k1 = ["dep:secp256k1"] c-kzg = [ "dep:c-kzg", - "revm-primitives/c-kzg", - "alloy-eips/kzg", "alloy-consensus/kzg", + "alloy-eips/kzg", + "revm-primitives/c-kzg", ] optimism = [ - "reth-chainspec/optimism", - "revm-primitives/optimism", - "reth-codecs?/optimism", - "dep:reth-optimism-chainspec", "dep:op-alloy-consensus", - "reth-optimism-forks", + "dep:reth-optimism-chainspec", + "reth-codecs?/optimism", + "revm-primitives/optimism", ] alloy-compat = [ "dep:alloy-rpc-types", @@ -124,6 +125,12 @@ alloy-compat = [ "dep:op-alloy-rpc-types", ] test-utils = ["reth-primitives-traits/test-utils"] +serde-bincode-compat = [ + "alloy-consensus/serde-bincode-compat", + "op-alloy-consensus?/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", + "serde_with", +] [[bench]] name = "recover_ecdsa_crit" diff --git a/crates/primitives/benches/recover_ecdsa_crit.rs b/crates/primitives/benches/recover_ecdsa_crit.rs index e1e896dbbf8f..8e8e279b2a4a 100644 --- a/crates/primitives/benches/recover_ecdsa_crit.rs +++ b/crates/primitives/benches/recover_ecdsa_crit.rs @@ -1,8 +1,9 @@ #![allow(missing_docs)] +use alloy_primitives::hex_literal::hex; use alloy_rlp::Decodable; use criterion::{criterion_group, criterion_main, Criterion}; use pprof::criterion::{Output, PProfProfiler}; -use reth_primitives::{hex_literal::hex, TransactionSigned}; +use reth_primitives::TransactionSigned; /// Benchmarks the recovery of the public key from the ECDSA message using criterion. 
pub fn criterion_benchmark(c: &mut Criterion) { diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs index 622168bb35f8..61fe161f2f74 100644 --- a/crates/primitives/benches/validate_blob_tx.rs +++ b/crates/primitives/benches/validate_blob_tx.rs @@ -1,5 +1,6 @@ #![allow(missing_docs)] +use alloy_consensus::TxEip4844; use alloy_eips::eip4844::env_settings::EnvKzgSettings; use alloy_primitives::hex; use criterion::{ @@ -11,7 +12,7 @@ use proptest::{ test_runner::{RngAlgorithm, TestRng, TestRunner}, }; use proptest_arbitrary_interop::arb; -use reth_primitives::{BlobTransactionSidecar, TxEip4844}; +use reth_primitives::BlobTransactionSidecar; use revm_primitives::MAX_BLOB_NUMBER_PER_BLOCK; // constant seed to use for the rng diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 8f8ec1b397ca..bf7f557b799a 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -2,10 +2,10 @@ use crate::{ constants::EMPTY_TRANSACTIONS, transaction::extract_chain_id, Block, BlockBody, Signature, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, - TxEip1559, TxEip2930, TxEip4844, TxLegacy, TxType, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, }; use alloc::{string::ToString, vec::Vec}; +use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}; use alloy_primitives::{Parity, TxKind}; use alloy_rlp::Error as RlpError; use alloy_serde::WithOtherFields; @@ -196,7 +196,7 @@ impl TryFrom<WithOtherFields<alloy_rpc_types::Transaction>> for Transaction { let fields = other .deserialize_into::<op_alloy_rpc_types::OptimismTransactionFields>() .map_err(|e| ConversionError::Custom(e.to_string()))?; - Ok(Self::Deposit(crate::transaction::TxDeposit { + Ok(Self::Deposit(op_alloy_consensus::TxDeposit { source_hash: fields .source_hash .ok_or_else(|| ConversionError::Custom("MissingSourceHash".to_string()))?, diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 5914602127e5..0464c28dee0f 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,12 +1,12 @@ use crate::{ - Address, Bytes, GotExpected, Header, SealedHeader, TransactionSigned, - TransactionSignedEcRecovered, Withdrawals, B256, + GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, }; use alloc::vec::Vec; pub use alloy_eips::eip1898::{ BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, }; -use alloy_primitives::Sealable; +use alloy_eips::eip2718::Encodable2718; +use alloy_primitives::{Address, Bytes, Sealable, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] @@ -213,22 +213,7 @@ impl<'a> arbitrary::Arbitrary<'a> for Block { .collect::<arbitrary::Result<Vec<_>>>()?; // then generate up to 2 ommers - let ommers = (0..2) - .map(|_| { - let mut header = Header::arbitrary(u)?; - header.gas_limit = (header.gas_limit as u64).into(); - header.gas_used = (header.gas_used as u64).into(); - header.base_fee_per_gas = header - .base_fee_per_gas - .map(|base_fee_per_gas| (base_fee_per_gas as u64).into()); - header.blob_gas_used = - header.blob_gas_used.map(|blob_gas_used| (blob_gas_used as u64).into()); - header.excess_blob_gas = - header.excess_blob_gas.map(|excess_blob_gas| (excess_blob_gas as u64).into()); - - Ok(header) - }) - .collect::<arbitrary::Result<Vec<_>>>()?; + let ommers = (0..2).map(|_| Header::arbitrary(u)).collect::<arbitrary::Result<Vec<_>>>()?; Ok(Self { header:
u.arbitrary()?, @@ -479,9 +464,10 @@ impl SealedBlock { Ok(()) } - /// Returns a vector of transactions RLP encoded with [`TransactionSigned::encode_enveloped`]. + /// Returns a vector of transactions RLP encoded with + /// [`alloy_eips::eip2718::Encodable2718::encoded_2718`]. pub fn raw_transactions(&self) -> Vec<Bytes> { - self.body.transactions().map(|tx| tx.envelope_encoded()).collect() + self.body.transactions().map(|tx| tx.encoded_2718().into()).collect() } } @@ -555,6 +541,22 @@ impl SealedBlockWithSenders { } } +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> arbitrary::Arbitrary<'a> for SealedBlockWithSenders { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> { + let block = SealedBlock::arbitrary(u)?; + + let senders = block + .body + .transactions + .iter() + .map(|tx| tx.recover_signer().unwrap()) + .collect::<Vec<_>>(); + + Ok(Self { block, senders }) + } +} + /// A response to `GetBlockBodies`, containing bodies if any bodies were found. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. @@ -576,17 +578,8 @@ pub struct BlockBody { impl BlockBody { /// Create a [`Block`] from the body and its header. - // todo(onbjerg): should this not just take `self`? its used in one place - pub fn create_block(&self, header: Header) -> Block { - Block { - header, - body: Self { - transactions: self.transactions.clone(), - ommers: self.ommers.clone(), - withdrawals: self.withdrawals.clone(), - requests: self.requests.clone(), - }, - } + pub const fn into_block(self, header: Header) -> Block { + Block { header, body: self } } /// Calculate the transaction root for the block body. @@ -695,16 +688,7 @@ impl<'a> arbitrary::Arbitrary<'a> for BlockBody { // then generate up to 2 ommers let ommers = (0..2) .map(|_| { - let mut header = Header::arbitrary(u)?; - header.gas_limit = (header.gas_limit as u64).into(); - header.gas_used = (header.gas_used as u64).into(); - header.base_fee_per_gas = header - .base_fee_per_gas - .map(|base_fee_per_gas| (base_fee_per_gas as u64).into()); - header.blob_gas_used = - header.blob_gas_used.map(|blob_gas_used| (blob_gas_used as u64).into()); - header.excess_blob_gas = - header.excess_blob_gas.map(|excess_blob_gas| (excess_blob_gas as u64).into()); + let header = Header::arbitrary(u)?; Ok(header) }) @@ -715,11 +699,265 @@ } } +/// Bincode-compatible block type serde implementations. +#[cfg(feature = "serde-bincode-compat")] +pub(super) mod serde_bincode_compat { + use alloc::{borrow::Cow, vec::Vec}; + use alloy_consensus::serde_bincode_compat::Header; + use alloy_primitives::Address; + use reth_primitives_traits::{serde_bincode_compat::SealedHeader, Requests, Withdrawals}; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{DeserializeAs, SerializeAs}; + + use crate::transaction::serde_bincode_compat::TransactionSigned; + + /// Bincode-compatible [`super::BlockBody`] serde implementation.
+ /// + /// Intended for use with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_primitives::{serde_bincode_compat, BlockBody}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + ///     #[serde_as(as = "serde_bincode_compat::BlockBody")] + ///     body: BlockBody, + /// } + /// ``` + #[derive(Debug, Serialize, Deserialize)] + pub struct BlockBody<'a> { + transactions: Vec<TransactionSigned<'a>>, + ommers: Vec<Header<'a>>, + withdrawals: Cow<'a, Option<Withdrawals>>, + requests: Cow<'a, Option<Requests>>, + } + + impl<'a> From<&'a super::BlockBody> for BlockBody<'a> { + fn from(value: &'a super::BlockBody) -> Self { + Self { + transactions: value.transactions.iter().map(Into::into).collect(), + ommers: value.ommers.iter().map(Into::into).collect(), + withdrawals: Cow::Borrowed(&value.withdrawals), + requests: Cow::Borrowed(&value.requests), + } + } + } + + impl<'a> From<BlockBody<'a>> for super::BlockBody { + fn from(value: BlockBody<'a>) -> Self { + Self { + transactions: value.transactions.into_iter().map(Into::into).collect(), + ommers: value.ommers.into_iter().map(Into::into).collect(), + withdrawals: value.withdrawals.into_owned(), + requests: value.requests.into_owned(), + } + } + } + + impl<'a> SerializeAs<super::BlockBody> for BlockBody<'a> { + fn serialize_as<S>(source: &super::BlockBody, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + BlockBody::from(source).serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::BlockBody> for BlockBody<'de> { + fn deserialize_as<D>(deserializer: D) -> Result<super::BlockBody, D::Error> + where + D: Deserializer<'de>, + { + BlockBody::deserialize(deserializer).map(Into::into) + } + } + + /// Bincode-compatible [`super::SealedBlock`] serde implementation. + /// + /// Intended for use with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_primitives::{serde_bincode_compat, SealedBlock}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + ///     #[serde_as(as = "serde_bincode_compat::SealedBlock")] + ///     block: SealedBlock, + /// } + /// ``` + #[derive(Debug, Serialize, Deserialize)] + pub struct SealedBlock<'a> { + header: SealedHeader<'a>, + body: BlockBody<'a>, + } + + impl<'a> From<&'a super::SealedBlock> for SealedBlock<'a> { + fn from(value: &'a super::SealedBlock) -> Self { + Self { header: SealedHeader::from(&value.header), body: BlockBody::from(&value.body) } + } + } + + impl<'a> From<SealedBlock<'a>> for super::SealedBlock { + fn from(value: SealedBlock<'a>) -> Self { + Self { header: value.header.into(), body: value.body.into() } + } + } + + impl<'a> SerializeAs<super::SealedBlock> for SealedBlock<'a> { + fn serialize_as<S>(source: &super::SealedBlock, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + SealedBlock::from(source).serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::SealedBlock> for SealedBlock<'de> { + fn deserialize_as<D>(deserializer: D) -> Result<super::SealedBlock, D::Error> + where + D: Deserializer<'de>, + { + SealedBlock::deserialize(deserializer).map(Into::into) + } + } + + /// Bincode-compatible [`super::SealedBlockWithSenders`] serde implementation.
+ /// + /// Intended for use with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_primitives::{serde_bincode_compat, SealedBlockWithSenders}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + ///     #[serde_as(as = "serde_bincode_compat::SealedBlockWithSenders")] + ///     block: SealedBlockWithSenders, + /// } + /// ``` + #[derive(Debug, Serialize, Deserialize)] + pub struct SealedBlockWithSenders<'a> { + block: SealedBlock<'a>, + senders: Cow<'a, Vec<Address>>, + } + + impl<'a> From<&'a super::SealedBlockWithSenders> for SealedBlockWithSenders<'a> { + fn from(value: &'a super::SealedBlockWithSenders) -> Self { + Self { block: SealedBlock::from(&value.block), senders: Cow::Borrowed(&value.senders) } + } + } + + impl<'a> From<SealedBlockWithSenders<'a>> for super::SealedBlockWithSenders { + fn from(value: SealedBlockWithSenders<'a>) -> Self { + Self { block: value.block.into(), senders: value.senders.into_owned() } + } + } + + impl<'a> SerializeAs<super::SealedBlockWithSenders> for SealedBlockWithSenders<'a> { + fn serialize_as<S>( + source: &super::SealedBlockWithSenders, + serializer: S, + ) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + SealedBlockWithSenders::from(source).serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::SealedBlockWithSenders> for SealedBlockWithSenders<'de> { + fn deserialize_as<D>(deserializer: D) -> Result<super::SealedBlockWithSenders, D::Error> + where + D: Deserializer<'de>, + { + SealedBlockWithSenders::deserialize(deserializer).map(Into::into) + } + } + + #[cfg(test)] + mod tests { + use super::super::{serde_bincode_compat, BlockBody, SealedBlock, SealedBlockWithSenders}; + + use arbitrary::Arbitrary; + use rand::Rng; + use reth_testing_utils::generators; + use serde::{Deserialize, Serialize}; + use serde_with::serde_as; + + #[test] + fn test_block_body_bincode_roundtrip() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + #[serde_as(as = "serde_bincode_compat::BlockBody")] + block_body: BlockBody, + } + + let mut bytes = [0u8; 1024]; + generators::rng().fill(bytes.as_mut_slice()); + let data = Data { + block_body: BlockBody::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) + .unwrap(), + }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + + #[test] + fn test_sealed_block_bincode_roundtrip() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + #[serde_as(as = "serde_bincode_compat::SealedBlock")] + block: SealedBlock, + } + + let mut bytes = [0u8; 1024]; + generators::rng().fill(bytes.as_mut_slice()); + let data = Data { + block: SealedBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(), + }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + + #[test] + fn test_sealed_block_with_senders_bincode_roundtrip() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + #[serde_as(as = "serde_bincode_compat::SealedBlockWithSenders")] + block: SealedBlockWithSenders, + } + + let mut bytes = [0u8; 1024]; + generators::rng().fill(bytes.as_mut_slice()); + let data = Data { + block: SealedBlockWithSenders::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) + .unwrap(), + }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + } +} + #[cfg(test)] mod tests { use super::{BlockNumberOrTag::*, *}; - use crate::hex_literal::hex; use alloy_eips::eip1898::HexStringMissingPrefixError; + use alloy_primitives::hex_literal::hex; use alloy_rlp::{Decodable, Encodable}; use std::str::FromStr; diff --git a/crates/primitives/src/compression/mod.rs b/crates/primitives/src/compression/mod.rs index 476f5d06b2ad..ecceafc20682 100644 --- a/crates/primitives/src/compression/mod.rs +++ b/crates/primitives/src/compression/mod.rs @@ -1,6 +1,5 @@ use alloc::vec::Vec; use core::cell::RefCell; -use 
std::thread_local; use zstd::bulk::{Compressor, Decompressor}; /// Compression/Decompression dictionary for `Receipt`. @@ -10,7 +9,8 @@ pub static TRANSACTION_DICTIONARY: &[u8] = include_bytes!("./transaction_diction // We use `thread_local` compressors and decompressors because dictionaries can be quite big, and // zstd-rs recommends using one context/compressor per thread -thread_local! { +#[cfg(feature = "std")] +std::thread_local! { /// Thread Transaction compressor. pub static TRANSACTION_COMPRESSOR: RefCell<Compressor<'static>> = RefCell::new( Compressor::with_dictionary(0, TRANSACTION_DICTIONARY) @@ -38,6 +38,33 @@ )); } +/// Creates a transaction [`Compressor`]. +pub fn create_tx_compressor() -> Compressor<'static> { + Compressor::with_dictionary(0, TRANSACTION_DICTIONARY).expect("Failed to instantiate tx compressor") +} + +/// Creates a transaction [`Decompressor`]. +pub fn create_tx_decompressor() -> ReusableDecompressor { + ReusableDecompressor::new( + Decompressor::with_dictionary(TRANSACTION_DICTIONARY) + .expect("Failed to instantiate tx decompressor"), + ) +} + +/// Creates a receipt [`Compressor`]. +pub fn create_receipt_compressor() -> Compressor<'static> { + Compressor::with_dictionary(0, RECEIPT_DICTIONARY) + .expect("Failed to instantiate receipt compressor") +} + +/// Creates a receipt [`Decompressor`]. +pub fn create_receipt_decompressor() -> ReusableDecompressor { + ReusableDecompressor::new( + Decompressor::with_dictionary(RECEIPT_DICTIONARY) + .expect("Failed to instantiate receipt decompressor"), + ) +} + /// Reusable decompressor that uses its own internal buffer. #[allow(missing_debug_implementations)] pub struct ReusableDecompressor { diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 6aadae2082de..ec65cbf20e52 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -1,4 +1,4 @@ -//! Commonly used types in reth. +//! Commonly used types in Reth. //! //! This crate contains Ethereum primitive types and helper functions. //! 
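The new helper constructors above let callers without thread-local storage (or callers that want a dedicated context per task) own their zstd contexts, complementing the `std`-only `thread_local!` statics. A usage sketch (illustrative; assumes the `compression` module is publicly reachable under the codec feature):

use reth_primitives::compression::{create_tx_compressor, create_tx_decompressor};

fn roundtrip(payload: &[u8]) -> Vec<u8> {
    // Contexts are comparatively expensive to create and cheap to reuse:
    // hold on to these rather than rebuilding them per call.
    let mut compressor = create_tx_compressor();
    let compressed = compressor.compress(payload).expect("compress");
    let mut decompressor = create_tx_decompressor();
    decompressor.decompress(&compressed).to_vec()
}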
@@ -62,52 +62,33 @@ pub use transaction::BlobTransactionValidationError; pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, - IntoRecoveredTransaction, InvalidTransactionError, Signature, Transaction, TransactionMeta, - TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930, - TxEip4844, TxEip7702, TxHashOrNumber, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, - EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, + InvalidTransactionError, Signature, Transaction, TransactionMeta, TransactionSigned, + TransactionSignedEcRecovered, TransactionSignedNoHash, TxHashOrNumber, TxType, + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, }; // Re-exports -pub use alloy_primitives::{ - self, address, b256, bloom, bytes, - bytes::{Buf, BufMut, BytesMut}, - eip191_hash_message, hex, hex_literal, keccak256, ruint, - utils::format_ether, - Address, BlockHash, BlockNumber, Bloom, BloomInput, Bytes, ChainId, Selector, StorageKey, - StorageValue, TxHash, TxIndex, TxNumber, B128, B256, B512, B64, U128, U256, U64, U8, -}; pub use reth_ethereum_forks::*; pub use revm_primitives::{self, JumpTable}; -#[doc(hidden)] -#[deprecated = "use B64 instead"] -pub type H64 = B64; -#[doc(hidden)] -#[deprecated = "use B128 instead"] -pub type H128 = B128; -#[doc(hidden)] -#[deprecated = "use Address instead"] -pub type H160 = Address; -#[doc(hidden)] -#[deprecated = "use B256 instead"] -pub type H256 = B256; -#[doc(hidden)] -#[deprecated = "use B512 instead"] -pub type H512 = B512; - #[cfg(any(test, feature = "arbitrary"))] pub use arbitrary; #[cfg(feature = "c-kzg")] pub use c_kzg as kzg; -/// Optimism specific re-exports -#[cfg(feature = "optimism")] -mod optimism { - pub use crate::transaction::{optimism_deposit_tx_signature, TxDeposit, DEPOSIT_TX_TYPE_ID}; - pub use reth_optimism_chainspec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; +/// Bincode-compatible serde implementations for commonly used types in Reth. +/// +/// The `bincode` crate doesn't work with optionally serializable serde fields, but some of the +/// Reth types require optional serialization for RPC compatibility. This module makes it so that +/// all fields are serialized. +/// +/// Read more: +#[cfg(feature = "serde-bincode-compat")] +pub mod serde_bincode_compat { + pub use super::{ + block::serde_bincode_compat::*, + transaction::{serde_bincode_compat as transaction, serde_bincode_compat::*}, + }; } - -#[cfg(feature = "optimism")] -pub use optimism::*; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 7f41fa9bd20f..a12a5d6be89f 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,11 +1,12 @@ //! Helper functions for calculating Merkle proofs and hashes. use crate::{ - constants::EMPTY_OMMER_ROOT_HASH, keccak256, Header, Receipt, ReceiptWithBloom, - ReceiptWithBloomRef, Request, TransactionSigned, Withdrawal, B256, + constants::EMPTY_OMMER_ROOT_HASH, Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, + Request, TransactionSigned, Withdrawal, }; use alloc::vec::Vec; -use alloy_eips::eip7685::Encodable7685; +use alloy_eips::{eip2718::Encodable2718, eip7685::Encodable7685}; +use alloy_primitives::{keccak256, B256}; use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; /// Calculate a transaction root.
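With the move to EIP-2718 encoding, `calculate_transaction_root` (next hunk) feeds each transaction's envelope bytes (type byte + payload) into an ordered trie keyed by RLP-encoded index. A minimal sketch of that shape, assuming the `ordered_trie_root_with_encoder` helper imported above (illustrative):

use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::B256;
use reth_trie_common::root::ordered_trie_root_with_encoder;

fn tx_root_sketch<T: Encodable2718>(txs: &[T]) -> B256 {
    // Each item is encoded as its EIP-2718 envelope and inserted at its index.
    ordered_trie_root_with_encoder(txs, |tx, buf| tx.encode_2718(buf))
}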
@@ -15,7 +16,7 @@ pub fn calculate_transaction_root<T>(transactions: &[T]) -> B256 where T: AsRef<TransactionSigned>, { - ordered_trie_root_with_encoder(transactions, |tx: &T, buf| tx.as_ref().encode_inner(buf, false)) + ordered_trie_root_with_encoder(transactions, |tx: &T, buf| tx.as_ref().encode_2718(buf)) } /// Calculates the root hash of the withdrawals. @@ -49,47 +50,6 @@ pub fn calculate_receipt_root_no_memo(receipts: &[&Receipt]) -> B256 { }) } -/// Calculates the receipt root for a header for the reference type of [Receipt]. -/// -/// NOTE: Prefer calculate receipt root optimism if you have log blooms memoized. -#[cfg(feature = "optimism")] -pub fn calculate_receipt_root_no_memo_optimism( - receipts: &[&Receipt], - chain_spec: impl reth_chainspec::Hardforks, - timestamp: u64, -) -> B256 { - // There is a minor bug in op-geth and op-erigon where in the Regolith hardfork, - // the receipt root calculation does not include the deposit nonce in the receipt - // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the - // receipts before calculating the receipt root. This was corrected in the Canyon - // hardfork. - - if chain_spec - .is_fork_active_at_timestamp(reth_optimism_forks::OptimismHardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp( - reth_optimism_forks::OptimismHardfork::Canyon, - timestamp, - ) - { - let receipts = receipts - .iter() - .map(|r| { - let mut r = (*r).clone(); - r.deposit_nonce = None; - r - }) - .collect::<Vec<_>>(); - - return ordered_trie_root_with_encoder(&receipts, |r, buf| { - ReceiptWithBloomRef::from(r).encode_inner(buf, false) - }) - } - - ordered_trie_root_with_encoder(receipts, |r, buf| { - ReceiptWithBloomRef::from(*r).encode_inner(buf, false) - }) -} - /// Calculates the root hash for ommer/uncle headers. pub fn calculate_ommers_root(ommers: &[Header]) -> B256 { // Check if `ommers` list is empty @@ -105,9 +65,9 @@ pub fn calculate_ommers_root(ommers: &[Header]) -> B256 { #[cfg(test)] mod tests { use super::*; - use crate::{constants::EMPTY_ROOT_HASH, hex_literal::hex, Block, U256}; + use crate::{constants::EMPTY_ROOT_HASH, Block}; use alloy_genesis::GenesisAccount; - use alloy_primitives::{b256, Address}; + use alloy_primitives::{b256, hex_literal::hex, Address, U256}; use alloy_rlp::Decodable; use reth_chainspec::{HOLESKY, MAINNET, SEPOLIA}; use reth_trie_common::root::{state_root_ref_unhashed, state_root_unhashed}; diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 6b1b62ba6ffb..5c794be5061e 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,11 +1,11 @@ #[cfg(feature = "reth-codec")] use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; use crate::{ - logs_bloom, Bloom, Bytes, TxType, B256, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, - EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + logs_bloom, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, + EIP7702_TX_TYPE_ID, }; use alloc::{vec, vec::Vec}; -use alloy_primitives::Log; +use alloy_primitives::{Bloom, Bytes, Log, B256}; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; use core::{cmp::Ordering, ops::Deref}; @@ -100,26 +100,11 @@ impl Receipts { self.receipt_vec.push(receipts); } - /// Retrieves the receipt root for all recorded receipts from index. - pub fn root_slow(&self, index: usize) -> Option<B256> { - Some(crate::proofs::calculate_receipt_root_no_memo( - &self.receipt_vec[index].iter().map(Option::as_ref).collect::<Option<Vec<_>>>()?, - )) - } - - /// Retrieves the receipt root for all recorded receipts from index. - #[cfg(feature = "optimism")] - pub fn optimism_root_slow( - &self, - index: usize, - chain_spec: impl reth_chainspec::Hardforks, - timestamp: u64, - ) -> Option<B256> { - Some(crate::proofs::calculate_receipt_root_no_memo_optimism( - &self.receipt_vec[index].iter().map(Option::as_ref).collect::<Option<Vec<_>>>()?, - chain_spec, - timestamp, - )) + /// Retrieves all recorded receipts from index and calculates the root using the given closure. + pub fn root_slow(&self, index: usize, f: impl FnOnce(&[&Receipt]) -> B256) -> Option<B256> { + let receipts = + self.receipt_vec[index].iter().map(Option::as_ref).collect::<Option<Vec<_>>>()?; + Some(f(receipts.as_slice())) } } @@ -347,7 +332,7 @@ impl Decodable for ReceiptWithBloom { Self::decode_receipt(buf, TxType::Eip7702) } #[cfg(feature = "optimism")] - crate::DEPOSIT_TX_TYPE_ID => { + crate::transaction::DEPOSIT_TX_TYPE_ID => { buf.advance(1); Self::decode_receipt(buf, TxType::Deposit) } @@ -483,7 +468,7 @@ impl<'a> ReceiptWithBloomEncoder<'a> { } #[cfg(feature = "optimism")] TxType::Deposit => { - out.put_u8(crate::DEPOSIT_TX_TYPE_ID); + out.put_u8(crate::transaction::DEPOSIT_TX_TYPE_ID); } } out.put_slice(payload.as_ref()); @@ -516,8 +501,7 @@ impl<'a> Encodable for ReceiptWithBloomEncoder<'a> { #[cfg(test)] mod tests { use super::*; - use crate::hex_literal::hex; - use alloy_primitives::{address, b256, bytes}; + use alloy_primitives::{address, b256, bytes, hex_literal::hex}; // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 #[test] diff --git a/crates/primitives/src/traits/block/body.rs b/crates/primitives/src/traits/block/body.rs new file mode 100644 index 000000000000..ff8f71b76162 --- /dev/null +++ b/crates/primitives/src/traits/block/body.rs @@ -0,0 +1,152 @@ +//! Block body abstraction. + +use alloc::fmt; +use core::ops; + +use alloy_consensus::{BlockHeader, Transaction, TxType}; +use alloy_primitives::{Address, B256}; + +use crate::{proofs, traits::Block, Requests, Withdrawals}; + +/// Abstraction for block's body. +pub trait BlockBody: + Clone + + fmt::Debug + + PartialEq + + Eq + + Default + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + alloy_rlp::Encodable + + alloy_rlp::Decodable +{ + /// Ordered list of signed transactions as committed in block. + // todo: requires trait for signed transaction + type SignedTransaction: Transaction; + + /// Header type (uncle blocks). + type Header: BlockHeader; + + /// Returns reference to transactions in block. + fn transactions(&self) -> &[Self::SignedTransaction]; + + /// Returns [`Withdrawals`] in the block, if any. + // todo: branch out into extension trait + fn withdrawals(&self) -> Option<&Withdrawals>; + + /// Returns reference to uncle block headers. + fn ommers(&self) -> &[Self::Header]; + + /// Returns [`Request`] in block, if any. + fn requests(&self) -> Option<&Requests>; + + /// Create a [`Block`] from the body and its header. + fn into_block<T: Block<Header = Self::Header, Body = Self>>(self, header: Self::Header) -> T { + T::from((header, self)) + } + + /// Calculate the transaction root for the block body. + fn calculate_tx_root(&self) -> B256; + + /// Calculate the ommers root for the block body. + fn calculate_ommers_root(&self) -> B256; + + /// Calculate the withdrawals root for the block body, if withdrawals exist. If there are no + /// withdrawals, this will return `None`.
+ fn calculate_withdrawals_root(&self) -> Option<B256> { + Some(proofs::calculate_withdrawals_root(self.withdrawals()?)) + } + + /// Calculate the requests root for the block body, if requests exist. If there are no + /// requests, this will return `None`. + fn calculate_requests_root(&self) -> Option<B256> { + Some(proofs::calculate_requests_root(self.requests()?)) + } + + /// Recover signer addresses for all transactions in the block body. + fn recover_signers(&self) -> Option<Vec<Address>>; + + /// Returns whether or not the block body contains any blob transactions. + fn has_blob_transactions(&self) -> bool { + self.transactions().iter().any(|tx| tx.ty() as u8 == TxType::Eip4844 as u8) + } + + /// Returns whether or not the block body contains any EIP-7702 transactions. + fn has_eip7702_transactions(&self) -> bool { + self.transactions().iter().any(|tx| tx.ty() as u8 == TxType::Eip7702 as u8) + } + + /// Returns an iterator over all blob transactions of the block + fn blob_transactions_iter(&self) -> impl Iterator<Item = &Self::SignedTransaction> + '_ { + self.transactions().iter().filter(|tx| tx.ty() as u8 == TxType::Eip4844 as u8) + } + + /// Returns only the blob transactions, if any, from the block body. + fn blob_transactions(&self) -> Vec<&Self::SignedTransaction> { + self.blob_transactions_iter().collect() + } + + /// Returns an iterator over all blob versioned hashes from the block body. + fn blob_versioned_hashes_iter(&self) -> impl Iterator<Item = &B256> + '_; + + /// Returns all blob versioned hashes from the block body. + fn blob_versioned_hashes(&self) -> Vec<&B256> { + self.blob_versioned_hashes_iter().collect() + } + + /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. + fn size(&self) -> usize; +} + +impl<T> BlockBody for T +where + T: ops::Deref<Target: BlockBody> + + Clone + + fmt::Debug + + PartialEq + + Eq + + Default + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + alloy_rlp::Encodable + + alloy_rlp::Decodable, +{ + type Header = <T::Target as BlockBody>::Header; + type SignedTransaction = <T::Target as BlockBody>::SignedTransaction; + + fn transactions(&self) -> &[Self::SignedTransaction] { + self.deref().transactions() + } + + fn withdrawals(&self) -> Option<&Withdrawals> { + self.deref().withdrawals() + } + + fn ommers(&self) -> &[Self::Header] { + self.deref().ommers() + } + + fn requests(&self) -> Option<&Requests> { + self.deref().requests() + } + + fn calculate_tx_root(&self) -> B256 { + self.deref().calculate_tx_root() + } + + fn calculate_ommers_root(&self) -> B256 { + self.deref().calculate_ommers_root() + } + + fn recover_signers(&self) -> Option<Vec<Address>> { + self.deref().recover_signers() + } + + fn blob_versioned_hashes_iter(&self) -> impl Iterator<Item = &B256> + '_ { + self.deref().blob_versioned_hashes_iter() + } + + fn size(&self) -> usize { + self.deref().size() + } +} diff --git a/crates/primitives/src/traits/block/mod.rs b/crates/primitives/src/traits/block/mod.rs new file mode 100644 index 000000000000..451a54c3457c --- /dev/null +++ b/crates/primitives/src/traits/block/mod.rs @@ -0,0 +1,137 @@ +//! Block abstraction. + +pub mod body; + +use alloc::fmt; +use core::ops; + +use alloy_consensus::BlockHeader; +use alloy_primitives::{Address, Sealable, B256}; + +use crate::{traits::BlockBody, BlockWithSenders, SealedBlock, SealedHeader}; + +/// Abstraction of block data type. +pub trait Block: + fmt::Debug + + Clone + + PartialEq + + Eq + + Default + + serde::Serialize + + for<'a> serde::Deserialize<'a> + + From<(Self::Header, Self::Body)> + + Into<(Self::Header, Self::Body)> +{ + /// Header part of the block.
+ type Header: BlockHeader + Sealable; + + /// The block's body contains the transactions in the block. + type Body: BlockBody; + + /// Returns reference to [`BlockHeader`] type. + fn header(&self) -> &Self::Header; + + /// Returns reference to [`BlockBody`] type. + fn body(&self) -> &Self::Body; + + /// Calculate the header hash and seal the block so that it can't be changed. + fn seal_slow(self) -> SealedBlock<Self::Header, Self::Body> { + let (header, body) = self.into(); + let sealed = header.seal_slow(); + let (header, seal) = sealed.into_parts(); + SealedBlock { header: SealedHeader::new(header, seal), body } + } + + /// Seal the block with a known hash. + /// + /// WARNING: This method does not perform validation whether the hash is correct. + fn seal(self, hash: B256) -> SealedBlock<Self::Header, Self::Body> { + let (header, body) = self.into(); + SealedBlock { header: SealedHeader::new(header, hash), body } + } + + /// Expensive operation that recovers transaction signer. See + /// [`SealedBlockWithSenders`](reth_primitives::SealedBlockWithSenders). + fn senders(&self) -> Option<Vec<Address>> { + self.body().recover_signers() + } + + /// Transform into a [`BlockWithSenders`]. + /// + /// # Panics + /// + /// If the number of senders does not match the number of transactions in the block + /// and the signer recovery for one of the transactions fails. + /// + /// Note: this is expected to be called with blocks read from disk. + #[track_caller] + fn with_senders_unchecked(self, senders: Vec<Address>
) -> BlockWithSenders<Self> { + self.try_with_senders_unchecked(senders).expect("stored block is valid") + } + + /// Transform into a [`BlockWithSenders`] using the given senders. + /// + /// If the number of senders does not match the number of transactions in the block, this falls + /// back to manual recovery, but _without ensuring that the signature has a low `s` value_. + /// See also [`TransactionSigned::recover_signer_unchecked`] + /// + /// Returns an error if a signature is invalid. + #[track_caller] + fn try_with_senders_unchecked( + self, + senders: Vec<Address>
, + ) -> Result<BlockWithSenders<Self>, Self> { + let senders = if self.body().transactions().len() == senders.len() { + senders + } else { + let Some(senders) = self.body().recover_signers() else { return Err(self) }; + senders + }; + + Ok(BlockWithSenders { block: self, senders }) + } + + /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained + /// transactions. + /// + /// Returns `None` if a transaction is invalid. + fn with_recovered_senders(self) -> Option<BlockWithSenders<Self>> { + let senders = self.senders()?; + Some(BlockWithSenders { block: self, senders }) + } + + /// Calculates a heuristic for the in-memory size of the [`Block`]. + fn size(&self) -> usize; +} + +impl<T> Block for T +where + T: ops::Deref<Target: Block> + + fmt::Debug + + Clone + + PartialEq + + Eq + + Default + + serde::Serialize + + for<'a> serde::Deserialize<'a> + + From<(<T::Target as Block>::Header, <T::Target as Block>::Body)> + + Into<(<T::Target as Block>::Header, <T::Target as Block>::Body)>, +{ + type Header = <T::Target as Block>::Header; + type Body = <T::Target as Block>::Body; + + #[inline] + fn header(&self) -> &Self::Header { + self.deref().header() + } + + #[inline] + fn body(&self) -> &Self::Body { + self.deref().body() + } + + #[inline] + fn size(&self) -> usize { + self.deref().size() + } +} diff --git a/crates/primitives/src/traits/mod.rs b/crates/primitives/src/traits/mod.rs new file mode 100644 index 000000000000..8c84c6729753 --- /dev/null +++ b/crates/primitives/src/traits/mod.rs @@ -0,0 +1,7 @@ +//! Abstractions of primitive data types + +pub mod block; + +pub use block::{body::BlockBody, Block}; + +pub use alloy_consensus::BlockHeader; diff --git a/crates/primitives/src/transaction/access_list.rs b/crates/primitives/src/transaction/access_list.rs index 7a0782bef761..8406e5a5b481 100644 --- a/crates/primitives/src/transaction/access_list.rs +++ b/crates/primitives/src/transaction/access_list.rs @@ -2,8 +2,8 @@ #[cfg(test)] mod tests { - use crate::{Address, B256}; use alloy_eips::eip2930::{AccessList, AccessListItem}; + use alloy_primitives::{Address, B256}; use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; use proptest::proptest; use proptest_arbitrary_interop::arb; diff --git a/crates/primitives/src/transaction/compat.rs b/crates/primitives/src/transaction/compat.rs index 319ae55b1af1..81281186f64c 100644 --- a/crates/primitives/src/transaction/compat.rs +++ b/crates/primitives/src/transaction/compat.rs @@ -1,5 +1,5 @@ -use crate::{Address, Transaction, TransactionSigned, U256}; -use alloy_primitives::TxKind; +use crate::{Transaction, TransactionSigned}; +use alloy_primitives::{Address, TxKind, U256}; use revm_primitives::{AuthorizationList, TxEnv}; /// Implements behaviour to fill a [`TxEnv`] from another transaction.
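The new `traits::Block` and `traits::BlockBody` abstractions above let downstream code stay generic over the concrete block type. A minimal sketch of a generic consumer, assuming the re-exports from `crates/primitives/src/traits/mod.rs`; the `summarize` helper is hypothetical:

```rust
use reth_primitives::traits::{Block, BlockBody};

// Hypothetical helper: works for any implementor of the new `Block` trait,
// not just the concrete `reth_primitives::Block` struct.
fn summarize<B: Block>(block: &B) -> (usize, bool) {
    let body = block.body();
    // Transaction count, and whether any EIP-4844 blob transactions are present
    // (`has_blob_transactions` is a default method on `BlockBody`).
    (body.transactions().len(), body.has_blob_transactions())
}
```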
@@ -11,16 +11,12 @@ pub trait FillTxEnv { impl FillTxEnv for TransactionSigned { fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { #[cfg(feature = "optimism")] - let envelope = { - let mut envelope = alloc::vec::Vec::with_capacity(self.length_without_header()); - self.encode_enveloped(&mut envelope); - envelope - }; + let envelope = alloy_eips::eip2718::Encodable2718::encoded_2718(self); tx_env.caller = sender; match self.as_ref() { Transaction::Legacy(tx) => { - tx_env.gas_limit = tx.gas_limit as u64; + tx_env.gas_limit = tx.gas_limit; tx_env.gas_price = U256::from(tx.gas_price); tx_env.gas_priority_fee = None; tx_env.transact_to = tx.to; @@ -34,7 +30,7 @@ impl FillTxEnv for TransactionSigned { tx_env.authorization_list = None; } Transaction::Eip2930(tx) => { - tx_env.gas_limit = tx.gas_limit as u64; + tx_env.gas_limit = tx.gas_limit; tx_env.gas_price = U256::from(tx.gas_price); tx_env.gas_priority_fee = None; tx_env.transact_to = tx.to; @@ -48,7 +44,7 @@ impl FillTxEnv for TransactionSigned { tx_env.authorization_list = None; } Transaction::Eip1559(tx) => { - tx_env.gas_limit = tx.gas_limit as u64; + tx_env.gas_limit = tx.gas_limit; tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = tx.to; @@ -62,7 +58,7 @@ impl FillTxEnv for TransactionSigned { tx_env.authorization_list = None; } Transaction::Eip4844(tx) => { - tx_env.gas_limit = tx.gas_limit as u64; + tx_env.gas_limit = tx.gas_limit; tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = TxKind::Call(tx.to); @@ -76,7 +72,7 @@ impl FillTxEnv for TransactionSigned { tx_env.authorization_list = None; } Transaction::Eip7702(tx) => { - tx_env.gas_limit = tx.gas_limit as u64; + tx_env.gas_limit = tx.gas_limit; tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = tx.to.into(); @@ -93,7 +89,7 @@ impl FillTxEnv for TransactionSigned { #[cfg(feature = "optimism")] Transaction::Deposit(tx) => { tx_env.access_list.clear(); - tx_env.gas_limit = tx.gas_limit as u64; + tx_env.gas_limit = tx.gas_limit; tx_env.gas_price = U256::ZERO; tx_env.gas_priority_fee = None; tx_env.transact_to = tx.to; diff --git a/crates/primitives/src/transaction/error.rs b/crates/primitives/src/transaction/error.rs index de4efa4d8f0b..790292cd82b7 100644 --- a/crates/primitives/src/transaction/error.rs +++ b/crates/primitives/src/transaction/error.rs @@ -1,4 +1,5 @@ -use crate::{GotExpectedBoxed, U256}; +use crate::GotExpectedBoxed; +use alloy_primitives::U256; /// Represents error variants that can happen when trying to validate a /// [Transaction](crate::Transaction) @@ -61,8 +62,7 @@ pub enum InvalidTransactionError { SignerAccountHasBytecode, } -#[cfg(feature = "std")] -impl std::error::Error for InvalidTransactionError {} +impl core::error::Error for InvalidTransactionError {} /// Represents error variants that can happen when trying to convert a transaction to /// [`PooledTransactionsElement`](crate::PooledTransactionsElement) @@ -87,5 +87,4 @@ pub enum TryFromRecoveredTransactionError { BlobSidecarMissing, } -#[cfg(feature = "std")] -impl std::error::Error for TryFromRecoveredTransactionError {} +impl core::error::Error for TryFromRecoveredTransactionError {} diff --git a/crates/primitives/src/transaction/meta.rs b/crates/primitives/src/transaction/meta.rs index 
6fc752aad2fe..c7cb9d8b697d 100644 --- a/crates/primitives/src/transaction/meta.rs +++ b/crates/primitives/src/transaction/meta.rs @@ -1,4 +1,4 @@ -use crate::B256; +use alloy_primitives::B256; /// Additional fields in the context of a block that contains this transaction. #[derive(Debug, Clone, Copy, Default, Eq, PartialEq)] diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 01c290a0f5b3..b16c2c88ab52 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,16 +1,16 @@ //! Transaction types. -use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, B256, U256}; +use crate::BlockHashOrNumber; use alloy_eips::eip7702::SignedAuthorization; -use alloy_primitives::TxKind; +use alloy_primitives::{keccak256, Address, TxKind, B256, U256}; -use alloy_consensus::SignableTransaction; -use alloy_eips::eip2930::AccessList; -use alloy_primitives::Parity; -use alloy_rlp::{ - Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, +use alloy_consensus::{SignableTransaction, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip2930::AccessList, }; -use bytes::Buf; +use alloy_primitives::{Bytes, TxHash}; +use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use core::mem; use derive_more::{AsRef, Deref}; use once_cell::sync::Lazy; @@ -18,8 +18,6 @@ use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; use signature::{decode_with_eip155_chain_id, with_eip155_parity}; -pub use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; - pub use error::{ InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, }; @@ -53,9 +51,9 @@ pub(crate) mod util; mod variant; #[cfg(feature = "optimism")] -pub use op_alloy_consensus::TxDeposit; +use op_alloy_consensus::TxDeposit; #[cfg(feature = "optimism")] -pub use reth_optimism_chainspec::optimism_deposit_tx_signature; +use reth_optimism_chainspec::optimism_deposit_tx_signature; #[cfg(feature = "optimism")] pub use tx_type::DEPOSIT_TX_TYPE_ID; #[cfg(any(test, feature = "reth-codec"))] @@ -142,35 +140,29 @@ impl<'a> arbitrary::Arbitrary<'a> for Transaction { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { let mut tx = match TxType::arbitrary(u)? 
{ TxType::Legacy => { - let mut tx = TxLegacy::arbitrary(u)?; - tx.gas_limit = (tx.gas_limit as u64).into(); + let tx = TxLegacy::arbitrary(u)?; Self::Legacy(tx) } TxType::Eip2930 => { - let mut tx = TxEip2930::arbitrary(u)?; - tx.gas_limit = (tx.gas_limit as u64).into(); + let tx = TxEip2930::arbitrary(u)?; Self::Eip2930(tx) } TxType::Eip1559 => { - let mut tx = TxEip1559::arbitrary(u)?; - tx.gas_limit = (tx.gas_limit as u64).into(); + let tx = TxEip1559::arbitrary(u)?; Self::Eip1559(tx) } TxType::Eip4844 => { - let mut tx = TxEip4844::arbitrary(u)?; - tx.gas_limit = (tx.gas_limit as u64).into(); + let tx = TxEip4844::arbitrary(u)?; Self::Eip4844(tx) } TxType::Eip7702 => { - let mut tx = TxEip7702::arbitrary(u)?; - tx.gas_limit = (tx.gas_limit as u64).into(); + let tx = TxEip7702::arbitrary(u)?; Self::Eip7702(tx) } #[cfg(feature = "optimism")] TxType::Deposit => { - let mut tx = TxDeposit::arbitrary(u)?; - tx.gas_limit = (tx.gas_limit as u64).into(); + let tx = TxDeposit::arbitrary(u)?; Self::Deposit(tx) } }; @@ -322,9 +314,9 @@ impl Transaction { Self::Eip1559(TxEip1559 { gas_limit, .. }) | Self::Eip4844(TxEip4844 { gas_limit, .. }) | Self::Eip7702(TxEip7702 { gas_limit, .. }) | - Self::Eip2930(TxEip2930 { gas_limit, .. }) => *gas_limit as u64, + Self::Eip2930(TxEip2930 { gas_limit, .. }) => *gas_limit, #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { gas_limit, .. }) => *gas_limit as u64, + Self::Deposit(TxDeposit { gas_limit, .. }) => *gas_limit, } } @@ -562,13 +554,13 @@ impl Transaction { /// This sets the transaction's gas limit. pub fn set_gas_limit(&mut self, gas_limit: u64) { match self { - Self::Legacy(tx) => tx.gas_limit = gas_limit.into(), - Self::Eip2930(tx) => tx.gas_limit = gas_limit.into(), - Self::Eip1559(tx) => tx.gas_limit = gas_limit.into(), - Self::Eip4844(tx) => tx.gas_limit = gas_limit.into(), - Self::Eip7702(tx) => tx.gas_limit = gas_limit.into(), + Self::Legacy(tx) => tx.gas_limit = gas_limit, + Self::Eip2930(tx) => tx.gas_limit = gas_limit, + Self::Eip1559(tx) => tx.gas_limit = gas_limit, + Self::Eip4844(tx) => tx.gas_limit = gas_limit, + Self::Eip7702(tx) => tx.gas_limit = gas_limit, #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.gas_limit = gas_limit.into(), + Self::Deposit(tx) => tx.gas_limit = gas_limit, } } @@ -738,6 +730,8 @@ impl reth_codecs::Compact for Transaction { // A panic will be triggered if an identifier larger than 3 is passed from the database. For // optimism a identifier with value [`DEPOSIT_TX_TYPE_ID`] is allowed. fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + match identifier { COMPACT_IDENTIFIER_LEGACY => { let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); @@ -868,7 +862,7 @@ impl TransactionSignedNoHash { /// Recover signer from signature and hash _without ensuring that the signature has a low `s` /// value_. /// - /// Re-uses a given buffer to avoid numerous reallocations when recovering batches. **Clears the + /// Reuses a given buffer to avoid numerous reallocations when recovering batches. 
**Clears the /// buffer before use.** /// /// Returns `None` if the transaction's signature is invalid, see also @@ -958,15 +952,20 @@ impl reth_codecs::Compact for TransactionSignedNoHash { let zstd_bit = self.transaction.input().len() >= 32; let tx_bits = if zstd_bit { - crate::compression::TRANSACTION_COMPRESSOR.with(|compressor| { - let mut compressor = compressor.borrow_mut(); - let mut tmp = Vec::with_capacity(256); + let mut tmp = Vec::with_capacity(256); + if cfg!(feature = "std") { + crate::compression::TRANSACTION_COMPRESSOR.with(|compressor| { + let mut compressor = compressor.borrow_mut(); + let tx_bits = self.transaction.to_compact(&mut tmp); + buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); + tx_bits as u8 + }) + } else { + let mut compressor = crate::compression::create_tx_compressor(); let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 - }) + } } else { self.transaction.to_compact(buf) as u8 }; @@ -978,6 +977,8 @@ impl reth_codecs::Compact for TransactionSignedNoHash { } fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; + // The first byte uses 4 bits as flags: IsCompressed[1], TxType[2], Signature[1] let bitflags = buf.get_u8() as usize; @@ -986,17 +987,26 @@ impl reth_codecs::Compact for TransactionSignedNoHash { let zstd_bit = bitflags >> 3; let (transaction, buf) = if zstd_bit != 0 { - crate::compression::TRANSACTION_DECOMPRESSOR.with(|decompressor| { - let mut decompressor = decompressor.borrow_mut(); + if cfg!(feature = "std") { + crate::compression::TRANSACTION_DECOMPRESSOR.with(|decompressor| { + let mut decompressor = decompressor.borrow_mut(); - // TODO: enforce that zstd is only present at a "top" level type + // TODO: enforce that zstd is only present at a "top" level type + let transaction_type = (bitflags & 0b110) >> 1; + let (transaction, _) = + Transaction::from_compact(decompressor.decompress(buf), transaction_type); + + (transaction, buf) + }) + } else { + let mut decompressor = crate::compression::create_tx_decompressor(); let transaction_type = (bitflags & 0b110) >> 1; let (transaction, _) = Transaction::from_compact(decompressor.decompress(buf), transaction_type); (transaction, buf) - }) + } } else { let transaction_type = bitflags >> 1; Transaction::from_compact(buf, transaction_type) @@ -1190,62 +1200,10 @@ impl TransactionSigned { } } - /// Returns the enveloped encoded transactions. - /// - /// See also [`TransactionSigned::encode_enveloped`] - pub fn envelope_encoded(&self) -> Bytes { - let mut buf = Vec::new(); - self.encode_enveloped(&mut buf); - buf.into() - } - - /// Encodes the transaction into the "raw" format (e.g. `eth_sendRawTransaction`). - /// This format is also referred to as "binary" encoding. 
- /// - /// For legacy transactions, it encodes the RLP of the transaction into the buffer: - /// `rlp(tx-data)` - /// For EIP-2718 typed it encodes the type of the transaction followed by the rlp of the - /// transaction: `tx-type || rlp(tx-data)` - pub fn encode_enveloped(&self, out: &mut dyn bytes::BufMut) { - self.encode_inner(out, false) - } - - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash that for eip2718 does not require rlp header - pub(crate) fn encode_inner(&self, out: &mut dyn bytes::BufMut, with_header: bool) { - self.transaction.encode_with_signature(&self.signature, out, with_header); - } - - /// Output the length of the `encode_inner(out`, true). Note to assume that `with_header` is - /// only `true`. - pub(crate) fn payload_len_inner(&self) -> usize { - match &self.transaction { - Transaction::Legacy(legacy_tx) => legacy_tx.encoded_len_with_signature( - &with_eip155_parity(&self.signature, legacy_tx.chain_id), - ), - Transaction::Eip2930(access_list_tx) => { - access_list_tx.encoded_len_with_signature(&self.signature, true) - } - Transaction::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encoded_len_with_signature(&self.signature, true) - } - Transaction::Eip4844(blob_tx) => { - blob_tx.encoded_len_with_signature(&self.signature, true) - } - Transaction::Eip7702(set_code_tx) => { - set_code_tx.encoded_len_with_signature(&self.signature, true) - } - #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => deposit_tx.encoded_len(true), - } - } - /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with /// tx type. pub fn recalculate_hash(&self) -> B256 { - let mut buf = Vec::new(); - self.encode_inner(&mut buf, false); - keccak256(&buf) + keccak256(self.encoded_2718()) } /// Create a new signed transaction from a transaction and its signature. @@ -1323,134 +1281,6 @@ impl TransactionSigned { let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature }; Ok(signed) } - - /// Decodes an enveloped EIP-2718 typed transaction. - /// - /// This should _only_ be used internally in general transaction decoding methods, - /// which have already ensured that the input is a typed transaction with the following format: - /// `tx-type || rlp(tx-data)` - /// - /// Note that this format does not start with any RLP header, and instead starts with a single - /// byte indicating the transaction type. 
- /// - /// CAUTION: this expects that `data` is `tx-type || rlp(tx-data)` - pub fn decode_enveloped_typed_transaction(data: &mut &[u8]) -> alloy_rlp::Result { - // keep this around so we can use it to calculate the hash - let original_encoding_without_header = *data; - - let tx_type = *data.first().ok_or(RlpError::InputTooShort)?; - data.advance(1); - - // decode the list header for the rest of the transaction - let header = Header::decode(data)?; - if !header.list { - return Err(RlpError::Custom("typed tx fields must be encoded as a list")) - } - - let remaining_len = data.len(); - - // length of tx encoding = tx type byte (size = 1) + length of header + payload length - let tx_length = 1 + header.length() + header.payload_length; - - // decode common fields - let Ok(tx_type) = TxType::try_from(tx_type) else { - return Err(RlpError::Custom("unsupported typed transaction type")) - }; - - let transaction = match tx_type { - TxType::Eip2930 => Transaction::Eip2930(TxEip2930::decode_fields(data)?), - TxType::Eip1559 => Transaction::Eip1559(TxEip1559::decode_fields(data)?), - TxType::Eip4844 => Transaction::Eip4844(TxEip4844::decode_fields(data)?), - TxType::Eip7702 => Transaction::Eip7702(TxEip7702::decode_fields(data)?), - #[cfg(feature = "optimism")] - TxType::Deposit => Transaction::Deposit(TxDeposit::decode_fields(data)?), - TxType::Legacy => return Err(RlpError::Custom("unexpected legacy tx type")), - }; - - #[cfg(not(feature = "optimism"))] - let signature = Signature::decode_rlp_vrs(data)?; - - #[cfg(feature = "optimism")] - let signature = if tx_type == TxType::Deposit { - optimism_deposit_tx_signature() - } else { - Signature::decode_rlp_vrs(data)? - }; - - if !matches!(signature.v(), Parity::Parity(_)) { - return Err(alloy_rlp::Error::Custom("invalid parity for typed transaction")); - } - - let bytes_consumed = remaining_len - data.len(); - if bytes_consumed != header.payload_length { - return Err(RlpError::UnexpectedLength) - } - - let hash = keccak256(&original_encoding_without_header[..tx_length]); - let signed = Self { transaction, hash, signature }; - Ok(signed) - } - - /// Decodes the "raw" format of transaction (similar to `eth_sendRawTransaction`). - /// - /// This should be used for any RPC method that accepts a raw transaction. - /// Currently, this includes: - /// * `eth_sendRawTransaction`. - /// * All versions of `engine_newPayload`, in the `transactions` field. - /// - /// A raw transaction is either a legacy transaction or EIP-2718 typed transaction. - /// - /// For legacy transactions, the format is encoded as: `rlp(tx-data)`. This format will start - /// with a RLP list header. - /// - /// For EIP-2718 typed transactions, the format is encoded as the type of the transaction - /// followed by the rlp of the transaction: `type || rlp(tx-data)`. - /// - /// Both for legacy and EIP-2718 transactions, an error will be returned if there is an excess - /// of bytes in input data. - pub fn decode_enveloped(input_data: &mut &[u8]) -> alloy_rlp::Result { - if input_data.is_empty() { - return Err(RlpError::InputTooShort) - } - - // Check if the tx is a list - let output_data = if input_data[0] >= EMPTY_LIST_CODE { - // decode as legacy transaction - Self::decode_rlp_legacy_transaction(input_data)? - } else { - Self::decode_enveloped_typed_transaction(input_data)? - }; - - if !input_data.is_empty() { - return Err(RlpError::UnexpectedLength) - } - - Ok(output_data) - } - - /// Returns the length without an RLP header - this is used for eth/68 sizes. 
- pub fn length_without_header(&self) -> usize { - // method computes the payload len without a RLP header - match &self.transaction { - Transaction::Legacy(legacy_tx) => legacy_tx.encoded_len_with_signature( - &with_eip155_parity(&self.signature, legacy_tx.chain_id), - ), - Transaction::Eip2930(access_list_tx) => { - access_list_tx.encoded_len_with_signature(&self.signature, false) - } - Transaction::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encoded_len_with_signature(&self.signature, false) - } - Transaction::Eip4844(blob_tx) => { - blob_tx.encoded_len_with_signature(&self.signature, false) - } - Transaction::Eip7702(set_code_tx) => { - set_code_tx.encoded_len_with_signature(&self.signature, false) - } - #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => deposit_tx.encoded_len(false), - } - } } impl From for TransactionSigned { @@ -1469,11 +1299,16 @@ impl Encodable for TransactionSigned { /// transaction: /// `rlp(tx-type || rlp(tx-data))` fn encode(&self, out: &mut dyn bytes::BufMut) { - self.encode_inner(out, true); + self.network_encode(out); } fn length(&self) -> usize { - self.payload_len_inner() + let mut payload_length = self.encode_2718_len(); + if !self.is_legacy() { + payload_length += Header { list: false, payload_length }.length(); + } + + payload_length } } @@ -1504,38 +1339,76 @@ impl Decodable for TransactionSigned { /// This is because [`Header::decode`] does not advance the buffer, and returns a length-1 /// string header if the first byte is less than `0xf7`. fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - if buf.is_empty() { - return Err(RlpError::InputTooShort) - } + Self::network_decode(buf).map_err(Into::into) + } +} - // decode header - let mut original_encoding = *buf; - let header = Header::decode(buf)?; +impl Encodable2718 for TransactionSigned { + fn type_flag(&self) -> Option { + match self.transaction.tx_type() { + TxType::Legacy => None, + tx_type => Some(tx_type as u8), + } + } - let remaining_len = buf.len(); + fn encode_2718_len(&self) -> usize { + match &self.transaction { + Transaction::Legacy(legacy_tx) => legacy_tx.encoded_len_with_signature( + &with_eip155_parity(&self.signature, legacy_tx.chain_id), + ), + Transaction::Eip2930(access_list_tx) => { + access_list_tx.encoded_len_with_signature(&self.signature, false) + } + Transaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.encoded_len_with_signature(&self.signature, false) + } + Transaction::Eip4844(blob_tx) => { + blob_tx.encoded_len_with_signature(&self.signature, false) + } + Transaction::Eip7702(set_code_tx) => { + set_code_tx.encoded_len_with_signature(&self.signature, false) + } + #[cfg(feature = "optimism")] + Transaction::Deposit(deposit_tx) => deposit_tx.encoded_len(false), + } + } - // if the transaction is encoded as a string then it is a typed transaction - if header.list { - let tx = Self::decode_rlp_legacy_transaction(&mut original_encoding)?; + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + self.transaction.encode_with_signature(&self.signature, out, false) + } +} - // advance the buffer based on how far `decode_rlp_legacy_transaction` advanced the - // buffer - *buf = original_encoding; - Ok(tx) - } else { - let tx = Self::decode_enveloped_typed_transaction(buf)?; - - let bytes_consumed = remaining_len - buf.len(); - // because Header::decode works for single bytes (including the tx type), returning a - // string Header with payload_length of 1, we need to make sure this check is only - // performed for transactions with a string header - if 
bytes_consumed != header.payload_length && original_encoding[0] > EMPTY_STRING_CODE { - return Err(RlpError::UnexpectedLength) +impl Decodable2718 for TransactionSigned { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { + match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? { + TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), + TxType::Eip2930 => { + let (tx, signature, hash) = TxEip2930::decode_signed_fields(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) } - - Ok(tx) + TxType::Eip1559 => { + let (tx, signature, hash) = TxEip1559::decode_signed_fields(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) + } + TxType::Eip7702 => { + let (tx, signature, hash) = TxEip7702::decode_signed_fields(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) + } + TxType::Eip4844 => { + let (tx, signature, hash) = TxEip4844::decode_signed_fields(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) + } + #[cfg(feature = "optimism")] + TxType::Deposit => Ok(Self::from_transaction_and_signature( + Transaction::Deposit(TxDeposit::decode(buf)?), + optimism_deposit_tx_signature(), + )), } } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { + Ok(Self::decode_rlp_legacy_transaction(buf)?) + } } #[cfg(any(test, feature = "arbitrary"))] @@ -1543,7 +1416,14 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { #[allow(unused_mut)] let mut transaction = Transaction::arbitrary(u)?; - let mut signature = Signature::arbitrary(u)?; + + let secp = secp256k1::Secp256k1::new(); + let key_pair = secp256k1::Keypair::new(&secp, &mut rand::thread_rng()); + let mut signature = crate::sign_message( + B256::from_slice(&key_pair.secret_bytes()[..]), + transaction.signature_hash(), + ) + .unwrap(); signature = if matches!(transaction, Transaction::Legacy(_)) { if let Some(chain_id) = transaction.chain_id() { @@ -1636,15 +1516,6 @@ impl Decodable for TransactionSignedEcRecovered { } } -/// Ensures the transaction can be sent over the -/// network -pub trait IntoRecoveredTransaction { - /// Converts to this type into a [`TransactionSignedEcRecovered`]. - /// - /// Note: this takes `&self` since indented usage is via `Arc`. - fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered; -} - /// Generic wrapper with encoded Bytes, such as transaction data. #[derive(Debug, Clone, PartialEq, Eq)] pub struct WithEncoded(Bytes, pub T); @@ -1699,15 +1570,221 @@ impl WithEncoded> { } } +/// Bincode-compatible transaction type serde implementations. +#[cfg(feature = "serde-bincode-compat")] +pub mod serde_bincode_compat { + use alloc::borrow::Cow; + use alloy_consensus::{ + transaction::serde_bincode_compat::{TxEip1559, TxEip2930, TxEip7702, TxLegacy}, + TxEip4844, + }; + use alloy_primitives::{Signature, TxHash}; + #[cfg(feature = "optimism")] + use op_alloy_consensus::serde_bincode_compat::TxDeposit; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{DeserializeAs, SerializeAs}; + + /// Bincode-compatible [`super::Transaction`] serde implementation. 
+ /// + /// Intended to use with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_primitives::{serde_bincode_compat, Transaction}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + /// #[serde_as(as = "serde_bincode_compat::transaction::Transaction")] + /// transaction: Transaction, + /// } + /// ``` + #[derive(Debug, Serialize, Deserialize)] + #[allow(missing_docs)] + pub enum Transaction<'a> { + Legacy(TxLegacy<'a>), + Eip2930(TxEip2930<'a>), + Eip1559(TxEip1559<'a>), + Eip4844(Cow<'a, TxEip4844>), + Eip7702(TxEip7702<'a>), + #[cfg(feature = "optimism")] + #[cfg(feature = "optimism")] + Deposit(TxDeposit<'a>), + } + + impl<'a> From<&'a super::Transaction> for Transaction<'a> { + fn from(value: &'a super::Transaction) -> Self { + match value { + super::Transaction::Legacy(tx) => Self::Legacy(TxLegacy::from(tx)), + super::Transaction::Eip2930(tx) => Self::Eip2930(TxEip2930::from(tx)), + super::Transaction::Eip1559(tx) => Self::Eip1559(TxEip1559::from(tx)), + super::Transaction::Eip4844(tx) => Self::Eip4844(Cow::Borrowed(tx)), + super::Transaction::Eip7702(tx) => Self::Eip7702(TxEip7702::from(tx)), + #[cfg(feature = "optimism")] + super::Transaction::Deposit(tx) => Self::Deposit(TxDeposit::from(tx)), + } + } + } + + impl<'a> From> for super::Transaction { + fn from(value: Transaction<'a>) -> Self { + match value { + Transaction::Legacy(tx) => Self::Legacy(tx.into()), + Transaction::Eip2930(tx) => Self::Eip2930(tx.into()), + Transaction::Eip1559(tx) => Self::Eip1559(tx.into()), + Transaction::Eip4844(tx) => Self::Eip4844(tx.into_owned()), + Transaction::Eip7702(tx) => Self::Eip7702(tx.into()), + #[cfg(feature = "optimism")] + Transaction::Deposit(tx) => Self::Deposit(tx.into()), + } + } + } + + impl<'a> SerializeAs for Transaction<'a> { + fn serialize_as(source: &super::Transaction, serializer: S) -> Result + where + S: Serializer, + { + Transaction::from(source).serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::Transaction> for Transaction<'de> { + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Transaction::deserialize(deserializer).map(Into::into) + } + } + + /// Bincode-compatible [`super::TransactionSigned`] serde implementation. 
+ /// + /// Intended to use with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_primitives::{serde_bincode_compat, TransactionSigned}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + /// #[serde_as(as = "serde_bincode_compat::transaction::TransactionSigned")] + /// transaction: TransactionSigned, + /// } + /// ``` + #[derive(Debug, Serialize, Deserialize)] + pub struct TransactionSigned<'a> { + hash: TxHash, + signature: Signature, + transaction: Transaction<'a>, + } + + impl<'a> From<&'a super::TransactionSigned> for TransactionSigned<'a> { + fn from(value: &'a super::TransactionSigned) -> Self { + Self { + hash: value.hash, + signature: value.signature, + transaction: Transaction::from(&value.transaction), + } + } + } + + impl<'a> From> for super::TransactionSigned { + fn from(value: TransactionSigned<'a>) -> Self { + Self { + hash: value.hash, + signature: value.signature, + transaction: value.transaction.into(), + } + } + } + + impl<'a> SerializeAs for TransactionSigned<'a> { + fn serialize_as( + source: &super::TransactionSigned, + serializer: S, + ) -> Result + where + S: Serializer, + { + TransactionSigned::from(source).serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::TransactionSigned> for TransactionSigned<'de> { + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + TransactionSigned::deserialize(deserializer).map(Into::into) + } + } + + #[cfg(test)] + mod tests { + use super::super::{serde_bincode_compat, Transaction, TransactionSigned}; + + use arbitrary::Arbitrary; + use rand::Rng; + use reth_testing_utils::generators; + use serde::{Deserialize, Serialize}; + use serde_with::serde_as; + + #[test] + fn test_transaction_bincode_roundtrip() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + #[serde_as(as = "serde_bincode_compat::Transaction")] + transaction: Transaction, + } + + let mut bytes = [0u8; 1024]; + generators::rng().fill(bytes.as_mut_slice()); + let data = Data { + transaction: Transaction::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) + .unwrap(), + }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + + #[test] + fn test_transaction_signed_bincode_roundtrip() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + #[serde_as(as = "serde_bincode_compat::TransactionSigned")] + transaction: TransactionSigned, + } + + let mut bytes = [0u8; 1024]; + generators::rng().fill(bytes.as_mut_slice()); + let data = Data { + transaction: TransactionSigned::arbitrary(&mut arbitrary::Unstructured::new( + &bytes, + )) + .unwrap(), + }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + } +} + #[cfg(test)] mod tests { use crate::{ - hex, transaction::{signature::Signature, TxEip1559, TxKind, TxLegacy}, - Address, Bytes, Transaction, TransactionSigned, TransactionSignedEcRecovered, - TransactionSignedNoHash, B256, U256, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, }; - use alloy_primitives::{address, b256, bytes, Parity}; + use alloy_eips::eip2718::{Decodable2718, Encodable2718}; + use alloy_primitives::{address, b256, bytes, hex, Address, Bytes, Parity, B256, 
U256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_codecs::Compact; @@ -1748,7 +1825,7 @@ mod tests { // random mainnet tx let tx_bytes = hex!("02f872018307910d808507204d2cb1827d0094388c818ca8b9251b393131c08a736a67ccb19297880320d04823e2701c80c001a0cf024f4815304df2867a1a74e9d2707b6abda0337d2d54a4438d453f4160f190a07ac0e6b3bc9395b5b9c8b9e6d77204a236577a5b18467b9175c01de4faa208d9"); - let decoded = TransactionSigned::decode_enveloped(&mut &tx_bytes[..]).unwrap(); + let decoded = TransactionSigned::decode_2718(&mut &tx_bytes[..]).unwrap(); assert_eq!( decoded.recover_signer(), Some(Address::from_str("0x95222290DD7278Aa3Ddd389Cc1E1d165CC4BAfe5").unwrap()) @@ -1764,7 +1841,7 @@ mod tests { // https://sepolia.etherscan.io/getRawTx?tx=0x9a22ccb0029bc8b0ddd073be1a1d923b7ae2b2ea52100bae0db4424f9107e9c0 let raw_tx = alloy_primitives::hex::decode("0x03f9011d83aa36a7820fa28477359400852e90edd0008252089411e9ca82a3a762b4b5bd264d4173a242e7a770648080c08504a817c800f8a5a0012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921aa00152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4a0013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7a001148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1a0011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e654901a0c8de4cced43169f9aa3d36506363b2d2c44f6c49fc1fd91ea114c86f3757077ea01e11fdd0d1934eda0492606ee0bb80a7bf8f35cc5f86ec60fe5031ba48bfd544").unwrap(); - let decoded = TransactionSigned::decode_enveloped(&mut raw_tx.as_slice()).unwrap(); + let decoded = TransactionSigned::decode_2718(&mut raw_tx.as_slice()).unwrap(); assert_eq!(decoded.tx_type(), TxType::Eip4844); let from = decoded.recover_signer(); @@ -1864,7 +1941,7 @@ mod tests { nonce: 26, max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, - gas_limit: MIN_TRANSACTION_GAS as u128, + gas_limit: MIN_TRANSACTION_GAS, to: Address::from_slice(&hex!("61815774383099e24810ab832a5b2a5425c154d5")[..]).into(), value: U256::from(3000000000000000000u64), input: Default::default(), @@ -1918,7 +1995,7 @@ mod tests { #[test] fn decode_raw_tx_and_recover_signer() { - use crate::hex_literal::hex; + use alloy_primitives::hex_literal::hex; // transaction is from ropsten let hash: B256 = @@ -1938,7 +2015,7 @@ mod tests { let input = hex!("02f871018302a90f808504890aef60826b6c94ddf4c5025d1a5742cf12f74eec246d4432c295e487e09c3bbcc12b2b80c080a0f21a4eacd0bf8fea9c5105c543be5a1d8c796516875710fafafdf16d16d8ee23a001280915021bb446d1973501a67f93d2b38894a514b976e7b46dc2fe54598d76"); let decoded = TransactionSigned::decode(&mut &input[..]).unwrap(); - let encoded = decoded.envelope_encoded(); + let encoded = decoded.encoded_2718(); assert_eq!(encoded[..], input); } @@ -1946,9 +2023,9 @@ mod tests { fn test_envelop_decode() { // random tx: let input = bytes!("02f871018302a90f808504890aef60826b6c94ddf4c5025d1a5742cf12f74eec246d4432c295e487e09c3bbcc12b2b80c080a0f21a4eacd0bf8fea9c5105c543be5a1d8c796516875710fafafdf16d16d8ee23a001280915021bb446d1973501a67f93d2b38894a514b976e7b46dc2fe54598d76"); - let decoded = TransactionSigned::decode_enveloped(&mut input.as_ref()).unwrap(); + let decoded = TransactionSigned::decode_2718(&mut input.as_ref()).unwrap(); - let encoded = decoded.envelope_encoded(); + let encoded = decoded.encoded_2718(); assert_eq!(encoded, input); } @@ -2027,13 +2104,13 @@ mod tests { #[test] fn recover_enveloped() { let data = 
hex!("02f86f0102843b9aca0085029e7822d68298f094d9e1459a7a482635700cbc20bbaf52d495ab9c9680841b55ba3ac080a0c199674fcb29f353693dd779c017823b954b3c69dffa3cd6b2a6ff7888798039a028ca912de909e7e6cdef9cdcaf24c54dd8c1032946dfa1d85c206b32a9064fe8"); - let tx = TransactionSigned::decode_enveloped(&mut data.as_slice()).unwrap(); + let tx = TransactionSigned::decode_2718(&mut data.as_slice()).unwrap(); let sender = tx.recover_signer().unwrap(); assert_eq!(sender, address!("001e2b7dE757bA469a57bF6b23d982458a07eFcE")); assert_eq!(tx.to(), Some(address!("D9e1459A7A482635700cBc20BBAF52D495Ab9C96"))); assert_eq!(tx.input().as_ref(), hex!("1b55ba3a")); - let encoded = tx.envelope_encoded(); - assert_eq!(encoded.as_ref(), data.as_slice()); + let encoded = tx.encoded_2718(); + assert_eq!(encoded.as_ref(), data.to_vec()); } // @@ -2041,7 +2118,7 @@ mod tests { #[test] fn recover_pre_eip2() { let data = hex!("f8ea0c850ba43b7400832dc6c0942935aa0a2d2fbb791622c29eb1c117b65b7a908580b884590528a9000000000000000000000001878ace42092b7f1ae1f28d16c1272b1aa80ca4670000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000d02ab486cedc0000000000000000000000000000000000000000000000000000557fe293cabc08cf1ca05bfaf3fda0a56b49cc78b22125feb5ae6a99d2b4781f00507d8b02c173771c85a0b5da0dbe6c5bc53740d0071fc83eb17ba0f709e49e9ae7df60dee625ef51afc5"); - let tx = TransactionSigned::decode_enveloped(&mut data.as_slice()).unwrap(); + let tx = TransactionSigned::decode_2718(&mut data.as_slice()).unwrap(); let sender = tx.recover_signer(); assert!(sender.is_none()); let sender = tx.recover_signer_unchecked().unwrap(); @@ -2101,28 +2178,8 @@ mod tests { fn create_txs_disallowed_for_eip4844() { let data = [3, 208, 128, 128, 123, 128, 120, 128, 129, 129, 128, 192, 129, 129, 192, 128, 128, 9]; - let res = TransactionSigned::decode_enveloped(&mut &data[..]); + let res = TransactionSigned::decode_2718(&mut &data[..]); assert!(res.is_err()); } - - #[test] - fn decode_envelope_fails_on_trailing_bytes_legacy() { - let data = [201, 3, 56, 56, 128, 43, 36, 27, 128, 3, 192]; - - let result = TransactionSigned::decode_enveloped(&mut data.as_ref()); - - assert!(result.is_err()); - assert_eq!(result, Err(RlpError::UnexpectedLength)); - } - - #[test] - fn decode_envelope_fails_on_trailing_bytes_eip2718() { - let data = hex!("02f872018307910d808507204d2cb1827d0094388c818ca8b9251b393131c08a736a67ccb19297880320d04823e2701c80c001a0cf024f4815304df2867a1a74e9d2707b6abda0337d2d54a4438d453f4160f190a07ac0e6b3bc9395b5b9c8b9e6d77204a236577a5b18467b9175c01de4faa208d900"); - - let result = TransactionSigned::decode_enveloped(&mut data.as_ref()); - - assert!(result.is_err()); - assert_eq!(result, Err(RlpError::UnexpectedLength)); - } } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 0a068d8c4961..cc2dc5766394 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -7,12 +7,16 @@ use super::{ TxEip7702, }; use crate::{ - Address, BlobTransaction, BlobTransactionSidecar, Bytes, Signature, Transaction, - TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxHash, - TxLegacy, B256, EIP4844_TX_TYPE_ID, + BlobTransaction, BlobTransactionSidecar, Signature, Transaction, TransactionSigned, + TransactionSignedEcRecovered, EIP4844_TX_TYPE_ID, }; use alloc::vec::Vec; -use alloy_consensus::{SignableTransaction, TxEip4844WithSidecar}; +use alloy_consensus::{ + transaction::{TxEip1559, 
TxEip2930, TxEip4844, TxLegacy}, + SignableTransaction, TxEip4844WithSidecar, +}; +use alloy_eips::eip2718::{Decodable2718, Eip2718Error}; +use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE}; use bytes::Buf; use derive_more::{AsRef, Deref}; @@ -219,6 +223,9 @@ impl PooledTransactionsElement { // decode the type byte, only decode BlobTransaction if it is a 4844 transaction let tx_type = *data.first().ok_or(RlpError::InputTooShort)?; + // First, we advance the buffer past the type byte + data.advance(1); + if tx_type == EIP4844_TX_TYPE_ID { // Recall that the blob transaction response `TransactionPayload` is encoded like // this: `rlp([tx_payload_body, blobs, commitments, proofs])` @@ -228,18 +235,17 @@ impl PooledTransactionsElement { // // This makes the full encoding: // `tx_type (0x03) || rlp([[chain_id, nonce, ...], blobs, commitments, proofs])` - // - // First, we advance the buffer past the type byte - data.advance(1); // Now, we decode the inner blob transaction: // `rlp([[chain_id, nonce, ...], blobs, commitments, proofs])` let blob_tx = BlobTransaction::decode_inner(data)?; Ok(Self::BlobTransaction(blob_tx)) } else { - // DO NOT advance the buffer for the type, since we want the enveloped decoding to - // decode it again and advance the buffer on its own. - let typed_tx = TransactionSigned::decode_enveloped_typed_transaction(data)?; + let typed_tx = + TransactionSigned::typed_decode(tx_type, data).map_err(|err| match err { + Eip2718Error::RlpError(err) => err, + _ => RlpError::Custom("failed to decode EIP-2718 transaction"), + })?; // because we checked the tx type, we can be sure that the transaction is not a // blob transaction or legacy @@ -334,7 +340,7 @@ impl PooledTransactionsElement { /// Returns the enveloped encoded transactions. /// - /// See also [`TransactionSigned::encode_enveloped`] + /// See also [`alloy_eips::eip2718::Encodable2718::encoded_2718`] pub fn envelope_encoded(&self) -> Bytes { let mut buf = Vec::new(); self.encode_enveloped(&mut buf); @@ -588,6 +594,9 @@ impl Decodable for PooledTransactionsElement { let tx_type = *buf.first().ok_or(RlpError::InputTooShort)?; let remaining_len = buf.len(); + // Aadvance the buffer past the type byte + buf.advance(1); + if tx_type == EIP4844_TX_TYPE_ID { // Recall that the blob transaction response `TransactionPayload` is encoded like // this: `rlp([tx_payload_body, blobs, commitments, proofs])` @@ -597,11 +606,8 @@ impl Decodable for PooledTransactionsElement { // // This makes the full encoding: // `tx_type (0x03) || rlp([[chain_id, nonce, ...], blobs, commitments, proofs])` - // - // First, we advance the buffer past the type byte - buf.advance(1); - // Now, we decode the inner blob transaction: + // Decode the inner blob transaction: // `rlp([[chain_id, nonce, ...], blobs, commitments, proofs])` let blob_tx = BlobTransaction::decode_inner(buf)?; @@ -613,9 +619,8 @@ impl Decodable for PooledTransactionsElement { Ok(Self::BlobTransaction(blob_tx)) } else { - // DO NOT advance the buffer for the type, since we want the enveloped decoding to - // decode it again and advance the buffer on its own. 
- let typed_tx = TransactionSigned::decode_enveloped_typed_transaction(buf)?; + let typed_tx = + TransactionSigned::typed_decode(tx_type, buf).map_err(RlpError::from)?; // check that the bytes consumed match the payload length let bytes_consumed = remaining_len - buf.len(); diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index c0242acf9747..52c3c68ef9db 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,9 +1,8 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] -use crate::{ - keccak256, Signature, Transaction, TransactionSigned, TxEip4844, TxHash, EIP4844_TX_TYPE_ID, -}; -use alloy_consensus::TxEip4844WithSidecar; +use crate::{Signature, Transaction, TransactionSigned, EIP4844_TX_TYPE_ID}; +use alloy_consensus::{transaction::TxEip4844, TxEip4844WithSidecar}; +use alloy_primitives::{keccak256, TxHash}; use alloy_rlp::{Decodable, Error as RlpError, Header}; use serde::{Deserialize, Serialize}; @@ -283,8 +282,9 @@ pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar #[cfg(all(test, feature = "c-kzg"))] mod tests { use super::*; - use crate::{hex, kzg::Blob, PooledTransactionsElement}; + use crate::{kzg::Blob, PooledTransactionsElement}; use alloy_eips::eip4844::Bytes48; + use alloy_primitives::hex; use alloy_rlp::Encodable; use std::{fs, path::PathBuf, str::FromStr}; diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 4b4b5bb4547e..39c0f92fda88 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -1,6 +1,5 @@ -use crate::{transaction::util::secp256k1, Address, B256, U256}; - -use alloy_primitives::Parity; +use crate::transaction::util::secp256k1; +use alloy_primitives::{Address, Parity, B256, U256}; use alloy_rlp::{Decodable, Error as RlpError}; pub use alloy_primitives::Signature; @@ -116,13 +115,13 @@ pub const fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option)> #[cfg(test)] mod tests { use crate::{ - hex, transaction::signature::{ legacy_parity, recover_signer, recover_signer_unchecked, SECP256K1N_HALF, }, - Address, Signature, B256, U256, + Signature, }; - use alloy_primitives::Parity; + use alloy_eips::eip2718::Decodable2718; + use alloy_primitives::{hex, Address, Parity, B256, U256}; use std::str::FromStr; #[test] @@ -166,7 +165,7 @@ mod tests { // // Block number: 46170 let raw_tx = hex!("f86d8085746a52880082520894c93f2250589a6563f5359051c1ea25746549f0d889208686e75e903bc000801ba034b6fdc33ea520e8123cf5ac4a9ff476f639cab68980cd9366ccae7aef437ea0a0e517caa5f50e27ca0d1e9a92c503b4ccb039680c6d9d0c71203ed611ea4feb33"); - let tx = crate::transaction::TransactionSigned::decode_enveloped(&mut &raw_tx[..]).unwrap(); + let tx = crate::transaction::TransactionSigned::decode_2718(&mut &raw_tx[..]).unwrap(); let signature = tx.signature(); // make sure we know it's greater than SECP256K1N_HALF diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index c4ddbb41cac4..c55e0d3c6193 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -1,4 +1,4 @@ -use crate::{U64, U8}; +use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; use serde::{Deserialize, Serialize}; @@ -23,23 +23,23 @@ pub(crate) const COMPACT_IDENTIFIER_EIP1559: usize = 2; #[cfg(any(test, feature = "reth-codec"))] pub(crate) const 
COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; -/// Identifier for legacy transaction, however [`TxLegacy`](crate::TxLegacy) this is technically not -/// typed. +/// Identifier for legacy transaction, however [`TxLegacy`](alloy_consensus::TxLegacy) this is +/// technically not typed. pub const LEGACY_TX_TYPE_ID: u8 = 0; -/// Identifier for [`TxEip2930`](crate::TxEip2930) transaction. +/// Identifier for [`TxEip2930`](alloy_consensus::TxEip2930) transaction. pub const EIP2930_TX_TYPE_ID: u8 = 1; -/// Identifier for [`TxEip1559`](crate::TxEip1559) transaction. +/// Identifier for [`TxEip1559`](alloy_consensus::TxEip1559) transaction. pub const EIP1559_TX_TYPE_ID: u8 = 2; -/// Identifier for [`TxEip4844`](crate::TxEip4844) transaction. +/// Identifier for [`TxEip4844`](alloy_consensus::TxEip4844) transaction. pub const EIP4844_TX_TYPE_ID: u8 = 3; -/// Identifier for [`TxEip7702`](crate::TxEip7702) transaction. +/// Identifier for [`TxEip7702`](alloy_consensus::TxEip7702) transaction. pub const EIP7702_TX_TYPE_ID: u8 = 4; -/// Identifier for [`TxDeposit`](crate::TxDeposit) transaction. +/// Identifier for [`TxDeposit`](op_alloy_consensus::TxDeposit) transaction. #[cfg(feature = "optimism")] pub const DEPOSIT_TX_TYPE_ID: u8 = 126; @@ -246,7 +246,7 @@ impl From for TxType { #[cfg(test)] mod tests { - use crate::hex; + use alloy_primitives::hex; use rand::Rng; use reth_codecs::Compact; diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index f3f49cb316c1..6205ec886ca0 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -1,4 +1,5 @@ -use crate::{Address, Signature}; +use crate::Signature; +use alloy_primitives::Address; use revm_primitives::B256; #[cfg(feature = "secp256k1")] @@ -14,13 +15,12 @@ pub(crate) mod secp256k1 { #[cfg(feature = "secp256k1")] mod impl_secp256k1 { use super::*; - use crate::keccak256; pub(crate) use ::secp256k1::Error; use ::secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, Message, PublicKey, SecretKey, SECP256K1, }; - use alloy_primitives::Parity; + use alloy_primitives::{keccak256, Parity}; use revm_primitives::U256; /// Recovers the address of the sender using secp256k1 pubkey recovery. @@ -65,8 +65,7 @@ mod impl_secp256k1 { #[cfg_attr(feature = "secp256k1", allow(unused, unreachable_pub))] mod impl_k256 { use super::*; - use crate::keccak256; - use alloy_primitives::Parity; + use alloy_primitives::{keccak256, Parity}; pub(crate) use k256::ecdsa::Error; use k256::ecdsa::{RecoveryId, SigningKey, VerifyingKey}; use revm_primitives::U256; diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs index 3e96b6dda899..888c83946cab 100644 --- a/crates/primitives/src/transaction/variant.rs +++ b/crates/primitives/src/transaction/variant.rs @@ -2,9 +2,9 @@ //! `TransactionSignedEcRecovered` use crate::{ - Address, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, - B256, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, }; +use alloy_primitives::{Address, B256}; use core::ops::Deref; /// Represents various different transaction formats used in reth. 
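The `decode_enveloped`/`envelope_encoded` pair removed from `TransactionSigned` is replaced throughout by alloy's `Encodable2718`/`Decodable2718`, as the test updates above show. A minimal roundtrip sketch; the `roundtrip` helper is hypothetical, and any raw legacy or typed transaction payload works:

```rust
use alloy_eips::eip2718::{Decodable2718, Eip2718Result, Encodable2718};
use reth_primitives::TransactionSigned;

// Hypothetical helper: decode a raw EIP-2718 envelope (typed txs dispatch on
// the leading type byte via `typed_decode`; legacy txs fall back to plain RLP
// via `fallback_decode`) and check that re-encoding reproduces the input.
fn roundtrip(raw: &[u8]) -> Eip2718Result<TransactionSigned> {
    let tx = TransactionSigned::decode_2718(&mut &raw[..])?;
    assert_eq!(tx.encoded_2718(), raw);
    Ok(tx)
}
```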
diff --git a/crates/prune/prune/src/segments/user/account_history.rs b/crates/prune/prune/src/segments/user/account_history.rs index 016d9a22fba2..01f8c0850a18 100644 --- a/crates/prune/prune/src/segments/user/account_history.rs +++ b/crates/prune/prune/src/segments/user/account_history.rs @@ -275,10 +275,8 @@ mod tests { .iter() .filter(|(key, _)| key.highest_block_number > last_pruned_block_number) .map(|(key, blocks)| { - let new_blocks = blocks - .iter() - .skip_while(|block| *block <= last_pruned_block_number) - .collect::>(); + let new_blocks = + blocks.iter().skip_while(|block| *block <= last_pruned_block_number); (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) }) .collect::>(); diff --git a/crates/prune/prune/src/segments/user/storage_history.rs b/crates/prune/prune/src/segments/user/storage_history.rs index 5291d822cefa..315ad750a8b7 100644 --- a/crates/prune/prune/src/segments/user/storage_history.rs +++ b/crates/prune/prune/src/segments/user/storage_history.rs @@ -281,10 +281,8 @@ mod tests { .iter() .filter(|(key, _)| key.sharded_key.highest_block_number > last_pruned_block_number) .map(|(key, blocks)| { - let new_blocks = blocks - .iter() - .skip_while(|block| *block <= last_pruned_block_number) - .collect::>(); + let new_blocks = + blocks.iter().skip_while(|block| *block <= last_pruned_block_number); (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) }) .collect::>(); diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index b4f169249444..9e4501f62770 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -21,6 +21,7 @@ reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-storage-api.workspace = true reth-trie = { workspace = true, optional = true } +alloy-primitives.workspace = true # revm revm.workspace = true @@ -28,6 +29,7 @@ revm.workspace = true [dev-dependencies] reth-trie.workspace = true reth-ethereum-forks.workspace = true +alloy-primitives.workspace = true [features] default = ["std", "c-kzg"] diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 4502732a429d..a63681aa1321 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,10 +1,7 @@ //! Helper for handling execution of multiple blocks. 
-use crate::{ - precompile::{Address, HashSet}, - primitives::alloy_primitives::BlockNumber, -}; use alloc::vec::Vec; +use alloy_primitives::{map::HashSet, Address, BlockNumber}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{Receipt, Receipts, Request, Requests}; use reth_prune_types::{PruneMode, PruneModes, PruneSegmentError, MINIMUM_PRUNING_DISTANCE}; @@ -152,7 +149,7 @@ impl BlockBatchRecord { if !contract_log_pruner.is_empty() { let (prev_block, filter) = - self.pruning_address_filter.get_or_insert_with(|| (0, HashSet::new())); + self.pruning_address_filter.get_or_insert_with(|| (0, Default::default())); for (_, addresses) in contract_log_pruner.range(*prev_block..=block_number) { filter.extend(addresses.iter().copied()); } @@ -182,7 +179,8 @@ impl BlockBatchRecord { mod tests { use super::*; use alloc::collections::BTreeMap; - use reth_primitives::{Address, Log, Receipt}; + use alloy_primitives::Address; + use reth_primitives::{Log, Receipt}; use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig}; #[test] diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index fb5f71045ea1..8f40d2be8d91 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -1,6 +1,7 @@ use crate::primitives::alloy_primitives::{BlockNumber, StorageKey, StorageValue}; +use alloy_primitives::{Address, B256, U256}; use core::ops::{Deref, DerefMut}; -use reth_primitives::{Account, Address, B256, U256}; +use reth_primitives::Account; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use revm::{ db::DatabaseRef, diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index 6376957f3111..afe92561bcd3 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -1,19 +1,19 @@ -use crate::precompile::HashMap; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use alloy_primitives::{map::HashMap, Address, U256}; +use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc; -use reth_primitives::{Address, Block, Withdrawal, Withdrawals, U256}; +use reth_primitives::{Block, Withdrawal, Withdrawals}; /// Collect all balance changes at the end of the block. /// /// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular /// state changes (DAO fork). #[inline] -pub fn post_block_balance_increments( +pub fn post_block_balance_increments<ChainSpec: EthereumHardforks>( chain_spec: &ChainSpec, block: &Block, total_difficulty: U256, ) -> HashMap<Address, u128> { - let mut balance_increments = HashMap::new(); + let mut balance_increments = HashMap::default(); // Add block rewards if they are enabled.
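// Illustrative sketch, not part of this diff: `alloy_primitives::map::HashMap`
// aliases a map with a faster, non-default build hasher, so the std-only
// `HashMap::new()`/`with_capacity()` constructors are unavailable and the
// hasher-agnostic forms are used instead, e.g.:
//
//     use alloy_primitives::{map::HashMap, Address};
//
//     // `default()` works for any `BuildHasher: Default`; `new()` is only
//     // defined for std's `RandomState`-backed map.
//     let mut increments: HashMap<Address, u128> = HashMap::default();
//     increments.insert(Address::ZERO, 1);
//
//     // Pre-sizing goes through `with_capacity_and_hasher` for the same reason.
//     let _sized: HashMap<Address, u128> =
//         HashMap::with_capacity_and_hasher(8, Default::default());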
if let Some(base_block_reward) = @@ -51,7 +51,8 @@ pub fn post_block_withdrawals_balance_increments( block_timestamp: u64, withdrawals: &[Withdrawal], ) -> HashMap { - let mut balance_increments = HashMap::with_capacity(withdrawals.len()); + let mut balance_increments = + HashMap::with_capacity_and_hasher(withdrawals.len(), Default::default()); insert_post_block_withdrawals_balance_increments( chain_spec, block_timestamp, @@ -88,6 +89,7 @@ pub fn insert_post_block_withdrawals_balance_increments::new(); // Create an empty HashMap to hold the balance increments - let mut balance_increments = HashMap::new(); + let mut balance_increments = HashMap::default(); // Act // Call the function with the prepared inputs @@ -210,7 +212,7 @@ mod tests { ]; // Create an empty HashMap to hold the balance increments - let mut balance_increments = HashMap::new(); + let mut balance_increments = HashMap::default(); // Act // Call the function with the prepared inputs @@ -259,7 +261,7 @@ mod tests { ]; // Create an empty HashMap to hold the balance increments - let mut balance_increments = HashMap::new(); + let mut balance_increments = HashMap::default(); // Act // Call the function with the prepared inputs @@ -295,7 +297,7 @@ mod tests { let withdrawals = None; // No withdrawals provided // Create an empty HashMap to hold the balance increments - let mut balance_increments = HashMap::new(); + let mut balance_increments = HashMap::default(); // Act // Call the function with the prepared inputs diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index fe377bc5fc3d..d42ec4959907 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,10 +1,10 @@ -use std::collections::HashSet; - -use crate::precompile::HashMap; use alloc::vec::Vec; -use reth_primitives::{ - keccak256, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, B256, U256, +use alloy_primitives::{ + keccak256, + map::{HashMap, HashSet}, + Address, BlockNumber, Bytes, StorageKey, B256, U256, }; +use reth_primitives::{Account, Bytecode}; use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, @@ -136,7 +136,7 @@ impl StateProvider for StateProviderTest { &self, account: Address, storage_key: StorageKey, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(self.accounts.get(&account).and_then(|(storage, _)| storage.get(&storage_key).copied())) } diff --git a/crates/rpc/ipc/src/client/mod.rs b/crates/rpc/ipc/src/client/mod.rs index e8eff9c8f454..8f2fe0255c7d 100644 --- a/crates/rpc/ipc/src/client/mod.rs +++ b/crates/rpc/ipc/src/client/mod.rs @@ -90,7 +90,7 @@ impl IpcClientBuilder { /// use jsonrpsee::{core::client::ClientT, rpc_params}; /// use reth_ipc::client::IpcClientBuilder; /// - /// # async fn run_client() -> Result<(), Box> { + /// # async fn run_client() -> Result<(), Box> { /// let client = IpcClientBuilder::default().build("/tmp/my-uds").await?; /// let response: String = client.request("say_hello", rpc_params![]).await?; /// # Ok(()) } diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs index 5e7497cb9e59..aaf6731d045e 100644 --- a/crates/rpc/ipc/src/server/connection.rs +++ b/crates/rpc/ipc/src/server/connection.rs @@ -82,7 +82,7 @@ impl IpcConnDriver { impl Future for IpcConnDriver where S: Service> + Send + 'static, - S::Error: Into>, + S::Error: Into>, S::Future: Send + Unpin, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { diff --git 
a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 168819754f7c..a02d3ca32b65 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -72,7 +72,7 @@ where Service: Service< String, Response = Option, - Error = Box, + Error = Box, Future: Send + Unpin, > + Send, > + Send @@ -86,7 +86,7 @@ where /// ``` /// use jsonrpsee::RpcModule; /// use reth_ipc::server::Builder; - /// async fn run_server() -> Result<(), Box> { + /// async fn run_server() -> Result<(), Box> { /// let server = Builder::default().build("/tmp/my-uds".into()); /// let mut module = RpcModule::new(()); /// module.register_method("say_hello", |_, _, _| "lo")?; @@ -366,7 +366,7 @@ where /// response will be emitted via the `method_sink`. type Response = Option; - type Error = Box; + type Error = Box; type Future = Pin> + Send>>; @@ -441,7 +441,7 @@ fn process_connection<'b, RpcMiddleware, HttpMiddleware>( + Service< String, Response = Option, - Error = Box, + Error = Box, >, <>>::Service as Service>::Future: Send + Unpin, @@ -496,7 +496,7 @@ async fn to_ipc_service( rx: mpsc::Receiver, ) where S: Service> + Send + 'static, - S::Error: Into>, + S::Error: Into>, S::Future: Send + Unpin, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { @@ -823,7 +823,7 @@ mod tests { async fn pipe_from_stream_with_bounded_buffer( pending: PendingSubscriptionSink, stream: BroadcastStream, - ) -> Result<(), Box> { + ) -> Result<(), Box> { let sink = pending.accept().await.unwrap(); let closed = sink.closed(); diff --git a/crates/rpc/ipc/src/server/rpc_service.rs b/crates/rpc/ipc/src/server/rpc_service.rs index 2f645605da72..5e89c6a0d7f3 100644 --- a/crates/rpc/ipc/src/server/rpc_service.rs +++ b/crates/rpc/ipc/src/server/rpc_service.rs @@ -6,8 +6,8 @@ use jsonrpsee::{ IdProvider, }, types::{error::reject_too_many_subscriptions, ErrorCode, ErrorObject, Request}, - BoundedSubscriptions, ConnectionId, Extensions, MethodCallback, MethodResponse, MethodSink, - Methods, SubscriptionState, + BoundedSubscriptions, ConnectionId, MethodCallback, MethodResponse, MethodSink, Methods, + SubscriptionState, }; use std::sync::Arc; @@ -58,7 +58,7 @@ impl<'a> RpcServiceT<'a> for RpcService { let params = req.params(); let name = req.method_name(); let id = req.id().clone(); - let extensions = Extensions::new(); + let extensions = req.extensions.clone(); match self.methods.method_with_name(name) { None => { diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 08a23c8e8a27..6e9e469ec443 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-rpc-types.workspace = true reth-rpc-eth-api.workspace = true reth-engine-primitives.workspace = true reth-network-peers.workspace = true @@ -33,6 +32,7 @@ alloy-rpc-types-txpool.workspace = true alloy-rpc-types-admin.workspace = true alloy-serde.workspace = true alloy-rpc-types-beacon.workspace = true +alloy-rpc-types-engine.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 5bd54ff02b19..50181d23a75c 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -9,15 +9,15 @@ use alloy_primitives::{Address, BlockHash, Bytes, B256, U256, U64}; use alloy_rpc_types::{ state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; -use 
alloy_rpc_types_eth::transaction::TransactionRequest; -use alloy_serde::JsonStorageKey; -use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_engine_primitives::EngineTypes; -use reth_rpc_types::engine::{ +use alloy_rpc_types_engine::{ ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, }; +use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_serde::JsonStorageKey; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_engine_primitives::EngineTypes; // NOTE: We can't use associated types in the `EngineApi` trait because of jsonrpsee, so we use a // generic here. It would be nice if the rpc macro would understand which types need to have serde. // By default, if the trait has a generic, the rpc macro will add e.g. `Engine: DeserializeOwned` to @@ -219,7 +219,7 @@ pub trait EngineApi { #[method(name = "getBlobsV1")] async fn get_blobs_v1( &self, - transaction_ids: Vec, + versioned_hashes: Vec, ) -> RpcResult>>; } diff --git a/crates/rpc/rpc-api/src/trace.rs b/crates/rpc/rpc-api/src/trace.rs index 91f3c253d692..58dda422ab86 100644 --- a/crates/rpc/rpc-api/src/trace.rs +++ b/crates/rpc/rpc-api/src/trace.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Bytes, B256}; +use alloy_primitives::{map::HashSet, Bytes, B256}; use alloy_rpc_types::{state::StateOverride, BlockOverrides, Index}; use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_rpc_types_trace::{ @@ -8,7 +8,6 @@ use alloy_rpc_types_trace::{ }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::BlockId; -use std::collections::HashSet; /// Ethereum trace API #[cfg_attr(not(feature = "client"), rpc(server, namespace = "trace"))] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 04e1a281e065..817d2a3d76b8 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -62,7 +62,6 @@ reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-engine-api.workspace = true -reth-rpc-types.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true @@ -72,6 +71,7 @@ reth-rpc-types-compat.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types-trace.workspace = true +alloy-rpc-types-engine.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } serde_json.workspace = true diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 4e86b2c81f6d..4ff98ae8d501 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -6,7 +6,7 @@ use reth_rpc_eth_types::{EthConfig, EthStateCacheConfig, GasPriceOracleConfig}; use reth_rpc_layer::{JwtError, JwtSecret}; use reth_rpc_server_types::RpcModuleSelection; use tower::layer::util::Identity; -use tracing::debug; +use tracing::{debug, warn}; use crate::{ auth::AuthServerConfig, error::RpcError, IpcServerBuilder, RpcModuleConfig, RpcServerConfig, @@ -168,6 +168,13 @@ impl RethRpcServerConfig for RpcServerArgs { fn rpc_server_config(&self) -> RpcServerConfig { let mut config = 
RpcServerConfig::default().with_jwt_secret(self.rpc_secret_key()); + if self.http_api.is_some() && !self.http { + warn!( + target: "reth::cli", + "The --http.api flag is set but --http is not enabled. HTTP RPC API will not be exposed." + ); + } + if self.http { let socket_address = SocketAddr::new(self.http_addr, self.http_port); config = config diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 79a42d121c0b..71e8bf39f9ea 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -2,12 +2,12 @@ use crate::utils::launch_auth; use alloy_primitives::U64; +use alloy_rpc_types_engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_primitives::Block; use reth_rpc_api::clients::EngineApiClient; use reth_rpc_layer::JwtSecret; -use reth_rpc_types::engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, convert_block_to_payload_input_v2, }; diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index dbdd2407fbb8..847de99564ef 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,5 +1,6 @@ use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::MAINNET; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; @@ -15,7 +16,6 @@ use reth_rpc_builder::{ use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_rpc_layer::JwtSecret; use reth_rpc_server_types::RpcModuleSelection; -use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_tasks::TokioTaskExecutor; use reth_transaction_pool::{ noop::NoopTransactionPool, diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 57f0832b5ff6..4463d375a034 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -16,7 +16,6 @@ workspace = true reth-chainspec.workspace = true reth-primitives.workspace = true reth-rpc-api.workspace = true -reth-rpc-types.workspace = true reth-storage-api.workspace = true reth-beacon-consensus.workspace = true reth-payload-builder.workspace = true @@ -30,6 +29,7 @@ reth-transaction-pool.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["jsonrpsee-types"] } # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 2b37f9d15f44..907297de1776 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -3,6 +3,12 @@ use crate::{ }; use alloy_eips::eip4844::BlobAndProofV1; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; +use alloy_rpc_types_engine::{ + CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, + ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, + ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + TransitionConfiguration, +}; use async_trait::async_trait; use jsonrpsee_core::RpcResult; use 
reth_beacon_consensus::BeaconConsensusEngineHandle; @@ -16,12 +22,6 @@ use reth_payload_primitives::{ }; use reth_primitives::{Block, BlockHashOrNumber, EthereumHardfork}; use reth_rpc_api::EngineApiServer; -use reth_rpc_types::engine::{ - CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, - ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, - ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, - TransitionConfiguration, -}; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, convert_to_payload_body_v2, }; @@ -948,6 +948,7 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use assert_matches::assert_matches; use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; use reth_chainspec::{ChainSpec, MAINNET}; @@ -955,7 +956,6 @@ mod tests { use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::SealedBlock; use reth_provider::test_utils::MockEthProvider; - use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_rpc_types_compat::engine::payload::execution_payload_from_sealed_block; use reth_tasks::TokioTaskExecutor; use reth_testing_utils::generators::random_block; diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index b0ba93d6e455..677bd2fb246d 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -4,7 +4,6 @@ use jsonrpsee_types::error::{ }; use reth_beacon_consensus::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError}; use reth_payload_primitives::{EngineObjectValidationError, PayloadBuilderError}; -use reth_rpc_types::ToRpcError; use thiserror::Error; /// The Engine API result type @@ -85,22 +84,22 @@ pub enum EngineApiError { NewPayload(#[from] BeaconOnNewPayloadError), /// Encountered an internal error. #[error(transparent)] - Internal(#[from] Box), + Internal(#[from] Box), /// Fetching the payload failed #[error(transparent)] GetPayloadError(#[from] PayloadBuilderError), /// The payload or attributes are known to be malformed before processing. #[error(transparent)] EngineObjectValidationError(#[from] EngineObjectValidationError), - /// Any other error + /// Any other rpc error #[error("{0}")] - Other(Box), + Other(jsonrpsee_types::ErrorObject<'static>), } impl EngineApiError { /// Crates a new [`EngineApiError::Other`] variant. 
- pub fn other(err: E) -> Self { - Self::Other(Box::new(err)) + pub const fn other(err: jsonrpsee_types::ErrorObject<'static>) -> Self { + Self::Other(err) } } @@ -187,7 +186,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { SERVER_ERROR_MSG, Some(ErrorData::new(error)), ), - EngineApiError::Other(err) => err.to_rpc_error(), + EngineApiError::Other(err) => err, } } } @@ -195,7 +194,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { #[cfg(test)] mod tests { use super::*; - use reth_rpc_types::engine::ForkchoiceUpdateError; + use alloy_rpc_types_engine::ForkchoiceUpdateError; #[track_caller] fn ensure_engine_rpc_error( diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 0ae97768b6c0..2c4216664ae2 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -1,9 +1,9 @@ use std::time::Duration; use crate::EngineApiError; +use alloy_rpc_types_engine::{ForkchoiceUpdated, PayloadStatus, PayloadStatusEnum}; use metrics::{Counter, Histogram}; use reth_metrics::Metrics; -use reth_rpc_types::engine::{ForkchoiceUpdated, PayloadStatus, PayloadStatusEnum}; /// All beacon consensus engine metrics #[derive(Default)] @@ -61,16 +61,16 @@ pub(crate) struct ForkchoiceUpdatedResponseMetrics { /// The total count of forkchoice updated messages received. pub(crate) forkchoice_updated_messages: Counter, /// The total count of forkchoice updated messages that we responded to with - /// [Invalid](reth_rpc_types::engine::PayloadStatusEnum#Invalid). + /// [`Invalid`](alloy_rpc_types_engine::PayloadStatusEnum#Invalid). pub(crate) forkchoice_updated_invalid: Counter, /// The total count of forkchoice updated messages that we responded to with - /// [Valid](reth_rpc_types::engine::PayloadStatusEnum#Valid). + /// [`Valid`](alloy_rpc_types_engine::PayloadStatusEnum#Valid). pub(crate) forkchoice_updated_valid: Counter, /// The total count of forkchoice updated messages that we responded to with - /// [Syncing](reth_rpc_types::engine::PayloadStatusEnum#Syncing). + /// [`Syncing`](alloy_rpc_types_engine::PayloadStatusEnum#Syncing). pub(crate) forkchoice_updated_syncing: Counter, /// The total count of forkchoice updated messages that we responded to with - /// [Accepted](reth_rpc_types::engine::PayloadStatusEnum#Accepted). + /// [`Accepted`](alloy_rpc_types_engine::PayloadStatusEnum#Accepted). pub(crate) forkchoice_updated_accepted: Counter, /// The total count of forkchoice updated messages that were unsuccessful, i.e. we responded /// with an error type that is not a [`PayloadStatusEnum`]. @@ -84,16 +84,16 @@ pub(crate) struct NewPayloadStatusResponseMetrics { /// The total count of new payload messages received. pub(crate) new_payload_messages: Counter, /// The total count of new payload messages that we responded to with - /// [Invalid](reth_rpc_types::engine::PayloadStatusEnum#Invalid). + /// [Invalid](alloy_rpc_types_engine::PayloadStatusEnum#Invalid). pub(crate) new_payload_invalid: Counter, /// The total count of new payload messages that we responded to with - /// [Valid](reth_rpc_types::engine::PayloadStatusEnum#Valid). + /// [Valid](alloy_rpc_types_engine::PayloadStatusEnum#Valid). pub(crate) new_payload_valid: Counter, /// The total count of new payload messages that we responded to with - /// [Syncing](reth_rpc_types::engine::PayloadStatusEnum#Syncing). + /// [Syncing](alloy_rpc_types_engine::PayloadStatusEnum#Syncing). 
pub(crate) new_payload_syncing: Counter, /// The total count of new payload messages that we responded to with - /// [Accepted](reth_rpc_types::engine::PayloadStatusEnum#Accepted). + /// [Accepted](alloy_rpc_types_engine::PayloadStatusEnum#Accepted). pub(crate) new_payload_accepted: Counter, /// The total count of new payload messages that were unsuccessful, i.e. we responded with an /// error type that is not a [`PayloadStatusEnum`]. diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index ccb6878e9d5a..c08c30c1de09 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -1,15 +1,12 @@ //! Some payload tests -use alloy_primitives::{Bytes, U256}; +use alloy_primitives::{Bytes, Sealable, U256}; use alloy_rlp::{Decodable, Error as RlpError}; -use assert_matches::assert_matches; -use reth_primitives::{ - alloy_primitives::Sealable, proofs, Block, SealedBlock, SealedHeader, TransactionSigned, - Withdrawals, -}; -use reth_rpc_types::engine::{ +use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, }; +use assert_matches::assert_matches; +use reth_primitives::{proofs, Block, SealedBlock, SealedHeader, TransactionSigned, Withdrawals}; use reth_rpc_types_compat::engine::payload::{ block_to_payload, block_to_payload_v1, convert_to_payload_body_v1, try_into_sealed_block, try_payload_v1_to_block, diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 4dd61324c389..23dd46baecf1 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -32,6 +32,7 @@ reth-network-api.workspace = true reth-trie.workspace = true # ethereum +alloy-eips.workspace = true alloy-dyn-abi = { workspace = true, features = ["eip712"] } alloy-json-rpc.workspace = true alloy-network.workspace = true @@ -39,7 +40,6 @@ alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types.workspace = true alloy-rpc-types-mev.workspace = true -alloy-eips.workspace = true # rpc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 94eaeb52d45f..8d34020d67bc 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -88,7 +88,7 @@ pub trait EthCall: Call + LoadPendingBlock { self.evm_env_at(block.unwrap_or_default()).await?; // Gas cap for entire operation - let total_gas_limit = self.call_gas_limit() as u128; + let total_gas_limit = self.call_gas_limit(); let base_block = self.block(block).await?.ok_or(EthApiError::HeaderNotFound(block))?; let mut parent_hash = base_block.header.hash(); @@ -1042,10 +1042,7 @@ pub trait Call: LoadState + SpawnBlocking { #[allow(clippy::needless_update)] let env = TxEnv { - gas_limit: gas_limit - .try_into() - .map_err(|_| RpcInvalidTransactionError::GasUintOverflow) - .map_err(Self::Error::from_eth_err)?, + gas_limit, nonce, caller: from.unwrap_or_default(), gas_price, diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 82d9e9ab4ef7..b6dcef4708ef 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -162,7 +162,7 @@ pub trait EthFees: LoadFee { for header in &headers { - base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default()); + 
base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128); gas_used_ratio.push(header.gas_used as f64 / header.gas_limit as f64); base_fee_per_blob_gas.push(header.blob_fee().unwrap_or_default()); blob_gas_used_ratio.push( @@ -180,8 +180,8 @@ pub trait EthFees: LoadFee { rewards.push( calculate_reward_percentiles_for_block( percentiles, - header.gas_used as u64, - header.base_fee_per_gas.unwrap_or_default() as u64, + header.gas_used, + header.base_fee_per_gas.unwrap_or_default(), &transactions, &receipts, ) @@ -204,7 +204,7 @@ pub trait EthFees: LoadFee { last_header.gas_used , last_header.gas_limit, last_header.base_fee_per_gas.unwrap_or_default() , - ), + ) as u128, ); // Same goes for the `base_fee_per_blob_gas`: diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index d4d5fd23d458..8014851e3f93 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -4,14 +4,12 @@ use std::time::{Duration, Instant}; use crate::{EthApiTypes, FromEthApiError, FromEvmError}; + use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rpc_types::BlockNumberOrTag; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_evm::{ - system_calls::{pre_block_beacon_root_contract_call, pre_block_blockhashes_contract_call}, - ConfigureEvm, ConfigureEvmEnv, -}; +use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_ROOT_HASH}, @@ -20,8 +18,8 @@ use reth_primitives::{ BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, ResultAndState, SpecId, }, - Block, BlockBody, Header, IntoRecoveredTransaction, Receipt, Requests, SealedBlockWithSenders, - SealedHeader, TransactionSignedEcRecovered, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, Header, Receipt, Requests, SealedBlockWithSenders, SealedHeader, + TransactionSignedEcRecovered, EMPTY_OMMER_ROOT_HASH, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, @@ -262,31 +260,27 @@ pub trait LoadPendingBlock: EthApiTypes { let chain_spec = self.provider().chain_spec(); + let evm_config = self.evm_config().clone(); + let mut system_caller = SystemCaller::new(&evm_config, chain_spec.clone()); + let parent_beacon_block_root = if origin.is_actual_pending() { // apply eip-4788 pre block contract call if we got the block from the CL with the real // parent beacon block root - pre_block_beacon_root_contract_call( - &mut db, - self.evm_config(), - chain_spec.as_ref(), - &cfg, - &block_env, - origin.header().parent_beacon_block_root, - ) - .map_err(|err| EthApiError::Internal(err.into()))?; + system_caller + .pre_block_beacon_root_contract_call( + &mut db, + &cfg, + &block_env, + origin.header().parent_beacon_block_root, + ) + .map_err(|err| EthApiError::Internal(err.into()))?; origin.header().parent_beacon_block_root } else { None }; - pre_block_blockhashes_contract_call( - &mut db, - self.evm_config(), - chain_spec.as_ref(), - &cfg, - &block_env, - origin.header().hash(), - ) - .map_err(|err| EthApiError::Internal(err.into()))?; + system_caller + .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, origin.header().hash()) + .map_err(|err| EthApiError::Internal(err.into()))?; let mut receipts = Vec::new(); @@ -444,11 +438,11 @@ pub trait 
LoadPendingBlock: EthApiTypes { timestamp: block_env.timestamp.to::(), mix_hash: block_env.prevrandao.unwrap_or_default(), nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(base_fee.into()), + base_fee_per_gas: Some(base_fee), number: block_number, - gas_limit: block_gas_limit.into(), + gas_limit: block_gas_limit, difficulty: U256::ZERO, - gas_used: cumulative_gas_used.into(), + gas_used: cumulative_gas_used, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), extra_data: Default::default(), diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 15e272c07def..7b5d13b2a7dd 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -1,9 +1,11 @@ //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods. +use crate::FromEvmError; use alloy_primitives::B256; use alloy_rpc_types::{BlockId, TransactionInfo}; use futures::Future; -use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_chainspec::ChainSpecProvider; +use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::Header; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ @@ -14,8 +16,6 @@ use revm::{db::CacheDB, Database, DatabaseCommit, GetInspector, Inspector}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use revm_primitives::{EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState}; -use crate::FromEvmError; - use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; /// Executes CPU heavy tasks. @@ -190,12 +190,31 @@ pub trait Trace: LoadState { // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in let parent_block = block.parent_hash; + let parent_beacon_block_root = block.parent_beacon_block_root; let block_txs = block.into_transactions_ecrecovered(); let this = self.clone(); self.spawn_with_state_at_block(parent_block.into(), move |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); + // apply relevant system calls + let mut system_caller = SystemCaller::new( + Trace::evm_config(&this), + LoadState::provider(&this).chain_spec(), + ); + system_caller + .pre_block_beacon_root_contract_call( + &mut db, + &cfg, + &block_env, + parent_beacon_block_root, + ) + .map_err(|_| { + EthApiError::EvmCustom( + "failed to apply 4788 beacon root system call".to_string(), + ) + })?; + // replay all transactions prior to the targeted transaction this.replay_transactions_until( &mut db, @@ -306,6 +325,27 @@ pub trait Trace: LoadState { let block_number = block_env.number.saturating_to::(); let base_fee = block_env.basefee.saturating_to::(); + // now get the state + let state = this.state_at_block_id(state_at.into())?; + let mut db = + CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); + + // apply relevant system calls + let mut system_caller = SystemCaller::new( + Trace::evm_config(&this), + LoadState::provider(&this).chain_spec(), + ); + system_caller + .pre_block_beacon_root_contract_call( + &mut db, + &cfg, + &block_env, + block.header().parent_beacon_block_root, + ) + .map_err(|_| { + EthApiError::EvmCustom("failed to apply 4788 system call".to_string()) + })?; + // prepare transactions, we do everything upfront to reduce time spent with open // state let max_transactions = @@ -332,11 +372,6 @@ pub trait Trace: 
LoadState { }) .peekable(); - // now get the state - let state = this.state_at_block_id(state_at.into())?; - let mut db = - CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); - while let Some((tx_info, tx)) = transactions.next() { let env = EnvWithHandlerCfg::new_with_cfg_env(cfg.clone(), block_env.clone(), tx); diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index a78f2cd330a6..d98cb69bfc30 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -2,6 +2,7 @@ //! network. use alloy_dyn_abi::TypedData; +use alloy_eips::eip2718::Encodable2718; use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types::{BlockNumberOrTag, TransactionInfo}; @@ -106,7 +107,7 @@ pub trait EthTransactions: LoadTransaction { Ok(LoadTransaction::provider(this) .transaction_by_hash(hash) .map_err(Self::Error::from_eth_err)? - .map(|tx| tx.envelope_encoded())) + .map(|tx| tx.encoded_2718().into())) }) .await } @@ -305,7 +306,7 @@ pub trait EthTransactions: LoadTransaction { async move { if let Some(block) = self.block_with_senders(block_id).await? { if let Some(tx) = block.transactions().nth(index) { - return Ok(Some(tx.envelope_encoded())) + return Ok(Some(tx.encoded_2718().into())) } } @@ -359,16 +360,16 @@ pub trait EthTransactions: LoadTransaction { if request.nonce.is_none() { let nonce = self.transaction_count(from, Some(BlockId::pending())).await?; // note: `.to()` can't panic because the nonce is constructed from a `u64` - request.nonce = Some(nonce.to::()); + request.nonce = Some(nonce.to()); } let chain_id = self.chain_id(); - request.chain_id = Some(chain_id.to::()); + request.chain_id = Some(chain_id.to()); let estimated_gas = self.estimate_gas_at(request.clone(), BlockId::pending(), None).await?; let gas_limit = estimated_gas; - request.set_gas_limit(gas_limit.to::()); + request.set_gas_limit(gas_limit.to()); let signed_tx = self.sign_request(&from, request).await?; diff --git a/crates/rpc/rpc-eth-api/src/pubsub.rs b/crates/rpc/rpc-eth-api/src/pubsub.rs index 32c668688771..b70dacb26fa9 100644 --- a/crates/rpc/rpc-eth-api/src/pubsub.rs +++ b/crates/rpc/rpc-eth-api/src/pubsub.rs @@ -11,7 +11,7 @@ pub trait EthPubSubApi { #[subscription( name = "subscribe" => "subscription", unsubscribe = "unsubscribe", - item = reth_rpc_types::pubsub::SubscriptionResult + item = alloy_rpc_types::pubsub::SubscriptionResult )] async fn subscribe( &self, diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 1e2265589fae..2c6f51ff462b 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -22,7 +22,6 @@ reth-primitives = { workspace = true, features = ["secp256k1"] } reth-storage-api.workspace = true reth-revm.workspace = true reth-rpc-server-types.workspace = true -reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-tasks.workspace = true reth-transaction-pool.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 10ab9a5fe8bc..ad9804893a70 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -2,7 +2,10 @@ //! in default implementation of //! `reth_rpc_eth_api::helpers::Call`. 
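// Illustrative sketch, not part of this diff: the pending-block and trace hunks
// above replace the free functions `pre_block_beacon_root_contract_call` /
// `pre_block_blockhashes_contract_call` with methods on
// `reth_evm::system_calls::SystemCaller`. The new call shape, assuming
// `evm_config`, `chain_spec`, `db`, `cfg`, `block_env`,
// `parent_beacon_block_root` and `parent_block_hash` are in scope inside a
// fallible function:
//
//     use reth_evm::system_calls::SystemCaller;
//
//     let mut system_caller = SystemCaller::new(&evm_config, chain_spec.clone());
//     // EIP-4788: seed the beacon-root contract before executing block txs.
//     system_caller.pre_block_beacon_root_contract_call(
//         &mut db,
//         &cfg,
//         &block_env,
//         parent_beacon_block_root,
//     )?;
//     // EIP-2935: make recent block hashes available in-state.
//     system_caller.pre_block_blockhashes_contract_call(
//         &mut db,
//         &cfg,
//         &block_env,
//         parent_block_hash,
//     )?;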
-use alloy_primitives::{Address, B256, U256}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Address, B256, U256, +}; use reth_errors::ProviderResult; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; use reth_storage_api::StateProvider; @@ -70,7 +73,7 @@ impl<'a> reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<' fn multiproof( &self, input: reth_trie::TrieInput, - targets: std::collections::HashMap>, + targets: HashMap>, ) -> ProviderResult { self.0.multiproof(input, targets) } @@ -79,7 +82,8 @@ impl<'a> reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<' &self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState, - ) -> reth_errors::ProviderResult> { + ) -> reth_errors::ProviderResult> + { self.0.witness(input, target) } } diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error.rs index 90aec212730d..fbb93164ce9f 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error.rs @@ -10,7 +10,6 @@ use reth_primitives::{revm_primitives::InvalidHeader, BlockId}; use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, }; -use reth_rpc_types::ToRpcError; use reth_transaction_pool::error::{ Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError, PoolError, PoolErrorKind, PoolTransactionError, @@ -19,6 +18,18 @@ use revm::primitives::{EVMError, ExecutionResult, HaltReason, InvalidTransaction use revm_inspectors::tracing::MuxError; use tracing::error; +/// A trait to convert an error to an RPC error. +pub trait ToRpcError: core::error::Error + Send + Sync + 'static { + /// Converts the error to a JSON-RPC error object. + fn to_rpc_error(&self) -> jsonrpsee_types::ErrorObject<'static>; +} + +impl ToRpcError for jsonrpsee_types::ErrorObject<'static> { + fn to_rpc_error(&self) -> jsonrpsee_types::ErrorObject<'static> { + self.clone() + } +} + /// Result alias pub type EthResult = Result; @@ -591,7 +602,7 @@ impl std::fmt::Display for RevertError { } } -impl std::error::Error for RevertError {} +impl core::error::Error for RevertError {} /// A helper error type that's mainly used to mirror `geth` Txpool's error messages #[derive(Debug, thiserror::Error)] @@ -643,7 +654,7 @@ pub enum RpcPoolError { AddressAlreadyReserved, /// Other unspecified error #[error(transparent)] - Other(Box), + Other(Box), } impl From for jsonrpsee_types::error::ErrorObject<'static> { diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 3f6f3ee74bab..08ac56845ffe 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -359,16 +359,16 @@ impl FeeHistoryEntry { /// Note: This does not calculate the rewards for the block. 
pub fn new(block: &SealedBlock) -> Self { Self { - base_fee_per_gas: block.base_fee_per_gas.unwrap_or_default() as u64, + base_fee_per_gas: block.base_fee_per_gas.unwrap_or_default(), gas_used_ratio: block.gas_used as f64 / block.gas_limit as f64, base_fee_per_blob_gas: block.blob_fee(), blob_gas_used_ratio: block.blob_gas_used() as f64 / reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, - excess_blob_gas: block.excess_blob_gas.map(|excess_blob| excess_blob as u64), - blob_gas_used: block.blob_gas_used.map(|block_gas| block_gas as u64), - gas_used: block.gas_used as u64, + excess_blob_gas: block.excess_blob_gas, + blob_gas_used: block.blob_gas_used, + gas_used: block.gas_used, header_hash: block.hash(), - gas_limit: block.gas_limit as u64, + gas_limit: block.gas_limit, rewards: Vec::new(), timestamp: block.timestamp, } @@ -377,11 +377,11 @@ impl FeeHistoryEntry { /// Returns the base fee for the next block according to the EIP-1559 spec. pub fn next_block_base_fee(&self, chain_spec: impl EthChainSpec) -> u64 { calc_next_block_base_fee( - self.gas_used as u128, - self.gas_limit as u128, - self.base_fee_per_gas as u128, + self.gas_used, + self.gas_limit, + self.base_fee_per_gas, chain_spec.base_fee_params_at_timestamp(self.timestamp), - ) as u64 + ) } /// Returns the blob fee for the next block according to the EIP-4844 spec. diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index e2dcb0b06555..01591bc4de38 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -221,9 +221,7 @@ where let parent_hash = block.parent_hash; // sort the functions by ascending effective tip first - block.body.transactions.sort_by_cached_key(|tx| { - tx.effective_tip_per_gas(base_fee_per_gas.map(|base_fee| base_fee as u64)) - }); + block.body.transactions.sort_by_cached_key(|tx| tx.effective_tip_per_gas(base_fee_per_gas)); let mut prices = Vec::with_capacity(limit); @@ -231,8 +229,7 @@ where let mut effective_gas_tip = None; // ignore transactions with a tip under the configured threshold if let Some(ignore_under) = self.ignore_price { - let tip = - tx.effective_tip_per_gas(base_fee_per_gas.map(|base_fee| base_fee as u64)); + let tip = tx.effective_tip_per_gas(base_fee_per_gas); effective_gas_tip = Some(tip); if tip < Some(ignore_under) { continue @@ -249,9 +246,7 @@ where // a `None` effective_gas_tip represents a transaction where the max_fee_per_gas is // less than the base fee which would be invalid let effective_gas_tip = effective_gas_tip - .unwrap_or_else(|| { - tx.effective_tip_per_gas(base_fee_per_gas.map(|base_fee| base_fee as u64)) - }) + .unwrap_or_else(|| tx.effective_tip_per_gas(base_fee_per_gas)) .ok_or(RpcInvalidTransactionError::FeeCapTooLow)?; prices.push(U256::from(effective_gas_tip)); diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index f630b9aed0bc..bb44dc0e6669 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -141,7 +141,7 @@ pub fn append_matching_block_logs( let transaction_id = first_tx_num + receipt_idx as u64; let transaction = provider .transaction_by_id(transaction_id)? 
- .ok_or(ProviderError::TransactionNotFound(transaction_id.into()))?; + .ok_or_else(|| ProviderError::TransactionNotFound(transaction_id.into()))?; transaction_hash = Some(transaction.hash()); } diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 0db5e14da559..561aa360d86f 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -16,7 +16,6 @@ use reth_primitives::{ }; use reth_revm::database::StateProviderDatabase; use reth_rpc_server_types::result::rpc_err; -use reth_rpc_types::ToRpcError; use reth_rpc_types_compat::{block::from_block, TransactionCompat}; use reth_storage_api::StateRootProvider; use reth_trie::{HashedPostState, HashedStorage}; @@ -24,7 +23,8 @@ use revm::{db::CacheDB, Database}; use revm_primitives::{keccak256, Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; use crate::{ - cache::db::StateProviderTraitObjWrapper, EthApiError, RevertError, RpcInvalidTransactionError, + cache::db::StateProviderTraitObjWrapper, error::ToRpcError, EthApiError, RevertError, + RpcInvalidTransactionError, }; /// Errors which may occur during `eth_simulateV1` execution. @@ -61,7 +61,7 @@ impl ToRpcError for EthSimulateError { pub fn resolve_transactions( txs: &mut [TransactionRequest], validation: bool, - block_gas_limit: u128, + block_gas_limit: u64, chain_id: u64, db: &mut DB, ) -> Result, EthApiError> @@ -71,7 +71,7 @@ where let mut transactions = Vec::with_capacity(txs.len()); let default_gas_limit = { - let total_specified_gas = txs.iter().filter_map(|tx| tx.gas).sum::(); + let total_specified_gas = txs.iter().filter_map(|tx| tx.gas).sum::(); let txs_without_gas_limit = txs.iter().filter(|tx| tx.gas.is_none()).count(); if total_specified_gas > block_gas_limit { @@ -79,7 +79,7 @@ where } if txs_without_gas_limit > 0 { - (block_gas_limit - total_specified_gas) / txs_without_gas_limit as u128 + (block_gas_limit - total_specified_gas) / txs_without_gas_limit as u64 } else { 0 } @@ -195,7 +195,7 @@ pub fn build_block( ExecutionResult::Halt { reason, gas_used } => { let error = RpcInvalidTransactionError::halt(reason, tx.gas_limit()); SimCallResult { - return_value: Bytes::new(), + return_data: Bytes::new(), error: Some(SimulateError { code: error.error_code(), message: error.to_string(), @@ -208,7 +208,7 @@ pub fn build_block( ExecutionResult::Revert { output, gas_used } => { let error = RevertError::new(output.clone()); SimCallResult { - return_value: output, + return_data: output, error: Some(SimulateError { code: error.error_code(), message: error.to_string(), @@ -219,7 +219,7 @@ pub fn build_block( } } ExecutionResult::Success { output, gas_used, logs, .. 
} => SimCallResult { - return_value: output.into_data(), + return_data: output.into_data(), error: None, gas_used, logs: logs @@ -282,7 +282,7 @@ pub fn build_block( timestamp: block_env.timestamp.to(), base_fee_per_gas: Some(block_env.basefee.to()), gas_limit: block_env.gas_limit.to(), - gas_used: calls.iter().map(|c| c.gas_used).sum::() as u128, + gas_used: calls.iter().map(|c| c.gas_used).sum::(), blob_gas_used: Some(0), parent_hash, receipts_root: calculate_receipt_root(&receipts), diff --git a/crates/rpc/rpc-server-types/Cargo.toml b/crates/rpc/rpc-server-types/Cargo.toml index e908af0af75a..08ecd3947742 100644 --- a/crates/rpc/rpc-server-types/Cargo.toml +++ b/crates/rpc/rpc-server-types/Cargo.toml @@ -15,10 +15,10 @@ workspace = true reth-errors.workspace = true reth-network-api.workspace = true reth-primitives.workspace = true -reth-rpc-types.workspace = true # ethereum alloy-primitives.workspace = true +alloy-rpc-types-engine.workspace = true # rpc jsonrpsee-core.workspace = true diff --git a/crates/rpc/rpc-server-types/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs index 3dc76f0d8f4b..78e6436643a7 100644 --- a/crates/rpc/rpc-server-types/src/result.rs +++ b/crates/rpc/rpc-server-types/src/result.rs @@ -2,9 +2,9 @@ use std::fmt; +use alloy_rpc_types_engine::PayloadError; use jsonrpsee_core::RpcResult; use reth_primitives::BlockId; -use reth_rpc_types::engine::PayloadError; /// Helper trait to easily convert various `Result` types into [`RpcResult`] pub trait ToRpcResult: Sized { diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index 2326a9e891db..13914a59eb3e 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -1,6 +1,6 @@ //! Helpers for testing trace calls. -use alloy_primitives::{Bytes, TxHash, B256}; +use alloy_primitives::{map::HashSet, Bytes, TxHash, B256}; use alloy_rpc_types::Index; use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_rpc_types_trace::{ @@ -13,7 +13,6 @@ use jsonrpsee::core::client::Error as RpcError; use reth_primitives::BlockId; use reth_rpc_api::clients::TraceApiClient; use std::{ - collections::HashSet, pin::Pin, task::{Context, Poll}, }; @@ -540,7 +539,7 @@ mod tests { "0xea2817f1aeeb587b82f4ab87a6dbd3560fc35ed28de1be280cb40b2a24ab48bb".parse().unwrap(), ]; - let trace_types = HashSet::from([TraceType::StateDiff, TraceType::VmTrace]); + let trace_types = HashSet::from_iter([TraceType::StateDiff, TraceType::VmTrace]); let mut stream = client.replay_transactions(transactions, trace_types); let mut successes = 0; @@ -572,7 +571,7 @@ mod tests { let call_request_1 = TransactionRequest::default(); let call_request_2 = TransactionRequest::default(); - let trace_types = HashSet::from([TraceType::StateDiff, TraceType::VmTrace]); + let trace_types = HashSet::from_iter([TraceType::StateDiff, TraceType::VmTrace]); let calls = vec![(call_request_1, trace_types.clone()), (call_request_2, trace_types)]; let mut stream = client.trace_call_many_stream(calls, None); diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index 607833518ca9..b0fccefbb464 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -1,5 +1,6 @@ //! Integration tests for the trace API. 
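// Illustrative sketch, not part of this diff: `alloy_primitives::map::HashSet`
// is backed by a non-default build hasher, so the std-only `HashSet::from([..])`
// array constructor (implemented only for `RandomState` sets) gives way to the
// hasher-agnostic `FromIterator` form used throughout these tests:
//
//     use alloy_primitives::map::HashSet;
//     use alloy_rpc_types_trace::parity::TraceType;
//
//     // `from_iter` works for any `BuildHasher: Default`.
//     let trace_types = HashSet::from_iter([TraceType::StateDiff, TraceType::VmTrace]);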
+use alloy_primitives::map::HashSet; use alloy_rpc_types::{Block, Transaction}; use alloy_rpc_types_trace::{ filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest, @@ -10,7 +11,7 @@ use jsonrpsee_http_client::HttpClient; use reth_primitives::Receipt; use reth_rpc_api_testing_util::{debug::DebugApiExt, trace::TraceApiExt, utils::parse_env_url}; use reth_rpc_eth_api::EthApiClient; -use std::{collections::HashSet, time::Instant}; +use std::time::Instant; /// This is intended to be run locally against a running node. /// @@ -44,7 +45,7 @@ async fn replay_transactions() { "0xea2817f1aeeb587b82f4ab87a6dbd3560fc35ed28de1be280cb40b2a24ab48bb".parse().unwrap(), ]; - let trace_types = HashSet::from([TraceType::StateDiff, TraceType::VmTrace]); + let trace_types = HashSet::from_iter([TraceType::StateDiff, TraceType::VmTrace]); let mut stream = client.replay_transactions(tx_hashes, trace_types); let now = Instant::now(); diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index bf569ec567ce..a9d82d95779c 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -14,15 +14,16 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-rpc-types.workspace = true reth-trie-common.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-rpc-types.workspace = true alloy-rpc-types-eth = { workspace = true, default-features = false, features = ["serde"] } alloy-serde.workspace = true +alloy-rpc-types-engine.workspace = true [dev-dependencies] serde_json.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 6febe8e81c20..fc8ea9e1c487 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -145,9 +145,9 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) difficulty, mix_hash: Some(mix_hash), nonce: Some(nonce), - base_fee_per_gas: base_fee_per_gas.map(u128::from), - blob_gas_used: blob_gas_used.map(u128::from), - excess_blob_gas: excess_blob_gas.map(u128::from), + base_fee_per_gas, + blob_gas_used, + excess_blob_gas, parent_beacon_block_root, total_difficulty: None, requests_root, diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index f4e9dc6c7710..84943b60e208 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -1,17 +1,18 @@ //! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in //! 
Ethereum's Engine +use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{B256, U256}; +use alloy_rpc_types_engine::{ + payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, + ExecutionPayload, ExecutionPayloadBodyV2, ExecutionPayloadV1, ExecutionPayloadV2, + ExecutionPayloadV3, ExecutionPayloadV4, PayloadError, +}; use reth_primitives::{ constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE}, proofs::{self}, Block, BlockBody, Header, Request, SealedBlock, TransactionSigned, Withdrawals, }; -use reth_rpc_types::engine::{ - payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadBodyV2, ExecutionPayloadV1, ExecutionPayloadV2, - ExecutionPayloadV3, ExecutionPayloadV4, PayloadError, -}; /// Converts [`ExecutionPayloadV1`] to [`Block`] pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result { @@ -26,7 +27,17 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result, _>>()?; let transactions_root = proofs::calculate_transaction_root(&transactions); @@ -39,8 +50,8 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result Result ExecutionPayloadV1 { logs_bloom: value.logs_bloom, prev_randao: value.mix_hash, block_number: value.number, - gas_limit: value.gas_limit as u64, - gas_used: value.gas_used as u64, + gas_limit: value.gas_limit, + gas_used: value.gas_used, timestamp: value.timestamp, extra_data: value.extra_data.clone(), base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), @@ -166,8 +177,8 @@ pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { logs_bloom: value.logs_bloom, prev_randao: value.mix_hash, block_number: value.number, - gas_limit: value.gas_limit as u64, - gas_used: value.gas_used as u64, + gas_limit: value.gas_limit, + gas_used: value.gas_used, timestamp: value.timestamp, extra_data: value.extra_data.clone(), base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), @@ -182,8 +193,8 @@ pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { let transactions = value.raw_transactions(); ExecutionPayloadV3 { - blob_gas_used: value.blob_gas_used.unwrap_or_default() as u64, - excess_blob_gas: value.excess_blob_gas.unwrap_or_default() as u64, + blob_gas_used: value.blob_gas_used.unwrap_or_default(), + excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), payload_inner: ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { parent_hash: value.parent_hash, @@ -193,8 +204,8 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { logs_bloom: value.logs_bloom, prev_randao: value.mix_hash, block_number: value.number, - gas_limit: value.gas_limit as u64, - gas_used: value.gas_used as u64, + gas_limit: value.gas_limit, + gas_used: value.gas_used, timestamp: value.timestamp, extra_data: value.extra_data.clone(), base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), @@ -360,7 +371,7 @@ pub fn validate_block_hash( pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { let transactions = value.body.transactions.into_iter().map(|tx| { let mut out = Vec::new(); - tx.encode_enveloped(&mut out); + tx.encode_2718(&mut out); out.into() }); ExecutionPayloadBodyV1 { @@ -373,7 +384,7 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { pub fn convert_to_payload_body_v2(value: Block) -> ExecutionPayloadBodyV2 
{ let transactions = value.body.transactions.into_iter().map(|tx| { let mut out = Vec::new(); - tx.encode_enveloped(&mut out); + tx.encode_2718(&mut out); out.into() }); @@ -426,8 +437,8 @@ pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPaylo logs_bloom: value.logs_bloom, prev_randao: value.mix_hash, block_number: value.number, - gas_limit: value.gas_limit as u64, - gas_used: value.gas_used as u64, + gas_limit: value.gas_limit, + gas_used: value.gas_used, timestamp: value.timestamp, extra_data: value.extra_data.clone(), base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), @@ -443,9 +454,9 @@ mod tests { validate_block_hash, }; use alloy_primitives::{b256, hex, Bytes, U256}; - use reth_rpc_types::{ - engine::{CancunPayloadFields, ExecutionPayloadV3, ExecutionPayloadV4}, - ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, + use alloy_rpc_types_engine::{ + CancunPayloadFields, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, + ExecutionPayloadV3, ExecutionPayloadV4, }; #[test] diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 7e6eb0d42ada..4a00bf7a4390 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -129,7 +129,7 @@ pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> Transact gas_price, max_fee_per_gas, max_priority_fee_per_gas, - gas: Some(gas as u128), + gas: Some(gas), value: Some(value), input: TransactionInput::new(input), nonce: Some(nonce), diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml deleted file mode 100644 index 54840490bb47..000000000000 --- a/crates/rpc/rpc-types/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "reth-rpc-types" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -description = "Reth RPC types" - -[lints] -workspace = true - -[dependencies] - -# ethereum -alloy-rpc-types-engine = { workspace = true, features = ["std", "serde", "jsonrpsee-types"], optional = true } - -# misc -jsonrpsee-types = { workspace = true, optional = true } - -[dev-dependencies] - -[features] -default = ["jsonrpsee-types"] -jsonrpsee-types = [ - "dep:jsonrpsee-types", - "dep:alloy-rpc-types-engine", - "alloy-rpc-types-engine/jsonrpsee-types", -] \ No newline at end of file diff --git a/crates/rpc/rpc-types/src/eth/error.rs b/crates/rpc/rpc-types/src/eth/error.rs deleted file mode 100644 index 9212b2f17a75..000000000000 --- a/crates/rpc/rpc-types/src/eth/error.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! Implementation specific Errors for the `eth_` namespace. - -/// A trait to convert an error to an RPC error. -#[cfg(feature = "jsonrpsee-types")] -pub trait ToRpcError: std::error::Error + Send + Sync + 'static { - /// Converts the error to a JSON-RPC error object. - fn to_rpc_error(&self) -> jsonrpsee_types::ErrorObject<'static>; -} - -#[cfg(feature = "jsonrpsee-types")] -impl ToRpcError for jsonrpsee_types::ErrorObject<'static> { - fn to_rpc_error(&self) -> jsonrpsee_types::ErrorObject<'static> { - self.clone() - } -} diff --git a/crates/rpc/rpc-types/src/eth/mod.rs b/crates/rpc/rpc-types/src/eth/mod.rs deleted file mode 100644 index 0db9f0e41467..000000000000 --- a/crates/rpc/rpc-types/src/eth/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! 
Ethereum related types - -pub(crate) mod error; - -// re-export -#[cfg(feature = "jsonrpsee-types")] -pub use alloy_rpc_types_engine as engine; diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs deleted file mode 100644 index 59196990131d..000000000000 --- a/crates/rpc/rpc-types/src/lib.rs +++ /dev/null @@ -1,24 +0,0 @@ -//! Reth RPC type definitions. -//! -//! Provides all relevant types for the various RPC endpoints, grouped by namespace. - -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#[allow(hidden_glob_reexports)] -mod eth; - -// Ethereum specific rpc types related to typed transaction requests and the engine API. -#[cfg(feature = "jsonrpsee-types")] -pub use eth::error::ToRpcError; -#[cfg(feature = "jsonrpsee-types")] -pub use eth::{ - engine, - engine::{ - ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, - }, -}; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 49a1e512ed2f..4665cd002ca4 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -39,6 +39,7 @@ reth-trie.workspace = true alloy-consensus.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true +alloy-eips.workspace = true alloy-dyn-abi.workspace = true alloy-genesis.workspace = true alloy-network.workspace = true @@ -90,6 +91,8 @@ reth-testing-utils.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +alloy-consensus.workspace = true + jsonrpsee-types.workspace = true jsonrpsee = { workspace = true, features = ["client"] } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 9e8ff892ccd8..d7ee43720c19 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,3 +1,4 @@ +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use alloy_rpc_types::{ @@ -417,9 +418,7 @@ where let frame: FlatCallFrame = inspector .with_transaction_gas_limit(env.tx.gas_limit) .into_parity_builder() - .into_localized_transaction_traces(tx_info) - .pop() - .unwrap(); + .into_localized_transaction_traces(tx_info); Ok(frame) }) .await?; @@ -621,7 +620,7 @@ where let block_executor = this.inner.block_executor.executor(db); let mut hashed_state = HashedPostState::default(); - let mut keys = HashMap::new(); + let mut keys = HashMap::default(); let _ = block_executor .execute_with_state_witness( (&block.clone().unseal(), block.difficulty).into(), @@ -666,7 +665,11 @@ where let state = state_provider.witness(Default::default(), hashed_state).map_err(Into::into)?; - Ok(ExecutionWitness { state, keys: include_preimages.then_some(keys) }) + Ok(ExecutionWitness { + state: HashMap::from_iter(state.into_iter()), + codes: Default::default(), + keys: include_preimages.then_some(keys), + }) }) .await } @@ -769,9 +772,7 @@ where let frame: FlatCallFrame = inspector .with_transaction_gas_limit(env.tx.gas_limit) .into_parity_builder() - .into_localized_transaction_traces(tx_info) - .pop() - .unwrap(); + .into_localized_transaction_traces(tx_info); return Ok((frame.into(), res.state)); 
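Two mechanical migrations recur across the hunks above: enveloped transaction encoding moves from reth-specific helpers to the alloy EIP-2718 traits, and the `eth_callBundle` hash is now computed incrementally. A minimal round-trip sketch of the first, using only the traits this diff imports:

```rust
use alloy_eips::eip2718::{Decodable2718, Encodable2718};
use reth_primitives::TransactionSigned;

/// Encode a signed transaction as its EIP-2718 envelope (type byte plus
/// payload for typed transactions, plain RLP for legacy) and decode it back.
/// This is the alloy-trait replacement for the removed `encode_enveloped`/
/// `decode_enveloped` helpers.
fn roundtrip_2718(tx: &TransactionSigned) -> TransactionSigned {
    let mut buf = Vec::new();
    tx.encode_2718(&mut buf);
    TransactionSigned::decode_2718(&mut buf.as_slice()).expect("valid 2718 envelope")
}
```

And a sketch of the second: `alloy_primitives::Keccak256` streams each transaction hash into the hasher, producing the same digest as hashing one concatenated buffer but without the intermediate `Vec` the old code allocated.

```rust
use alloy_primitives::{Keccak256, B256};

fn bundle_hash(tx_hashes: &[B256]) -> B256 {
    let mut hasher = Keccak256::new();
    for hash in tx_hashes {
        hasher.update(hash); // equivalent to hashing the concatenation
    }
    hasher.finalize()
}
```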
} @@ -883,7 +884,7 @@ where .block_with_senders_by_id(block_id, TransactionVariant::NoHash) .to_rpc_result()? .unwrap_or_default(); - Ok(block.into_transactions_ecrecovered().map(|tx| tx.envelope_encoded()).collect()) + Ok(block.into_transactions_ecrecovered().map(|tx| tx.encoded_2718().into()).collect()) } /// Handler for `debug_getRawReceipts` diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index dd8f75898bfe..bede4599e1a1 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -2,7 +2,7 @@ use std::sync::Arc; -use alloy_primitives::{keccak256, U256}; +use alloy_primitives::{Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; @@ -26,7 +26,6 @@ use reth_rpc_eth_api::{ EthCallBundleApiServer, }; use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError}; - /// `Eth` bundle implementation. pub struct EthBundle { /// All nested fields bundled together. @@ -161,7 +160,7 @@ where let mut coinbase_balance_after_tx = initial_coinbase; let mut total_gas_used = 0u64; let mut total_gas_fess = U256::ZERO; - let mut hash_bytes = Vec::with_capacity(32 * transactions.len()); + let mut hasher = Keccak256::new(); let mut evm = Call::evm_config(ð_api).evm_with_env(db, env); @@ -179,7 +178,7 @@ where let tx = tx.into_transaction(); - hash_bytes.extend_from_slice(tx.hash().as_slice()); + hasher.update(tx.hash()); let gas_price = tx .effective_tip_per_gas(basefee) .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) @@ -244,7 +243,7 @@ where coinbase_diff.checked_div(U256::from(total_gas_used)).unwrap_or_default(); let res = EthCallBundleResponse { bundle_gas_price, - bundle_hash: keccak256(&hash_bytes), + bundle_hash: hasher.finalize(), coinbase_diff, eth_sent_to_coinbase, gas_fees: total_gas_fess, diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 6b258cac4467..304266f6a8b3 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -448,15 +448,15 @@ mod tests { for i in (0..block_count).rev() { let hash = rng.gen(); - let gas_limit: u64 = rng.gen(); - let gas_used: u64 = rng.gen(); - // Note: Generates a u32 to avoid overflows later + // Note: Generates saner values to avoid invalid overflows later + let gas_limit = rng.gen::() as u64; let base_fee_per_gas: Option = rng.gen::().then(|| rng.gen::() as u64); + let gas_used = rng.gen::() as u64; let header = Header { number: newest_block - i, - gas_limit: gas_limit.into(), - gas_used: gas_used.into(), + gas_limit, + gas_used, base_fee_per_gas: base_fee_per_gas.map(Into::into), parent_hash, ..Default::default() @@ -471,9 +471,9 @@ mod tests { if let Some(base_fee_per_gas) = header.base_fee_per_gas { let transaction = TransactionSigned { transaction: reth_primitives::Transaction::Eip1559( - reth_primitives::TxEip1559 { + alloy_consensus::TxEip1559 { max_priority_fee_per_gas: random_fee, - max_fee_per_gas: random_fee + base_fee_per_gas, + max_fee_per_gas: random_fee + base_fee_per_gas as u128, ..Default::default() }, ), @@ -511,7 +511,7 @@ mod tests { last_header.gas_used, last_header.gas_limit, last_header.base_fee_per_gas.unwrap_or_default(), - )); + ) as u128); let eth_api = build_test_eth_api(mock_provider); diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index c4caa7300ee2..274347404c93 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ 
b/crates/rpc/rpc/src/eth/filter.rs @@ -19,7 +19,7 @@ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; use reth_node_api::EthApiTypes; -use reth_primitives::{IntoRecoveredTransaction, TransactionSignedEcRecovered}; +use reth_primitives::TransactionSignedEcRecovered; use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider, ProviderError}; use reth_rpc_eth_api::{EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat}; use reth_rpc_eth_types::{ @@ -379,7 +379,7 @@ where let block = self .provider .header_by_hash_or_number(block_hash.into())? - .ok_or(ProviderError::HeaderNotFound(block_hash.into()))?; + .ok_or_else(|| ProviderError::HeaderNotFound(block_hash.into()))?; // we also need to ensure that the receipts are available and return an error if // not, in case the block hash been reorged @@ -511,7 +511,7 @@ where None => self .provider .block_hash(header.number)? - .ok_or(ProviderError::HeaderNotFound(header.number.into()))?, + .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?, }; if let Some(receipts) = self.eth_cache.get_receipts(block_hash).await? { @@ -551,7 +551,7 @@ pub struct ActiveFilters { impl ActiveFilters { /// Returns an empty instance. pub fn new() -> Self { - Self { inner: Arc::new(Mutex::new(HashMap::new())) } + Self { inner: Arc::new(Mutex::new(HashMap::default())) } } } @@ -698,28 +698,28 @@ impl Iterator for BlockRangeInclusiveIter { #[cfg(test)] mod tests { use super::*; - use rand::{thread_rng, Rng}; + use rand::Rng; + use reth_testing_utils::generators; #[test] fn test_block_range_iter() { - for _ in 0..100 { - let mut rng = thread_rng(); - let start = rng.gen::() as u64; - let end = start.saturating_add(rng.gen::() as u64); - let step = rng.gen::() as u64; - let range = start..=end; - let mut iter = BlockRangeInclusiveIter::new(range.clone(), step); - let (from, mut end) = iter.next().unwrap(); - assert_eq!(from, start); - assert_eq!(end, (from + step).min(*range.end())); - - for (next_from, next_end) in iter { - // ensure range starts with previous end + 1 - assert_eq!(next_from, end + 1); - end = next_end; - } - - assert_eq!(end, *range.end()); + let mut rng = generators::rng(); + + let start = rng.gen::() as u64; + let end = start.saturating_add(rng.gen::() as u64); + let step = rng.gen::() as u64; + let range = start..=end; + let mut iter = BlockRangeInclusiveIter::new(range.clone(), step); + let (from, mut end) = iter.next().unwrap(); + assert_eq!(from, start); + assert_eq!(end, (from + step).min(*range.end())); + + for (next_from, next_end) in iter { + // ensure range starts with previous end + 1 + assert_eq!(next_from, end + 1); + end = next_end; } + + assert_eq!(end, *range.end()); } } diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 4935cf6248cb..b2ff30b88f24 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -51,9 +51,8 @@ where index: idx as u64, block_hash, block_number, - base_fee: base_fee.map(|base_fee| base_fee as u64), - excess_blob_gas: excess_blob_gas - .map(|excess_blob_gas| excess_blob_gas as u64), + base_fee, + excess_blob_gas, timestamp, }; diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index 38048b14ccd4..b5109d09017a 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; use crate::EthApi; use 
alloy_consensus::TxEnvelope; use alloy_dyn_abi::TypedData; +use alloy_eips::eip2718::Decodable2718; use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; use alloy_primitives::{eip191_hash_message, Address, B256}; use alloy_rpc_types_eth::TransactionRequest; @@ -95,7 +96,7 @@ impl EthSigner for DevSigner { // decode transaction into signed transaction type let encoded = txn_envelope.encoded_2718(); - let txn_signed = TransactionSigned::decode_enveloped(&mut encoded.as_ref()) + let txn_signed = TransactionSigned::decode_2718(&mut encoded.as_ref()) .map_err(|_| SignError::InvalidTransactionRequest)?; Ok(txn_signed) @@ -241,7 +242,7 @@ mod tests { chain_id: Some(1u64), from: Some(from), to: Some(TxKind::Create), - gas: Some(1000u128), + gas: Some(1000), gas_price: Some(1000u128), value: Some(U256::from(1000)), input: TransactionInput { diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 8cd659bb81d2..d2b9c268e240 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -1,10 +1,10 @@ //! L1 `eth` API types. use alloy_network::{AnyNetwork, Network}; -use alloy_primitives::TxKind; +use alloy_primitives::{Address, TxKind}; use alloy_rpc_types::{Transaction, TransactionInfo}; use alloy_serde::WithOtherFields; -use reth_primitives::{Address, TransactionSignedEcRecovered}; +use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_types_compat::{ transaction::{from_primitive_signature, GasPrice}, TransactionCompat, @@ -58,7 +58,7 @@ where max_fee_per_gas, max_priority_fee_per_gas: signed_tx.max_priority_fee_per_gas(), signature: Some(signature), - gas: signed_tx.gas_limit() as u128, + gas: signed_tx.gas_limit(), input: signed_tx.input().clone(), chain_id, access_list, diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 83db119f8804..99919110da7b 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -5,6 +5,7 @@ pub mod core; pub mod filter; pub mod helpers; pub mod pubsub; +pub mod sim_bundle; /// Implementation of `eth` namespace API. pub use bundle::EthBundle; diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 17c4f7b74b04..7bd1fd03d3b9 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -16,7 +16,6 @@ use jsonrpsee::{ server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink, }; use reth_network_api::NetworkInfo; -use reth_primitives::IntoRecoveredTransaction; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, FullEthApiTypes, RpcTransaction}; use reth_rpc_eth_types::logs_utils; diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs new file mode 100644 index 000000000000..46dbb45d962e --- /dev/null +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -0,0 +1,76 @@ +//! `Eth` Sim bundle implementation and helpers. + +use std::sync::Arc; + +use alloy_rpc_types_mev::{SendBundleRequest, SimBundleOverrides, SimBundleResponse}; +use jsonrpsee::core::RpcResult; +use reth_rpc_api::MevSimApiServer; +use reth_rpc_eth_api::helpers::{Call, EthTransactions, LoadPendingBlock}; +use reth_rpc_eth_types::EthApiError; +use reth_tasks::pool::BlockingTaskGuard; +use tracing::info; + +/// `Eth` sim bundle implementation. +pub struct EthSimBundle { + /// All nested fields bundled together. 
+ inner: Arc>, +} + +impl EthSimBundle { + /// Create a new `EthSimBundle` instance. + pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { + Self { inner: Arc::new(EthSimBundleInner { eth_api, blocking_task_guard }) } + } +} + +impl EthSimBundle +where + Eth: EthTransactions + LoadPendingBlock + Call + 'static, +{ + /// Simulates a bundle of transactions. + pub async fn sim_bundle( + &self, + request: SendBundleRequest, + overrides: SimBundleOverrides, + ) -> RpcResult { + info!("mev_simBundle called, request: {:?}, overrides: {:?}", request, overrides); + Err(EthApiError::Unsupported("mev_simBundle is not supported").into()) + } +} + +#[async_trait::async_trait] +impl MevSimApiServer for EthSimBundle +where + Eth: EthTransactions + LoadPendingBlock + Call + 'static, +{ + async fn sim_bundle( + &self, + request: SendBundleRequest, + overrides: SimBundleOverrides, + ) -> RpcResult { + Self::sim_bundle(self, request, overrides).await + } +} + +/// Container type for `EthSimBundle` internals +#[derive(Debug)] +struct EthSimBundleInner { + /// Access to commonly used code of the `eth` namespace + #[allow(dead_code)] + eth_api: Eth, + // restrict the number of concurrent tracing calls. + #[allow(dead_code)] + blocking_task_guard: BlockingTaskGuard, +} + +impl std::fmt::Debug for EthSimBundle { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("EthSimBundle").finish_non_exhaustive() + } +} + +impl Clone for EthSimBundle { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 0d4c3de90d92..35972d9136b9 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,3 +1,4 @@ +use alloy_consensus::Transaction; use alloy_network::{ReceiptResponse, TransactionResponse}; use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use alloy_rpc_types::{BlockTransactions, Header, TransactionReceipt}; diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index 3e8e842aeda3..6d5897df1315 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -71,7 +71,7 @@ where let state = self.provider().state_by_block_id(block_id)?; let accounts_before = self.provider().account_block_changeset(block_number)?; let hash_map = accounts_before.iter().try_fold( - HashMap::new(), + HashMap::default(), |mut hash_map, account_before| -> RethResult<_> { let current_balance = state.account_balance(account_before.address)?; let prev_balance = account_before.info.map(|info| info.balance); diff --git a/crates/rpc/rpc/src/rpc.rs b/crates/rpc/rpc/src/rpc.rs index f0adc7b1b46c..f097bd9e1336 100644 --- a/crates/rpc/rpc/src/rpc.rs +++ b/crates/rpc/rpc/src/rpc.rs @@ -1,7 +1,8 @@ +use alloy_primitives::map::HashMap; use alloy_rpc_types::RpcModules; use jsonrpsee::core::RpcResult; use reth_rpc_api::RpcApiServer; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; /// `rpc` API implementation. 
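The new `mev_simBundle` handler above is a stub for now; every call answers with `EthApiError::Unsupported("mev_simBundle is not supported")`. A hedged wiring sketch (the import path and the concurrency limit of 8 are illustrative assumptions; the trait bounds mirror the impl in this diff):

```rust
use reth_rpc::eth::sim_bundle::EthSimBundle; // assumed re-export path
use reth_rpc_eth_api::helpers::{Call, EthTransactions, LoadPendingBlock};
use reth_tasks::pool::BlockingTaskGuard;

/// Build the `mev_` namespace handler the same way `EthBundle` is built
/// elsewhere in this crate: the guard bounds concurrent simulation work.
fn sim_bundle_handler<Eth>(eth_api: Eth) -> EthSimBundle<Eth>
where
    Eth: EthTransactions + LoadPendingBlock + Call + 'static,
{
    EthSimBundle::new(eth_api, BlockingTaskGuard::new(8))
}
```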
/// diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 55f40fe72c51..185383d811b7 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,6 +1,6 @@ -use std::{collections::HashSet, sync::Arc}; +use std::sync::Arc; -use alloy_primitives::{Bytes, B256, U256}; +use alloy_primitives::{map::HashSet, Bytes, B256, U256}; use alloy_rpc_types::{ state::{EvmOverrides, StateOverride}, BlockOverrides, Index, diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs index 1941375d21e2..68e1d00fdae0 100644 --- a/crates/stages/api/src/error.rs +++ b/crates/stages/api/src/error.rs @@ -125,6 +125,9 @@ pub enum StageError { /// The prune checkpoint for the given segment is missing. #[error("missing prune checkpoint for {0}")] MissingPruneCheckpoint(PruneSegment), + /// Post Execute Commit error + #[error("post execute commit error occurred: {_0}")] + PostExecuteCommit(&'static str), /// Internal error #[error(transparent)] Internal(#[from] RethError), @@ -133,12 +136,12 @@ pub enum StageError { /// These types of errors are caught by the [Pipeline][crate::Pipeline] and trigger a restart /// of the stage. #[error(transparent)] - Recoverable(Box), + Recoverable(Box), /// The stage encountered a fatal error. /// /// These types of errors stop the pipeline. #[error(transparent)] - Fatal(Box), + Fatal(Box), } impl StageError { diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 928c43fb62f2..19b68b384853 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -463,6 +463,8 @@ impl Pipeline { self.provider_factory.static_file_provider(), )?; + stage.post_execute_commit()?; + if done { let block_number = checkpoint.block_number; return Ok(if made_progress { @@ -586,6 +588,8 @@ impl std::fmt::Debug for Pipeline { #[cfg(test)] mod tests { + use std::sync::atomic::Ordering; + use super::*; use crate::{test_utils::TestStage, UnwindOutput}; use assert_matches::assert_matches; @@ -628,15 +632,19 @@ mod tests { async fn run_pipeline() { let provider_factory = create_test_provider_factory(); + let stage_a = TestStage::new(StageId::Other("A")) + .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(20), done: true })); + let (stage_a, post_execute_commit_counter_a) = stage_a.with_post_execute_commit_counter(); + let (stage_a, post_unwind_commit_counter_a) = stage_a.with_post_unwind_commit_counter(); + + let stage_b = TestStage::new(StageId::Other("B")) + .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })); + let (stage_b, post_execute_commit_counter_b) = stage_b.with_post_execute_commit_counter(); + let (stage_b, post_unwind_commit_counter_b) = stage_b.with_post_unwind_commit_counter(); + let mut pipeline = Pipeline::::builder() - .add_stage( - TestStage::new(StageId::Other("A")) - .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(20), done: true })), - ) - .add_stage( - TestStage::new(StageId::Other("B")) - .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })), - ) + .add_stage(stage_a) + .add_stage(stage_b) .with_max_block(10) .build( provider_factory.clone(), @@ -689,6 +697,12 @@ mod tests { }, ] ); + + assert_eq!(post_execute_commit_counter_a.load(Ordering::Relaxed), 1); + assert_eq!(post_unwind_commit_counter_a.load(Ordering::Relaxed), 0); + + assert_eq!(post_execute_commit_counter_b.load(Ordering::Relaxed), 1); + assert_eq!(post_unwind_commit_counter_b.load(Ordering::Relaxed), 0); } /// Unwinds a 
simple pipeline. @@ -696,22 +710,28 @@ mod tests { async fn unwind_pipeline() { let provider_factory = create_test_provider_factory(); + let stage_a = TestStage::new(StageId::Other("A")) + .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(100), done: true })) + .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(1) })); + let (stage_a, post_execute_commit_counter_a) = stage_a.with_post_execute_commit_counter(); + let (stage_a, post_unwind_commit_counter_a) = stage_a.with_post_unwind_commit_counter(); + + let stage_b = TestStage::new(StageId::Other("B")) + .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })) + .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(1) })); + let (stage_b, post_execute_commit_counter_b) = stage_b.with_post_execute_commit_counter(); + let (stage_b, post_unwind_commit_counter_b) = stage_b.with_post_unwind_commit_counter(); + + let stage_c = TestStage::new(StageId::Other("C")) + .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(20), done: true })) + .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(1) })); + let (stage_c, post_execute_commit_counter_c) = stage_c.with_post_execute_commit_counter(); + let (stage_c, post_unwind_commit_counter_c) = stage_c.with_post_unwind_commit_counter(); + let mut pipeline = Pipeline::::builder() - .add_stage( - TestStage::new(StageId::Other("A")) - .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(100), done: true })) - .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(1) })), - ) - .add_stage( - TestStage::new(StageId::Other("B")) - .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })) - .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(1) })), - ) - .add_stage( - TestStage::new(StageId::Other("C")) - .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(20), done: true })) - .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(1) })), - ) + .add_stage(stage_a) + .add_stage(stage_b) + .add_stage(stage_c) .with_max_block(10) .build( provider_factory.clone(), @@ -823,6 +843,15 @@ mod tests { }, ] ); + + assert_eq!(post_execute_commit_counter_a.load(Ordering::Relaxed), 1); + assert_eq!(post_unwind_commit_counter_a.load(Ordering::Relaxed), 1); + + assert_eq!(post_execute_commit_counter_b.load(Ordering::Relaxed), 1); + assert_eq!(post_unwind_commit_counter_b.load(Ordering::Relaxed), 1); + + assert_eq!(post_execute_commit_counter_c.load(Ordering::Relaxed), 1); + assert_eq!(post_unwind_commit_counter_c.load(Ordering::Relaxed), 1); } /// Unwinds a pipeline with intermediate progress. diff --git a/crates/stages/api/src/pipeline/set.rs b/crates/stages/api/src/pipeline/set.rs index d3a9b17893d7..c8fbf4c71d8e 100644 --- a/crates/stages/api/src/pipeline/set.rs +++ b/crates/stages/api/src/pipeline/set.rs @@ -51,7 +51,7 @@ pub struct StageSetBuilder { impl Default for StageSetBuilder { fn default() -> Self { - Self { stages: HashMap::new(), order: Vec::new() } + Self { stages: HashMap::default(), order: Vec::new() } } } diff --git a/crates/stages/api/src/stage.rs b/crates/stages/api/src/stage.rs index 1e201aee6635..368269782a29 100644 --- a/crates/stages/api/src/stage.rs +++ b/crates/stages/api/src/stage.rs @@ -199,8 +199,8 @@ pub trait Stage: Send + Sync { /// Returns `Poll::Ready(Ok(()))` when the stage is ready to execute the given range. /// /// This method is heavily inspired by [tower](https://crates.io/crates/tower)'s `Service` trait. 
- /// Any asynchronous tasks or communication should be handled in `poll_ready`, e.g. moving - /// downloaded items from downloaders to an internal buffer in the stage. + /// Any asynchronous tasks or communication should be handled in `poll_execute_ready`, e.g. + /// moving downloaded items from downloaders to an internal buffer in the stage. /// /// If the stage has any pending external state, then `Poll::Pending` is returned. /// @@ -208,18 +208,18 @@ pub trait Stage: Send + Sync { /// depending on the specific error. In that case, an unwind must be issued instead. /// /// Once `Poll::Ready(Ok(()))` is returned, the stage may be executed once using `execute`. - /// Until the stage has been executed, repeated calls to `poll_ready` must return either + /// Until the stage has been executed, repeated calls to `poll_execute_ready` must return either /// `Poll::Ready(Ok(()))` or `Poll::Ready(Err(_))`. /// - /// Note that `poll_ready` may reserve shared resources that are consumed in a subsequent call - /// of `execute`, e.g. internal buffers. It is crucial for implementations to not assume that - /// `execute` will always be invoked and to ensure that those resources are appropriately - /// released if the stage is dropped before `execute` is called. + /// Note that `poll_execute_ready` may reserve shared resources that are consumed in a + /// subsequent call of `execute`, e.g. internal buffers. It is crucial for implementations + /// to not assume that `execute` will always be invoked and to ensure that those resources + /// are appropriately released if the stage is dropped before `execute` is called. /// /// For the same reason, it is also important that any shared resources do not exhibit - /// unbounded growth on repeated calls to `poll_ready`. + /// unbounded growth on repeated calls to `poll_execute_ready`. /// - /// Unwinds may happen without consulting `poll_ready` first. + /// Unwinds may happen without consulting `poll_execute_ready` first. fn poll_execute_ready( &mut self, _cx: &mut Context<'_>, diff --git a/crates/stages/api/src/test_utils.rs b/crates/stages/api/src/test_utils.rs index 3cd2f4bc4096..1f15e55140ed 100644 --- a/crates/stages/api/src/test_utils.rs +++ b/crates/stages/api/src/test_utils.rs @@ -1,7 +1,13 @@ #![allow(missing_docs)] use crate::{ExecInput, ExecOutput, Stage, StageError, StageId, UnwindInput, UnwindOutput}; -use std::collections::VecDeque; +use std::{ + collections::VecDeque, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; /// A test stage that can be used for testing. 
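The counter handles below are what the pipeline tests above assert on; in the same spirit, the execution stage now fails hard with `StageError::PostExecuteCommit` instead of merely logging when a previous commit input was left unprocessed. A condensed usage sketch, assuming the crate's `test-utils` exports (pipeline setup elided):

```rust
use std::sync::atomic::Ordering;
use reth_stages_api::{test_utils::TestStage, ExecOutput, StageCheckpoint, StageId};

fn commit_hook_counters() {
    let stage = TestStage::new(StageId::Other("A"))
        .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(20), done: true }));
    // Grab counter handles before the stage is moved into the pipeline.
    let (stage, exec_commits) = stage.with_post_execute_commit_counter();
    let (stage, unwind_commits) = stage.with_post_unwind_commit_counter();

    // ... add `stage` to a pipeline and run it to completion ...
    let _ = stage;

    // One committed execution, no unwinds: each hook fired accordingly.
    assert_eq!(exec_commits.load(Ordering::Relaxed), 1);
    assert_eq!(unwind_commits.load(Ordering::Relaxed), 0);
}
```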
/// @@ -11,11 +17,19 @@ pub struct TestStage { id: StageId, exec_outputs: VecDeque>, unwind_outputs: VecDeque>, + post_execute_commit_counter: Arc, + post_unwind_commit_counter: Arc, } impl TestStage { - pub const fn new(id: StageId) -> Self { - Self { id, exec_outputs: VecDeque::new(), unwind_outputs: VecDeque::new() } + pub fn new(id: StageId) -> Self { + Self { + id, + exec_outputs: VecDeque::new(), + unwind_outputs: VecDeque::new(), + post_execute_commit_counter: Arc::new(AtomicUsize::new(0)), + post_unwind_commit_counter: Arc::new(AtomicUsize::new(0)), + } } pub fn with_exec(mut self, exec_outputs: VecDeque>) -> Self { @@ -40,6 +54,18 @@ impl TestStage { self.unwind_outputs.push_back(output); self } + + pub fn with_post_execute_commit_counter(mut self) -> (Self, Arc) { + let counter = Arc::new(AtomicUsize::new(0)); + self.post_execute_commit_counter = counter.clone(); + (self, counter) + } + + pub fn with_post_unwind_commit_counter(mut self) -> (Self, Arc) { + let counter = Arc::new(AtomicUsize::new(0)); + self.post_unwind_commit_counter = counter.clone(); + (self, counter) + } } impl Stage for TestStage { @@ -53,9 +79,21 @@ impl Stage for TestStage { .unwrap_or_else(|| panic!("Test stage {} executed too many times.", self.id)) } + fn post_execute_commit(&mut self) -> Result<(), StageError> { + self.post_execute_commit_counter.fetch_add(1, Ordering::Relaxed); + + Ok(()) + } + fn unwind(&mut self, _: &Provider, _input: UnwindInput) -> Result { self.unwind_outputs .pop_front() .unwrap_or_else(|| panic!("Test stage {} unwound too many times.", self.id)) } + + fn post_unwind_commit(&mut self) -> Result<(), StageError> { + self.post_unwind_commit_counter.fetch_add(1, Ordering::Relaxed); + + Ok(()) + } } diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 934e9b109053..3d4227d8a274 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -24,7 +24,7 @@ reth-evm.workspace = true reth-exex.workspace = true reth-network-p2p.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde-bincode-compat"] } reth-provider.workspace = true reth-execution-types.workspace = true reth-prune.workspace = true @@ -37,6 +37,8 @@ reth-trie-db = { workspace = true, features = ["metrics"] } reth-testing-utils = { workspace = true, optional = true } +alloy-primitives.workspace = true + # async tokio = { workspace = true, features = ["sync"] } futures-util.workspace = true @@ -50,6 +52,7 @@ itertools.workspace = true rayon.workspace = true num-traits = "0.2.15" tempfile = { workspace = true, optional = true } +bincode.workspace = true [dev-dependencies] # reth diff --git a/crates/stages/stages/benches/criterion.rs b/crates/stages/stages/benches/criterion.rs index 3957ceac4d70..7519d81a3622 100644 --- a/crates/stages/stages/benches/criterion.rs +++ b/crates/stages/stages/benches/criterion.rs @@ -6,7 +6,7 @@ use reth_chainspec::ChainSpec; use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; -use reth_primitives::BlockNumber; +use alloy_primitives::BlockNumber; use reth_provider::{DatabaseProvider, DatabaseProviderFactory}; use reth_stages::{ stages::{MerkleStage, SenderRecoveryStage, TransactionLookupStage}, diff --git a/crates/stages/stages/benches/setup/account_hashing.rs b/crates/stages/stages/benches/setup/account_hashing.rs index 
86831418e53f..9926c1f3d378 100644 --- a/crates/stages/stages/benches/setup/account_hashing.rs +++ b/crates/stages/stages/benches/setup/account_hashing.rs @@ -1,11 +1,11 @@ #![allow(unreachable_pub)] use super::constants; +use alloy_primitives::BlockNumber; use reth_db::tables; use reth_db_api::{ cursor::DbCursorRO, database::Database, transaction::DbTx, DatabaseError as DbError, }; -use reth_primitives::BlockNumber; use reth_stages::{ stages::{AccountHashingStage, SeedOpts}, test_utils::TestStageDB, diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index 41570c057f8e..4812fb13c39a 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -1,4 +1,5 @@ #![allow(unreachable_pub)] +use alloy_primitives::{Address, Sealable, B256, U256}; use itertools::concat; use reth_chainspec::ChainSpec; use reth_db::{tables, test_utils::TempDatabase, Database, DatabaseEnv}; @@ -6,9 +7,7 @@ use reth_db_api::{ cursor::DbCursorRO, transaction::{DbTx, DbTxMut}, }; -use reth_primitives::{ - alloy_primitives::Sealable, Account, Address, SealedBlock, SealedHeader, B256, U256, -}; +use reth_primitives::{Account, SealedBlock, SealedHeader}; use reth_provider::{DatabaseProvider, DatabaseProviderFactory, TrieWriter}; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs index e33b6e7c0202..38a0f209dbdd 100644 --- a/crates/stages/stages/src/lib.rs +++ b/crates/stages/stages/src/lib.rs @@ -17,7 +17,7 @@ //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; //! # use reth_network_p2p::test_utils::{TestBodiesClient, TestHeadersClient}; //! # use reth_evm_ethereum::execute::EthExecutorProvider; -//! # use reth_primitives::B256; +//! # use alloy_primitives::B256; //! # use reth_chainspec::MAINNET; //! # use reth_prune_types::PruneModes; //! 
# use reth_network_peers::PeerId; diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 5a527fbc42c2..a25fcd4e1e57 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -41,11 +41,11 @@ use crate::{ }, StageSet, StageSetBuilder, }; +use alloy_primitives::B256; use reth_config::config::StageConfig; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}; -use reth_primitives::B256; use reth_provider::HeaderSyncGapProvider; use reth_prune_types::PruneModes; use reth_stages_api::Stage; diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 7f6d2d6a066f..2d441dee292a 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -6,6 +6,7 @@ use std::{ use futures_util::TryStreamExt; use tracing::*; +use alloy_primitives::TxNumber; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, @@ -13,7 +14,7 @@ use reth_db_api::{ transaction::DbTxMut, }; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; -use reth_primitives::{StaticFileSegment, TxNumber}; +use reth_primitives::StaticFileSegment; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockReader, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, @@ -621,6 +622,7 @@ mod tests { UnwindStageTestRunner, }, }; + use alloy_primitives::{BlockHash, BlockNumber, TxNumber, B256}; use futures_util::Stream; use reth_db::{static_file::HeaderMask, tables}; use reth_db_api::{ @@ -635,10 +637,7 @@ mod tests { }, error::DownloadResult, }; - use reth_primitives::{ - BlockBody, BlockHash, BlockNumber, Header, SealedBlock, SealedHeader, - StaticFileSegment, TxNumber, B256, - }; + use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader, StaticFileSegment}; use reth_provider::{ providers::StaticFileWriter, test_utils::MockNodeTypesWithDB, HeaderProvider, ProviderFactory, StaticFileProviderFactory, TransactionsProvider, diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 57a9cdaab7ef..a99d8a572e5b 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,4 +1,5 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; +use alloy_primitives::{BlockNumber, Sealable}; use num_traits::Zero; use reth_config::config::ExecutionConfig; use reth_db::{static_file::HeaderMask, tables}; @@ -9,9 +10,7 @@ use reth_evm::{ }; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_exex::{ExExManagerHandle, ExExNotification}; -use reth_primitives::{ - alloy_primitives::Sealable, BlockNumber, Header, SealedHeader, StaticFileSegment, -}; +use reth_primitives::{Header, SealedHeader, StaticFileSegment}; use reth_primitives_traits::format_gas_throughput; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, @@ -294,7 +293,7 @@ where target: "sync::stages::execution", start = last_block, end = block_number, - throughput = format_gas_throughput((cumulative_gas - last_cumulative_gas) as u64, execution_duration - last_execution_duration), + throughput = format_gas_throughput(cumulative_gas - last_cumulative_gas, execution_duration - last_execution_duration), "Executed block range" ); @@ -305,7 +304,7 @@ where } stage_progress = block_number; - 
stage_checkpoint.progress.processed += block.gas_used as u64; + stage_checkpoint.progress.processed += block.gas_used; // If we have ExExes we need to save the block in memory for later if self.exex_manager_handle.has_exexs() { @@ -317,7 +316,7 @@ where if self.thresholds.is_end_of_batch( block_number - start_block, bundle_size_hint, - cumulative_gas as u64, + cumulative_gas, batch_start.elapsed(), ) { break @@ -335,7 +334,7 @@ where target: "sync::stages::execution", start = start_block, end = stage_progress, - throughput = format_gas_throughput(cumulative_gas as u64, execution_duration), + throughput = format_gas_throughput(cumulative_gas, execution_duration), "Finished executing block range" ); @@ -351,12 +350,13 @@ where let previous_input = self.post_execute_commit_input.replace(Chain::new(blocks, state.clone(), None)); - debug_assert!( - previous_input.is_none(), - "Previous post execute commit input wasn't processed" - ); - if let Some(previous_input) = previous_input { - tracing::debug!(target: "sync::stages::execution", ?previous_input, "Previous post execute commit input wasn't processed"); + + if previous_input.is_some() { + // Not processing the previous post execute commit input is a critical error, as it + // means that we didn't send the notification to ExExes + return Err(StageError::PostExecuteCommit( + "Previous post execute commit input wasn't processed", + )) } } @@ -460,7 +460,7 @@ where stage_checkpoint.progress.processed -= provider .block_by_number(block_number)? .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))? - .gas_used as u64; + .gas_used; } } let checkpoint = if let Some(stage_checkpoint) = stage_checkpoint { @@ -573,7 +573,7 @@ fn calculate_gas_used_from_headers( let duration = start.elapsed(); debug!(target: "sync::stages::execution", ?range, ?duration, "Finished calculating gas used from headers"); - Ok(gas_total as u64) + Ok(gas_total) } /// Returns a `StaticFileProviderRWRefMut` static file producer after performing a consistency @@ -659,16 +659,14 @@ where mod tests { use super::*; use crate::test_utils::TestStageDB; + use alloy_primitives::{address, hex_literal::hex, keccak256, Address, B256, U256}; use alloy_rlp::Decodable; use assert_matches::assert_matches; use reth_chainspec::ChainSpecBuilder; use reth_db_api::{models::AccountBeforeTx, transaction::DbTxMut}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_execution_errors::BlockValidationError; - use reth_primitives::{ - address, hex_literal::hex, keccak256, Account, Address, Bytecode, SealedBlock, - StorageEntry, B256, U256, - }; + use reth_primitives::{Account, Bytecode, SealedBlock, StorageEntry}; use reth_provider::{ test_utils::create_test_provider_factory, AccountReader, DatabaseProviderFactory, ReceiptProvider, StaticFileProviderFactory, @@ -763,7 +761,7 @@ mod tests { total } }) if processed == previous_stage_checkpoint.progress.processed && - total == previous_stage_checkpoint.progress.total + block.gas_used as u64); + total == previous_stage_checkpoint.progress.total + block.gas_used); } #[test] @@ -804,7 +802,7 @@ mod tests { total } }) if processed == previous_stage_checkpoint.progress.processed && - total == previous_stage_checkpoint.progress.total + block.gas_used as u64); + total == previous_stage_checkpoint.progress.total + block.gas_used); } #[test] @@ -837,7 +835,7 @@ mod tests { processed: 0, total } - }) if total == block.gas_used as u64); + }) if total == block.gas_used); } #[tokio::test] @@ -928,7 +926,7 @@ mod tests { })) }, done: true - } if 
processed == total && total == block.gas_used as u64); + } if processed == total && total == block.gas_used); let provider = factory.provider().unwrap(); @@ -1080,7 +1078,7 @@ mod tests { } })) } - } if total == block.gas_used as u64); + } if total == block.gas_used); // assert unwind stage assert_eq!( diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 2e109f7557f1..14afb37d81db 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -1,3 +1,4 @@ +use alloy_primitives::{keccak256, B256}; use itertools::Itertools; use reth_config::config::{EtlConfig, HashingConfig}; use reth_db::{tables, RawKey, RawTable, RawValue}; @@ -6,7 +7,7 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; -use reth_primitives::{keccak256, Account, B256}; +use reth_primitives::Account; use reth_provider::{AccountExtReader, DBProvider, HashingWriter, StatsReader}; use reth_stages_api::{ AccountHashingCheckpoint, EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, @@ -63,9 +64,9 @@ impl AccountHashingStage { >( provider: &reth_provider::DatabaseProvider, opts: SeedOpts, - ) -> Result, StageError> { + ) -> Result, StageError> { + use alloy_primitives::U256; use reth_db_api::models::AccountBeforeTx; - use reth_primitives::U256; use reth_provider::{StaticFileProviderFactory, StaticFileWriter}; use reth_testing_utils::{ generators, @@ -297,8 +298,9 @@ mod tests { stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, UnwindStageTestRunner, }; + use alloy_primitives::U256; use assert_matches::assert_matches; - use reth_primitives::{Account, U256}; + use reth_primitives::Account; use reth_provider::providers::StaticFileWriter; use reth_stages_api::StageUnitCheckpoint; use test_utils::*; @@ -348,7 +350,7 @@ mod tests { mod test_utils { use super::*; use crate::test_utils::TestStageDB; - use reth_primitives::Address; + use alloy_primitives::Address; use reth_provider::DatabaseProviderFactory; pub(crate) struct AccountHashingTestRunner { diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index 54a9921dd521..ef070d30c6d6 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -1,3 +1,4 @@ +use alloy_primitives::{bytes::BufMut, keccak256, B256}; use itertools::Itertools; use reth_config::config::{EtlConfig, HashingConfig}; use reth_db::tables; @@ -8,7 +9,7 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; -use reth_primitives::{keccak256, BufMut, StorageEntry, B256}; +use reth_primitives::StorageEntry; use reth_provider::{DBProvider, HashingWriter, StatsReader, StorageReader}; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, @@ -133,7 +134,7 @@ where B256::from_slice(&addr_key[..32]), StorageEntry { key: B256::from_slice(&addr_key[32..]), - value: CompactU256::decompress(value)?.into(), + value: CompactU256::decompress_owned(value)?.into(), }, )?; } @@ -211,13 +212,14 @@ mod tests { stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, TestStageDB, UnwindStageTestRunner, }; + use alloy_primitives::{Address, U256}; use assert_matches::assert_matches; use rand::Rng; use reth_db_api::{ cursor::{DbCursorRW, DbDupCursorRO}, models::StoredBlockBodyIndices, }; - use 
reth_primitives::{Address, SealedBlock, U256}; + use reth_primitives::SealedBlock; use reth_provider::providers::StaticFileWriter; use reth_testing_utils::generators::{ self, random_block_range, random_contract_account_range, BlockRangeParams, diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index f2ff1c666e26..199e015c2dce 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -1,5 +1,5 @@ +use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; -use reth_codecs::Compact; use reth_config::config::EtlConfig; use reth_consensus::Consensus; use reth_db::{tables, RawKey, RawTable, RawValue}; @@ -10,7 +10,8 @@ use reth_db_api::{ }; use reth_etl::Collector; use reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}; -use reth_primitives::{BlockHash, BlockNumber, SealedHeader, StaticFileSegment, B256}; +use reth_primitives::{SealedHeader, StaticFileSegment}; +use reth_primitives_traits::serde_bincode_compat; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, @@ -53,8 +54,8 @@ pub struct HeaderStage { sync_gap: Option, /// ETL collector with `HeaderHash` -> `BlockNumber` hash_collector: Collector, - /// ETL collector with `BlockNumber` -> `SealedHeader` - header_collector: Collector, + /// ETL collector with `BlockNumber` -> `BincodeSealedHeader` + header_collector: Collector, /// Returns true if the ETL collector has all necessary headers to fill the gap. is_etl_ready: bool, } @@ -120,7 +121,11 @@ where info!(target: "sync::stages::headers", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers"); } - let (sealed_header, _) = SealedHeader::from_compact(&header_buf, header_buf.len()); + let sealed_header: SealedHeader = + bincode::deserialize::>(&header_buf) + .map_err(|err| StageError::Fatal(Box::new(err)))? + .into(); + let (header, header_hash) = sealed_header.split(); if header.number == 0 { continue @@ -239,7 +244,15 @@ where let header_number = header.number; self.hash_collector.insert(header.hash(), header_number)?; - self.header_collector.insert(header_number, header)?; + self.header_collector.insert( + header_number, + Bytes::from( + bincode::serialize(&serde_bincode_compat::SealedHeader::from( + &header, + )) + .map_err(|err| StageError::Fatal(Box::new(err)))?, + ), + )?; // Headers are downloaded in reverse, so if we reach here, we know we have // filled the gap. 
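The header stage change above swaps the ETL value encoding from `Compact` to bincode via the new `serde_bincode_compat` wrapper. A minimal round-trip sketch of that encoding, matching the serialize/deserialize pair in the hunk:

```rust
use reth_primitives::SealedHeader;
use reth_primitives_traits::serde_bincode_compat;

/// Stage a sealed header as bincode bytes of the borrow-friendly wrapper and
/// recover it, exactly as the ETL collector round-trip above does.
fn header_etl_roundtrip(header: &SealedHeader) -> SealedHeader {
    let buf = bincode::serialize(&serde_bincode_compat::SealedHeader::from(header))
        .expect("serialize sealed header");
    bincode::deserialize::<serde_bincode_compat::SealedHeader<'_>>(&buf)
        .expect("deserialize sealed header")
        .into()
}
```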
@@ -377,11 +390,10 @@ mod tests { use crate::test_utils::{ stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; + use alloy_primitives::{Sealable, B256}; use assert_matches::assert_matches; use reth_execution_types::ExecutionOutcome; - use reth_primitives::{ - alloy_primitives::Sealable, BlockBody, SealedBlock, SealedBlockWithSenders, B256, - }; + use reth_primitives::{BlockBody, SealedBlock, SealedBlockWithSenders}; use reth_provider::{BlockWriter, ProviderFactory, StaticFileProviderFactory}; use reth_stages_api::StageUnitCheckpoint; use reth_testing_utils::generators::{self, random_header, random_header_range}; diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index 8ca8d173fd85..8b10283fb4b7 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -1,8 +1,8 @@ use super::{collect_history_indices, load_history_indices}; +use alloy_primitives::Address; use reth_config::config::{EtlConfig, IndexHistoryConfig}; use reth_db::tables; use reth_db_api::{models::ShardedKey, table::Decode, transaction::DbTxMut}; -use reth_primitives::Address; use reth_provider::{DBProvider, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter}; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; use reth_stages_api::{ @@ -118,7 +118,7 @@ where collector, first_sync, ShardedKey::new, - ShardedKey::
<Address>::decode, + ShardedKey::<Address>
::decode_owned, |key| key.key, )?; @@ -148,6 +148,7 @@ mod tests { stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, TestStageDB, UnwindStageTestRunner, }; + use alloy_primitives::{address, BlockNumber, B256}; use itertools::Itertools; use reth_db::BlockNumberList; use reth_db_api::{ @@ -158,7 +159,6 @@ mod tests { }, transaction::DbTx, }; - use reth_primitives::{address, BlockNumber, B256}; use reth_provider::{providers::StaticFileWriter, DatabaseProviderFactory}; use reth_testing_utils::generators::{ self, random_block_range, random_changeset_range, random_contract_account_range, @@ -181,7 +181,7 @@ mod tests { } fn list(list: &[u64]) -> BlockNumberList { - BlockNumberList::new(list).unwrap() + BlockNumberList::new(list.iter().copied()).unwrap() } fn cast( diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index 00646da2fd22..ac645b8dd754 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -124,7 +124,7 @@ where |AddressStorageKey((address, storage_key)), highest_block_number| { StorageShardedKey::new(address, storage_key, highest_block_number) }, - StorageShardedKey::decode, + StorageShardedKey::decode_owned, |key| AddressStorageKey((key.address, key.sharded_key.key)), )?; @@ -153,6 +153,7 @@ mod tests { stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, TestStageDB, UnwindStageTestRunner, }; + use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use itertools::Itertools; use reth_db::BlockNumberList; use reth_db_api::{ @@ -163,7 +164,7 @@ mod tests { }, transaction::DbTx, }; - use reth_primitives::{address, b256, Address, BlockNumber, StorageEntry, B256, U256}; + use reth_primitives::StorageEntry; use reth_provider::{providers::StaticFileWriter, DatabaseProviderFactory}; use reth_testing_utils::generators::{ self, random_block_range, random_changeset_range, random_contract_account_range, @@ -196,7 +197,7 @@ mod tests { } fn list(list: &[u64]) -> BlockNumberList { - BlockNumberList::new(list).unwrap() + BlockNumberList::new(list.iter().copied()).unwrap() } fn cast( diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 83c8c52ce561..d1d3496d917a 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -1,8 +1,9 @@ +use alloy_primitives::{BlockNumber, Sealable, B256}; use reth_codecs::Compact; use reth_consensus::ConsensusError; use reth_db::tables; use reth_db_api::transaction::{DbTx, DbTxMut}; -use reth_primitives::{alloy_primitives::Sealable, BlockNumber, GotExpected, SealedHeader, B256}; +use reth_primitives::{GotExpected, SealedHeader}; use reth_provider::{ DBProvider, HeaderProvider, ProviderError, StageCheckpointReader, StageCheckpointWriter, StatsReader, TrieWriter, @@ -373,9 +374,10 @@ mod tests { stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, TestRunnerError, TestStageDB, UnwindStageTestRunner, }; + use alloy_primitives::{keccak256, U256}; use assert_matches::assert_matches; use reth_db_api::cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}; - use reth_primitives::{keccak256, SealedBlock, StaticFileSegment, StorageEntry, U256}; + use reth_primitives::{SealedBlock, StaticFileSegment, StorageEntry}; use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use reth_stages_api::StageUnitCheckpoint; use 
reth_testing_utils::generators::{ diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 4eb3c6f141d4..17ffcf2e90eb 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -42,6 +42,7 @@ use utils::*; mod tests { use super::*; use crate::test_utils::{StorageKind, TestStageDB}; + use alloy_primitives::{address, hex_literal::hex, keccak256, BlockNumber, B256, U256}; use alloy_rlp::Decodable; use reth_chainspec::ChainSpecBuilder; use reth_db::{ @@ -55,10 +56,7 @@ mod tests { }; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex::ExExManagerHandle; - use reth_primitives::{ - address, hex_literal::hex, keccak256, Account, BlockNumber, Bytecode, SealedBlock, - StaticFileSegment, B256, U256, - }; + use reth_primitives::{Account, Bytecode, SealedBlock, StaticFileSegment}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, test_utils::MockNodeTypesWithDB, diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 3512e6210183..8adf2fcad546 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -169,7 +169,8 @@ mod tests { stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, TestRunnerError, TestStageDB, UnwindStageTestRunner, }; - use reth_primitives::{SealedBlock, B256}; + use alloy_primitives::B256; + use reth_primitives::SealedBlock; use reth_provider::{ providers::StaticFileWriter, TransactionsProvider, TransactionsProviderExt, }; diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index bf7df6147ff8..a85b0bc60ccb 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -1,3 +1,4 @@ +use alloy_primitives::{Address, TxNumber}; use reth_config::config::SenderRecoveryConfig; use reth_consensus::ConsensusError; use reth_db::{static_file::TransactionMask, tables, RawValue}; @@ -6,7 +7,7 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, DbTxUnwindExt, }; -use reth_primitives::{Address, GotExpected, StaticFileSegment, TransactionSignedNoHash, TxNumber}; +use reth_primitives::{GotExpected, StaticFileSegment, TransactionSignedNoHash}; use reth_provider::{ BlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, StaticFileProviderFactory, StatsReader, @@ -230,9 +231,10 @@ where // fetch the sealed header so we can use it in the sender recovery // unwind - let sealed_header = provider - .sealed_header(block_number)? 
- .ok_or(ProviderError::HeaderNotFound(block_number.into()))?; + let sealed_header = + provider.sealed_header(block_number)?.ok_or_else(|| { + ProviderError::HeaderNotFound(block_number.into()) + })?; Err(StageError::Block { block: Box::new(sealed_header), error: BlockErrorKind::Validation( @@ -333,9 +335,10 @@ struct FailedSenderRecoveryError { #[cfg(test)] mod tests { + use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db_api::cursor::DbCursorRO; - use reth_primitives::{BlockNumber, SealedBlock, TransactionSigned, B256}; + use reth_primitives::{SealedBlock, TransactionSigned}; use reth_provider::{ providers::StaticFileWriter, DatabaseProviderFactory, PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index e636c281829d..60c958abf862 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -1,3 +1,4 @@ +use alloy_primitives::{TxHash, TxNumber}; use num_traits::Zero; use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db::{tables, RawKey, RawValue}; @@ -6,7 +7,6 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; -use reth_primitives::{TxHash, TxNumber}; use reth_provider::{ BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, StatsReader, TransactionsProvider, TransactionsProviderExt, @@ -109,7 +109,7 @@ where } } if input.target_reached() { - return Ok(ExecOutput::done(input.checkpoint())) + return Ok(ExecOutput::done(input.checkpoint())); } // 500MB temporary files @@ -172,7 +172,7 @@ where "Transaction hashes inserted" ); - break + break; } } @@ -199,7 +199,7 @@ where let mut rev_walker = body_cursor.walk_back(Some(*range.end()))?; while let Some((number, body)) = rev_walker.next().transpose()? { if number <= unwind_to { - break + break; } // Delete all transactions that belong to this block @@ -250,8 +250,9 @@ mod tests { stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, TestRunnerError, TestStageDB, UnwindStageTestRunner, }; + use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; - use reth_primitives::{BlockNumber, SealedBlock, B256}; + use reth_primitives::SealedBlock; use reth_provider::{ providers::StaticFileWriter, DatabaseProviderFactory, StaticFileProviderFactory, }; diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index cb9c729aa234..caf039faca10 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -1,4 +1,5 @@ //! Utils for `stages`. 
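Several hunks in this diff (filter.rs and sender_recovery.rs above) swap `ok_or` for `ok_or_else`; a two-line sketch of the difference, since `ok_or` builds its error argument eagerly even on the `Some` path:

```rust
use alloy_primitives::B256;
use reth_provider::ProviderError;

/// `ok_or_else` defers constructing `ProviderError::HeaderNotFound` to the
/// rare miss path (the usual `or_fun_call`-style cleanup).
fn require_block_hash(hash: Option<B256>, number: u64) -> Result<B256, ProviderError> {
    hash.ok_or_else(|| ProviderError::HeaderNotFound(number.into()))
}
```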
+use alloy_primitives::BlockNumber; use reth_config::config::EtlConfig; use reth_db::BlockNumberList; use reth_db_api::{ @@ -9,7 +10,6 @@ use reth_db_api::{ DatabaseError, }; use reth_etl::Collector; -use reth_primitives::BlockNumber; use reth_provider::DBProvider; use reth_stages_api::StageError; use std::{collections::HashMap, hash::Hash, ops::RangeBounds}; @@ -51,14 +51,14 @@ where let mut changeset_cursor = provider.tx_ref().cursor_read::()?; let mut collector = Collector::new(etl_config.file_size, etl_config.dir.clone()); - let mut cache: HashMap> = HashMap::new(); + let mut cache: HashMap> = HashMap::default(); let mut collect = |cache: &HashMap>| { - for (key, indice_list) in cache { - let last = indice_list.last().expect("qed"); + for (key, indices) in cache { + let last = indices.last().expect("qed"); collector.insert( sharded_key_factory(*key, *last), - BlockNumberList::new_pre_sorted(indice_list), + BlockNumberList::new_pre_sorted(indices.iter().copied()), )?; } Ok::<(), StageError>(()) diff --git a/crates/stages/stages/src/test_utils/runner.rs b/crates/stages/stages/src/test_utils/runner.rs index d2c4e68beb50..26f245c1304d 100644 --- a/crates/stages/stages/src/test_utils/runner.rs +++ b/crates/stages/stages/src/test_utils/runner.rs @@ -13,7 +13,7 @@ pub(crate) enum TestRunnerError { #[error(transparent)] Database(#[from] DatabaseError), #[error(transparent)] - Internal(#[from] Box), + Internal(#[from] Box), #[error(transparent)] Provider(#[from] ProviderError), } diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 5fef9d6a2be2..4c43d4cdcd1d 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -1,3 +1,4 @@ +use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::MAINNET; use reth_db::{ tables, @@ -14,8 +15,7 @@ use reth_db_api::{ DatabaseError as DbError, }; use reth_primitives::{ - keccak256, Account, Address, BlockNumber, Receipt, SealedBlock, SealedHeader, - StaticFileSegment, StorageEntry, TxHash, TxNumber, B256, U256, + Account, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index cb742b3f3184..94f09b64a51a 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -122,6 +122,12 @@ impl StaticFileSegment { pub const fn is_receipts(&self) -> bool { matches!(self, Self::Receipts) } + + /// Returns `true` if the segment is `StaticFileSegment::Receipts` or + /// `StaticFileSegment::Transactions`. + pub const fn is_tx_based(&self) -> bool { + matches!(self, Self::Receipts | Self::Transactions) + } } /// A segment header that contains information common to all segments. Used for storage. 
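A condensed form of the flush loop in `collect_history_indices` above: each key's touched block numbers accumulate in ascending order, so the list can be built pre-sorted straight from an iterator of `u64`s, the `iter().copied()` calling convention `BlockNumberList` migrates to in this diff. The callback stands in for the ETL collector insert:

```rust
use alloy_primitives::BlockNumber;
use reth_db::BlockNumberList;
use std::collections::HashMap;

fn flush_indices<K: Copy>(
    cache: &HashMap<K, Vec<BlockNumber>>,
    mut insert: impl FnMut(K, BlockNumber, BlockNumberList),
) {
    for (key, indices) in cache {
        // The shard key is derived from the highest block touching this key.
        let highest = *indices.last().expect("non-empty index list");
        insert(*key, highest, BlockNumberList::new_pre_sorted(indices.iter().copied()));
    }
}
```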
@@ -239,7 +245,7 @@ impl SegmentHeader { match self.segment { StaticFileSegment::Headers => { if let Some(range) = &mut self.block_range { - if num > range.end { + if num > range.end - range.start { self.block_range = None; } else { range.end = range.end.saturating_sub(num); @@ -248,7 +254,7 @@ impl SegmentHeader { } StaticFileSegment::Transactions | StaticFileSegment::Receipts => { if let Some(range) = &mut self.tx_range { - if num > range.end { + if num > range.end - range.start { self.tx_range = None; } else { range.end = range.end.saturating_sub(num); diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 7a999d35f73a..640ec8c9561f 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -27,7 +27,6 @@ op-alloy-consensus = { workspace = true, optional = true } # misc bytes.workspace = true modular-bitfield = { workspace = true, optional = true } -serde = { workspace = true, optional = true } [dev-dependencies] alloy-eips = { workspace = true, default-features = false, features = [ @@ -48,17 +47,17 @@ serde_json.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true +serde.workspace = true [features] default = ["std", "alloy"] -std = ["alloy-primitives/std", "bytes/std", "serde?/std"] +std = ["alloy-primitives/std", "bytes/std"] alloy = [ "dep:alloy-consensus", "dep:alloy-eips", "dep:alloy-genesis", "dep:modular-bitfield", "dep:alloy-trie", - "dep:serde" ] optimism = ["alloy", "dep:op-alloy-consensus"] test-utils = [] diff --git a/crates/storage/codecs/src/alloy/authorization_list.rs b/crates/storage/codecs/src/alloy/authorization_list.rs index 2c1495abf716..3efe13590622 100644 --- a/crates/storage/codecs/src/alloy/authorization_list.rs +++ b/crates/storage/codecs/src/alloy/authorization_list.rs @@ -5,13 +5,12 @@ use alloy_eips::eip7702::{Authorization as AlloyAuthorization, SignedAuthorizati use alloy_primitives::{Address, U256}; use bytes::Buf; use reth_codecs_derive::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; /// Authorization acts as bridge which simplifies Compact implementation for AlloyAuthorization. /// /// Notice: Make sure this struct is 1:1 with `alloy_eips::eip7702::Authorization` -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub(crate) struct Authorization { chain_id: U256, diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs index a94f4e2ef906..938ad1375b15 100644 --- a/crates/storage/codecs/src/alloy/genesis_account.rs +++ b/crates/storage/codecs/src/alloy/genesis_account.rs @@ -3,7 +3,6 @@ use alloc::vec::Vec; use alloy_genesis::GenesisAccount as AlloyGenesisAccount; use alloy_primitives::{Bytes, B256, U256}; use reth_codecs_derive::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; /// `GenesisAccount` acts as bridge which simplifies Compact implementation for /// `AlloyGenesisAccount`. 
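Back in the `crates/static-file/types/src/segment.rs` hunk above, the `SegmentHeader` row-removal logic now compares the number of rows to remove against the range's length (`range.end - range.start`) rather than its absolute end offset; the two checks only differ for segments that do not start at zero. A simplified model of the fix, assuming an inclusive start/end pair; `SegmentRange` and `prune` are hypothetical stand-ins for the real types:

#[derive(Debug, PartialEq)]
struct SegmentRange {
    start: u64,
    end: u64,
}

fn prune(slot: &mut Option<SegmentRange>, num: u64) {
    if let Some(range) = slot {
        // With the old `num > range.end` check, removing more rows than the segment
        // holds (but fewer than its absolute end offset) would drag `end` below
        // `start` instead of clearing the range.
        if num > range.end - range.start {
            *slot = None;
        } else {
            range.end = range.end.saturating_sub(num);
        }
    }
}

fn main() {
    // A segment covering rows 100..=120: removing 25 rows must drop the whole range.
    let mut slot = Some(SegmentRange { start: 100, end: 120 });
    prune(&mut slot, 25);
    assert_eq!(slot, None); // the old check would have produced end = 95 < start
}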
@@ -23,8 +22,8 @@ pub(crate) struct GenesisAccountRef<'a> { private_key: Option<&'a B256>, } -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub(crate) struct GenesisAccount { /// The nonce of the account at genesis. @@ -39,15 +38,15 @@ pub(crate) struct GenesisAccount { private_key: Option, } -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub(crate) struct StorageEntries { entries: Vec, } -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub(crate) struct StorageEntry { key: B256, diff --git a/crates/storage/codecs/src/alloy/header.rs b/crates/storage/codecs/src/alloy/header.rs index a72021fdcc1b..526bc69b1369 100644 --- a/crates/storage/codecs/src/alloy/header.rs +++ b/crates/storage/codecs/src/alloy/header.rs @@ -1,7 +1,6 @@ use crate::Compact; use alloy_consensus::Header as AlloyHeader; use alloy_primitives::{Address, BlockNumber, Bloom, Bytes, B256, U256}; -use serde::{Deserialize, Serialize}; /// Block header /// @@ -11,7 +10,8 @@ use serde::{Deserialize, Serialize}; /// will automatically apply to this type. /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::Header`] -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize, Compact)] +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] pub(crate) struct Header { parent_hash: B256, ommers_hash: B256, @@ -32,15 +32,42 @@ pub(crate) struct Header { blob_gas_used: Option, excess_blob_gas: Option, parent_beacon_block_root: Option, - requests_root: Option, + extra_fields: Option, extra_data: Bytes, } +/// [`Header`] extension struct. +/// +/// All new fields should be added here in the form of an `Option`, since `Option` is +/// used as a field of [`Header`] for backwards compatibility. +/// +/// More information: & [`reth_codecs_derive::Compact`]. +#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +pub(crate) struct HeaderExt { + requests_root: Option, +} + +impl HeaderExt { + /// Converts into [`Some`] if any of the fields exist. Otherwise, returns [`None`]. + /// + /// Required since [`Header`] uses `Option` as a field.
+ const fn into_option(self) -> Option { + if self.requests_root.is_some() { + Some(self) + } else { + None + } + } +} + impl Compact for AlloyHeader { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, { + let extra_fields = HeaderExt { requests_root: self.requests_root }; + let header = Header { parent_hash: self.parent_hash, ommers_hash: self.ommers_hash, @@ -52,16 +79,16 @@ impl Compact for AlloyHeader { logs_bloom: self.logs_bloom, difficulty: self.difficulty, number: self.number, - gas_limit: self.gas_limit as u64, - gas_used: self.gas_used as u64, + gas_limit: self.gas_limit, + gas_used: self.gas_used, timestamp: self.timestamp, mix_hash: self.mix_hash, nonce: self.nonce.into(), - base_fee_per_gas: self.base_fee_per_gas.map(|base_fee| base_fee as u64), - blob_gas_used: self.blob_gas_used.map(|blob_gas| blob_gas as u64), - excess_blob_gas: self.excess_blob_gas.map(|excess_blob| excess_blob as u64), + base_fee_per_gas: self.base_fee_per_gas, + blob_gas_used: self.blob_gas_used, + excess_blob_gas: self.excess_blob_gas, parent_beacon_block_root: self.parent_beacon_block_root, - requests_root: self.requests_root, + extra_fields: extra_fields.into_option(), extra_data: self.extra_data.clone(), }; header.to_compact(buf) @@ -80,16 +107,16 @@ impl Compact for AlloyHeader { logs_bloom: header.logs_bloom, difficulty: header.difficulty, number: header.number, - gas_limit: header.gas_limit.into(), - gas_used: header.gas_used.into(), + gas_limit: header.gas_limit, + gas_used: header.gas_used, timestamp: header.timestamp, mix_hash: header.mix_hash, nonce: header.nonce.into(), - base_fee_per_gas: header.base_fee_per_gas.map(Into::into), - blob_gas_used: header.blob_gas_used.map(Into::into), - excess_blob_gas: header.excess_blob_gas.map(Into::into), + base_fee_per_gas: header.base_fee_per_gas, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: header.parent_beacon_block_root, - requests_root: header.requests_root, + requests_root: header.extra_fields.and_then(|h| h.requests_root), extra_data: header.extra_data, }; (alloy_header, buf) @@ -99,9 +126,59 @@ impl Compact for AlloyHeader { #[cfg(test)] mod tests { use super::*; + use alloy_primitives::{address, b256, bloom, bytes, hex}; + + /// Holesky block #1947953 + const HOLESKY_BLOCK: Header = Header { + parent_hash: b256!("8605e0c46689f66b3deed82598e43d5002b71a929023b665228728f0c6e62a95"), + ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + beneficiary: address!("c6e2459991bfe27cca6d86722f35da23a1e4cb97"), + state_root: b256!("edad188ca5647d62f4cca417c11a1afbadebce30d23260767f6f587e9b3b9993"), + transactions_root: b256!("4daf25dc08a841aa22aa0d3cb3e1f159d4dcaf6a6063d4d36bfac11d3fdb63ee"), + receipts_root: b256!("1a1500328e8ade2592bbea1e04f9a9fd8c0142d3175d6e8420984ee159abd0ed"), + withdrawals_root: Some(b256!("d0f7f22d6d915be5a3b9c0fee353f14de5ac5c8ac1850b76ce9be70b69dfe37d")), + logs_bloom: bloom!("36410880400480e1090a001c408880800019808000125124002100400048442220020000408040423088300004d0000050803000862485a02020011600a5010404143021800881e8e08c402940404002105004820c440051640000809c000011080002300208510808150101000038002500400040000230000000110442800000800204420100008110080200088c1610c0b80000c6008900000340400200200210010111020000200041a2010804801100030a0284a8463820120a0601480244521002a10201100400801101006002001000008000000ce011011041086418609002000128800008180141002003004c00800040940c00c1180ca002890040"), + difficulty: 
U256::ZERO, + number: 0x1db931, + gas_limit: 0x1c9c380, + gas_used: 0x440949, + timestamp: 0x66982980, + mix_hash: b256!("574db0ff0a2243b434ba2a35da8f2f72df08bca44f8733f4908d10dcaebc89f1"), + nonce: 0, + base_fee_per_gas: Some(0x8), + blob_gas_used: Some(0x60000), + excess_blob_gas: Some(0x0), + parent_beacon_block_root: Some(b256!("aa1d9606b7932f2280a19b3498b9ae9eebc6a83f1afde8e45944f79d353db4c1")), + extra_data: bytes!("726574682f76312e302e302f6c696e7578"), + extra_fields: None, + }; #[test] fn test_ensure_backwards_compatibility() { assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeaderExt::bitflag_encoded_bytes(), 1); + } + + #[test] + fn test_backwards_compatibility() { + let holesky_header_bytes = hex!("81a121788605e0c46689f66b3deed82598e43d5002b71a929023b665228728f0c6e62a951dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347c6e2459991bfe27cca6d86722f35da23a1e4cb97edad188ca5647d62f4cca417c11a1afbadebce30d23260767f6f587e9b3b99934daf25dc08a841aa22aa0d3cb3e1f159d4dcaf6a6063d4d36bfac11d3fdb63ee1a1500328e8ade2592bbea1e04f9a9fd8c0142d3175d6e8420984ee159abd0edd0f7f22d6d915be5a3b9c0fee353f14de5ac5c8ac1850b76ce9be70b69dfe37d36410880400480e1090a001c408880800019808000125124002100400048442220020000408040423088300004d0000050803000862485a02020011600a5010404143021800881e8e08c402940404002105004820c440051640000809c000011080002300208510808150101000038002500400040000230000000110442800000800204420100008110080200088c1610c0b80000c6008900000340400200200210010111020000200041a2010804801100030a0284a8463820120a0601480244521002a10201100400801101006002001000008000000ce011011041086418609002000128800008180141002003004c00800040940c00c1180ca0028900401db93101c9c38044094966982980574db0ff0a2243b434ba2a35da8f2f72df08bca44f8733f4908d10dcaebc89f101080306000000aa1d9606b7932f2280a19b3498b9ae9eebc6a83f1afde8e45944f79d353db4c1726574682f76312e302e302f6c696e7578"); + let (decoded_header, _) = + Header::from_compact(&holesky_header_bytes, holesky_header_bytes.len()); + + assert_eq!(decoded_header, HOLESKY_BLOCK); + + let mut encoded_header = Vec::with_capacity(holesky_header_bytes.len()); + assert_eq!(holesky_header_bytes.len(), decoded_header.to_compact(&mut encoded_header)); + assert_eq!(encoded_header, holesky_header_bytes); + } + + #[test] + fn test_extra_fields() { + let mut header = HOLESKY_BLOCK; + header.extra_fields = Some(HeaderExt { requests_root: Some(B256::random()) }); + + let mut encoded_header = vec![]; + let len = header.to_compact(&mut encoded_header); + assert_eq!(header, Header::from_compact(&encoded_header, len).0); } } diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 8da0f7a94730..942258d0647e 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -16,7 +16,7 @@ mod tests { alloy::{ authorization_list::Authorization, genesis_account::{GenesisAccount, GenesisAccountRef, StorageEntries, StorageEntry}, - header::Header, + header::{Header, HeaderExt}, transaction::{ eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, legacy::TxLegacy, @@ -33,6 +33,7 @@ mod tests { // [`validate_bitflag_backwards_compat`] macro for detailed instructions on handling // it. 
validate_bitflag_backwards_compat!(Header, UnusedBits::Zero); + validate_bitflag_backwards_compat!(HeaderExt, UnusedBits::NotZero); validate_bitflag_backwards_compat!(TxEip2930, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageEntries, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageEntry, UnusedBits::Zero); diff --git a/crates/storage/codecs/src/alloy/transaction/eip1559.rs b/crates/storage/codecs/src/alloy/transaction/eip1559.rs index a0889492589d..8e7594951faa 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip1559.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip1559.rs @@ -2,8 +2,6 @@ use crate::Compact; use alloy_consensus::TxEip1559 as AlloyTxEip1559; use alloy_eips::eip2930::AccessList; use alloy_primitives::{Bytes, ChainId, TxKind, U256}; -use serde::{Deserialize, Serialize}; - /// [EIP-1559 Transaction](https://eips.ethereum.org/EIPS/eip-1559) /// /// This is a helper type to use derive on it instead of manually managing `bitfield`. @@ -11,9 +9,9 @@ use serde::{Deserialize, Serialize}; /// By deriving `Compact` here, any future changes or enhancements to the `Compact` derive /// will automatically apply to this type. /// -/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::transaction::TxEip1559`] -#[derive(Debug, Clone, PartialEq, Eq, Hash, Compact, Default, Serialize, Deserialize)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip1559`] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Compact, Default)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[cfg_attr(test, crate::add_arbitrary_tests(compact))] pub(crate) struct TxEip1559 { chain_id: ChainId, @@ -35,7 +33,7 @@ impl Compact for AlloyTxEip1559 { let tx = TxEip1559 { chain_id: self.chain_id, nonce: self.nonce, - gas_limit: self.gas_limit as u64, + gas_limit: self.gas_limit, max_fee_per_gas: self.max_fee_per_gas, max_priority_fee_per_gas: self.max_priority_fee_per_gas, to: self.to, @@ -53,7 +51,7 @@ impl Compact for AlloyTxEip1559 { let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, - gas_limit: tx.gas_limit.into(), + gas_limit: tx.gas_limit, max_fee_per_gas: tx.max_fee_per_gas, max_priority_fee_per_gas: tx.max_priority_fee_per_gas, to: tx.to, diff --git a/crates/storage/codecs/src/alloy/transaction/eip2930.rs b/crates/storage/codecs/src/alloy/transaction/eip2930.rs index 33b58dfff739..e0c78a3e4c0c 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip2930.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip2930.rs @@ -1,9 +1,8 @@ use crate::Compact; -use alloy_consensus::transaction::TxEip2930 as AlloyTxEip2930; +use alloy_consensus::TxEip2930 as AlloyTxEip2930; use alloy_eips::eip2930::AccessList; use alloy_primitives::{Bytes, ChainId, TxKind, U256}; use reth_codecs_derive::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; /// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)). /// @@ -12,9 +11,9 @@ use serde::{Deserialize, Serialize}; /// By deriving `Compact` here, any future changes or enhancements to the `Compact` derive /// will automatically apply to this type. 
/// -/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::transaction::TxEip2930`] -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip2930`] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub(crate) struct TxEip2930 { chain_id: ChainId, @@ -36,7 +35,7 @@ impl Compact for AlloyTxEip2930 { chain_id: self.chain_id, nonce: self.nonce, gas_price: self.gas_price, - gas_limit: self.gas_limit as u64, + gas_limit: self.gas_limit, to: self.to, value: self.value, access_list: self.access_list.clone(), @@ -51,7 +50,7 @@ impl Compact for AlloyTxEip2930 { chain_id: tx.chain_id, nonce: tx.nonce, gas_price: tx.gas_price, - gas_limit: tx.gas_limit as u128, + gas_limit: tx.gas_limit, to: tx.to, value: tx.value, access_list: tx.access_list, diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs index 15a5f443c46d..27c6b9240902 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip4844.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -1,10 +1,9 @@ use crate::{Compact, CompactPlaceholder}; use alloc::vec::Vec; -use alloy_consensus::transaction::TxEip4844 as AlloyTxEip4844; +use alloy_consensus::TxEip4844 as AlloyTxEip4844; use alloy_eips::eip2930::AccessList; use alloy_primitives::{Address, Bytes, ChainId, B256, U256}; use reth_codecs_derive::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; /// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction) /// @@ -13,9 +12,9 @@ use serde::{Deserialize, Serialize}; /// By deriving `Compact` here, any future changes or enhancements to the `Compact` derive /// will automatically apply to this type. 
/// -/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::transaction::TxEip4844`] -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip4844`] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub(crate) struct TxEip4844 { chain_id: ChainId, @@ -43,7 +42,7 @@ impl Compact for AlloyTxEip4844 { let tx = TxEip4844 { chain_id: self.chain_id, nonce: self.nonce, - gas_limit: self.gas_limit as u64, + gas_limit: self.gas_limit, max_fee_per_gas: self.max_fee_per_gas, max_priority_fee_per_gas: self.max_priority_fee_per_gas, placeholder: Some(()), @@ -62,7 +61,7 @@ impl Compact for AlloyTxEip4844 { let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, - gas_limit: tx.gas_limit as u128, + gas_limit: tx.gas_limit, max_fee_per_gas: tx.max_fee_per_gas, max_priority_fee_per_gas: tx.max_priority_fee_per_gas, to: tx.to, diff --git a/crates/storage/codecs/src/alloy/transaction/eip7702.rs b/crates/storage/codecs/src/alloy/transaction/eip7702.rs index a44e97ee1d52..e714be1c3f66 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip7702.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip7702.rs @@ -1,10 +1,9 @@ use crate::Compact; use alloc::vec::Vec; -use alloy_consensus::transaction::TxEip7702 as AlloyTxEip7702; +use alloy_consensus::TxEip7702 as AlloyTxEip7702; use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; use alloy_primitives::{Address, Bytes, ChainId, U256}; use reth_codecs_derive::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; /// [EIP-7702 Set Code Transaction](https://eips.ethereum.org/EIPS/eip-7702) /// @@ -13,9 +12,9 @@ use serde::{Deserialize, Serialize}; /// By deriving `Compact` here, any future changes or enhancements to the `Compact` derive /// will automatically apply to this type. 
/// -/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::transaction::TxEip7702`] -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip7702`] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub(crate) struct TxEip7702 { chain_id: ChainId, @@ -40,7 +39,7 @@ impl Compact for AlloyTxEip7702 { nonce: self.nonce, max_fee_per_gas: self.max_fee_per_gas, max_priority_fee_per_gas: self.max_priority_fee_per_gas, - gas_limit: self.gas_limit as u64, + gas_limit: self.gas_limit, to: self.to, value: self.value, input: self.input.clone(), @@ -57,7 +56,7 @@ impl Compact for AlloyTxEip7702 { nonce: tx.nonce, max_fee_per_gas: tx.max_fee_per_gas, max_priority_fee_per_gas: tx.max_priority_fee_per_gas, - gas_limit: tx.gas_limit as u128, + gas_limit: tx.gas_limit, to: tx.to, value: tx.value, input: tx.input, diff --git a/crates/storage/codecs/src/alloy/transaction/legacy.rs b/crates/storage/codecs/src/alloy/transaction/legacy.rs index 641b27bf53b2..27e799a790e7 100644 --- a/crates/storage/codecs/src/alloy/transaction/legacy.rs +++ b/crates/storage/codecs/src/alloy/transaction/legacy.rs @@ -1,11 +1,10 @@ use crate::Compact; use alloy_consensus::TxLegacy as AlloyTxLegacy; use alloy_primitives::{Bytes, ChainId, TxKind, U256}; -use serde::{Deserialize, Serialize}; /// Legacy transaction. -#[derive(Debug, Clone, PartialEq, Eq, Default, Compact, Serialize, Deserialize)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[cfg_attr(test, crate::add_arbitrary_tests(compact))] pub(crate) struct TxLegacy { /// Added as EIP-155: Simple replay attack protection @@ -51,7 +50,7 @@ impl Compact for AlloyTxLegacy { chain_id: self.chain_id, nonce: self.nonce, gas_price: self.gas_price, - gas_limit: self.gas_limit as u64, + gas_limit: self.gas_limit, to: self.to, value: self.value, input: self.input.clone(), @@ -67,7 +66,7 @@ impl Compact for AlloyTxLegacy { chain_id: tx.chain_id, nonce: tx.nonce, gas_price: tx.gas_price, - gas_limit: tx.gas_limit.into(), + gas_limit: tx.gas_limit, to: tx.to, value: tx.value, input: tx.input, diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index c84b19559fd5..f4fdcf5ee44c 100644 --- a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -2,7 +2,6 @@ use crate::Compact; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use op_alloy_consensus::TxDeposit as AlloyTxDeposit; use reth_codecs_derive::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; /// Deposit transactions, also known as deposits are initiated on L1, and executed on L2. /// @@ -12,8 +11,8 @@ use serde::{Deserialize, Serialize}; /// will automatically apply to this type. 
/// /// Notice: Make sure this struct is 1:1 with [`op_alloy_consensus::TxDeposit`] -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub(crate) struct TxDeposit { source_hash: B256, @@ -37,7 +36,7 @@ impl Compact for AlloyTxDeposit { to: self.to, mint: self.mint, value: self.value, - gas_limit: self.gas_limit as u64, + gas_limit: self.gas_limit, is_system_transaction: self.is_system_transaction, input: self.input.clone(), }; @@ -52,7 +51,7 @@ impl Compact for AlloyTxDeposit { to: tx.to, mint: tx.mint, value: tx.value, - gas_limit: tx.gas_limit as u128, + gas_limit: tx.gas_limit, is_system_transaction: tx.is_system_transaction, input: tx.input, }; diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs index 0ec1693210c0..16324c280cc2 100644 --- a/crates/storage/codecs/src/alloy/withdrawal.rs +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -2,13 +2,12 @@ use crate::Compact; use alloy_eips::eip4895::Withdrawal as AlloyWithdrawal; use alloy_primitives::Address; use reth_codecs_derive::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; /// Withdrawal acts as bridge which simplifies Compact implementation for AlloyWithdrawal. /// /// Notice: Make sure this struct is 1:1 with `alloy_eips::eip4895::Withdrawal` -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub(crate) struct Withdrawal { /// Monotonically increasing identifier issued by consensus layer. diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 3b05ce8e94fb..8608c5eb8c19 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -150,21 +150,7 @@ where where B: bytes::BufMut + AsMut<[u8]>, { - encode_varuint(self.len(), buf); - - let mut tmp: Vec = Vec::with_capacity(64); - - for element in self { - tmp.clear(); - - // We don't know the length until we compact it - let length = element.to_compact(&mut tmp); - encode_varuint(length, buf); - - buf.put_slice(&tmp); - } - - 0 + self.as_slice().to_compact(buf) } #[inline] @@ -190,11 +176,7 @@ where where B: bytes::BufMut + AsMut<[u8]>, { - encode_varuint(self.len(), buf); - for element in self { - element.to_compact(buf); - } - 0 + self.as_slice().specialized_to_compact(buf) } /// To be used by fixed sized types like `Vec`. 
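The `crates/storage/codecs/src/lib.rs` hunk directly above replaces the hand-rolled `Vec` loops with delegation to the slice implementations. The removed code documents the wire layout, which presumably stays the same: a varuint element count, then each element's compact payload prefixed by its varuint byte length (the `specialized_to_compact` path writes the count and raw payloads only, for fixed-size elements). A self-contained sketch of that layout, assuming a LEB128-style varuint; `encode_varuint` and `encode_list` here are local stand-ins, not the crate's private helpers:

// LEB128-style varuint: 7 bits per byte, high bit set while more bytes follow.
fn encode_varuint(mut n: usize, buf: &mut Vec<u8>) {
    while n >= 0x80 {
        buf.push((n & 0x7f) as u8 | 0x80);
        n >>= 7;
    }
    buf.push(n as u8);
}

// Layout for variable-size elements: count, then (length, payload) per element.
fn encode_list(payloads: &[Vec<u8>], buf: &mut Vec<u8>) {
    encode_varuint(payloads.len(), buf);
    for payload in payloads {
        encode_varuint(payload.len(), buf);
        buf.extend_from_slice(payload);
    }
}

fn main() {
    let mut buf = Vec::new();
    encode_list(&[vec![0xaa], vec![0xbb, 0xcc]], &mut buf);
    // Two elements: [len = 1, 0xaa] and [len = 2, 0xbb, 0xcc].
    assert_eq!(buf, [2, 1, 0xaa, 2, 0xbb, 0xcc]);
}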
diff --git a/crates/storage/db-api/src/models/accounts.rs b/crates/storage/db-api/src/models/accounts.rs index 338a3a06f600..94922632129b 100644 --- a/crates/storage/db-api/src/models/accounts.rs +++ b/crates/storage/db-api/src/models/accounts.rs @@ -64,11 +64,9 @@ impl Encode for BlockNumberAddress { } impl Decode for BlockNumberAddress { - fn decode>(value: B) -> Result { - let value = value.as_ref(); + fn decode(value: &[u8]) -> Result { let num = u64::from_be_bytes(value[..8].try_into().map_err(|_| DatabaseError::Decode)?); let hash = Address::from_slice(&value[8..]); - Ok(Self((num, hash))) } } @@ -97,11 +95,9 @@ impl Encode for AddressStorageKey { } impl Decode for AddressStorageKey { - fn decode>(value: B) -> Result { - let value = value.as_ref(); + fn decode(value: &[u8]) -> Result { let address = Address::from_slice(&value[..20]); let storage_key = StorageKey::from_slice(&value[20..]); - Ok(Self((address, storage_key))) } } @@ -127,7 +123,7 @@ mod tests { let encoded = Encode::encode(key); assert_eq!(encoded, bytes); - let decoded: BlockNumberAddress = Decode::decode(encoded).unwrap(); + let decoded: BlockNumberAddress = Decode::decode(&encoded).unwrap(); assert_eq!(decoded, key); } @@ -152,7 +148,7 @@ mod tests { let encoded = Encode::encode(key); assert_eq!(encoded, bytes); - let decoded: AddressStorageKey = Decode::decode(encoded).unwrap(); + let decoded: AddressStorageKey = Decode::decode(&encoded).unwrap(); assert_eq!(decoded, key); } diff --git a/crates/storage/db-api/src/models/blocks.rs b/crates/storage/db-api/src/models/blocks.rs index b48baf6d6b26..7268d82dd3cc 100644 --- a/crates/storage/db-api/src/models/blocks.rs +++ b/crates/storage/db-api/src/models/blocks.rs @@ -29,9 +29,6 @@ mod tests { let mut ommer = StoredBlockOmmers::default(); ommer.ommers.push(Header::default()); ommer.ommers.push(Header::default()); - assert_eq!( - ommer.clone(), - StoredBlockOmmers::decompress::>(ommer.compress()).unwrap() - ); + assert_eq!(ommer.clone(), StoredBlockOmmers::decompress(&ommer.compress()).unwrap()); } } diff --git a/crates/storage/db-api/src/models/integer_list.rs b/crates/storage/db-api/src/models/integer_list.rs index f47605bf88b5..480b52a9e2c0 100644 --- a/crates/storage/db-api/src/models/integer_list.rs +++ b/crates/storage/db-api/src/models/integer_list.rs @@ -12,13 +12,14 @@ impl Compress for IntegerList { fn compress(self) -> Self::Compressed { self.to_bytes() } + fn compress_to_buf>(self, buf: &mut B) { self.to_mut_bytes(buf) } } impl Decompress for IntegerList { - fn decompress>(value: B) -> Result { - Self::from_bytes(value.as_ref()).map_err(|_| DatabaseError::Decode) + fn decompress(value: &[u8]) -> Result { + Self::from_bytes(value).map_err(|_| DatabaseError::Decode) } } diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 942b9b27af6d..0f35a558a352 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -5,11 +5,10 @@ use crate::{ DatabaseError, }; use alloy_genesis::GenesisAccount; -use alloy_primitives::{Address, Log, B256, U256}; +use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; use reth_primitives::{ - Account, Bytecode, Header, Receipt, Requests, SealedHeader, StorageEntry, - TransactionSignedNoHash, TxType, + Account, Bytecode, Header, Receipt, Requests, StorageEntry, TransactionSignedNoHash, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; @@ -42,10 
+41,10 @@ macro_rules! impl_uints { } impl Decode for $name { - fn decode>(value: B) -> Result { + fn decode(value: &[u8]) -> Result { Ok( $name::from_be_bytes( - value.as_ref().try_into().map_err(|_| $crate::DatabaseError::Decode)? + value.try_into().map_err(|_| $crate::DatabaseError::Decode)? ) ) } @@ -65,8 +64,12 @@ impl Encode for Vec { } impl Decode for Vec { - fn decode>(value: B) -> Result { - Ok(value.as_ref().to_vec()) + fn decode(value: &[u8]) -> Result { + Ok(value.to_vec()) + } + + fn decode_owned(value: Vec) -> Result { + Ok(value) } } @@ -79,8 +82,8 @@ impl Encode for Address { } impl Decode for Address { - fn decode>(value: B) -> Result { - Ok(Self::from_slice(value.as_ref())) + fn decode(value: &[u8]) -> Result { + Ok(Self::from_slice(value)) } } @@ -93,8 +96,8 @@ impl Encode for B256 { } impl Decode for B256 { - fn decode>(value: B) -> Result { - Ok(Self::new(value.as_ref().try_into().map_err(|_| DatabaseError::Decode)?)) + fn decode(value: &[u8]) -> Result { + Ok(Self::new(value.try_into().map_err(|_| DatabaseError::Decode)?)) } } @@ -107,8 +110,12 @@ impl Encode for String { } impl Decode for String { - fn decode>(value: B) -> Result { - Self::from_utf8(value.as_ref().to_vec()).map_err(|_| DatabaseError::Decode) + fn decode(value: &[u8]) -> Result { + Self::decode_owned(value.to_vec()) + } + + fn decode_owned(value: Vec) -> Result { + Self::from_utf8(value).map_err(|_| DatabaseError::Decode) } } @@ -117,16 +124,15 @@ impl Encode for StoredNibbles { // Delegate to the Compact implementation fn encode(self) -> Self::Encoded { - let mut buf = Vec::with_capacity(self.0.len()); - self.to_compact(&mut buf); - buf + // NOTE: This used to be `to_compact`, but all it does is append the bytes to the buffer, + // so we can just use the implementation of `Into>` to reuse the buffer. + self.0.into() } } impl Decode for StoredNibbles { - fn decode>(value: B) -> Result { - let buf = value.as_ref(); - Ok(Self::from_compact(buf, buf.len()).0) + fn decode(value: &[u8]) -> Result { + Ok(Self::from_compact(value, value.len()).0) } } @@ -142,9 +148,8 @@ impl Encode for StoredNibblesSubKey { } impl Decode for StoredNibblesSubKey { - fn decode>(value: B) -> Result { - let buf = value.as_ref(); - Ok(Self::from_compact(buf, buf.len()).0) + fn decode(value: &[u8]) -> Result { + Ok(Self::from_compact(value, value.len()).0) } } @@ -159,9 +164,8 @@ impl Encode for PruneSegment { } impl Decode for PruneSegment { - fn decode>(value: B) -> Result { - let buf = value.as_ref(); - Ok(Self::from_compact(buf, buf.len()).0) + fn decode(value: &[u8]) -> Result { + Ok(Self::from_compact(value, value.len()).0) } } @@ -177,9 +181,8 @@ impl Encode for ClientVersion { } impl Decode for ClientVersion { - fn decode>(value: B) -> Result { - let buf = value.as_ref(); - Ok(Self::from_compact(buf, buf.len()).0) + fn decode(value: &[u8]) -> Result { + Ok(Self::from_compact(value, value.len()).0) } } @@ -196,9 +199,8 @@ macro_rules! impl_compression_for_compact { } impl Decompress for $name { - fn decompress>(value: B) -> Result<$name, $crate::DatabaseError> { - let value = value.as_ref(); - let (obj, _) = Compact::from_compact(&value, value.len()); + fn decompress(value: &[u8]) -> Result<$name, $crate::DatabaseError> { + let (obj, _) = Compact::from_compact(value, value.len()); Ok(obj) } } @@ -207,7 +209,7 @@ macro_rules! impl_compression_for_compact { } impl_compression_for_compact!( - SealedHeader, + Bytes, Header, Account, Log, @@ -236,23 +238,20 @@ impl_compression_for_compact!( macro_rules! 
impl_compression_fixed_compact { ($($name:tt),+) => { $( - impl Compress for $name - { + impl Compress for $name { type Compressed = Vec; - fn compress_to_buf>(self, buf: &mut B) { - let _ = Compact::to_compact(&self, buf); - } - fn uncompressable_ref(&self) -> Option<&[u8]> { Some(self.as_ref()) } + + fn compress_to_buf>(self, buf: &mut B) { + let _ = Compact::to_compact(&self, buf); + } } - impl Decompress for $name - { - fn decompress>(value: B) -> Result<$name, $crate::DatabaseError> { - let value = value.as_ref(); + impl Decompress for $name { + fn decompress(value: &[u8]) -> Result<$name, $crate::DatabaseError> { let (obj, _) = Compact::from_compact(&value, value.len()); Ok(obj) } @@ -305,16 +304,6 @@ add_wrapper_struct!((ClientVersion, CompactClientVersion)); #[cfg(test)] mod tests { - use super::*; - use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; - use reth_primitives::{Account, Receipt, ReceiptWithBloom, SealedHeader, Withdrawals}; - use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; - use reth_stages_types::{ - AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, - HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, StageUnitCheckpoint, - StorageHashingCheckpoint, - }; - // each value in the database has an extra field named flags that encodes metadata about other // fields in the value, e.g. offset and length. // @@ -323,6 +312,15 @@ mod tests { #[cfg(not(feature = "optimism"))] #[test] fn test_ensure_backwards_compatibility() { + use super::*; + use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; + use reth_primitives::{Account, Receipt, ReceiptWithBloom, Withdrawals}; + use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; + use reth_stages_types::{ + AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, + ExecutionCheckpoint, HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, + StageUnitCheckpoint, StorageHashingCheckpoint, + }; assert_eq!(Account::bitflag_encoded_bytes(), 2); assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); @@ -338,7 +336,6 @@ mod tests { assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); assert_eq!(Receipt::bitflag_encoded_bytes(), 1); assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); - assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); @@ -362,63 +359,6 @@ mod tests { validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); validate_bitflag_backwards_compat!(Receipt, UnusedBits::Zero); validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); - validate_bitflag_backwards_compat!(SealedHeader, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Requests, UnusedBits::Zero); - } - 
- #[cfg(feature = "optimism")] - #[test] - fn test_ensure_backwards_compatibility() { - assert_eq!(Account::bitflag_encoded_bytes(), 2); - assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); - assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); - assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); - assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); - assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); - assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); - assert_eq!(Receipt::bitflag_encoded_bytes(), 2); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); - assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); - assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); - assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); - assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); - - // In case of failure, refer to the documentation of the - // [`validate_bitflag_backwards_compat`] macro for detailed instructions on handling - // it. - validate_bitflag_backwards_compat!(Account, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(AccountHashingCheckpoint, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(CheckpointBlockRange, UnusedBits::Zero); - validate_bitflag_backwards_compat!(CompactClientVersion, UnusedBits::Zero); - validate_bitflag_backwards_compat!(CompactU256, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(CompactU64, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(EntitiesCheckpoint, UnusedBits::Zero); - validate_bitflag_backwards_compat!(ExecutionCheckpoint, UnusedBits::Zero); - validate_bitflag_backwards_compat!(HeadersCheckpoint, UnusedBits::Zero); - validate_bitflag_backwards_compat!(IndexHistoryCheckpoint, UnusedBits::Zero); - validate_bitflag_backwards_compat!(PruneCheckpoint, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); - validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Receipt, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); - validate_bitflag_backwards_compat!(SealedHeader, UnusedBits::Zero); validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); diff --git a/crates/storage/db-api/src/models/sharded_key.rs b/crates/storage/db-api/src/models/sharded_key.rs index dd8702a4812b..d1de1bd400c4 100644 --- a/crates/storage/db-api/src/models/sharded_key.rs +++ b/crates/storage/db-api/src/models/sharded_key.rs @@ -16,7 +16,7 @@ pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000; /// `Address | 200` -> data is from block 0 to 200. /// /// `Address | 300` -> data is from block 201 to 300. 
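The `ShardedKey` doc comment above describes the storage layout this file encodes: each shard of a key's history is stored under the highest block number it contains, so a seek to the first entry at or above a target block lands in the shard holding that block. A small sketch of that lookup, using an ordered list as a stand-in for the database cursor; `find_shard` is a hypothetical helper, not a reth API:

// Shards for one key, ordered by the highest block each shard contains.
fn find_shard(shards: &[(u64, Vec<u64>)], block: u64) -> Option<&Vec<u64>> {
    // The first shard whose highest block is >= the target covers the target.
    shards.iter().find(|(highest, _)| *highest >= block).map(|(_, list)| list)
}

fn main() {
    // Two shards, as in the doc above: blocks 0..=200 and 201..=300.
    let shards = vec![(200u64, vec![5, 100, 200]), (300u64, vec![250, 300])];
    assert_eq!(find_shard(&shards, 150), Some(&vec![5, 100, 200]));
    assert_eq!(find_shard(&shards, 250), Some(&vec![250, 300]));
}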
-#[derive(Debug, Default, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash)] pub struct ShardedKey { /// The key for this type. pub key: T, @@ -43,11 +43,7 @@ impl ShardedKey { } } -impl Encode for ShardedKey -where - T: Encode, - Vec: From<::Encoded>, -{ +impl Encode for ShardedKey { type Encoded = Vec; fn encode(self) -> Self::Encoded { @@ -57,30 +53,11 @@ where } } -impl Decode for ShardedKey -where - T: Decode, -{ - fn decode>(value: B) -> Result { - let value = value.as_ref(); - - let tx_num_index = value.len() - 8; - - let highest_tx_number = u64::from_be_bytes( - value[tx_num_index..].try_into().map_err(|_| DatabaseError::Decode)?, - ); - let key = T::decode(&value[..tx_num_index])?; - +impl Decode for ShardedKey { + fn decode(value: &[u8]) -> Result { + let (key, highest_tx_number) = value.split_last_chunk().unwrap(); + let key = T::decode(key)?; + let highest_tx_number = u64::from_be_bytes(*highest_tx_number); Ok(Self::new(key, highest_tx_number)) } } - -impl Hash for ShardedKey -where - T: Hash, -{ - fn hash(&self, state: &mut H) { - self.key.hash(state); - self.highest_block_number.hash(state); - } -} diff --git a/crates/storage/db-api/src/models/storage_sharded_key.rs b/crates/storage/db-api/src/models/storage_sharded_key.rs index b6538256e629..5fd79ba655c1 100644 --- a/crates/storage/db-api/src/models/storage_sharded_key.rs +++ b/crates/storage/db-api/src/models/storage_sharded_key.rs @@ -61,8 +61,7 @@ impl Encode for StorageShardedKey { } impl Decode for StorageShardedKey { - fn decode>(value: B) -> Result { - let value = value.as_ref(); + fn decode(value: &[u8]) -> Result { let tx_num_index = value.len() - 8; let highest_tx_number = u64::from_be_bytes( diff --git a/crates/storage/db-api/src/scale.rs b/crates/storage/db-api/src/scale.rs index 99382a4a9179..591635be054e 100644 --- a/crates/storage/db-api/src/scale.rs +++ b/crates/storage/db-api/src/scale.rs @@ -22,7 +22,7 @@ where } fn compress_to_buf>(self, buf: &mut B) { - buf.put_slice(&parity_scale_codec::Encode::encode(&self)) + parity_scale_codec::Encode::encode_to(&self, OutputCompat::wrap_mut(buf)); } } @@ -30,8 +30,8 @@ impl Decompress for T where T: ScaleValue + parity_scale_codec::Decode + Sync + Send + std::fmt::Debug, { - fn decompress>(value: B) -> Result { - parity_scale_codec::Decode::decode(&mut value.as_ref()).map_err(|_| DatabaseError::Decode) + fn decompress(mut value: &[u8]) -> Result { + parity_scale_codec::Decode::decode(&mut value).map_err(|_| DatabaseError::Decode) } } @@ -50,3 +50,22 @@ impl sealed::Sealed for Vec {} impl_compression_for_scale!(U256); impl_compression_for_scale!(u8, u32, u16, u64); + +#[repr(transparent)] +struct OutputCompat(B); + +impl OutputCompat { + fn wrap_mut(buf: &mut B) -> &mut Self { + unsafe { std::mem::transmute(buf) } + } +} + +impl parity_scale_codec::Output for OutputCompat { + fn write(&mut self, bytes: &[u8]) { + self.0.put_slice(bytes); + } + + fn push_byte(&mut self, byte: u8) { + self.0.put_u8(byte); + } +} diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index 6d3f52198d28..963457af05c3 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -38,11 +38,11 @@ pub trait Compress: Send + Sync + Sized + Debug { /// Trait that will transform the data to be read from the DB. pub trait Decompress: Send + Sync + Sized + Debug { /// Decompresses data coming from the database. 
- fn decompress>(value: B) -> Result; + fn decompress(value: &[u8]) -> Result; /// Decompresses owned data coming from the database. fn decompress_owned(value: Vec) -> Result { - Self::decompress(value) + Self::decompress(&value) } } @@ -58,7 +58,12 @@ pub trait Encode: Send + Sync + Sized + Debug { /// Trait that will transform the data to be read from the DB. pub trait Decode: Send + Sync + Sized + Debug { /// Decodes data coming from the database. - fn decode>(value: B) -> Result; + fn decode(value: &[u8]) -> Result; + + /// Decodes owned data coming from the database. + fn decode_owned(value: Vec) -> Result { + Self::decode(&value) + } } /// Generic trait that enforces the database key to implement [`Encode`] and [`Decode`]. diff --git a/crates/storage/db-api/src/utils.rs b/crates/storage/db-api/src/utils.rs index b9ee6277e959..65ed5b6c01d4 100644 --- a/crates/storage/db-api/src/utils.rs +++ b/crates/storage/db-api/src/utils.rs @@ -10,8 +10,7 @@ macro_rules! impl_fixed_arbitrary { fn arbitrary(u: &mut Unstructured<'a>) -> Result { let mut buffer = vec![0; $size]; u.fill_buffer(buffer.as_mut_slice())?; - - Decode::decode(buffer).map_err(|_| arbitrary::Error::IncorrectFormat) + Decode::decode_owned(buffer).map_err(|_| arbitrary::Error::IncorrectFormat) } } @@ -26,7 +25,7 @@ macro_rules! impl_fixed_arbitrary { fn arbitrary_with(args: Self::Parameters) -> Self::Strategy { use proptest::strategy::Strategy; proptest::collection::vec(proptest::arbitrary::any_with::(args), $size) - .prop_map(move |vec| Decode::decode(vec).unwrap()) + .prop_map(move |vec| Decode::decode_owned(vec).unwrap()) } } )+ diff --git a/crates/storage/db-common/src/db_tool/mod.rs b/crates/storage/db-common/src/db_tool/mod.rs index 483e6c301eee..67a5dd627623 100644 --- a/crates/storage/db-common/src/db_tool/mod.rs +++ b/crates/storage/db-common/src/db_tool/mod.rs @@ -2,7 +2,6 @@ use boyer_moore_magiclen::BMByte; use eyre::Result; -use reth_chainspec::ChainSpec; use reth_db::{RawTable, TableRawRow}; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, @@ -13,7 +12,7 @@ use reth_db_api::{ }; use reth_fs_util as fs; use reth_node_types::NodeTypesWithDB; -use reth_provider::{ChainSpecProvider, ProviderFactory}; +use reth_provider::{providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory}; use std::{path::Path, rc::Rc, sync::Arc}; use tracing::info; @@ -25,7 +24,7 @@ pub struct DbTool { } impl DbTool { - /// Get an [`Arc`] to the [`ChainSpec`]. + /// Get an [`Arc`] to the underlying chainspec. pub fn chain(&self) -> Arc { self.provider_factory.chain_spec() } @@ -110,7 +109,7 @@ impl DbTool { } } -impl> DbTool { +impl DbTool { /// Takes a DB where the tables have already been created. pub fn new(provider_factory: ProviderFactory) -> eyre::Result { // Disable timeout because we are entering a TUI which might read for a long time. 
diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 6e94677abfc6..3962dfd69809 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -2,7 +2,7 @@ use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, B256, U256}; -use reth_chainspec::{ChainSpec, EthChainSpec}; +use reth_chainspec::EthChainSpec; use reth_codecs::Compact; use reth_config::config::EtlConfig; use reth_db::tables; @@ -333,7 +333,7 @@ where Provider: DBProvider + BlockNumReader + BlockHashReader - + ChainSpecProvider + + ChainSpecProvider + StageCheckpointWriter + HistoryWriter + HeaderProvider @@ -346,7 +346,7 @@ where let hash = provider_rw.block_hash(block)?.unwrap(); let expected_state_root = provider_rw .header_by_number(block)? - .ok_or(ProviderError::HeaderNotFound(block.into()))? + .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))? .state_root; // first line can be state root @@ -366,7 +366,7 @@ where debug!(target: "reth::cli", block, - chain=%provider_rw.chain_spec().chain, + chain=%provider_rw.chain_spec().chain(), "Initializing state at block" ); @@ -582,7 +582,7 @@ struct GenesisAccountWithAddress { mod tests { use super::*; use alloy_genesis::Genesis; - use reth_chainspec::{Chain, HOLESKY, MAINNET, SEPOLIA}; + use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; use reth_db::DatabaseEnv; use reth_db_api::{ cursor::DbCursorRO, diff --git a/crates/storage/db-models/src/accounts.rs b/crates/storage/db-models/src/accounts.rs index 74736247a6fb..e1f4773960fa 100644 --- a/crates/storage/db-models/src/accounts.rs +++ b/crates/storage/db-models/src/accounts.rs @@ -1,8 +1,8 @@ use reth_codecs::{add_arbitrary_tests, Compact}; use serde::Serialize; -use alloy_primitives::Address; -use reth_primitives::{Account, Buf}; +use alloy_primitives::{bytes::Buf, Address}; +use reth_primitives::Account; /// Account as it is saved in the database. /// diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 09ae5efd43de..ba012cf68af1 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -43,13 +43,13 @@ metrics = { workspace = true, optional = true } # misc bytes.workspace = true -page_size = { version = "0.6.0", optional = true } +page_size = { version = "0.6.0", optional = true } thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true paste.workspace = true rustc-hash = { workspace = true, optional = true } -sysinfo = { version = "0.30", default-features = false } +sysinfo = { version = "0.31", default-features = false, features = ["system"] } # arbitrary utils strum = { workspace = true, features = ["derive"], optional = true } diff --git a/crates/storage/db/benches/criterion.rs b/crates/storage/db/benches/criterion.rs index 6d273a8ce93c..7ac9566d80c5 100644 --- a/crates/storage/db/benches/criterion.rs +++ b/crates/storage/db/benches/criterion.rs @@ -87,7 +87,7 @@ where |input| { { for (_, k, _, _) in input { - let _ = ::Key::decode(k); + let _ = ::Key::decode(&k); } }; black_box(()); @@ -115,7 +115,7 @@ where |input| { { for (_, _, _, v) in input { - let _ = ::Value::decompress(v); + let _ = ::Value::decompress(&v); } }; black_box(()); diff --git a/crates/storage/db/benches/iai.rs b/crates/storage/db/benches/iai.rs index ebcf6c8a42c0..167cd0860e26 100644 --- a/crates/storage/db/benches/iai.rs +++ b/crates/storage/db/benches/iai.rs @@ -25,7 +25,7 @@ macro_rules!
impl_iai_callgrind_inner { #[library_benchmark] pub fn $decompress() { for (_, _, _, comp) in black_box(load_vectors::()) { - let _ = black_box(::Value::decompress(comp)); + let _ = black_box(::Value::decompress(&comp)); } } @@ -39,7 +39,7 @@ macro_rules! impl_iai_callgrind_inner { #[library_benchmark] pub fn $decode() { for (_, enc, _, _) in black_box(load_vectors::()) { - let _ = black_box(::Key::decode(enc)); + let _ = black_box(::Key::decode(&enc)); } } diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index 72d121aa75cc..9700ef94b241 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -1,7 +1,5 @@ -#![cfg(feature = "test-utils")] #![allow(missing_docs)] - -use std::{path::Path, sync::Arc}; +#![cfg(feature = "test-utils")] use alloy_primitives::Bytes; use reth_db::{test_utils::create_test_rw_db_with_path, DatabaseEnv}; @@ -11,6 +9,7 @@ use reth_db_api::{ Database, }; use reth_fs_util as fs; +use std::{path::Path, sync::Arc}; /// Path where the DB is initialized for benchmarks. #[allow(dead_code)] diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index c908bad45942..756a622bcb03 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -81,7 +81,7 @@ macro_rules! compress_to_buf_or_ref { if let Some(value) = $value.uncompressable_ref() { Some(value) } else { - $self.buf.truncate(0); + $self.buf.clear(); $value.compress_to_buf(&mut $self.buf); None } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 8b4a136c300d..1deb86ba614f 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -1319,7 +1319,7 @@ mod tests { for i in 1..5 { let key = ShardedKey::new(real_key, i * 100); - let list: IntegerList = vec![i * 100u64].into(); + let list = IntegerList::new_pre_sorted([i * 100u64]); db.update(|tx| tx.put::(key.clone(), list.clone()).expect("")) .unwrap(); @@ -1340,7 +1340,7 @@ mod tests { .expect("should be able to retrieve it."); assert_eq!(ShardedKey::new(real_key, 200), key); - let list200: IntegerList = vec![200u64].into(); + let list200 = IntegerList::new_pre_sorted([200u64]); assert_eq!(list200, list); } // Seek greatest index @@ -1357,7 +1357,7 @@ mod tests { .expect("should be able to retrieve it."); assert_eq!(ShardedKey::new(real_key, 400), key); - let list400: IntegerList = vec![400u64].into(); + let list400 = IntegerList::new_pre_sorted([400u64]); assert_eq!(list400, list); } } diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index 828cec6e7d29..6dc063a167ac 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -7,7 +7,7 @@ use reth_tracing::tracing::error; use std::{ path::{Path, PathBuf}, process, - sync::Arc, + sync::{Arc, OnceLock}, }; use sysinfo::{ProcessRefreshKind, RefreshKind, System}; @@ -91,7 +91,7 @@ impl StorageLockInner { } } -#[derive(Debug)] +#[derive(Clone, Debug)] struct ProcessUID { /// OS process identifier pid: usize, @@ -102,14 +102,19 @@ struct ProcessUID { impl ProcessUID { /// Creates [`Self`] for the provided PID. 
fn new(pid: usize) -> Option { - System::new_with_specifics(RefreshKind::new().with_processes(ProcessRefreshKind::new())) - .process(pid.into()) - .map(|process| Self { pid, start_time: process.start_time() }) + let mut system = System::new(); + let pid2 = sysinfo::Pid::from(pid); + system.refresh_processes_specifics( + sysinfo::ProcessesToUpdate::Some(&[pid2]), + ProcessRefreshKind::new(), + ); + system.process(pid2).map(|process| Self { pid, start_time: process.start_time() }) } /// Creates [`Self`] from own process. fn own() -> Self { - Self::new(process::id() as usize).expect("own process") + static CACHE: OnceLock = OnceLock::new(); + CACHE.get_or_init(|| Self::new(process::id() as usize).expect("own process")).clone() } /// Parses [`Self`] from a file. diff --git a/crates/storage/db/src/static_file/mask.rs b/crates/storage/db/src/static_file/mask.rs index 61fef697e186..f5d35a193d70 100644 --- a/crates/storage/db/src/static_file/mask.rs +++ b/crates/storage/db/src/static_file/mask.rs @@ -26,9 +26,6 @@ macro_rules! add_segments { $( #[doc = concat!("Mask for ", stringify!($segment), " static file segment. See [`Mask`] for more.")] #[derive(Debug)] - // TODO: remove next attribute when nightly is fixed (ie. does - // not return wrong warnings for never constructed structs). - #[allow(dead_code)] pub struct [<$segment Mask>](Mask); )+ } diff --git a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs index 2c944e158eb1..bb26e8b9e217 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs @@ -10,12 +10,7 @@ pub struct IntegerListInput(pub Vec); impl From for IntegerList { fn from(list: IntegerListInput) -> Self { let mut v = list.0; - - // Empty lists are not supported by `IntegerList`, so we want to skip these cases. - if v.is_empty() { - return vec![1u64].into() - } - v.sort(); - v.into() + v.sort_unstable(); + Self::new_pre_sorted(v) } } diff --git a/crates/storage/db/src/tables/codecs/fuzz/mod.rs b/crates/storage/db/src/tables/codecs/fuzz/mod.rs index 846ed17e1f1a..e64a3841df49 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/mod.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/mod.rs @@ -30,13 +30,12 @@ macro_rules! impl_fuzzer_with_input { /// Encodes and decodes table types returning its encoded size and the decoded object. /// This method is used for benchmarking, so its parameter should be the actual type that is being tested. - pub fn encode_and_decode(obj: $name) -> (usize, $name) - { + pub fn encode_and_decode(obj: $name) -> (usize, $name) { let data = table::$encode::$encode_method(obj); let size = data.len(); // Some `data` might be a fixed array. 
- (size, table::$decode::$decode_method(data.to_vec()).expect("failed to decode")) + (size, table::$decode::$decode_method(&data).expect("failed to decode")) } #[cfg(test)] diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 835d1486dafe..384139618163 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -429,8 +429,8 @@ impl Encode for ChainStateKey { } impl Decode for ChainStateKey { - fn decode>(value: B) -> Result { - if value.as_ref() == [0] { + fn decode(value: &[u8]) -> Result { + if value == [0] { Ok(Self::LastFinalizedBlock) } else { Err(reth_db_api::DatabaseError::Decode) diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 1e8fa56b3603..6b6de41613eb 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -96,8 +96,12 @@ impl Encode for RawKey { // Decode impl Decode for RawKey { - fn decode>(key: B) -> Result { - Ok(Self { key: key.as_ref().to_vec(), _phantom: std::marker::PhantomData }) + fn decode(value: &[u8]) -> Result { + Ok(Self { key: value.to_vec(), _phantom: std::marker::PhantomData }) + } + + fn decode_owned(value: Vec) -> Result { + Ok(Self { key: value, _phantom: std::marker::PhantomData }) } } @@ -168,8 +172,8 @@ impl Compress for RawValue { } impl Decompress for RawValue { - fn decompress>(value: B) -> Result { - Ok(Self { value: value.as_ref().to_vec(), _phantom: std::marker::PhantomData }) + fn decompress(value: &[u8]) -> Result { + Ok(Self { value: value.to_vec(), _phantom: std::marker::PhantomData }) } fn decompress_owned(value: Vec) -> Result { diff --git a/crates/storage/db/src/tables/utils.rs b/crates/storage/db/src/tables/utils.rs index 616d1038264e..0948ee108f68 100644 --- a/crates/storage/db/src/tables/utils.rs +++ b/crates/storage/db/src/tables/utils.rs @@ -6,7 +6,7 @@ use std::borrow::Cow; /// Helper function to decode a `(key, value)` pair. pub(crate) fn decoder<'a, T>( - kv: (Cow<'a, [u8]>, Cow<'a, [u8]>), + (k, v): (Cow<'a, [u8]>, Cow<'a, [u8]>), ) -> Result, DatabaseError> where T: Table, @@ -14,11 +14,11 @@ where T::Value: Decompress, { Ok(( - match kv.0 { + match k { Cow::Borrowed(k) => Decode::decode(k)?, - Cow::Owned(k) => Decode::decode(k)?, + Cow::Owned(k) => Decode::decode_owned(k)?, }, - match kv.1 { + match v { Cow::Borrowed(v) => Decompress::decompress(v)?, Cow::Owned(v) => Decompress::decompress_owned(v)?, }, diff --git a/crates/storage/errors/src/db.rs b/crates/storage/errors/src/db.rs index f27dacdc309f..22efbb1fb4f8 100644 --- a/crates/storage/errors/src/db.rs +++ b/crates/storage/errors/src/db.rs @@ -50,11 +50,10 @@ pub enum DatabaseError { Other(String), } -#[cfg(feature = "std")] -impl std::error::Error for DatabaseError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for DatabaseError { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { - Self::Write(err) => std::error::Error::source(err), + Self::Write(err) => core::error::Error::source(err), _ => Option::None, } } @@ -113,8 +112,7 @@ impl fmt::Display for DatabaseWriteError { } } -#[cfg(feature = "std")] -impl std::error::Error for DatabaseWriteError {} +impl core::error::Error for DatabaseWriteError {} /// Database write operation type. 
#[derive(Clone, Copy, Debug, PartialEq, Eq)] diff --git a/crates/storage/errors/src/lockfile.rs b/crates/storage/errors/src/lockfile.rs index 667197f571d7..9a3af4ba325a 100644 --- a/crates/storage/errors/src/lockfile.rs +++ b/crates/storage/errors/src/lockfile.rs @@ -12,8 +12,7 @@ pub enum StorageLockError { Other(String), } -#[cfg(feature = "std")] -impl std::error::Error for StorageLockError {} +impl core::error::Error for StorageLockError {} /// TODO: turn into variant once `ProviderError` impl From for StorageLockError { diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index f0ecfbea804d..d60a2adb92bb 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -172,14 +172,13 @@ impl From for ProviderError { } } -#[cfg(feature = "std")] -impl std::error::Error for ProviderError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for ProviderError { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { - Self::Database(source) => std::error::Error::source(source), - Self::Rlp(source) => std::error::Error::source(source), - Self::StorageLockError(source) => std::error::Error::source(source), - Self::UnifiedStorageWriterError(source) => std::error::Error::source(source), + Self::Database(source) => core::error::Error::source(source), + Self::Rlp(source) => core::error::Error::source(source), + Self::StorageLockError(source) => core::error::Error::source(source), + Self::UnifiedStorageWriterError(source) => core::error::Error::source(source), _ => Option::None, } } diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 8056b68557b8..fa10a73cb330 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -19,14 +19,16 @@ byteorder = "1" derive_more.workspace = true indexmap = "2" parking_lot.workspace = true +smallvec.workspace = true thiserror.workspace = true -dashmap = { workspace = true, features = ["inline"], optional = true } tracing.workspace = true +dashmap = { workspace = true, features = ["inline"], optional = true } + [features] default = [] return-borrowed = [] -read-tx-timeouts = ["dashmap", "dashmap/inline"] +read-tx-timeouts = ["dep:dashmap"] [dev-dependencies] pprof = { workspace = true, features = [ diff --git a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml index 8cd56d1f2791..433de2684f9f 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml +++ b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml @@ -10,4 +10,4 @@ repository.workspace = true [build-dependencies] cc = "1.0" -bindgen = { version = "0.69", default-features = false, features = ["runtime"] } +bindgen = { version = "0.70", default-features = false, features = ["runtime"] } diff --git a/crates/storage/libmdbx-rs/src/database.rs b/crates/storage/libmdbx-rs/src/database.rs index 1c4739b2bee7..c8733889160e 100644 --- a/crates/storage/libmdbx-rs/src/database.rs +++ b/crates/storage/libmdbx-rs/src/database.rs @@ -4,7 +4,7 @@ use crate::{ Environment, Transaction, }; use ffi::MDBX_db_flags_t; -use std::{ffi::CString, ptr}; +use std::{ffi::CStr, ptr}; /// A handle to an individual database in an environment. 
///
@@ -27,8 +27,13 @@ impl Database {
         name: Option<&str>,
         flags: MDBX_db_flags_t,
     ) -> Result {
-        let c_name = name.map(|n| CString::new(n).unwrap());
-        let name_ptr = if let Some(c_name) = &c_name { c_name.as_ptr() } else { ptr::null() };
+        let mut c_name_buf = smallvec::SmallVec::<[u8; 32]>::new();
+        let c_name = name.map(|n| {
+            c_name_buf.extend_from_slice(n.as_bytes());
+            c_name_buf.push(0);
+            CStr::from_bytes_with_nul(&c_name_buf).unwrap()
+        });
+        let name_ptr = if let Some(c_name) = c_name { c_name.as_ptr() } else { ptr::null() };
         let mut dbi: ffi::MDBX_dbi = 0;
         txn.txn_execute(|txn_ptr| {
             mdbx_result(unsafe { ffi::mdbx_dbi_open(txn_ptr, name_ptr, flags, &mut dbi) })
diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs
index f243c7da18ec..edf9321ace40 100644
--- a/crates/storage/libmdbx-rs/src/environment.rs
+++ b/crates/storage/libmdbx-rs/src/environment.rs
@@ -643,8 +643,6 @@ impl EnvironmentBuilder {
             }
             for (opt, v) in [
                 (ffi::MDBX_opt_max_db, self.max_dbs),
-                (ffi::MDBX_opt_sync_bytes, self.sync_bytes),
-                (ffi::MDBX_opt_sync_period, self.sync_period),
                 (ffi::MDBX_opt_rp_augment_limit, self.rp_augment_limit),
                 (ffi::MDBX_opt_loose_limit, self.loose_limit),
                 (ffi::MDBX_opt_dp_reserve_limit, self.dp_reserve_limit),
@@ -698,6 +696,15 @@ impl EnvironmentBuilder {
                 mode,
             ))?;

+            for (opt, v) in [
+                (ffi::MDBX_opt_sync_bytes, self.sync_bytes),
+                (ffi::MDBX_opt_sync_period, self.sync_period),
+            ] {
+                if let Some(v) = v {
+                    mdbx_result(ffi::mdbx_env_set_option(env, opt, v))?;
+                }
+            }
+
             Ok(())
         })() {
             ffi::mdbx_env_close_ex(env, false);
diff --git a/crates/storage/libmdbx-rs/src/error.rs b/crates/storage/libmdbx-rs/src/error.rs
index 1df5a397b2de..a70488b08262 100644
--- a/crates/storage/libmdbx-rs/src/error.rs
+++ b/crates/storage/libmdbx-rs/src/error.rs
@@ -119,8 +119,11 @@ pub enum Error {
     /// Read transaction has been timed out.
     #[error("read transaction has been timed out")]
     ReadTransactionTimeout,
+    /// Permission denied to set up the database.
+    #[error("permission denied to setup database")]
+    Permission,
     /// Unknown error code.
- #[error("unknown error code")] + #[error("unknown error code: {0}")] Other(i32), } @@ -157,6 +160,7 @@ impl Error { ffi::MDBX_EACCESS => Self::Access, ffi::MDBX_TOO_LARGE => Self::TooLarge, ffi::MDBX_EBADSIGN => Self::BadSignature, + ffi::MDBX_EPERM => Self::Permission, other => Self::Other(other), } } @@ -196,6 +200,7 @@ impl Error { Self::WriteTransactionUnsupportedInReadOnlyMode | Self::NestedTransactionsUnsupportedWithWriteMap => ffi::MDBX_EACCESS, Self::ReadTransactionTimeout => -96000, // Custom non-MDBX error code + Self::Permission => ffi::MDBX_EPERM, Self::Other(err_code) => *err_code, } } diff --git a/crates/storage/nippy-jar/Cargo.toml b/crates/storage/nippy-jar/Cargo.toml index 7c391483a70b..9f212bf44e82 100644 --- a/crates/storage/nippy-jar/Cargo.toml +++ b/crates/storage/nippy-jar/Cargo.toml @@ -23,7 +23,7 @@ zstd = { workspace = true, features = ["experimental", "zdict_builder"] } lz4_flex = { version = "0.11", default-features = false } memmap2 = "0.9.4" -bincode = "1.3" +bincode.workspace = true serde = { workspace = true, features = ["derive"] } tracing.workspace = true anyhow = "1.0" diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index ffeb5f3939ed..fc096cf848c6 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -5,7 +5,7 @@ use thiserror::Error; #[derive(Error, Debug)] pub enum NippyJarError { #[error(transparent)] - Internal(#[from] Box), + Internal(#[from] Box), #[error(transparent)] Disconnect(#[from] std::io::Error), #[error(transparent)] diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index a720192d6a05..bdc950aa38a7 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -17,7 +17,7 @@ use memmap2::Mmap; use serde::{Deserialize, Serialize}; use std::{ error::Error as StdError, - fs::{File, OpenOptions}, + fs::File, ops::Range, path::{Path, PathBuf}, }; @@ -250,35 +250,9 @@ impl NippyJar { /// Writes all necessary configuration to file. fn freeze_config(&self) -> Result<(), NippyJarError> { - // Atomic writes are hard: - let mut tmp_path = self.config_path(); - tmp_path.set_extension(".tmp"); - - // Write to temporary file - let mut file = File::create(&tmp_path)?; - bincode::serialize_into(&mut file, &self)?; - - // fsync() file - file.sync_all()?; - - // Rename file, not move - reth_fs_util::rename(&tmp_path, self.config_path())?; - - // fsync() dir - if let Some(parent) = tmp_path.parent() { - //custom_flags() is only available on Windows - #[cfg(windows)] - OpenOptions::new() - .read(true) - .write(true) - .custom_flags(0x02000000) // FILE_FLAG_BACKUP_SEMANTICS - .open(parent)? - .sync_all()?; - - #[cfg(not(windows))] - OpenOptions::new().read(true).open(parent)?.sync_all()?; - } - Ok(()) + Ok(reth_fs_util::atomic_write_file(&self.config_path(), |file| { + bincode::serialize_into(file, &self) + })?) 
} } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 7524fde6a2d7..048353452c0c 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -64,6 +64,7 @@ strum.workspace = true # test-utils once_cell = { workspace = true, optional = true } reth-ethereum-engine-primitives = { workspace = true, optional = true } +alloy-consensus = { workspace = true, optional = true } # parallel utils rayon.workspace = true @@ -81,6 +82,8 @@ rand.workspace = true once_cell.workspace = true eyre.workspace = true +alloy-consensus.workspace = true + [features] optimism = ["reth-primitives/optimism", "reth-execution-types/optimism", "reth-optimism-primitives"] serde = ["reth-execution-types/serde"] @@ -91,4 +94,5 @@ test-utils = [ "reth-chain-state/test-utils", "once_cell", "reth-ethereum-engine-primitives", + "alloy-consensus", ] diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index cb90cc08a61c..f5f838293eb4 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -7,8 +7,8 @@ use crate::{ RequestsProvider, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber}; +use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ BlockState, CanonicalInMemoryState, ForkChoiceNotifications, ForkChoiceSubscriptions, @@ -20,8 +20,8 @@ use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - alloy_primitives::Sealable, Account, Block, BlockWithSenders, EthereumHardforks, Header, - Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, + Account, Block, BlockWithSenders, EthereumHardforks, Header, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; @@ -122,12 +122,18 @@ impl BlockchainProvider2 { (start, end) } - /// Fetches a range of data from both in-memory state and storage. + /// Fetches a range of data from both in-memory state and persistent storage while a predicate + /// is met. /// - /// - `fetch_db_range`: Retrieves a range of items from the database. - /// - `map_block_state_item`: Maps a block number to an item in memory. Stops fetching if `None` - /// is returned. - fn fetch_db_mem_range( + /// Creates a snapshot of the in-memory chain state and database provider to prevent + /// inconsistencies. Splits the range into in-memory and storage sections, prioritizing + /// recent in-memory blocks in case of overlaps. + /// + /// * `fetch_db_range` function (`F`) provides access to the database provider, allowing the + /// user to retrieve the required items from the database using [`RangeInclusive`]. 
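+    ///   Only the storage-backed portion of the requested range is passed to this closure.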
+    /// * `map_block_state_item` function (`G`) provides each block of the range in the in-memory
+    ///   state, allowing the desired data to be selected or filtered.
+    fn get_in_memory_or_storage_by_block_range_while<T, F, G, P>(
         &self,
         range: impl RangeBounds<BlockNumber>,
         fetch_db_range: F,
@@ -135,34 +141,94 @@ impl BlockchainProvider2 {
         mut predicate: P,
     ) -> ProviderResult<Vec<T>>
     where
-        F: FnOnce(RangeInclusive<BlockNumber>, &mut P) -> ProviderResult<Vec<T>>,
-        G: Fn(BlockNumber, &mut P) -> Option<T>,
+        F: FnOnce(
+            &DatabaseProviderRO,
+            RangeInclusive<BlockNumber>,
+            &mut P,
+        ) -> ProviderResult<Vec<T>>,
+        G: Fn(Arc<BlockState>, &mut P) -> Option<T>,
         P: FnMut(&T) -> bool,
     {
+        // Each one provides a snapshot at the time of instantiation, but their order matters.
+        //
+        // If we acquire the database provider first, the in-memory chain may flush blocks to
+        // disk before its own snapshot is taken. Our database provider would then have no
+        // access to the flushed blocks (since it is working under an older view), while the
+        // in-memory state may have deleted them entirely, resulting in gaps in the range.
+        let mut in_memory_chain =
+            self.canonical_in_memory_state.canonical_chain().collect::<Vec<_>>();
+        let db_provider = self.database_provider_ro()?;
+
         let (start, end) = self.convert_range_bounds(range, || {
-            self.canonical_in_memory_state.get_canonical_block_number()
+            // The first block of the in-memory chain is the highest one.
+            in_memory_chain
+                .first()
+                .map(|b| b.number())
+                .unwrap_or_else(|| db_provider.last_block_number().unwrap_or_default())
         });
-        let mut range = start..=end;
-        let mut items = Vec::with_capacity((end - start + 1) as usize);

-        // First, fetch the items from the database
-        let mut db_items = fetch_db_range(range.clone(), &mut predicate)?;
+        if start > end {
+            return Ok(vec![])
+        }
+
+        // Split the range into a storage range and an in-memory range. If the in-memory range
+        // is not needed, drop it early.
+        //
+        // The last block of `in_memory_chain` is the lowest block number.
+        let (in_memory, storage_range) = match in_memory_chain.last().as_ref().map(|b| b.number()) {
+            Some(lowest_memory_block) if lowest_memory_block <= end => {
+                let highest_memory_block =
+                    in_memory_chain.first().as_ref().map(|b| b.number()).expect("qed");
+
+                // The database will, for a time, overlap with in-memory-chain blocks. In case
+                // of a re-org, the overlapping database blocks may belong to a forked chain,
+                // so we should prioritize the in-memory blocks.
+                let in_memory_range =
+                    lowest_memory_block.max(start)..=end.min(highest_memory_block);
+
+                // If the requested range starts inside the in-memory range, remove the lowest
+                // blocks that fall outside of it.
+                in_memory_chain.truncate(
+                    in_memory_chain
+                        .len()
+                        .saturating_sub(start.saturating_sub(lowest_memory_block) as usize),
+                );
+
+                let storage_range =
+                    (lowest_memory_block > start).then(|| start..=lowest_memory_block - 1);

-        if !db_items.is_empty() {
+                (Some((in_memory_chain, in_memory_range)), storage_range)
+            }
+            _ => {
+                // Drop the in-memory chain so we don't hold blocks in memory.
+                drop(in_memory_chain);
+
+                (None, Some(start..=end))
+            }
+        };
+
+        let mut items = Vec::with_capacity((end - start + 1) as usize);
+
+        if let Some(storage_range) = storage_range {
+            let mut db_items = fetch_db_range(&db_provider, storage_range.clone(), &mut predicate)?;
             items.append(&mut db_items);

-            // Advance the range iterator by the number of items fetched from the database
-            range.nth(items.len() - 1);
+            // If the number of items differs from the expected count, the predicate was not
+            // met, so we return what we have.
+            if items.len() as u64 != storage_range.end() - storage_range.start() + 1 {
+                return Ok(items)
+            }
         }

-        // Fetch the remaining items from the in-memory state
-        for num in range {
-            // TODO: there might be an update between loop iterations, we
-            // need to handle that situation.
-            if let Some(item) = map_block_state_item(num, &mut predicate) {
-                items.push(item);
-            } else {
-                break;
+        if let Some((in_memory_chain, in_memory_range)) = in_memory {
+            for (num, block) in in_memory_range.zip(in_memory_chain.into_iter().rev()) {
+                debug_assert!(num == block.number());
+                if let Some(item) = map_block_state_item(block, &mut predicate) {
+                    items.push(item);
+                } else {
+                    break
+                }
             }
         }

@@ -172,73 +238,206 @@ impl BlockchainProvider2 {
     /// This uses a given [`BlockState`] to initialize a state provider for that block.
     fn block_state_provider(
         &self,
-        state: impl AsRef<BlockState>,
+        state: &BlockState,
     ) -> ProviderResult {
-        let state = state.as_ref();
         let anchor_hash = state.anchor().hash;
         let latest_historical = self.database.history_by_block_hash(anchor_hash)?;
-        Ok(self.canonical_in_memory_state.state_provider(state.hash(), latest_historical))
+        Ok(self.canonical_in_memory_state.state_provider_from_state(state, latest_historical))
     }

-    /// Returns:
-    /// 1. The block state as [`Some`] if the block is in memory, and [`None`] if the block is in
-    ///    database.
-    /// 2. The in-block transaction index.
-    fn block_state_by_tx_id(
+    /// Fetches data from either in-memory state or persistent storage for a range of transactions.
+    ///
+    /// * `fetch_from_db`: receives a [`DatabaseProviderRO`] and the storage-specific range.
+    /// * `fetch_from_block_state`: receives a [`RangeInclusive`] of elements that should be fetched
+    ///   from [`BlockState`]. [`RangeInclusive`] is necessary to handle partial look-ups of a
+    ///   block.
+    fn get_in_memory_or_storage_by_tx_range<T, S, M>(
         &self,
-        provider: &DatabaseProviderRO,
-        id: TxNumber,
-    ) -> ProviderResult<Option<(Option<Arc<BlockState>>, usize)>> {
-        // Get the last block number stored in the database
-        let last_database_block_number = provider.last_block_number()?;
+        range: impl RangeBounds<TxNumber>,
+        fetch_from_db: S,
+        fetch_from_block_state: M,
+    ) -> ProviderResult<Vec<T>>
+    where
+        S: FnOnce(
+            DatabaseProviderRO,
+            RangeInclusive<TxNumber>,
+        ) -> ProviderResult<Vec<T>>,
+        M: Fn(RangeInclusive<usize>, Arc<BlockState>) -> ProviderResult<Vec<T>>,
+    {
+        let in_mem_chain = self.canonical_in_memory_state.canonical_chain().collect::<Vec<_>>();
+        let provider = self.database.provider()?;
+
+        // Get the last block number stored in storage that does NOT overlap with the in-memory
+        // chain.
+        let mut last_database_block_number = provider.last_block_number()?;
+        if let Some(lowest_in_mem_block) = in_mem_chain.last() {
+            if lowest_in_mem_block.number() <= last_database_block_number {
+                last_database_block_number = lowest_in_mem_block.number().saturating_sub(1);
+            }
+        }
+
+        // Get the next tx number for the last block stored in storage, which marks the start of
+        // the in-memory state.
+        let last_block_body_index = provider
+            .block_body_indices(last_database_block_number)?
+            .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?;
+        let mut in_memory_tx_num = last_block_body_index.next_tx_num();
+
+        let (start, end) = self.convert_range_bounds(range, || {
+            in_mem_chain
+                .iter()
+                .map(|b| b.block_ref().block().body.transactions.len() as u64)
+                .sum::<u64>() +
+                last_block_body_index.last_tx_num()
+        });
+
+        if start > end {
+            return Ok(vec![])
+        }
+
+        let mut tx_range = start..=end;
+
+        // If the range is entirely before the first in-memory transaction number, fetch from
+        // storage.
+        if *tx_range.end() < in_memory_tx_num {
+            return fetch_from_db(provider, tx_range);
+        }
+
+        let mut items = Vec::with_capacity((tx_range.end() - tx_range.start() + 1) as usize);
+
+        // If the range spans storage and memory, get elements from storage first.
+        if *tx_range.start() < in_memory_tx_num {
+            // Determine the range that needs to be fetched from storage.
+            let db_range = *tx_range.start()..=in_memory_tx_num.saturating_sub(1);
+
+            // Set the remaining transaction range for in-memory
+            tx_range = in_memory_tx_num..=*tx_range.end();
+
+            items.extend(fetch_from_db(provider, db_range)?);
+        }
+
+        // Iterate from the lowest to the highest block of the in-memory chain.
+        for block_state in in_mem_chain.into_iter().rev() {
+            let block_tx_count = block_state.block_ref().block().body.transactions.len();
+            let remaining = (tx_range.end() - tx_range.start() + 1) as usize;
+
+            // This should only be non-zero on the first iteration, in case of a partial range.
+            let skip = (tx_range.start() - in_memory_tx_num) as usize;
+
+            items.extend(fetch_from_block_state(
+                skip..=(remaining.min(block_tx_count) - 1),
+                block_state,
+            )?);
+
+            in_memory_tx_num += block_tx_count as u64;
+
+            // Break if the range has been fully processed.
+            if in_memory_tx_num > *tx_range.end() {
+                break
+            }
+
+            // Set the updated range.
+            tx_range = in_memory_tx_num..=*tx_range.end();
+        }
+
+        Ok(items)
+    }
+
+    /// Fetches data from either in-memory state or persistent storage by transaction
+    /// [`HashOrNumber`].
+    fn get_in_memory_or_storage_by_tx<T, S, M>(
+        &self,
+        id: HashOrNumber,
+        fetch_from_db: S,
+        fetch_from_block_state: M,
+    ) -> ProviderResult<Option<T>>
+    where
+        S: FnOnce(DatabaseProviderRO) -> ProviderResult<Option<T>>,
+        M: Fn(usize, TxNumber, Arc<BlockState>) -> ProviderResult<Option<T>>,
+    {
+        // Order of instantiation matters. See `get_in_memory_or_storage_by_block_range_while`
+        // for more information.
+        let in_mem_chain = self.canonical_in_memory_state.canonical_chain().collect::<Vec<_>>();
+        let provider = self.database.provider()?;
+
+        // Get the last block number stored in the database that does NOT overlap with the
+        // in-memory chain.
+        let mut last_database_block_number = provider.last_block_number()?;
+        if let Some(lowest_in_mem_block) = in_mem_chain.last() {
+            if lowest_in_mem_block.number() <= last_database_block_number {
+                last_database_block_number = lowest_in_mem_block.number().saturating_sub(1);
+            }
+        }

         // Get the next tx number for the last block stored in the database and consider it the
         // first tx number of the in-memory state
-        let Some(last_block_body_index) =
-            provider.block_body_indices(last_database_block_number)?
-        else {
-            return Ok(None);
-        };
+        let last_block_body_index = provider
+            .block_body_indices(last_database_block_number)?
+ .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; let mut in_memory_tx_num = last_block_body_index.next_tx_num(); - if id < in_memory_tx_num { - // If the transaction number is less than the first in-memory transaction number, make a - // database lookup - let Some(block_number) = provider.transaction_block(id)? else { return Ok(None) }; - let Some(body_index) = provider.block_body_indices(block_number)? else { - return Ok(None) - }; - let tx_index = body_index.last_tx_num() - id; - Ok(Some((None, tx_index as usize))) - } else { - // Otherwise, iterate through in-memory blocks and find the transaction with the - // matching number - - let first_in_memory_block_number = last_database_block_number.saturating_add(1); - let last_in_memory_block_number = - self.canonical_in_memory_state.get_canonical_block_number(); - - for block_number in first_in_memory_block_number..=last_in_memory_block_number { - let Some(block_state) = - self.canonical_in_memory_state.state_by_number(block_number) - else { - return Ok(None); - }; - - let executed_block = block_state.block(); - let block = executed_block.block(); - - for tx_index in 0..block.body.transactions.len() { - if id == in_memory_tx_num { - return Ok(Some((Some(block_state), tx_index))) - } + // If the transaction number is less than the first in-memory transaction number, make a + // database lookup + if let HashOrNumber::Number(id) = id { + if id < in_memory_tx_num { + return fetch_from_db(provider) + } + } + + // Iterate from the lowest block to the highest + for block_state in in_mem_chain.into_iter().rev() { + let executed_block = block_state.block(); + let block = executed_block.block(); - in_memory_tx_num += 1; + for tx_index in 0..block.body.transactions.len() { + match id { + HashOrNumber::Hash(tx_hash) => { + if tx_hash == block.body.transactions[tx_index].hash() { + return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) + } + } + HashOrNumber::Number(id) => { + if id == in_memory_tx_num { + return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) + } + } } + + in_memory_tx_num += 1; } + } - Ok(None) + // Not found in-memory, so check database. + if let HashOrNumber::Hash(_) = id { + return fetch_from_db(provider) } + + Ok(None) + } + + /// Fetches data from either in-memory state or persistent storage by [`BlockHashOrNumber`]. + fn get_in_memory_or_storage_by_block( + &self, + id: BlockHashOrNumber, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult + where + S: FnOnce(DatabaseProviderRO) -> ProviderResult, + M: Fn(Arc) -> ProviderResult, + { + let block_state = match id { + BlockHashOrNumber::Hash(block_hash) => { + self.canonical_in_memory_state.state_by_hash(block_hash) + } + BlockHashOrNumber::Number(block_number) => { + self.canonical_in_memory_state.state_by_number(block_number) + } + }; + + if let Some(block_state) = block_state { + return fetch_from_block_state(block_state) + } + fetch_from_db(self.database_provider_ro()?) 
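+        // E.g. (illustrative): with this helper a reader such as `header` reduces to
+        //
+        //   self.get_in_memory_or_storage_by_block(
+        //       (*block_hash).into(),
+        //       |db_provider| db_provider.header(block_hash),
+        //       |block_state| Ok(Some(block_state.block().block().header.header().clone())),
+        //   )
+        //
+        // keeping the memory-vs-storage decision in one place (see the impls below).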
    }
}

@@ -284,19 +483,19 @@ impl StaticFileProviderFactory for BlockchainProvider2
 impl HeaderProvider for BlockchainProvider2 {
     fn header(&self, block_hash: &BlockHash) -> ProviderResult<Option<Header>> {
-        if let Some(block_state) = self.canonical_in_memory_state.state_by_hash(*block_hash) {
-            return Ok(Some(block_state.block().block().header.header().clone()));
-        }
-
-        self.database.header(block_hash)
+        self.get_in_memory_or_storage_by_block(
+            (*block_hash).into(),
+            |db_provider| db_provider.header(block_hash),
+            |block_state| Ok(Some(block_state.block().block().header.header().clone())),
+        )
     }

     fn header_by_number(&self, num: BlockNumber) -> ProviderResult<Option<Header>> {
-        if let Some(block_state) = self.canonical_in_memory_state.state_by_number(num) {
-            return Ok(Some(block_state.block().block().header.header().clone()));
-        }
-
-        self.database.header_by_number(num)
+        self.get_in_memory_or_storage_by_block(
+            num.into(),
+            |db_provider| db_provider.header_by_number(num),
+            |block_state| Ok(Some(block_state.block().block().header.header().clone())),
+        )
     }

     fn header_td(&self, hash: &BlockHash) -> ProviderResult<Option<U256>> {
@@ -308,57 +507,52 @@ impl HeaderProvider for BlockchainProvider2 {
     }

     fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult<Option<U256>> {
-        // If the TD is recorded on disk, we can just return that
-        if let Some(td) = self.database.header_td_by_number(number)? {
-            Ok(Some(td))
-        } else if self.canonical_in_memory_state.hash_by_number(number).is_some() {
-            // Otherwise, if the block exists in memory, we should return a TD for it.
+        let number = if self.canonical_in_memory_state.hash_by_number(number).is_some() {
+            // If the block exists in memory, we should return a TD for it.
             //
             // The canonical in memory state should only store post-merge blocks. Post-merge blocks
             // have zero difficulty. This means we can use the total difficulty for the last
-            // persisted block number.
-            let last_persisted_block_number = self.database.last_block_number()?;
-            self.database.header_td_by_number(last_persisted_block_number)
+            // finalized block number, if present (so that we are not affected by reorgs); if not,
+            // the last number in the database will be used.
+            if let Some(last_finalized_num_hash) =
+                self.canonical_in_memory_state.get_finalized_num_hash()
+            {
+                last_finalized_num_hash.number
+            } else {
+                self.database.last_block_number()?
+            }
         } else {
-            // If the block does not exist in memory, and does not exist on-disk, we should not
-            // return a TD for it.
- Ok(None) - } + // Otherwise, return what we have on disk for the input block + number + }; + self.database.header_td_by_number(number) } fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { - self.fetch_db_mem_range( + self.get_in_memory_or_storage_by_block_range_while( range, - |range, _| self.database.headers_range(range), - |num, _| { - self.canonical_in_memory_state - .state_by_number(num) - .map(|block_state| block_state.block().block().header.header().clone()) - }, + |db_provider, range, _| db_provider.headers_range(range), + |block_state, _| Some(block_state.block().block().header.header().clone()), |_| true, ) } fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - if let Some(block_state) = self.canonical_in_memory_state.state_by_number(number) { - return Ok(Some(block_state.block().block().header.clone())); - } - - self.database.sealed_header(number) + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.sealed_header(number), + |block_state| Ok(Some(block_state.block().block().header.clone())), + ) } fn sealed_headers_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.fetch_db_mem_range( + self.get_in_memory_or_storage_by_block_range_while( range, - |range, _| self.database.sealed_headers_range(range), - |num, _| { - self.canonical_in_memory_state - .state_by_number(num) - .map(|block_state| block_state.block().block().header.clone()) - }, + |db_provider, range, _| db_provider.sealed_headers_range(range), + |block_state, _| Some(block_state.block().block().header.clone()), |_| true, ) } @@ -368,14 +562,11 @@ impl HeaderProvider for BlockchainProvider2 { range: impl RangeBounds, predicate: impl FnMut(&SealedHeader) -> bool, ) -> ProviderResult> { - self.fetch_db_mem_range( + self.get_in_memory_or_storage_by_block_range_while( range, - |range, predicate| self.database.sealed_headers_while(range, predicate), - |num, predicate| { - self.canonical_in_memory_state - .state_by_number(num) - .map(|block_state| block_state.block().block().header.clone()) - .filter(|header| predicate(header)) + |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), + |block_state, predicate| { + Some(block_state.block().block().header.clone()).filter(|header| predicate(header)) }, predicate, ) @@ -384,11 +575,11 @@ impl HeaderProvider for BlockchainProvider2 { impl BlockHashReader for BlockchainProvider2 { fn block_hash(&self, number: u64) -> ProviderResult> { - if let Some(block_state) = self.canonical_in_memory_state.state_by_number(number) { - return Ok(Some(block_state.hash())); - } - - self.database.block_hash(number) + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.block_hash(number), + |block_state| Ok(Some(block_state.hash())), + ) } fn canonical_hashes_range( @@ -396,14 +587,13 @@ impl BlockHashReader for BlockchainProvider2 { start: BlockNumber, end: BlockNumber, ) -> ProviderResult> { - self.fetch_db_mem_range( - start..=end, - |range, _| self.database.canonical_hashes_range(*range.start(), *range.end()), - |num, _| { - self.canonical_in_memory_state - .state_by_number(num) - .map(|block_state| block_state.hash()) + self.get_in_memory_or_storage_by_block_range_while( + start..end, + |db_provider, inclusive_range, _| { + db_provider + .canonical_hashes_range(*inclusive_range.start(), *inclusive_range.end() + 1) }, + |block_state, _| Some(block_state.hash()), |_| true, ) } @@ -423,11 +613,11 @@ impl BlockNumReader for BlockchainProvider2 { } fn 
block_number(&self, hash: B256) -> ProviderResult> { - if let Some(block_state) = self.canonical_in_memory_state.state_by_hash(hash) { - return Ok(Some(block_state.number())); - } - - self.database.block_number(hash) + self.get_in_memory_or_storage_by_block( + hash.into(), + |db_provider| db_provider.block_number(hash), + |block_state| Ok(Some(block_state.number())), + ) } } @@ -449,13 +639,13 @@ impl BlockReader for BlockchainProvider2 { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { match source { BlockSource::Any | BlockSource::Canonical => { - // check in memory first // Note: it's fine to return the unsealed block because the caller already has // the hash - if let Some(block_state) = self.canonical_in_memory_state.state_by_hash(hash) { - return Ok(Some(block_state.block().block().clone().unseal())); - } - self.database.find_block_by_hash(hash, source) + self.get_in_memory_or_storage_by_block( + hash.into(), + |db_provider| db_provider.find_block_by_hash(hash, source), + |block_state| Ok(Some(block_state.block().block().clone().unseal())), + ) } BlockSource::Pending => { Ok(self.canonical_in_memory_state.pending_block().map(|block| block.unseal())) @@ -464,15 +654,11 @@ impl BlockReader for BlockchainProvider2 { } fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { - match id { - BlockHashOrNumber::Hash(hash) => self.find_block_by_hash(hash, BlockSource::Any), - BlockHashOrNumber::Number(num) => { - if let Some(block_state) = self.canonical_in_memory_state.state_by_number(num) { - return Ok(Some(block_state.block().block().clone().unseal())); - } - self.database.block_by_number(num) - } - } + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.block(id), + |block_state| Ok(Some(block_state.block().block().clone().unseal())), + ) } fn pending_block(&self) -> ProviderResult> { @@ -488,22 +674,22 @@ impl BlockReader for BlockchainProvider2 { } fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - match self.convert_hash_or_number(id)? { - Some(number) => { - // If the Paris (Merge) hardfork block is known and block is after it, return empty - // ommers. - if self.database.chain_spec().final_paris_total_difficulty(number).is_some() { - return Ok(Some(Vec::new())); + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.ommers(id), + |block_state| { + if self + .database + .chain_spec() + .final_paris_total_difficulty(block_state.number()) + .is_some() + { + return Ok(Some(Vec::new())) } - // Check in-memory state first - self.canonical_in_memory_state - .state_by_number(number) - .map(|o| o.block().block().body.ommers.clone()) - .map_or_else(|| self.database.ommers(id), |ommers| Ok(Some(ommers))) - } - None => self.database.ommers(id), - } + Ok(Some(block_state.block().block().body.ommers.clone())) + }, + ) } fn block_body_indices( @@ -522,7 +708,7 @@ impl BlockReader for BlockchainProvider2 { let mut stored_indices = self .database .block_body_indices(anchor_num)? 
- .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(anchor_num))?; + .ok_or(ProviderError::BlockBodyIndicesNotFound(anchor_num))?; stored_indices.first_tx_num = stored_indices.next_tx_num(); for state in parent_chain { @@ -551,23 +737,11 @@ impl BlockReader for BlockchainProvider2 { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> ProviderResult> { - match id { - BlockHashOrNumber::Hash(hash) => { - if let Some(block_state) = self.canonical_in_memory_state.state_by_hash(hash) { - let block = block_state.block().block().clone(); - let senders = block_state.block().senders().clone(); - return Ok(Some(BlockWithSenders { block: block.unseal(), senders })); - } - } - BlockHashOrNumber::Number(num) => { - if let Some(block_state) = self.canonical_in_memory_state.state_by_number(num) { - let block = block_state.block().block().clone(); - let senders = block_state.block().senders().clone(); - return Ok(Some(BlockWithSenders { block: block.unseal(), senders })); - } - } - } - self.database.block_with_senders(id, transaction_kind) + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.block_with_senders(id, transaction_kind), + |block_state| Ok(Some(block_state.block_with_senders())), + ) } fn sealed_block_with_senders( @@ -575,34 +749,18 @@ impl BlockReader for BlockchainProvider2 { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> ProviderResult> { - match id { - BlockHashOrNumber::Hash(hash) => { - if let Some(block_state) = self.canonical_in_memory_state.state_by_hash(hash) { - let block = block_state.block().block().clone(); - let senders = block_state.block().senders().clone(); - return Ok(Some(SealedBlockWithSenders { block, senders })); - } - } - BlockHashOrNumber::Number(num) => { - if let Some(block_state) = self.canonical_in_memory_state.state_by_number(num) { - let block = block_state.block().block().clone(); - let senders = block_state.block().senders().clone(); - return Ok(Some(SealedBlockWithSenders { block, senders })); - } - } - } - self.database.sealed_block_with_senders(id, transaction_kind) + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), + |block_state| Ok(Some(block_state.sealed_block_with_senders())), + ) } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - self.fetch_db_mem_range( + self.get_in_memory_or_storage_by_block_range_while( range, - |range, _| self.database.block_range(range), - |num, _| { - self.canonical_in_memory_state - .state_by_number(num) - .map(|block_state| block_state.block().block().clone().unseal()) - }, + |db_provider, range, _| db_provider.block_range(range), + |block_state, _| Some(block_state.block().block().clone().unseal()), |_| true, ) } @@ -611,16 +769,10 @@ impl BlockReader for BlockchainProvider2 { &self, range: RangeInclusive, ) -> ProviderResult> { - self.fetch_db_mem_range( + self.get_in_memory_or_storage_by_block_range_while( range, - |range, _| self.database.block_with_senders_range(range), - |num, _| { - self.canonical_in_memory_state.state_by_number(num).map(|block_state| { - let block = block_state.block().block().clone(); - let senders = block_state.block().senders().clone(); - BlockWithSenders { block: block.unseal(), senders } - }) - }, + |db_provider, range, _| db_provider.block_with_senders_range(range), + |block_state, _| Some(block_state.block_with_senders()), |_| true, ) } @@ -629,16 +781,10 @@ impl BlockReader for BlockchainProvider2 { &self, range: RangeInclusive, ) -> 
ProviderResult> { - self.fetch_db_mem_range( + self.get_in_memory_or_storage_by_block_range_while( range, - |range, _| self.database.sealed_block_with_senders_range(range), - |num, _| { - self.canonical_in_memory_state.state_by_number(num).map(|block_state| { - let block = block_state.block().block().clone(); - let senders = block_state.block().senders().clone(); - SealedBlockWithSenders { block, senders } - }) - }, + |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), + |block_state, _| Some(block_state.sealed_block_with_senders()), |_| true, ) } @@ -646,81 +792,41 @@ impl BlockReader for BlockchainProvider2 { impl TransactionsProvider for BlockchainProvider2 { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { - // First, check the database - if let Some(id) = self.database.transaction_id(tx_hash)? { - return Ok(Some(id)) - } - - // If the transaction is not found in the database, check the in-memory state - - // Get the last transaction number stored in the database - let last_database_block_number = self.database.last_block_number()?; - let last_database_tx_id = self - .database - .block_body_indices(last_database_block_number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))? - .last_tx_num(); - - // Find the transaction in the in-memory state with the matching hash, and return its - // number - let mut in_memory_tx_id = last_database_tx_id + 1; - for block_number in last_database_block_number.saturating_add(1)..= - self.canonical_in_memory_state.get_canonical_block_number() - { - // TODO: there might be an update between loop iterations, we - // need to handle that situation. - let block_state = self - .canonical_in_memory_state - .state_by_number(block_number) - .ok_or(ProviderError::StateForNumberNotFound(block_number))?; - for tx in block_state.block().block().body.transactions() { - if tx.hash() == tx_hash { - return Ok(Some(in_memory_tx_id)) - } - - in_memory_tx_id += 1; - } - } - - Ok(None) + self.get_in_memory_or_storage_by_tx( + tx_hash.into(), + |db_provider| db_provider.transaction_id(tx_hash), + |_, tx_number, _| Ok(Some(tx_number)), + ) } fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { - let provider = self.database.provider()?; - let Some((block_state, tx_index)) = self.block_state_by_tx_id(&provider, id)? else { - return Ok(None) - }; - - if let Some(block_state) = block_state { - let transaction = block_state.block().block().body.transactions.get(tx_index).cloned(); - Ok(transaction) - } else { - provider.transaction_by_id(id) - } + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_by_id(id), + |tx_index, _, block_state| { + Ok(block_state.block().block().body.transactions.get(tx_index).cloned()) + }, + ) } fn transaction_by_id_no_hash( &self, id: TxNumber, ) -> ProviderResult> { - let provider = self.database.provider()?; - let Some((block_state, tx_index)) = self.block_state_by_tx_id(&provider, id)? 
else { - return Ok(None) - }; - - if let Some(block_state) = block_state { - let transaction = block_state - .block() - .block() - .body - .transactions - .get(tx_index) - .cloned() - .map(Into::into); - Ok(transaction) - } else { - provider.transaction_by_id_no_hash(id) - } + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_by_id_no_hash(id), + |tx_index, _, block_state| { + Ok(block_state + .block() + .block() + .body + .transactions + .get(tx_index) + .cloned() + .map(Into::into)) + }, + ) } fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { @@ -745,30 +851,22 @@ impl TransactionsProvider for BlockchainProvider2 { } fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - let provider = self.database.provider()?; - Ok(self - .block_state_by_tx_id(&provider, id)? - .and_then(|(block_state, _)| block_state) - .map(|block_state| block_state.block().block().number)) + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_block(id), + |_, _, block_state| Ok(Some(block_state.block().block().number)), + ) } fn transactions_by_block( &self, id: BlockHashOrNumber, ) -> ProviderResult>> { - match id { - BlockHashOrNumber::Hash(hash) => { - if let Some(block_state) = self.canonical_in_memory_state.state_by_hash(hash) { - return Ok(Some(block_state.block().block().body.transactions.clone())); - } - } - BlockHashOrNumber::Number(number) => { - if let Some(block_state) = self.canonical_in_memory_state.state_by_number(number) { - return Ok(Some(block_state.block().block().body.transactions.clone())); - } - } - } - self.database.transactions_by_block(id) + self.get_in_memory_or_storage_by_block( + id, + |provider| provider.transactions_by_block(id), + |block_state| Ok(Some(block_state.block().block().body.transactions.clone())), + ) } fn transactions_by_block_range( @@ -810,50 +908,48 @@ impl TransactionsProvider for BlockchainProvider2 { &self, range: impl RangeBounds, ) -> ProviderResult> { - self.database.transactions_by_tx_range(range) + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), + |index_range, block_state| { + Ok(block_state.block_ref().block().body.transactions[index_range] + .iter() + .cloned() + .map(Into::into) + .collect()) + }, + ) } fn senders_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.database.senders_by_tx_range(range) + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.senders_by_tx_range(db_range), + |index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()), + ) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - let provider = self.database.provider()?; - let Some((block_state, tx_index)) = self.block_state_by_tx_id(&provider, id)? 
else { - return Ok(None) - }; - - if let Some(block_state) = block_state { - let sender = block_state - .block() - .block() - .body - .transactions - .get(tx_index) - .and_then(|transaction| transaction.recover_signer()); - Ok(sender) - } else { - provider.transaction_sender(id) - } + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_sender(id), + |tx_index, _, block_state| Ok(block_state.block().senders.get(tx_index).copied()), + ) } } impl ReceiptProvider for BlockchainProvider2 { fn receipt(&self, id: TxNumber) -> ProviderResult> { - let provider = self.database.provider()?; - let Some((block_state, tx_index)) = self.block_state_by_tx_id(&provider, id)? else { - return Ok(None) - }; - - if let Some(block_state) = block_state { - let receipt = block_state.executed_block_receipts().get(tx_index).cloned(); - Ok(receipt) - } else { - provider.receipt(id) - } + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.receipt(id), + |tx_index, _, block_state| { + Ok(block_state.executed_block_receipts().get(tx_index).cloned()) + }, + ) } fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { @@ -880,27 +976,24 @@ impl ReceiptProvider for BlockchainProvider2 { } fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { - match block { - BlockHashOrNumber::Hash(hash) => { - if let Some(block_state) = self.canonical_in_memory_state.state_by_hash(hash) { - return Ok(Some(block_state.executed_block_receipts())); - } - } - BlockHashOrNumber::Number(number) => { - if let Some(block_state) = self.canonical_in_memory_state.state_by_number(number) { - return Ok(Some(block_state.executed_block_receipts())); - } - } - } - - self.database.receipts_by_block(block) + self.get_in_memory_or_storage_by_block( + block, + |db_provider| db_provider.receipts_by_block(block), + |block_state| Ok(Some(block_state.executed_block_receipts())), + ) } fn receipts_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.database.receipts_by_tx_range(range) + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), + |index_range, block_state| { + Ok(block_state.executed_block_receipts().drain(index_range).collect()) + }, + ) } } @@ -945,25 +1038,23 @@ impl WithdrawalsProvider for BlockchainProvider2 { return Ok(None) } - let Some(number) = self.convert_hash_or_number(id)? else { return Ok(None) }; - - if let Some(block) = self.canonical_in_memory_state.state_by_number(number) { - Ok(block.block().block().body.withdrawals.clone()) - } else { - self.database.withdrawals_by_block(id, timestamp) - } + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.withdrawals_by_block(id, timestamp), + |block_state| Ok(block_state.block().block().body.withdrawals.clone()), + ) } fn latest_withdrawal(&self) -> ProviderResult> { let best_block_num = self.best_block_number()?; - // If the best block is in memory, use that. Otherwise, use the latest withdrawal in the - // database. 
- if let Some(block) = self.canonical_in_memory_state.state_by_number(best_block_num) { - Ok(block.block().block().body.withdrawals.clone().and_then(|mut w| w.pop())) - } else { - self.database.latest_withdrawal() - } + self.get_in_memory_or_storage_by_block( + best_block_num.into(), + |db_provider| db_provider.latest_withdrawal(), + |block_state| { + Ok(block_state.block().block().body.withdrawals.clone().and_then(|mut w| w.pop())) + }, + ) } } @@ -976,12 +1067,12 @@ impl RequestsProvider for BlockchainProvider2 { if !self.database.chain_spec().is_prague_active_at_timestamp(timestamp) { return Ok(None) } - let Some(number) = self.convert_hash_or_number(id)? else { return Ok(None) }; - if let Some(block) = self.canonical_in_memory_state.state_by_number(number) { - Ok(block.block().block().body.requests.clone()) - } else { - self.database.requests_by_block(id, timestamp) - } + + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.requests_by_block(id, timestamp), + |block_state| Ok(block_state.block().block().body.requests.clone()), + ) } } @@ -1091,7 +1182,7 @@ impl StateProviderFactory for BlockchainProvider2 { // use latest state provider if the head state exists if let Some(state) = self.canonical_in_memory_state.head_state() { trace!(target: "providers::blockchain", "Using head state for latest state provider"); - Ok(self.block_state_provider(state)?.boxed()) + Ok(self.block_state_provider(&state)?.boxed()) } else { trace!(target: "providers::blockchain", "Using database state for latest state provider"); self.database.latest() @@ -1112,17 +1203,18 @@ impl StateProviderFactory for BlockchainProvider2 { fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - if let Ok(state) = self.database.history_by_block_hash(block_hash) { - // This could be tracked by a block in the database block - Ok(state) - } else if let Some(state) = self.canonical_in_memory_state.state_by_hash(block_hash) { - // ... 
or this could be tracked by the in memory state - let state_provider = self.block_state_provider(state)?; - Ok(Box::new(state_provider)) - } else { - // if we couldn't find it anywhere, then we should return an error - Err(ProviderError::StateForHashNotFound(block_hash)) - } + + self.get_in_memory_or_storage_by_block( + block_hash.into(), + |_| { + // TODO(joshie): port history_by_block_hash to DatabaseProvider and use db_provider + self.database.history_by_block_hash(block_hash) + }, + |block_state| { + let state_provider = self.block_state_provider(&block_state)?; + Ok(Box::new(state_provider)) + }, + ) } fn state_by_block_hash(&self, hash: BlockHash) -> ProviderResult { @@ -1146,12 +1238,9 @@ impl StateProviderFactory for BlockchainProvider2 { fn pending(&self) -> ProviderResult { trace!(target: "providers::blockchain", "Getting provider for pending state"); - if let Some(block) = self.canonical_in_memory_state.pending_block_num_hash() { - let historical = self.database.history_by_block_hash(block.hash)?; - let pending_provider = - self.canonical_in_memory_state.state_provider(block.hash, historical); - - return Ok(Box::new(pending_provider)); + if let Some(pending) = self.canonical_in_memory_state.pending_state() { + // we have a pending block + return Ok(Box::new(self.block_state_provider(&pending)?)); } // fallback to latest state if the pending block is not available @@ -1159,13 +1248,9 @@ impl StateProviderFactory for BlockchainProvider2 { } fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult> { - let historical = self.database.history_by_block_hash(block_hash)?; - if let Some(block) = self.canonical_in_memory_state.pending_block_num_hash() { - if block.hash == block_hash { - let pending_provider = - self.canonical_in_memory_state.state_provider(block_hash, historical); - - return Ok(Some(Box::new(pending_provider))) + if let Some(pending) = self.canonical_in_memory_state.pending_state() { + if pending.hash() == block_hash { + return Ok(Some(Box::new(self.block_state_provider(&pending)?))); } } Ok(None) @@ -2203,6 +2288,28 @@ mod tests { assert_eq!(retrieved_block, &expected_block.clone().unseal()); } + // Check for partial in-memory ranges + let blocks = provider.block_range(start_block_number + 1..=end_block_number)?; + assert_eq!(blocks.len(), in_memory_blocks.len() - 1); + for (retrieved_block, expected_block) in blocks.iter().zip(in_memory_blocks.iter().skip(1)) + { + assert_eq!(retrieved_block, &expected_block.clone().unseal()); + } + + let blocks = provider.block_range(start_block_number + 1..=end_block_number - 1)?; + assert_eq!(blocks.len(), in_memory_blocks.len() - 2); + for (retrieved_block, expected_block) in blocks.iter().zip(in_memory_blocks.iter().skip(1)) + { + assert_eq!(retrieved_block, &expected_block.clone().unseal()); + } + + let blocks = provider.block_range(start_block_number + 1..=end_block_number + 1)?; + assert_eq!(blocks.len(), in_memory_blocks.len() - 1); + for (retrieved_block, expected_block) in blocks.iter().zip(in_memory_blocks.iter().skip(1)) + { + assert_eq!(retrieved_block, &expected_block.clone().unseal()); + } + Ok(()) } @@ -2650,6 +2757,10 @@ mod tests { let database_block = database_blocks.first().unwrap().clone(); let in_memory_block = in_memory_blocks.last().unwrap().clone(); + // make sure that the finalized block is on db + let finalized_block = database_blocks.get(database_blocks.len() - 3).unwrap(); + provider.set_finalized(finalized_block.header.clone()); + let blocks = [database_blocks, in_memory_blocks].concat(); 
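        // `blocks` spans both the persisted and the in-memory segments, so the
        // assertions below exercise lookups on each side of the storage/memory boundary.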
assert_eq!(provider.header(&database_block.hash())?, Some(database_block.header().clone())); @@ -3766,10 +3877,7 @@ mod tests { index: 0, block_hash: in_memory_blocks[0].header.hash(), block_number: in_memory_blocks[0].header.number, - base_fee: in_memory_blocks[0] - .header - .base_fee_per_gas - .map(|base_fee_per_gas| base_fee_per_gas as u64), + base_fee: in_memory_blocks[0].header.base_fee_per_gas, excess_blob_gas: None, timestamp: in_memory_blocks[0].header.timestamp, }; @@ -3797,10 +3905,7 @@ mod tests { index: 0, block_hash: database_blocks[0].header.hash(), block_number: database_blocks[0].header.number, - base_fee: database_blocks[0] - .header - .base_fee_per_gas - .map(|base_fee_per_gas| base_fee_per_gas as u64), + base_fee: database_blocks[0].header.base_fee_per_gas, excess_blob_gas: None, timestamp: database_blocks[0].header.timestamp, }; @@ -3868,10 +3973,7 @@ mod tests { // Retrieve the block number for this transaction let result = provider.transaction_block(tx_id)?; - assert!( - result.is_none(), - "`block_state_by_tx_id` should be None if the block is in database" - ); + assert_eq!(Some(0), result, "The block number should match the database block number"); // Ensure that invalid transaction ID returns None let result = provider.transaction_block(67675657)?; @@ -4077,34 +4179,48 @@ mod tests { #[test] fn test_senders_by_tx_range() -> eyre::Result<()> { let mut rng = generators::rng(); - let (provider, database_blocks, _, _) = provider_with_random_blocks( + let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( &mut rng, TEST_BLOCKS_COUNT, - 0, + TEST_BLOCKS_COUNT, BlockRangeParams { tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, ..Default::default() }, )?; - // Define a valid transaction range within the database - let start_tx_num = 0; - let end_tx_num = 1; + let db_tx_count = + database_blocks.iter().map(|b| b.body.transactions.len()).sum::() as u64; + let in_mem_tx_count = + in_memory_blocks.iter().map(|b| b.body.transactions.len()).sum::() as u64; - // Retrieve the senders for this transaction number range - let result = provider.senders_by_tx_range(start_tx_num..=end_tx_num)?; + let db_range = 0..=(db_tx_count - 1); + let in_mem_range = db_tx_count..=(in_mem_tx_count + db_range.end()); - // Ensure the sender addresses match the expected addresses in the database - assert_eq!(result.len(), 2); + // Retrieve the senders for the whole database range + let database_senders = + database_blocks.iter().flat_map(|b| b.senders().unwrap()).collect::>(); + assert_eq!(provider.senders_by_tx_range(db_range)?, database_senders); + + // Retrieve the senders for the whole in-memory range + let in_memory_senders = + in_memory_blocks.iter().flat_map(|b| b.senders().unwrap()).collect::>(); + assert_eq!(provider.senders_by_tx_range(in_mem_range.clone())?, in_memory_senders); + + // Retrieve the senders for a partial in-memory range assert_eq!( - result[0], - database_blocks[0].senders().unwrap()[0], - "The sender address should match the expected sender address" + &provider.senders_by_tx_range(in_mem_range.start() + 1..=in_mem_range.end() - 1)?, + &in_memory_senders[1..in_memory_senders.len() - 1] ); + + // Retrieve the senders for a range that spans database and in-memory assert_eq!( - result[1], - database_blocks[0].senders().unwrap()[1], - "The sender address should match the expected sender address" + provider.senders_by_tx_range(in_mem_range.start() - 2..=in_mem_range.end() - 1)?, + database_senders[database_senders.len() - 2..] 
+ .iter() + .chain(&in_memory_senders[..in_memory_senders.len() - 1]) + .copied() + .collect::>() ); // Define an empty range that should return no sender addresses diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 50d3d46cef1f..6fe3fa85cb89 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -1,14 +1,16 @@ use crate::{ AccountReader, BlockHashReader, ExecutionDataProvider, StateProvider, StateRootProvider, }; -use alloy_primitives::{Address, BlockNumber, Bytes, B256}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Address, BlockNumber, Bytes, B256, +}; use reth_primitives::{Account, Bytecode}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; -use std::collections::{HashMap, HashSet}; /// A state provider that resolves to data from either a wrapped [`crate::ExecutionOutcome`] /// or an underlying state provider. diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index e326ad6e1d13..8627cacabb4c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -151,7 +151,9 @@ impl StaticFileProviderFactory for DatabaseProvider { } } -impl ChainSpecProvider for DatabaseProvider { +impl ChainSpecProvider + for DatabaseProvider +{ type ChainSpec = Spec; fn chain_spec(&self) -> Arc { @@ -628,7 +630,7 @@ impl DatabaseProvider { // recover the sender from the transaction if not found let sender = tx .recover_signer_unchecked() - .ok_or_else(|| ProviderError::SenderRecoveryError)?; + .ok_or(ProviderError::SenderRecoveryError)?; senders.push(sender); } Some(sender) => senders.push(*sender), @@ -911,14 +913,14 @@ impl DatabaseProvider { // iterate previous value and get plain state value to create changeset // Double option around Account represent if Account state is know (first option) and // account is removed (Second Option) - let mut state: BundleStateInit = HashMap::new(); + let mut state: BundleStateInit = HashMap::default(); // This is not working for blocks that are not at tip. as plain state is not the last // state of end range. We should rename the functions or add support to access // History state. Accessing history state can be tricky but we are not gaining // anything. - let mut reverts: RevertsInit = HashMap::new(); + let mut reverts: RevertsInit = HashMap::default(); // add account changeset changes for (block_number, account_before) in account_changeset.into_iter().rev() { @@ -926,7 +928,7 @@ impl DatabaseProvider { match state.entry(address) { hash_map::Entry::Vacant(entry) => { let new_info = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1); - entry.insert((old_info, new_info, HashMap::new())); + entry.insert((old_info, new_info, HashMap::default())); } hash_map::Entry::Occupied(mut entry) => { // overwrite old account state. 
@@ -944,7 +946,7 @@ impl DatabaseProvider { let account_state = match state.entry(address) { hash_map::Entry::Vacant(entry) => { let present_info = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1); - entry.insert((present_info, present_info, HashMap::new())) + entry.insert((present_info, present_info, HashMap::default())) } hash_map::Entry::Occupied(entry) => entry.into_mut(), }; @@ -1354,7 +1356,7 @@ impl DatabaseProvider { }; self.tx.put::<T>( sharded_key_factory(partial_key, highest_block_number), - BlockNumberList::new_pre_sorted(list), + BlockNumberList::new_pre_sorted(list.iter().copied()), )?; } } @@ -1956,10 +1958,8 @@ impl TransactionsProvider index, block_hash, block_number, - base_fee: header.base_fee_per_gas.map(|base_fee| base_fee as u64), - excess_blob_gas: header - .excess_blob_gas - .map(|excess_blob| excess_blob as u64), + base_fee: header.base_fee_per_gas, + excess_blob_gas: header.excess_blob_gas, timestamp: header.timestamp, }; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 1a43b611bc31..5fed81c155d1 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -8,7 +8,7 @@ use crate::{ TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, }; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, @@ -20,9 +20,9 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - alloy_primitives::Sealable, Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, + Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -120,7 +120,7 @@ impl BlockchainProvider { let best: ChainInfo = provider.chain_info()?; let latest_header = provider .header_by_number(best.best_number)? - .ok_or(ProviderError::HeaderNotFound(best.best_number.into()))?; + .ok_or_else(|| ProviderError::HeaderNotFound(best.best_number.into()))?; let finalized_header = provider .last_finalized_block_number()?
diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index a8613a8d1a87..de30f89c98ee 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -2,7 +2,10 @@ use crate::{ providers::{state::macros::delegate_provider_impls, StaticFileProvider}, AccountReader, BlockHashReader, ProviderError, StateProvider, StateRootProvider, }; -use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, +}; use reth_db::{tables, BlockNumberList}; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, @@ -21,10 +24,7 @@ use reth_trie_db::{ DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, DatabaseStorageRoot, DatabaseTrieWitness, }; -use std::{ - collections::{HashMap, HashSet}, - fmt::Debug, -}; +use std::fmt::Debug; /// State provider for a given block number which takes a tx reference. /// diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 74dfdac73287..f63eaee23862 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -2,7 +2,10 @@ use crate::{ providers::{state::macros::delegate_provider_impls, StaticFileProvider}, AccountReader, BlockHashReader, StateProvider, StateRootProvider, }; -use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, +}; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, @@ -16,7 +19,6 @@ use reth_trie::{ HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, }; use reth_trie_db::{DatabaseProof, DatabaseStateRoot, DatabaseStorageRoot, DatabaseTrieWitness}; -use std::collections::{HashMap, HashSet}; /// State provider over latest state that takes tx reference. #[derive(Debug)] diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 49a168f4e7b4..388b59ab0a1e 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -52,8 +52,8 @@ macro_rules! delegate_provider_impls { } StateProofProvider $(where [$($generics)*])? 
{ fn proof(&self, input: reth_trie::TrieInput, address: alloy_primitives::Address, slots: &[alloy_primitives::B256]) -> reth_storage_errors::provider::ProviderResult<reth_trie::AccountProof>; - fn multiproof(&self, input: reth_trie::TrieInput, targets: std::collections::HashMap<alloy_primitives::B256, std::collections::HashSet<alloy_primitives::B256>>) -> reth_storage_errors::provider::ProviderResult<reth_trie::MultiProof>; - fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult<std::collections::HashMap<alloy_primitives::B256, alloy_primitives::Bytes>>; + fn multiproof(&self, input: reth_trie::TrieInput, targets: alloy_primitives::map::HashMap<alloy_primitives::B256, alloy_primitives::map::HashSet<alloy_primitives::B256>>) -> reth_storage_errors::provider::ProviderResult<reth_trie::MultiProof>; + fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult<alloy_primitives::map::HashMap<alloy_primitives::B256, alloy_primitives::Bytes>>; } ); } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 77c87664b72c..ec76e9504d19 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -285,7 +285,9 @@ impl StaticFileProvider { let fixed_block_range = self.find_fixed_range(block_range.start()); let jar_provider = self .get_segment_provider(segment, || Some(fixed_block_range), None)? - .ok_or(ProviderError::MissingStaticFileBlock(segment, block_range.start()))?; + .ok_or_else(|| { + ProviderError::MissingStaticFileBlock(segment, block_range.start()) + })?; entries += jar_provider.rows(); @@ -323,7 +325,7 @@ impl StaticFileProvider { || self.get_segment_ranges_from_block(segment, block), path, )? - .ok_or_else(|| ProviderError::MissingStaticFileBlock(segment, block)) + .ok_or(ProviderError::MissingStaticFileBlock(segment, block)) } /// Gets the [`StaticFileJarProvider`] of the requested segment and transaction. @@ -338,7 +340,7 @@ impl StaticFileProvider { || self.get_segment_ranges_from_transaction(segment, tx), path, )? - .ok_or_else(|| ProviderError::MissingStaticFileTx(segment, tx)) + .ok_or(ProviderError::MissingStaticFileTx(segment, tx)) } /// Gets the [`StaticFileJarProvider`] of the requested segment and block or transaction. @@ -533,15 +535,16 @@ impl StaticFileProvider { }) .or_insert_with(|| BTreeMap::from([(tx_end, current_block_range)])); } - } else if tx_index.get(&segment).map(|index| index.len()) == Some(1) { - // Only happens if we unwind all the txs/receipts from the first static file. - // Should only happen in test scenarios. - if jar.user_header().expected_block_start() == 0 && - matches!( - segment, - StaticFileSegment::Receipts | StaticFileSegment::Transactions - ) - { + } else if segment.is_tx_based() { + // The unwound file has no more transactions/receipts. However, the highest + // block is within this file's block range. We only retain + // entries with block ranges before the current one. + tx_index.entry(segment).and_modify(|index| { + index.retain(|_, block_range| block_range.start() < fixed_range.start()); + }); + + // If the index is empty, just remove it.
+ if tx_index.get(&segment).is_some_and(|index| index.is_empty()) { tx_index.remove(&segment); } } @@ -1145,11 +1148,17 @@ impl StaticFileProvider { Ok(data) } - #[cfg(any(test, feature = "test-utils"))] /// Returns `static_files` directory + #[cfg(any(test, feature = "test-utils"))] pub fn path(&self) -> &Path { &self.path } + + /// Returns `static_files` transaction index + #[cfg(any(test, feature = "test-utils"))] + pub fn tx_index(&self) -> &RwLock<SegmentRanges> { + &self.static_files_tx_index + } } /// Helper trait to manage different [`StaticFileProviderRW`] of an `Arc 0 { if segment.is_receipts() { - writer.append_receipt(*next_tx_num, &Receipt::default()).unwrap(); + // Used as ID for validation + receipt.cumulative_gas_used = *next_tx_num; + writer.append_receipt(*next_tx_num, &receipt).unwrap(); } else { - writer - .append_transaction(*next_tx_num, &TransactionSignedNoHash::default()) - .unwrap(); + // Used as ID for validation + tx.transaction.set_nonce(*next_tx_num); + writer.append_transaction(*next_tx_num, &tx).unwrap(); } *next_tx_num += 1; tx_count -= 1; @@ -376,10 +390,19 @@ mod tests { expected_tx_range.as_ref() ); }); + + // Ensure transaction index + let tx_index = sf_rw.tx_index().read(); + let expected_tx_index = + vec![(8, SegmentRangeInclusive::new(0, 9)), (9, SegmentRangeInclusive::new(20, 29))]; + assert_eq!( + tx_index.get(&segment).map(|index| index.iter().map(|(k, v)| (*k, *v)).collect()), + (!expected_tx_index.is_empty()).then_some(expected_tx_index), + "tx index mismatch", + ); } #[test] - #[ignore] fn test_tx_based_truncation() { let segments = [StaticFileSegment::Transactions, StaticFileSegment::Receipts]; let blocks_per_file = 10; // Number of blocks per file let file_set_count = 3; // Number of sets of files to create let initial_file_count = files_per_range * file_set_count + 1; // Includes lockfile + #[allow(clippy::too_many_arguments)] fn prune_and_validate( sf_rw: &StaticFileProvider, static_dir: impl AsRef<Path>, segment: StaticFileSegment, prune_count: u64, last_block: u64, - expected_tx_tip: u64, + expected_tx_tip: Option<u64>, expected_file_count: i32, + expected_tx_index: Vec<(TxNumber, SegmentRangeInclusive)>, ) -> eyre::Result<()> { let mut writer = sf_rw.latest_writer(segment)?; @@ -412,11 +437,25 @@ mod tests { Some(last_block), "block mismatch", )?; - assert_eyre( - sf_rw.get_highest_static_file_tx(segment), - Some(expected_tx_tip), - "tx mismatch", - )?; + assert_eyre(sf_rw.get_highest_static_file_tx(segment), expected_tx_tip, "tx mismatch")?; + + // Verify that transactions and receipts are returned correctly. Uses + // cumulative_gas_used & nonce as ids.
+ if let Some(id) = expected_tx_tip { + if segment.is_receipts() { + assert_eyre( + expected_tx_tip, + sf_rw.receipt(id)?.map(|r| r.cumulative_gas_used), + "tx mismatch", + )?; + } else { + assert_eyre( + expected_tx_tip, + sf_rw.transaction_by_id(id)?.map(|t| t.nonce()), + "tx mismatch", + )?; + } + } // Ensure the file count has reduced as expected assert_eyre( @@ -424,6 +463,15 @@ mod tests { expected_file_count as usize, "file count mismatch", )?; + + // Ensure that the inner tx index (max_tx -> block range) is as expected + let tx_index = sf_rw.tx_index().read(); + assert_eyre( + tx_index.get(&segment).map(|index| index.iter().map(|(k, v)| (*k, *v)).collect()), + (!expected_tx_index.is_empty()).then_some(expected_tx_index), + "tx index mismatch", + )?; + Ok(()) } @@ -442,26 +490,46 @@ mod tests { let highest_tx = sf_rw.get_highest_static_file_tx(segment).unwrap(); // Test cases - // [prune_count, last_block, expected_tx_tip, expected_file_count) + // [prune_count, last_block, expected_tx_tip, expected_file_count, expected_tx_index) let test_cases = vec![ // Case 0: 20..=29 has only one tx. Prune the only tx of the block range. // It ensures that the file is not deleted even though there are no rows, since the // `last_block` which is passed to the prune method is the first // block of the range. - (1, blocks_per_file * 2, highest_tx - 1, initial_file_count), + ( + 1, + blocks_per_file * 2, + Some(highest_tx - 1), + initial_file_count, + vec![(highest_tx - 1, SegmentRangeInclusive::new(0, 9))], + ), // Case 1: 10..=19 has no txs. There are no txes in the whole block range, but want // to unwind to block 9. Ensures that the 20..=29 and 10..=19 files // are deleted. - (0, blocks_per_file - 1, highest_tx - 1, files_per_range + 1), // includes lockfile + ( + 0, + blocks_per_file - 1, + Some(highest_tx - 1), + files_per_range + 1, // includes lockfile + vec![(highest_tx - 1, SegmentRangeInclusive::new(0, 9))], + ), // Case 2: Prune most txs up to block 1. - (7, 1, 1, files_per_range + 1), + ( + highest_tx - 1, + 1, + Some(0), + files_per_range + 1, + vec![(0, SegmentRangeInclusive::new(0, 1))], + ), // Case 3: Prune remaining tx and ensure that file is not deleted. - (1, 0, 0, files_per_range + 1), + (1, 0, None, files_per_range + 1, vec![]), ]; // Loop through test cases - for (case, (prune_count, last_block, expected_tx_tip, expected_file_count)) in - test_cases.into_iter().enumerate() + for ( + case, + (prune_count, last_block, expected_tx_tip, expected_file_count, expected_tx_index), + ) in test_cases.into_iter().enumerate() { prune_and_validate( &sf_rw, @@ -471,6 +539,7 @@ mod tests { last_block, expected_tx_tip, expected_file_count, + expected_tx_index, ) .map_err(|err| eyre::eyre!("Test case {case}: {err}")) .unwrap(); diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index b62fc22c730a..d086c5693ca5 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -381,8 +381,9 @@ impl StaticFileProviderRW { /// Commits to the configuration file at the end. 
fn truncate(&mut self, num_rows: u64, last_block: Option<u64>) -> ProviderResult<()> { let mut remaining_rows = num_rows; + let segment = self.writer.user_header().segment(); while remaining_rows > 0 { - let len = match self.writer.user_header().segment() { + let len = match segment { StaticFileSegment::Headers => { self.writer.user_header().block_len().unwrap_or_default() } @@ -396,7 +397,14 @@ impl StaticFileProviderRW { // delete the whole file and go to the next static file let block_start = self.writer.user_header().expected_block_start(); - if block_start != 0 { + // We only delete the file if it's NOT the first static file AND: + // * it's a Header segment OR + // * it's a tx-based segment AND `last_block` is lower than the first block of this + // file's block range. Otherwise, having no rows simply means that this block + // range has no transactions, but the file should remain. + if block_start != 0 && + (segment.is_headers() || last_block.is_some_and(|b| b < block_start)) + { self.delete_current_and_open_previous()?; } else { // Update `SegmentHeader` diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 237fc8e3487e..daed906646d3 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -1,19 +1,19 @@ //! Dummy blocks and data for tests use crate::{DatabaseProviderRW, ExecutionOutcome}; -use alloy_primitives::{Log, Parity, Sealable, TxKind}; +use alloy_consensus::TxLegacy; +use alloy_primitives::{ + b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, Parity, Sealable, + TxKind, B256, U256, +}; use once_cell::sync::Lazy; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_primitives::{ - alloy_primitives, b256, hex_literal::hex, Account, Address, BlockBody, BlockNumber, Bytes, - Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, Signature, Transaction, - TransactionSigned, TxLegacy, TxType, Withdrawal, Withdrawals, B256, U256, + Account, BlockBody, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + Signature, Transaction, TransactionSigned, TxType, Withdrawal, Withdrawals, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; -use revm::{ - db::BundleState, - primitives::{AccountInfo, HashMap}, -}; +use revm::{db::BundleState, primitives::AccountInfo}; use std::str::FromStr; /// Assert genesis block @@ -199,7 +199,7 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { .revert_account_info(number, account1, Some(None)) .state_present_account_info(account2, info) .revert_account_info(number, account2, Some(None)) - .state_storage(account1, HashMap::from([(slot, (U256::ZERO, U256::from(10)))])) + .state_storage(account1, HashMap::from_iter([(slot, (U256::ZERO, U256::from(10)))])) .build(), vec![vec![Some(Receipt { tx_type: TxType::Eip2930, @@ -255,7 +255,7 @@ fn block2( account, AccountInfo { nonce: 3, balance: U256::from(20), ..Default::default() }, ) - .state_storage(account, HashMap::from([(slot, (U256::ZERO, U256::from(15)))])) + .state_storage(account, HashMap::from_iter([(slot, (U256::ZERO, U256::from(15)))])) .revert_account_info( number, account, diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 9277df6e6c5f..4f2faad8abe4 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -6,9
+6,12 @@ use crate::{ StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_consensus::constants::EMPTY_ROOT_HASH; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ - keccak256, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, + keccak256, + map::{HashMap, HashSet}, + Address, BlockHash, BlockNumber, Bytes, Sealable, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; use parking_lot::Mutex; @@ -18,8 +21,8 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - alloy_primitives::Sealable, Account, Block, BlockWithSenders, Bytecode, GotExpected, Header, - Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, + Account, Block, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, Withdrawals, }; use reth_stages_types::{StageCheckpoint, StageId}; @@ -32,7 +35,7 @@ use reth_trie::{ }; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::BTreeMap, ops::{RangeBounds, RangeInclusive}, sync::Arc, }; @@ -286,14 +289,8 @@ impl TransactionsProvider for MockEthProvider { index: index as u64, block_hash: *block_hash, block_number: block.header.number, - base_fee: block - .header - .base_fee_per_gas - .map(|base_fer_per_gas| base_fer_per_gas as u64), - excess_blob_gas: block - .header - .excess_blob_gas - .map(|excess_blob_gas| excess_blob_gas as u64), + base_fee: block.header.base_fee_per_gas, + excess_blob_gas: block.header.excess_blob_gas, timestamp: block.header.timestamp, }; return Ok(Some((tx.clone(), meta))) @@ -640,7 +637,7 @@ impl StorageRootProvider for MockEthProvider { _address: Address, _hashed_storage: HashedStorage, ) -> ProviderResult { - Ok(B256::default()) + Ok(EMPTY_ROOT_HASH) } } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 059accbd9005..e8b7760b880b 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -1,5 +1,4 @@ use std::{ - collections::{HashMap, HashSet}, ops::{RangeBounds, RangeInclusive}, path::PathBuf, sync::Arc, @@ -7,6 +6,7 @@ use std::{ use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ + map::{HashMap, HashSet}, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; use reth_chain_state::{ diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 17bf9db81bd4..ecb1de335559 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -413,7 +413,7 @@ where let mut tx_index = first_tx_index .or(last_tx_idx) - .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(block_number))?; + .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; for tx in transactions.borrow() { self.static_file_mut().append_transaction(tx_index, tx)?; @@ -483,7 +483,7 @@ where let first_tx_index = first_tx_index .or(last_tx_idx) - .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(block_number))?; + 
.ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; // update for empty blocks last_tx_idx = Some(first_tx_index); @@ -544,14 +544,14 @@ mod tests { use crate::{ test_utils::create_test_provider_factory, AccountReader, StorageTrieWriter, TrieWriter, }; - use alloy_primitives::{keccak256, B256, U256}; + use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256}; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, models::{AccountBeforeTx, BlockNumberAddress}, transaction::{DbTx, DbTxMut}, }; - use reth_primitives::{Account, Address, Receipt, Receipts, StorageEntry}; + use reth_primitives::{Account, Receipt, Receipts, StorageEntry}; use reth_storage_api::DatabaseProviderFactory; use reth_trie::{ test_utils::{state_root, storage_root_prehashed}, @@ -570,10 +570,7 @@ mod tests { }, DatabaseCommit, State, }; - use std::{ - collections::{BTreeMap, HashMap}, - str::FromStr, - }; + use std::{collections::BTreeMap, str::FromStr}; #[test] fn wiped_entries_are_removed() { @@ -644,7 +641,7 @@ mod tests { state.insert_account(address_b, account_b.clone()); // 0x00.. is created - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address_a, RevmAccount { info: account_a.clone(), @@ -654,7 +651,7 @@ mod tests { )])); // 0xff.. is changed (balance + 1, nonce + 1) - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address_b, RevmAccount { info: account_b_changed.clone(), @@ -712,7 +709,7 @@ mod tests { state.insert_account(address_b, account_b_changed.clone()); // 0xff.. is destroyed - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address_b, RevmAccount { status: AccountStatus::Touched | AccountStatus::SelfDestructed, @@ -771,10 +768,10 @@ mod tests { state.insert_account_with_storage( address_b, account_b.clone(), - HashMap::from([(U256::from(1), U256::from(1))]), + HashMap::from_iter([(U256::from(1), U256::from(1))]), ); - state.commit(HashMap::from([ + state.commit(HashMap::from_iter([ ( address_a, RevmAccount { @@ -782,7 +779,7 @@ mod tests { info: RevmAccountInfo::default(), // 0x00 => 0 => 1 // 0x01 => 0 => 2 - storage: HashMap::from([ + storage: HashMap::from_iter([ ( U256::from(0), EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, @@ -800,7 +797,7 @@ mod tests { status: AccountStatus::Touched, info: account_b, // 0x01 => 1 => 2 - storage: HashMap::from([( + storage: HashMap::from_iter([( U256::from(1), EvmStorageSlot { present_value: U256::from(2), @@ -905,7 +902,7 @@ mod tests { let mut state = State::builder().with_bundle_update().build(); state.insert_account(address_a, RevmAccountInfo::default()); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address_a, RevmAccount { status: AccountStatus::Touched | AccountStatus::SelfDestructed, @@ -962,14 +959,14 @@ mod tests { // Block #0: initial state. 
let mut init_state = State::builder().with_bundle_update().build(); init_state.insert_not_existing(address1); - init_state.commit(HashMap::from([( + init_state.commit(HashMap::from_iter([( address1, RevmAccount { info: account_info.clone(), status: AccountStatus::Touched | AccountStatus::Created, // 0x00 => 0 => 1 // 0x01 => 0 => 2 - storage: HashMap::from([ + storage: HashMap::from_iter([ ( U256::ZERO, EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, @@ -994,17 +991,17 @@ mod tests { state.insert_account_with_storage( address1, account_info.clone(), - HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), + HashMap::from_iter([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), ); // Block #1: change storage. - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched, info: account_info.clone(), // 0x00 => 1 => 2 - storage: HashMap::from([( + storage: HashMap::from_iter([( U256::ZERO, EvmStorageSlot { original_value: U256::from(1), @@ -1017,7 +1014,7 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); // Block #2: destroy account. - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::SelfDestructed, @@ -1028,7 +1025,7 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); // Block #3: re-create account and change storage. - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::Created, @@ -1039,7 +1036,7 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); // Block #4: change storage. - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched, @@ -1047,7 +1044,7 @@ mod tests { // 0x00 => 0 => 2 // 0x02 => 0 => 4 // 0x06 => 0 => 6 - storage: HashMap::from([ + storage: HashMap::from_iter([ ( U256::ZERO, EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, @@ -1066,7 +1063,7 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); // Block #5: Destroy account again. - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::SelfDestructed, @@ -1077,7 +1074,7 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); // Block #6: Create, change, destroy and re-create in the same block. 
- state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::Created, @@ -1085,19 +1082,19 @@ mod tests { storage: HashMap::default(), }, )])); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched, info: account_info.clone(), // 0x00 => 0 => 2 - storage: HashMap::from([( + storage: HashMap::from_iter([( U256::ZERO, EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, )]), }, )])); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::SelfDestructed, @@ -1105,7 +1102,7 @@ mod tests { storage: HashMap::default(), }, )])); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::Created, @@ -1116,13 +1113,13 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); // Block #7: Change storage. - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched, info: account_info, // 0x00 => 0 => 9 - storage: HashMap::from([( + storage: HashMap::from_iter([( U256::ZERO, EvmStorageSlot { present_value: U256::from(9), ..Default::default() }, )]), @@ -1277,14 +1274,14 @@ mod tests { // Block #0: initial state. let mut init_state = State::builder().with_bundle_update().build(); init_state.insert_not_existing(address1); - init_state.commit(HashMap::from([( + init_state.commit(HashMap::from_iter([( address1, RevmAccount { info: account1.clone(), status: AccountStatus::Touched | AccountStatus::Created, // 0x00 => 0 => 1 // 0x01 => 0 => 2 - storage: HashMap::from([ + storage: HashMap::from_iter([ ( U256::ZERO, EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, @@ -1308,11 +1305,11 @@ mod tests { state.insert_account_with_storage( address1, account1.clone(), - HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), + HashMap::from_iter([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), ); // Block #1: Destroy, re-create, change storage. 
- state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::SelfDestructed, @@ -1321,7 +1318,7 @@ mod tests { }, )])); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::Created, @@ -1330,13 +1327,13 @@ mod tests { }, )])); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched, info: account1, // 0x01 => 0 => 5 - storage: HashMap::from([( + storage: HashMap::from_iter([( U256::from(1), EvmStorageSlot { present_value: U256::from(5), ..Default::default() }, )]), @@ -1468,7 +1465,7 @@ mod tests { let address1 = Address::with_last_byte(1); let account1_old = prestate.remove(&address1).unwrap(); state.insert_account(address1, account1_old.0.into()); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::SelfDestructed, @@ -1488,12 +1485,12 @@ mod tests { state.insert_account_with_storage( address2, account2.0.into(), - HashMap::from([(slot2, account2_slot2_old_value)]), + HashMap::from_iter([(slot2, account2_slot2_old_value)]), ); let account2_slot2_new_value = U256::from(100); account2.1.insert(slot2_key, account2_slot2_new_value); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address2, RevmAccount { status: AccountStatus::Touched, @@ -1513,7 +1510,7 @@ mod tests { state.insert_account(address3, account3.0.into()); account3.0.balance = U256::from(24); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address3, RevmAccount { status: AccountStatus::Touched, @@ -1530,7 +1527,7 @@ mod tests { state.insert_account(address4, account4.0.into()); account4.0.nonce = 128; - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address4, RevmAccount { status: AccountStatus::Touched, @@ -1545,7 +1542,7 @@ mod tests { let account1_new = Account { nonce: 56, balance: U256::from(123), bytecode_hash: Some(B256::random()) }; prestate.insert(address1, (account1_new, BTreeMap::default())); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::Created, @@ -1561,7 +1558,7 @@ mod tests { let slot20_key = B256::from(slot20); let account1_slot20_value = U256::from(12345); prestate.get_mut(&address1).unwrap().1.insert(slot20_key, account1_slot20_value); - state.commit(HashMap::from([( + state.commit(HashMap::from_iter([( address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::Created, diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 6590b47b4524..a3b0cc7438f3 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -3,11 +3,10 @@ use crate::{ TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::{BlockNumber, Sealable, B256}; use reth_db_models::StoredBlockBodyIndices; use reth_primitives::{ - alloy_primitives::Sealable, Block, BlockWithSenders, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, + Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, }; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; diff --git 
a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index ce984fb10788..7202f51ddf1f 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -15,6 +15,11 @@ pub trait HeaderProvider: Send + Sync { /// Get header by block hash fn header(&self, block_hash: &BlockHash) -> ProviderResult<Option<Header>>; + /// Retrieves the header sealed by the given block hash. + fn sealed_header_by_hash(&self, block_hash: BlockHash) -> ProviderResult<Option<SealedHeader>> { + Ok(self.header(&block_hash)?.map(|header| SealedHeader::new(header, block_hash))) + } + /// Get header by block number fn header_by_number(&self, num: u64) -> ProviderResult<Option<Header>>; diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index c3c33ac37908..7325e2b74360 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -1,17 +1,28 @@ //! Various noop implementations for traits. +use std::sync::Arc; + use crate::{BlockHashReader, BlockNumReader}; use alloy_primitives::{BlockNumber, B256}; -use reth_chainspec::ChainInfo; +use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; use reth_storage_errors::provider::ProviderResult; /// Supports various api interfaces for testing purposes. -#[derive(Debug, Clone, Default, Copy)] +#[derive(Debug, Clone)] #[non_exhaustive] -pub struct NoopBlockReader; +pub struct NoopBlockReader<ChainSpec> { + chain_spec: Arc<ChainSpec>, +} + +impl<ChainSpec> NoopBlockReader<ChainSpec> { + /// Create a new instance of the `NoopBlockReader`. + pub const fn new(chain_spec: Arc<ChainSpec>) -> Self { + Self { chain_spec } + } +} /// Noop implementation for testing purposes -impl BlockHashReader for NoopBlockReader { +impl<ChainSpec: Send + Sync> BlockHashReader for NoopBlockReader<ChainSpec> { fn block_hash(&self, _number: u64) -> ProviderResult<Option<B256>> { Ok(None) } @@ -25,7 +36,7 @@ impl BlockHashReader for NoopBlockReader { } } -impl BlockNumReader for NoopBlockReader { +impl<ChainSpec: Send + Sync> BlockNumReader for NoopBlockReader<ChainSpec> { fn chain_info(&self) -> ProviderResult<ChainInfo> { Ok(ChainInfo::default()) } @@ -42,3 +53,11 @@ impl BlockNumReader for NoopBlockReader { Ok(None) } } + +impl<ChainSpec: EthChainSpec + 'static> ChainSpecProvider for NoopBlockReader<ChainSpec> { + type ChainSpec = ChainSpec; + + fn chain_spec(&self) -> Arc<ChainSpec> { + self.chain_spec.clone() + } +} diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index 3685d6b1de87..f2c44e9e140b 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -89,12 +89,12 @@ pub trait TransactionsProviderExt: BlockReader + Send + Sync { ) -> ProviderResult<RangeInclusive<TxNumber>> { let from = self .block_body_indices(*block_range.start())? - .ok_or(ProviderError::BlockBodyIndicesNotFound(*block_range.start()))? + .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(*block_range.start()))? .first_tx_num(); let to = self .block_body_indices(*block_range.end())? - .ok_or(ProviderError::BlockBodyIndicesNotFound(*block_range.end()))? + .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(*block_range.end()))?
.last_tx_num(); Ok(from..=to) diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index e41a15e107d3..d989def8bb0d 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,9 +1,11 @@ -use alloy_primitives::{Address, Bytes, B256}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Address, Bytes, B256, +}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; -use std::collections::{HashMap, HashSet}; /// A type that can compute the state root of a given post state. #[auto_impl::auto_impl(&, Box, Arc)] diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index a0720c037ace..41abbb4b6b7e 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -54,6 +54,7 @@ rand = { workspace = true, optional = true } paste = { workspace = true, optional = true } proptest = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true } +alloy-consensus = { workspace = true, optional = true } [dev-dependencies] reth-primitives = { workspace = true, features = ["arbitrary"] } @@ -68,11 +69,12 @@ pprof = { workspace = true, features = ["criterion", "flamegraph"] } assert_matches.workspace = true tempfile.workspace = true serde_json.workspace = true +alloy-consensus.workspace = true [features] default = ["serde"] serde = ["dep:serde"] -test-utils = ["rand", "paste", "serde"] +test-utils = ["rand", "paste", "serde", "alloy-consensus"] arbitrary = ["proptest", "reth-primitives/arbitrary", "proptest-arbitrary-interop"] [[bench]] diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index d127b3e8e67d..ee98e3eed85e 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -90,7 +90,7 @@ pub enum BlobStoreError { DecodeError(#[from] alloy_rlp::Error), /// Other implementation specific error. #[error(transparent)] - Other(Box<dyn std::error::Error + Send + Sync>), + Other(Box<dyn core::error::Error + Send + Sync>), } /// Keeps track of the size of the blob store.
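Review note on the `std::error::Error` -> `core::error::Error` migration in this and the following hunks: the `Error` trait is available in `core` since Rust 1.81, and `std::error::Error` is a re-export of the same trait, so existing call sites keep compiling while `no_std` crates can now satisfy the bound. A hedged sketch of what the new bound accepts (`MyBlobError` is an invented type for illustration, not part of the patch):

```rust
// Sketch: an error type written without any std-only machinery, usable behind
// the `Box<dyn core::error::Error + Send + Sync>` bounds above.
use core::fmt;

#[derive(Debug)]
struct MyBlobError;

impl fmt::Display for MyBlobError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("blob store backend failed")
    }
}

// `core::error::Error` is the same trait that `std::error::Error` re-exports,
// so this impl satisfies both the old and the new trait bounds.
impl core::error::Error for MyBlobError {}

fn as_other(err: MyBlobError) -> Box<dyn core::error::Error + Send + Sync> {
    Box::new(err)
}

fn main() {
    println!("{}", as_other(MyBlobError));
}
```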
diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index b5063f5b37a1..623493e6c9d4 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -266,7 +266,7 @@ mod tests { #[test] fn test_contains_local_address() { let address = Address::new([1; 20]); - let mut local_addresses = HashSet::new(); + let mut local_addresses = HashSet::default(); local_addresses.insert(address); let config = LocalTransactionConfig { local_addresses, ..Default::default() }; @@ -283,7 +283,7 @@ mod tests { let address = Address::new([1; 20]); let config = LocalTransactionConfig { no_exemptions: true, - local_addresses: HashSet::new(), + local_addresses: HashSet::default(), ..Default::default() }; @@ -294,7 +294,7 @@ mod tests { #[test] fn test_is_local_without_no_exemptions() { let address = Address::new([1; 20]); - let mut local_addresses = HashSet::new(); + let mut local_addresses = HashSet::default(); local_addresses.insert(address); let config = diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index e5142e18a0d3..a4766a89d5c1 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -10,7 +10,7 @@ pub type PoolResult = Result; /// /// For example during validation /// [`TransactionValidator::validate_transaction`](crate::validate::TransactionValidator::validate_transaction) -pub trait PoolTransactionError: std::error::Error + Send + Sync { +pub trait PoolTransactionError: core::error::Error + Send + Sync { /// Returns `true` if the error was caused by a transaction that is considered bad in the /// context of the transaction pool and warrants peer penalization. /// @@ -19,8 +19,8 @@ pub trait PoolTransactionError: std::error::Error + Send + Sync { } // Needed for `#[error(transparent)]` -impl std::error::Error for Box<dyn PoolTransactionError> { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { +impl core::error::Error for Box<dyn PoolTransactionError> { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { (**self).source() } } @@ -63,7 +63,7 @@ pub enum PoolErrorKind { /// Any other error that occurred while inserting/validating a transaction. e.g. IO database /// error #[error(transparent)] - Other(#[from] Box<dyn std::error::Error + Send + Sync>), + Other(#[from] Box<dyn core::error::Error + Send + Sync>), } // === impl PoolError === @@ -75,7 +75,10 @@ impl PoolError { } /// Creates a new pool error with the `Other` kind.
- pub fn other(hash: TxHash, error: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> Self { + pub fn other( + hash: TxHash, + error: impl Into<Box<dyn core::error::Error + Send + Sync>>, + ) -> Self { Self { hash, kind: PoolErrorKind::Other(error.into()) } } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 5a68b16607c8..da416fd2d43f 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -7,18 +7,17 @@ use crate::{ traits::{CanonicalStateUpdate, TransactionPool, TransactionPoolExt}, BlockInfo, PoolTransaction, }; -use alloy_primitives::{Address, BlockHash, BlockNumber}; +use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable}; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, }; use reth_chain_state::CanonStateNotification; -use reth_chainspec::{ChainSpec, ChainSpecProvider}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives::{ - alloy_primitives::Sealable, BlockNumberOrTag, IntoRecoveredTransaction, - PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, + BlockNumberOrTag, PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, }; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -74,12 +73,7 @@ pub fn maintain_transaction_pool_future( config: MaintainPoolConfig, ) -> BoxFuture<'static, ()> where - Client: StateProviderFactory - + BlockReaderIdExt - + ChainSpecProvider<ChainSpec = ChainSpec> - + Clone - + Send - + 'static, + Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider<ChainSpec: EthChainSpec> + Clone + Send + 'static, P: TransactionPoolExt + 'static, St: Stream + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, @@ -100,12 +94,7 @@ pub async fn maintain_transaction_pool( task_spawner: Tasks, config: MaintainPoolConfig, ) where - Client: StateProviderFactory - + BlockReaderIdExt - + ChainSpecProvider<ChainSpec = ChainSpec> - + Clone - + Send - + 'static, + Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider<ChainSpec: EthChainSpec> + Clone + Send + 'static, P: TransactionPoolExt + 'static, St: Stream + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, @@ -119,12 +108,12 @@ pub async fn maintain_transaction_pool( let latest = SealedHeader::new(header, seal); let chain_spec = client.chain_spec(); let info = BlockInfo { - block_gas_limit: latest.gas_limit as u64, + block_gas_limit: latest.gas_limit, last_seen_block_hash: latest.hash(), last_seen_block_number: latest.number, pending_basefee: latest .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(latest.timestamp + 12)) - .unwrap_or_default() as u64, + .unwrap_or_default(), pending_blob_fee: latest.next_block_blob_fee(), }; pool.set_block_info(info); @@ -138,7 +127,7 @@ pub async fn maintain_transaction_pool( FinalizedBlockTracker::new(client.finalized_block_number().ok().flatten()); // keeps track of any dirty accounts that we know of are out of sync with the pool - let mut dirty_addresses = HashSet::new(); + let mut dirty_addresses = HashSet::default(); // keeps track of the state of the pool wrt to blocks let mut maintained_state = MaintainedPoolState::InSync; @@ -357,7 +346,7 @@ pub async fn maintain_transaction_pool( // update the pool first let update = CanonicalStateUpdate { new_tip: &new_tip.block, - pending_block_base_fee: pending_block_base_fee as u64, + pending_block_base_fee, pending_block_blob_fee, changed_accounts, // all transactions mined in the new chain need to be removed from the
pool @@ -406,10 +395,10 @@ pub async fn maintain_transaction_pool( maintained_state = MaintainedPoolState::Drifted; debug!(target: "txpool", ?depth, "skipping deep canonical update"); let info = BlockInfo { - block_gas_limit: tip.gas_limit as u64, + block_gas_limit: tip.gas_limit, last_seen_block_hash: tip.hash(), last_seen_block_number: tip.number, - pending_basefee: pending_block_base_fee as u64, + pending_basefee: pending_block_base_fee, pending_blob_fee: pending_block_blob_fee, }; pool.set_block_info(info); @@ -440,7 +429,7 @@ pub async fn maintain_transaction_pool( // Canonical update let update = CanonicalStateUpdate { new_tip: &tip.block, - pending_block_base_fee: pending_block_base_fee as u64, + pending_block_base_fee, pending_block_blob_fee, changed_accounts, mined_transactions, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index cfe38ea31da8..4c1a7f2c29bb 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -87,8 +87,7 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ - BlobTransaction, BlobTransactionSidecar, IntoRecoveredTransaction, PooledTransactionsElement, - TransactionSigned, + BlobTransaction, BlobTransactionSidecar, PooledTransactionsElement, TransactionSigned, }; use std::{ collections::{HashMap, HashSet}, @@ -464,6 +463,7 @@ where } if let Some(replaced) = added.replaced_blob_transaction() { + debug!(target: "txpool", "[{:?}] delete replaced blob sidecar", replaced); // delete the replaced transaction from the blob store self.delete_blob(replaced); } @@ -580,9 +580,11 @@ where /// Notify all listeners about a blob sidecar for a newly inserted blob (eip4844) transaction. fn on_new_blob_sidecar(&self, tx_hash: &TxHash, sidecar: &BlobTransactionSidecar) { - let sidecar = Arc::new(sidecar.clone()); - let mut sidecar_listeners = self.blob_transaction_sidecar_listener.lock(); + if sidecar_listeners.is_empty() { + return + } + let sidecar = Arc::new(sidecar.clone()); sidecar_listeners.retain_mut(|listener| { let new_blob_event = NewBlobSidecar { tx_hash: *tx_hash, sidecar: sidecar.clone() }; match listener.sender.try_send(new_blob_event) { @@ -799,6 +801,7 @@ where /// Inserts a blob transaction into the blob store fn insert_blob(&self, hash: TxHash, blob: BlobTransactionSidecar) { + debug!(target: "txpool", "[{:?}] storing blob sidecar", hash); if let Err(err) = self.blob_store.insert(hash, blob) { warn!(target: "txpool", %err, "[{:?}] failed to insert blob", hash); self.blob_store_metrics.blobstore_failed_inserts.increment(1); diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index f3b59e09454d..912e04506a19 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -2708,7 +2708,7 @@ mod tests { assert_eq!(pool.pending_pool.len(), 2); - let mut changed_senders = HashMap::new(); + let mut changed_senders = HashMap::default(); changed_senders.insert( id.sender, SenderInfo { state_nonce: next.get_nonce(), balance: U256::from(1_000) }, @@ -2892,7 +2892,7 @@ mod tests { assert_eq!(1, pool.pending_transactions().len()); // Simulate new block arrival - and chain nonce increasing. 
- let mut updated_accounts = HashMap::new(); + let mut updated_accounts = HashMap::default(); on_chain_nonce += 1; updated_accounts.insert( v0.sender_id(), @@ -2967,7 +2967,7 @@ mod tests { assert_eq!(1, pool.pending_transactions().len()); // Simulate new block arrival - and chain nonce increasing. - let mut updated_accounts = HashMap::new(); + let mut updated_accounts = HashMap::default(); on_chain_nonce += 1; updated_accounts.insert( v0.sender_id(), diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index f87083f16f36..d51bf80270de 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -1,11 +1,11 @@ use crate::EthPooledTransaction; -use alloy_eips::eip2930::AccessList; -use alloy_primitives::{Address, TxKind, B256, U256}; +use alloy_consensus::{TxEip1559, TxEip4844, TxLegacy}; +use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList}; +use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use rand::Rng; use reth_chainspec::MAINNET; use reth_primitives::{ - constants::MIN_PROTOCOL_BASE_FEE, sign_message, Bytes, Transaction, TransactionSigned, - TxEip1559, TxEip4844, TxLegacy, + constants::MIN_PROTOCOL_BASE_FEE, sign_message, Transaction, TransactionSigned, }; /// A generator for transactions for testing purposes. @@ -106,7 +106,7 @@ impl TransactionGenerator { /// Generates and returns a pooled EIP-4844 transaction with a random signer. pub fn gen_eip4844_pooled(&mut self) -> EthPooledTransaction { let tx = self.gen_eip4844().into_ecrecovered().unwrap(); - let encoded_length = tx.length_without_header(); + let encoded_length = tx.encode_2718_len(); EthPooledTransaction::new(tx, encoded_length) } } @@ -145,7 +145,7 @@ impl TransactionBuilder { TxLegacy { chain_id: Some(self.chain_id), nonce: self.nonce, - gas_limit: self.gas_limit.into(), + gas_limit: self.gas_limit, gas_price: self.max_fee_per_gas, to: self.to, value: self.value, @@ -162,7 +162,7 @@ impl TransactionBuilder { TxEip1559 { chain_id: self.chain_id, nonce: self.nonce, - gas_limit: self.gas_limit.into(), + gas_limit: self.gas_limit, max_fee_per_gas: self.max_fee_per_gas, max_priority_fee_per_gas: self.max_priority_fee_per_gas, to: self.to, @@ -180,7 +180,7 @@ impl TransactionBuilder { TxEip4844 { chain_id: self.chain_id, nonce: self.nonce, - gas_limit: self.gas_limit as u128, + gas_limit: self.gas_limit, max_fee_per_gas: self.max_fee_per_gas, max_priority_fee_per_gas: self.max_priority_fee_per_gas, to: match self.to { diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 604f4ffefbfc..6b470bb6fb1f 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -7,6 +7,7 @@ use crate::{ CoinbaseTipOrdering, EthBlobTransactionSidecar, EthPoolTransaction, PoolTransaction, ValidPoolTransaction, }; +use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}; use alloy_eips::eip2930::AccessList; use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; use paste::paste; @@ -18,9 +19,8 @@ use reth_primitives::{ constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, BlobTransactionValidationError, PooledTransactionsElementEcRecovered, - Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, - TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID, 
EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, + Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -797,7 +797,7 @@ impl TryFrom for MockTransaction { sender, nonce, gas_price, - gas_limit: gas_limit as u64, + gas_limit, to, value, input, @@ -818,7 +818,7 @@ impl TryFrom for MockTransaction { sender, nonce, gas_price, - gas_limit: gas_limit as u64, + gas_limit, to, value, input, @@ -842,7 +842,7 @@ impl TryFrom for MockTransaction { nonce, max_fee_per_gas, max_priority_fee_per_gas, - gas_limit: gas_limit as u64, + gas_limit, to, value, input, @@ -869,7 +869,7 @@ impl TryFrom for MockTransaction { max_fee_per_gas, max_priority_fee_per_gas, max_fee_per_blob_gas, - gas_limit: gas_limit as u64, + gas_limit, to, value, input, @@ -916,15 +916,7 @@ impl From for Transaction { value, input, size: _, - } => Self::Legacy(TxLegacy { - chain_id, - nonce, - gas_price, - gas_limit: gas_limit.into(), - to, - value, - input, - }), + } => Self::Legacy(TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input }), MockTransaction::Eip2930 { chain_id, hash: _, @@ -941,7 +933,7 @@ impl From for Transaction { chain_id, nonce, gas_price, - gas_limit: gas_limit.into(), + gas_limit, to, value, access_list, @@ -963,7 +955,7 @@ impl From for Transaction { } => Self::Eip1559(TxEip1559 { chain_id, nonce, - gas_limit: gas_limit.into(), + gas_limit, max_fee_per_gas, max_priority_fee_per_gas, to, @@ -989,7 +981,7 @@ impl From for Transaction { } => Self::Eip4844(TxEip4844 { chain_id, nonce, - gas_limit: gas_limit.into(), + gas_limit, max_fee_per_gas, max_priority_fee_per_gas, to, @@ -1026,7 +1018,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { hash: tx_hash, nonce: *nonce, gas_price: *gas_price, - gas_limit: *gas_limit as u64, + gas_limit: { *gas_limit }, to: *to, value: *value, input: input.clone(), @@ -1048,7 +1040,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { hash: tx_hash, nonce: *nonce, gas_price: *gas_price, - gas_limit: *gas_limit as u64, + gas_limit: { *gas_limit }, to: *to, value: *value, input: input.clone(), @@ -1072,7 +1064,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { nonce: *nonce, max_fee_per_gas: *max_fee_per_gas, max_priority_fee_per_gas: *max_priority_fee_per_gas, - gas_limit: *gas_limit as u64, + gas_limit: { *gas_limit }, to: *to, value: *value, input: input.clone(), @@ -1099,7 +1091,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { max_fee_per_gas: *max_fee_per_gas, max_priority_fee_per_gas: *max_priority_fee_per_gas, max_fee_per_blob_gas: *max_fee_per_blob_gas, - gas_limit: *gas_limit as u64, + gas_limit: { *gas_limit }, to: *to, value: *value, input: input.clone(), diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index cef776aab320..d4eabc73bbce 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -7,7 +7,7 @@ use crate::{ validate::ValidPoolTransaction, AllTransactionsEvents, }; -use alloy_eips::{eip2930::AccessList, eip4844::BlobAndProofV1}; +use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList, eip4844::BlobAndProofV1}; use alloy_primitives::{Address, TxHash, TxKind, B256, U256}; use futures_util::{ready, Stream}; use reth_eth_wire_types::HandleMempoolData; @@ -666,7 +666,7 @@ impl<'a> CanonicalStateUpdate<'a> { /// Returns the block info 
for the tip block. pub fn block_info(&self) -> BlockInfo { BlockInfo { - block_gas_limit: self.new_tip.gas_limit as u64, + block_gas_limit: self.new_tip.gas_limit, last_seen_block_hash: self.hash(), last_seen_block_number: self.number(), pending_basefee: self.pending_block_base_fee, @@ -1251,7 +1251,7 @@ impl TryFrom for EthPooledTransaction { } }; - let encoded_length = tx.length_without_header(); + let encoded_length = tx.encode_2718_len(); let transaction = Self::new(tx, encoded_length); Ok(transaction) } @@ -1392,10 +1392,8 @@ impl Stream for NewSubpoolTransactionStream { #[cfg(test)] mod tests { use super::*; - use reth_primitives::{ - constants::eip4844::DATA_GAS_PER_BLOB, Signature, TransactionSigned, TxEip1559, TxEip2930, - TxEip4844, TxEip7702, TxLegacy, - }; + use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; + use reth_primitives::{constants::eip4844::DATA_GAS_PER_BLOB, Signature, TransactionSigned}; #[test] fn test_pool_size_invariants() { diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 7baa5e3f335e..b8fe7cbb1de0 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -8,8 +8,8 @@ use crate::{ use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; use reth_primitives::{ - BlobTransactionSidecar, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, - SealedBlock, TransactionSignedEcRecovered, + BlobTransactionSidecar, PooledTransactionsElementEcRecovered, SealedBlock, + TransactionSignedEcRecovered, }; use std::{fmt, future::Future, time::Instant}; @@ -51,7 +51,7 @@ pub enum TransactionValidationOutcome { /// this transaction from ever becoming valid. Invalid(T, InvalidPoolTransactionError), /// An error occurred while trying to validate the transaction - Error(TxHash, Box<dyn std::error::Error + Send + Sync>), + Error(TxHash, Box<dyn core::error::Error + Send + Sync>), } impl TransactionValidationOutcome { @@ -380,10 +380,11 @@ impl ValidPoolTransaction { } } -impl<T: PoolTransaction<Consensus = TransactionSignedEcRecovered>> IntoRecoveredTransaction - for ValidPoolTransaction<T> -{ - fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { +impl<T: PoolTransaction<Consensus = TransactionSignedEcRecovered>> ValidPoolTransaction<T> { + /// Converts this type into a [`TransactionSignedEcRecovered`]. + /// + /// Note: this takes `&self` since intended usage is via `Arc`. + pub fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { self.transaction.clone().into_consensus() } } diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index df32b1cb9f6a..8aca67f8d1ad 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -2,15 +2,16 @@ use crate::{Nibbles, TrieAccount}; use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; -use alloy_rlp::{encode_fixed_size, Decodable}; +use alloy_rlp::{encode_fixed_size, Decodable, EMPTY_STRING_CODE}; use alloy_trie::{ nodes::TrieNode, - proof::{verify_proof, ProofVerificationError}, + proof::{verify_proof, ProofNodes, ProofVerificationError}, EMPTY_ROOT_HASH, }; +use itertools::Itertools; use reth_primitives_traits::{constants::KECCAK_EMPTY, Account}; use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; /// The state multiproof of target accounts and multiproofs of their storage tries.
/// Multiproof is effectively a state subtrie that only contains the nodes @@ -18,7 +19,7 @@ use std::collections::{BTreeMap, HashMap}; #[derive(Clone, Default, Debug)] pub struct MultiProof { /// State trie multiproof for requested accounts. - pub account_subtree: BTreeMap, + pub account_subtree: ProofNodes, /// Storage trie multiproofs. pub storages: HashMap, } @@ -36,8 +37,8 @@ impl MultiProof { // Retrieve the account proof. let proof = self .account_subtree - .iter() - .filter(|(path, _)| nibbles.starts_with(path)) + .matching_nodes_iter(&nibbles) + .sorted_by(|a, b| a.0.cmp(b.0)) .map(|(_, node)| node.clone()) .collect::>(); @@ -82,16 +83,21 @@ pub struct StorageMultiProof { /// Storage trie root. pub root: B256, /// Storage multiproof for requested slots. - pub subtree: BTreeMap, + pub subtree: ProofNodes, } -impl Default for StorageMultiProof { - fn default() -> Self { - Self { root: EMPTY_ROOT_HASH, subtree: BTreeMap::default() } +impl StorageMultiProof { + /// Create new storage multiproof for empty trie. + pub fn empty() -> Self { + Self { + root: EMPTY_ROOT_HASH, + subtree: ProofNodes::from_iter([( + Nibbles::default(), + Bytes::from([EMPTY_STRING_CODE]), + )]), + } } -} -impl StorageMultiProof { /// Return storage proofs for the target storage slot (unhashed). pub fn storage_proof(&self, slot: B256) -> Result { let nibbles = Nibbles::unpack(keccak256(slot)); @@ -99,8 +105,8 @@ impl StorageMultiProof { // Retrieve the storage proof. let proof = self .subtree - .iter() - .filter(|(path, _)| nibbles.starts_with(path)) + .matching_nodes_iter(&nibbles) + .sorted_by(|a, b| a.0.cmp(b.0)) .map(|(_, node)| node.clone()) .collect::>(); @@ -208,6 +214,12 @@ impl StorageProof { Self { key, nibbles, ..Default::default() } } + /// Set proof nodes on storage proof. + pub fn with_proof(mut self, proof: Vec) -> Self { + self.proof = proof; + self + } + /// Verify the proof against the provided storage root. pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> { let expected = diff --git a/crates/trie/common/src/root.rs b/crates/trie/common/src/root.rs index 600e818ebbaa..20f3ba1366d5 100644 --- a/crates/trie/common/src/root.rs +++ b/crates/trie/common/src/root.rs @@ -75,7 +75,7 @@ pub fn state_root_unhashed>( pub fn state_root_unsorted>( state: impl IntoIterator, ) -> B256 { - state_root(state.into_iter().sorted_by_key(|(key, _)| *key)) + state_root(state.into_iter().sorted_unstable_by_key(|(key, _)| *key)) } /// Calculates the root hash of the state represented as MPT. @@ -105,7 +105,7 @@ pub fn storage_root_unhashed(storage: impl IntoIterator) -> /// Sorts and calculates the root hash of account storage trie. /// See [`storage_root`] for more info. pub fn storage_root_unsorted(storage: impl IntoIterator) -> B256 { - storage_root(storage.into_iter().sorted_by_key(|(key, _)| *key)) + storage_root(storage.into_iter().sorted_unstable_by_key(|(key, _)| *key)) } /// Calculates the root hash of account storage trie. 
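A note on the `StorageMultiProof::empty()` change above: rather than an empty subtree, the proof for an empty storage trie now retains the root node preimage at the empty path, the RLP empty string `0x80` (`EMPTY_STRING_CODE`), whose keccak256 hash is `EMPTY_ROOT_HASH`. A minimal standalone check of that identity, assuming only the `alloy-primitives`, `alloy-rlp`, and `alloy-trie` crates already imported in this diff:

```rust
use alloy_primitives::keccak256;
use alloy_rlp::EMPTY_STRING_CODE; // 0x80, the RLP encoding of the empty string
use alloy_trie::EMPTY_ROOT_HASH;

fn main() {
    // The empty trie root is defined as keccak256(rlp("")) = keccak256(0x80),
    // so retaining `Bytes::from([EMPTY_STRING_CODE])` at the empty path hands
    // witness consumers the preimage of `EMPTY_ROOT_HASH`.
    assert_eq!(keccak256([EMPTY_STRING_CODE]), EMPTY_ROOT_HASH);
}
```

This is what the new `includes_empty_node_preimage` test further down in this diff asserts end to end.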
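Also worth spelling out is the retrieval change in `account_proof` and `storage_proof` above: with `ProofNodes` replacing the `BTreeMap`, prefix-matched nodes no longer come back in key order, so they are explicitly sorted by path to restore root-to-leaf order (a prefix path sorts before any of its extensions). An illustrative sketch of that invariant with stand-in types, not the reth API:

```rust
/// Stand-in for a retained proof node keyed by its nibble path.
type PathNode = (Vec<u8>, &'static str);

/// Sorting prefix-related paths lexicographically yields root-to-leaf order,
/// which is what `sorted_by(|a, b| a.0.cmp(b.0))` relies on in the diff above.
fn order_proof(mut matched: Vec<PathNode>) -> Vec<&'static str> {
    matched.sort_by(|a, b| a.0.cmp(&b.0));
    matched.into_iter().map(|(_, node)| node).collect()
}

fn main() {
    // Nodes along one lookup path, deliberately shuffled the way an unordered
    // map iterator might return them.
    let matched = vec![
        (vec![0x0a], "branch"),
        (vec![], "root"),
        (vec![0x0a, 0x05], "leaf"),
    ];
    assert_eq!(order_proof(matched), ["root", "branch", "leaf"]);
}
```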
diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 2d06d9e2f33b..1d5fda84cc5b 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -1,5 +1,8 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use alloy_primitives::{Address, B256}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Address, B256, +}; use reth_db_api::transaction::DbTx; use reth_execution_errors::StateProofError; use reth_trie::{ @@ -7,7 +10,6 @@ use reth_trie::{ trie_cursor::InMemoryTrieCursorFactory, MultiProof, TrieInput, }; use reth_trie_common::AccountProof; -use std::collections::{HashMap, HashSet}; /// Extends [`Proof`] with operations specific for working with a database transaction. pub trait DatabaseProof<'a, TX> { @@ -76,7 +78,6 @@ impl<'a, TX: DbTx> DatabaseProof<'a, TX> &state_sorted, )) .with_prefix_sets_mut(input.prefix_sets) - .with_targets(targets) - .multiproof() + .multiproof(targets) } } diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 4f27679ddad9..5acb9e0d1b49 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -77,9 +77,10 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { /// # Example /// /// ``` + /// use alloy_primitives::U256; /// use reth_db::test_utils::create_test_rw_db; /// use reth_db_api::database::Database; - /// use reth_primitives::{Account, U256}; + /// use reth_primitives::Account; /// use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot}; /// use reth_trie_db::DatabaseStateRoot; /// @@ -267,11 +268,11 @@ impl DatabaseHashedPostState for HashedPostState { #[cfg(test)] mod tests { use super::*; + use alloy_primitives::{hex, map::HashMap, Address, U256}; use reth_db::test_utils::create_test_rw_db; use reth_db_api::database::Database; - use reth_primitives::{hex, revm_primitives::AccountInfo, Address, U256}; + use reth_primitives::revm_primitives::AccountInfo; use revm::db::BundleState; - use std::collections::HashMap; #[test] fn from_bundle_state_with_rayon() { @@ -286,8 +287,8 @@ mod tests { let bundle_state = BundleState::builder(2..=2) .state_present_account_info(address1, account1) .state_present_account_info(address2, account2) - .state_storage(address1, HashMap::from([(slot1, (U256::ZERO, U256::from(10)))])) - .state_storage(address2, HashMap::from([(slot2, (U256::ZERO, U256::from(20)))])) + .state_storage(address1, HashMap::from_iter([(slot1, (U256::ZERO, U256::from(10)))])) + .state_storage(address2, HashMap::from_iter([(slot2, (U256::ZERO, U256::from(20)))])) .build(); assert_eq!(bundle_state.reverts.len(), 1); diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index 124b8ccb20ca..601100b3faee 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -209,8 +209,8 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_primitives::hex_literal::hex; use reth_db_api::{cursor::DbCursorRW, transaction::DbTxMut}; - use reth_primitives::hex_literal::hex; use reth_provider::test_utils::create_test_provider_factory; #[test] diff --git a/crates/trie/db/src/witness.rs b/crates/trie/db/src/witness.rs index 62b945d26dc2..54d017780ae4 100644 --- a/crates/trie/db/src/witness.rs +++ b/crates/trie/db/src/witness.rs @@ -1,12 +1,11 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use alloy_primitives::{Bytes, B256}; +use alloy_primitives::{map::HashMap, Bytes, B256}; use reth_db_api::transaction::DbTx; use reth_execution_errors::TrieWitnessError; use reth_trie::{ 
hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory, witness::TrieWitness, HashedPostState, TrieInput, }; -use std::collections::HashMap; /// Extends [`TrieWitness`] with operations specific for working with a database transaction. pub trait DatabaseTrieWitness<'a, TX> { diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs index 33a19de38037..5ffa6729b49a 100644 --- a/crates/trie/db/tests/proof.rs +++ b/crates/trie/db/tests/proof.rs @@ -1,6 +1,7 @@ #![allow(missing_docs)] use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; +use alloy_rlp::EMPTY_STRING_CODE; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET}; use reth_primitives::{constants::EMPTY_ROOT_HASH, Account}; use reth_provider::test_utils::{create_test_provider_factory, insert_genesis}; @@ -111,7 +112,10 @@ fn testspec_empty_storage_proof() { assert_eq!(slots.len(), account_proof.storage_proofs.len()); for (idx, slot) in slots.into_iter().enumerate() { let proof = account_proof.storage_proofs.get(idx).unwrap(); - assert_eq!(proof, &StorageProof::new(slot)); + assert_eq!( + proof, + &StorageProof::new(slot).with_proof(vec![Bytes::from([EMPTY_STRING_CODE])]) + ); assert_eq!(proof.verify(account_proof.storage_root), Ok(())); } assert_eq!(account_proof.verify(root), Ok(())); diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index f7413c64509f..59fffec58d06 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -1,6 +1,6 @@ #![allow(missing_docs)] -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{hex_literal::hex, keccak256, Address, B256, U256}; use proptest::{prelude::ProptestConfig, proptest}; use proptest_arbitrary_interop::arb; use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; @@ -8,7 +8,7 @@ use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, transaction::DbTxMut, }; -use reth_primitives::{constants::EMPTY_ROOT_HASH, hex_literal::hex, Account, StorageEntry}; +use reth_primitives::{constants::EMPTY_ROOT_HASH, Account, StorageEntry}; use reth_provider::{ test_utils::create_test_provider_factory, DatabaseProviderRW, StorageTrieWriter, TrieWriter, }; diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs new file mode 100644 index 000000000000..cc921f657087 --- /dev/null +++ b/crates/trie/db/tests/witness.rs @@ -0,0 +1,57 @@ +#![allow(missing_docs)] + +use alloy_primitives::{ + keccak256, + map::{HashMap, HashSet}, + Address, Bytes, B256, U256, +}; +use alloy_rlp::EMPTY_STRING_CODE; +use reth_primitives::{constants::EMPTY_ROOT_HASH, Account}; +use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; +use reth_trie::{proof::Proof, witness::TrieWitness, HashedPostState, HashedStorage, StateRoot}; +use reth_trie_db::{DatabaseProof, DatabaseStateRoot, DatabaseTrieWitness}; + +#[test] +fn includes_empty_node_preimage() { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + + let address = Address::random(); + let hashed_address = keccak256(address); + let hashed_slot = B256::random(); + + // witness includes empty state trie root node + assert_eq!( + TrieWitness::from_tx(provider.tx_ref()) + .compute(HashedPostState { + accounts: HashMap::from([(hashed_address, Some(Account::default()))]), + storages: HashMap::default(), + }) + .unwrap(), + HashMap::from_iter([(EMPTY_ROOT_HASH, Bytes::from([EMPTY_STRING_CODE]))]) + ); + + // Insert account into database + 
provider.insert_account_for_hashing([(address, Some(Account::default()))]).unwrap(); + + let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); + let multiproof = Proof::from_tx(provider.tx_ref()) + .multiproof(HashMap::from_iter([(hashed_address, HashSet::from_iter([hashed_slot]))])) + .unwrap(); + + let witness = TrieWitness::from_tx(provider.tx_ref()) + .compute(HashedPostState { + accounts: HashMap::from([(hashed_address, Some(Account::default()))]), + storages: HashMap::from([( + hashed_address, + HashedStorage::from_iter(false, [(hashed_slot, U256::from(1))]), + )]), + }) + .unwrap(); + assert!(witness.contains_key(&state_root)); + for node in multiproof.account_subtree.values() { + assert_eq!(witness.get(&keccak256(node)), Some(node)); + } + // witness includes empty state trie root node + assert_eq!(witness.get(&EMPTY_ROOT_HASH), Some(&Bytes::from([EMPTY_STRING_CODE]))); +} diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index 80fa0a70d0e1..64a4644bdce4 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -31,13 +31,8 @@ tracing.workspace = true # misc thiserror.workspace = true derive_more.workspace = true - -# `async` feature -tokio = { workspace = true, optional = true, default-features = false } -itertools = { workspace = true, optional = true } - -# `parallel` feature -rayon = { workspace = true, optional = true } +rayon.workspace = true +itertools.workspace = true # `metrics` feature reth-metrics = { workspace = true, optional = true } @@ -58,12 +53,9 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true [features] -default = ["metrics", "async", "parallel"] +default = ["metrics"] metrics = ["reth-metrics", "dep:metrics", "reth-trie/metrics"] -async = ["tokio/sync", "itertools"] -parallel = ["rayon"] [[bench]] name = "root" -required-features = ["async", "parallel"] harness = false diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index 470222e3e1da..d1ffe49dd0ad 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -13,7 +13,7 @@ use reth_trie::{ TrieInput, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot}; -use reth_trie_parallel::{async_root::AsyncStateRoot, parallel_root::ParallelStateRoot}; +use reth_trie_parallel::parallel_root::ParallelStateRoot; use std::collections::HashMap; pub fn calculate_state_root(c: &mut Criterion) { @@ -70,14 +70,6 @@ pub fn calculate_state_root(c: &mut Criterion) { |calculator| async { calculator.incremental_root() }, ); }); - - // async root - group.bench_function(BenchmarkId::new("async root", size), |b| { - b.to_async(&runtime).iter_with_setup( - || AsyncStateRoot::new(view.clone(), TrieInput::from_state(updated_state.clone())), - |calculator| calculator.incremental_root(), - ); - }); } } diff --git a/crates/trie/parallel/src/async_root.rs b/crates/trie/parallel/src/async_root.rs deleted file mode 100644 index 74481f09e9f8..000000000000 --- a/crates/trie/parallel/src/async_root.rs +++ /dev/null @@ -1,333 +0,0 @@ -#[cfg(feature = "metrics")] -use crate::metrics::ParallelStateRootMetrics; -use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets}; -use alloy_primitives::B256; -use alloy_rlp::{BufMut, Encodable}; -use itertools::Itertools; -use reth_execution_errors::StorageRootError; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, -}; -use reth_trie::{ 
- hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, - node_iter::{TrieElement, TrieNodeIter}, - trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, - updates::TrieUpdates, - walker::TrieWalker, - HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, -}; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use std::{collections::HashMap, sync::Arc}; -use thiserror::Error; -use tokio::sync::oneshot; -use tracing::*; - -/// Async state root calculator. -/// -/// The calculator starts off by launching tasks to compute storage roots. -/// Then, it immediately starts walking the state trie updating the necessary trie -/// nodes in the process. Upon encountering a leaf node, it will poll the storage root -/// task for the corresponding hashed address. -/// -/// Internally, the calculator uses [`ConsistentDbView`] since -/// it needs to rely on database state saying the same until -/// the last transaction is open. -/// See docs of using [`ConsistentDbView`] for caveats. -/// -/// For sync usage, take a look at `ParallelStateRoot`. -#[derive(Debug)] -pub struct AsyncStateRoot { - /// Consistent view of the database. - view: ConsistentDbView, - /// Trie input. - input: TrieInput, - /// Parallel state root metrics. - #[cfg(feature = "metrics")] - metrics: ParallelStateRootMetrics, -} - -impl AsyncStateRoot { - /// Create new async state root calculator. - pub fn new(view: ConsistentDbView, input: TrieInput) -> Self { - Self { - view, - input, - #[cfg(feature = "metrics")] - metrics: ParallelStateRootMetrics::default(), - } - } -} - -impl AsyncStateRoot -where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, -{ - /// Calculate incremental state root asynchronously. - pub async fn incremental_root(self) -> Result { - self.calculate(false).await.map(|(root, _)| root) - } - - /// Calculate incremental state root with updates asynchronously. - pub async fn incremental_root_with_updates( - self, - ) -> Result<(B256, TrieUpdates), AsyncStateRootError> { - self.calculate(true).await - } - - async fn calculate( - self, - retain_updates: bool, - ) -> Result<(B256, TrieUpdates), AsyncStateRootError> { - let mut tracker = ParallelTrieTracker::default(); - let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted()); - let hashed_state_sorted = Arc::new(self.input.state.into_sorted()); - let prefix_sets = self.input.prefix_sets.freeze(); - let storage_root_targets = StorageRootTargets::new( - prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), - prefix_sets.storage_prefix_sets, - ); - - // Pre-calculate storage roots async for accounts which were changed. 
- tracker.set_precomputed_storage_roots(storage_root_targets.len() as u64); - debug!(target: "trie::async_state_root", len = storage_root_targets.len(), "pre-calculating storage roots"); - let mut storage_roots = HashMap::with_capacity(storage_root_targets.len()); - for (hashed_address, prefix_set) in - storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) - { - let view = self.view.clone(); - let hashed_state_sorted = hashed_state_sorted.clone(); - let trie_nodes_sorted = trie_nodes_sorted.clone(); - #[cfg(feature = "metrics")] - let metrics = self.metrics.storage_trie.clone(); - - let (tx, rx) = oneshot::channel(); - - rayon::spawn_fifo(move || { - let result = (|| -> Result<_, AsyncStateRootError> { - let provider_ro = view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &trie_nodes_sorted, - ); - let hashed_state = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &hashed_state_sorted, - ); - Ok(StorageRoot::new_hashed( - trie_cursor_factory, - hashed_state, - hashed_address, - #[cfg(feature = "metrics")] - metrics, - ) - .with_prefix_set(prefix_set) - .calculate(retain_updates)?) - })(); - let _ = tx.send(result); - }); - storage_roots.insert(hashed_address, rx); - } - - trace!(target: "trie::async_state_root", "calculating state root"); - let mut trie_updates = TrieUpdates::default(); - - let provider_ro = self.view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &trie_nodes_sorted, - ); - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &hashed_state_sorted, - ); - - let walker = TrieWalker::new( - trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, - prefix_sets.account_prefix_set, - ) - .with_deletions_retained(retain_updates); - let mut account_node_iter = TrieNodeIter::new( - walker, - hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, - ); - - let mut hash_builder = HashBuilder::default().with_updates(retain_updates); - let mut account_rlp = Vec::with_capacity(128); - while let Some(node) = account_node_iter.try_next().map_err(ProviderError::Database)? { - match node { - TrieElement::Branch(node) => { - hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); - } - TrieElement::Leaf(hashed_address, account) => { - let (storage_root, _, updates) = match storage_roots.remove(&hashed_address) { - Some(rx) => rx.await.map_err(|_| { - AsyncStateRootError::StorageRootChannelClosed { hashed_address } - })??, - // Since we do not store all intermediate nodes in the database, there might - // be a possibility of re-adding a non-modified leaf to the hash builder. - None => { - tracker.inc_missed_leaves(); - StorageRoot::new_hashed( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), - hashed_address, - #[cfg(feature = "metrics")] - self.metrics.storage_trie.clone(), - ) - .calculate(retain_updates)? 
- } - }; - - if retain_updates { - trie_updates.insert_storage_updates(hashed_address, updates); - } - - account_rlp.clear(); - let account = TrieAccount::from((account, storage_root)); - account.encode(&mut account_rlp as &mut dyn BufMut); - hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - } - } - } - - let root = hash_builder.root(); - - trie_updates.finalize( - account_node_iter.walker, - hash_builder, - prefix_sets.destroyed_accounts, - ); - - let stats = tracker.finish(); - - #[cfg(feature = "metrics")] - self.metrics.record_state_trie(stats); - - trace!( - target: "trie::async_state_root", - %root, - duration = ?stats.duration(), - branches_added = stats.branches_added(), - leaves_added = stats.leaves_added(), - missed_leaves = stats.missed_leaves(), - precomputed_storage_roots = stats.precomputed_storage_roots(), - "calculated state root" - ); - - Ok((root, trie_updates)) - } -} - -/// Error during async state root calculation. -#[derive(Error, Debug)] -pub enum AsyncStateRootError { - /// Storage root channel for a given address was closed. - #[error("storage root channel for {hashed_address} got closed")] - StorageRootChannelClosed { - /// The hashed address for which channel was closed. - hashed_address: B256, - }, - /// Error while calculating storage root. - #[error(transparent)] - StorageRoot(#[from] StorageRootError), - /// Provider error. - #[error(transparent)] - Provider(#[from] ProviderError), -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::{keccak256, Address, U256}; - use rand::Rng; - use reth_primitives::{Account, StorageEntry}; - use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; - use reth_trie::{test_utils, HashedPostState, HashedStorage}; - - #[tokio::test] - async fn random_async_root() { - let factory = create_test_provider_factory(); - let consistent_view = ConsistentDbView::new(factory.clone(), None); - - let mut rng = rand::thread_rng(); - let mut state = (0..100) - .map(|_| { - let address = Address::random(); - let account = - Account { balance: U256::from(rng.gen::()), ..Default::default() }; - let mut storage = HashMap::::default(); - let has_storage = rng.gen_bool(0.7); - if has_storage { - for _ in 0..100 { - storage.insert( - B256::from(U256::from(rng.gen::())), - U256::from(rng.gen::()), - ); - } - } - (address, (account, storage)) - }) - .collect::>(); - - { - let provider_rw = factory.provider_rw().unwrap(); - provider_rw - .insert_account_for_hashing( - state.iter().map(|(address, (account, _))| (*address, Some(*account))), - ) - .unwrap(); - provider_rw - .insert_storage_for_hashing(state.iter().map(|(address, (_, storage))| { - ( - *address, - storage - .iter() - .map(|(slot, value)| StorageEntry { key: *slot, value: *value }), - ) - })) - .unwrap(); - provider_rw.commit().unwrap(); - } - - assert_eq!( - AsyncStateRoot::new(consistent_view.clone(), Default::default(),) - .incremental_root() - .await - .unwrap(), - test_utils::state_root(state.clone()) - ); - - let mut hashed_state = HashedPostState::default(); - for (address, (account, storage)) in &mut state { - let hashed_address = keccak256(address); - - let should_update_account = rng.gen_bool(0.5); - if should_update_account { - *account = Account { balance: U256::from(rng.gen::()), ..*account }; - hashed_state.accounts.insert(hashed_address, Some(*account)); - } - - let should_update_storage = rng.gen_bool(0.3); - if should_update_storage { - for (slot, value) in storage.iter_mut() { - let hashed_slot = 
keccak256(slot); - *value = U256::from(rng.gen::()); - hashed_state - .storages - .entry(hashed_address) - .or_insert_with(|| HashedStorage::new(false)) - .storage - .insert(hashed_slot, *value); - } - } - } - - assert_eq!( - AsyncStateRoot::new(consistent_view.clone(), TrieInput::from_state(hashed_state)) - .incremental_root() - .await - .unwrap(), - test_utils::state_root(state) - ); - } -} diff --git a/crates/trie/parallel/src/lib.rs b/crates/trie/parallel/src/lib.rs index ff130b2187e7..40a6af347580 100644 --- a/crates/trie/parallel/src/lib.rs +++ b/crates/trie/parallel/src/lib.rs @@ -13,12 +13,7 @@ pub use storage_root_targets::StorageRootTargets; /// Parallel trie calculation stats. pub mod stats; -/// Implementation of async state root computation. -#[cfg(feature = "async")] -pub mod async_root; - /// Implementation of parallel state root computation. -#[cfg(feature = "parallel")] pub mod parallel_root; /// Parallel state root metrics. diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/parallel_root.rs index e63c3f1a17b3..a64b8351446e 100644 --- a/crates/trie/parallel/src/parallel_root.rs +++ b/crates/trie/parallel/src/parallel_root.rs @@ -1,10 +1,10 @@ #[cfg(feature = "metrics")] use crate::metrics::ParallelStateRootMetrics; use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets}; +use alloy_primitives::B256; use alloy_rlp::{BufMut, Encodable}; -use rayon::prelude::*; +use itertools::Itertools; use reth_execution_errors::StorageRootError; -use reth_primitives::B256; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, }; @@ -17,22 +17,21 @@ use reth_trie::{ HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use thiserror::Error; use tracing::*; /// Parallel incremental state root calculator. /// -/// The calculator starts off by pre-computing storage roots of changed -/// accounts in parallel. Once that's done, it proceeds to walking the state -/// trie retrieving the pre-computed storage roots when needed. +/// The calculator starts off by launching tasks to compute storage roots. +/// Then, it immediately starts walking the state trie updating the necessary trie +/// nodes in the process. Upon encountering a leaf node, it will poll the storage root +/// task for the corresponding hashed address. /// /// Internally, the calculator uses [`ConsistentDbView`] since /// it needs to rely on database state saying the same until /// the last transaction is open. /// See docs of using [`ConsistentDbView`] for caveats. -/// -/// If possible, use more optimized `AsyncStateRoot` instead. #[derive(Debug)] pub struct ParallelStateRoot { /// Consistent view of the database. @@ -58,7 +57,7 @@ impl ParallelStateRoot { impl ParallelStateRoot where - Factory: DatabaseProviderFactory + Send + Sync, + Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, { /// Calculate incremental state root in parallel. 
pub fn incremental_root(self) -> Result { @@ -77,8 +76,8 @@ where retain_updates: bool, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { let mut tracker = ParallelTrieTracker::default(); - let trie_nodes_sorted = self.input.nodes.into_sorted(); - let hashed_state_sorted = self.input.state.into_sorted(); + let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted()); + let hashed_state_sorted = Arc::new(self.input.state.into_sorted()); let prefix_sets = self.input.prefix_sets.freeze(); let storage_root_targets = StorageRootTargets::new( prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), @@ -88,30 +87,43 @@ where // Pre-calculate storage roots in parallel for accounts which were changed. tracker.set_precomputed_storage_roots(storage_root_targets.len() as u64); debug!(target: "trie::parallel_state_root", len = storage_root_targets.len(), "pre-calculating storage roots"); - let mut storage_roots = storage_root_targets - .into_par_iter() - .map(|(hashed_address, prefix_set)| { - let provider_ro = self.view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &trie_nodes_sorted, - ); - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &hashed_state_sorted, - ); - let storage_root_result = StorageRoot::new_hashed( - trie_cursor_factory, - hashed_cursor_factory, - hashed_address, - #[cfg(feature = "metrics")] - self.metrics.storage_trie.clone(), - ) - .with_prefix_set(prefix_set) - .calculate(retain_updates); - Ok((hashed_address, storage_root_result?)) - }) - .collect::, ParallelStateRootError>>()?; + let mut storage_roots = HashMap::with_capacity(storage_root_targets.len()); + for (hashed_address, prefix_set) in + storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) + { + let view = self.view.clone(); + let hashed_state_sorted = hashed_state_sorted.clone(); + let trie_nodes_sorted = trie_nodes_sorted.clone(); + #[cfg(feature = "metrics")] + let metrics = self.metrics.storage_trie.clone(); + + let (tx, rx) = std::sync::mpsc::sync_channel(1); + + rayon::spawn_fifo(move || { + let result = (|| -> Result<_, ParallelStateRootError> { + let provider_ro = view.provider_ro()?; + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); + let hashed_state = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + &hashed_state_sorted, + ); + Ok(StorageRoot::new_hashed( + trie_cursor_factory, + hashed_state, + hashed_address, + #[cfg(feature = "metrics")] + metrics, + ) + .with_prefix_set(prefix_set) + .calculate(retain_updates)?) 
+ })(); + let _ = tx.send(result); + }); + storage_roots.insert(hashed_address, rx); + } trace!(target: "trie::parallel_state_root", "calculating state root"); let mut trie_updates = TrieUpdates::default(); @@ -145,7 +157,13 @@ where } TrieElement::Leaf(hashed_address, account) => { let (storage_root, _, updates) = match storage_roots.remove(&hashed_address) { - Some(result) => result, + Some(rx) => rx.recv().map_err(|_| { + ParallelStateRootError::StorageRoot(StorageRootError::Database( + reth_db::DatabaseError::Other(format!( + "channel closed for {hashed_address}" + )), + )) + })??, // Since we do not store all intermediate nodes in the database, there might // be a possibility of re-adding a non-modified leaf to the hash builder. None => { diff --git a/crates/trie/parallel/src/storage_root_targets.rs b/crates/trie/parallel/src/storage_root_targets.rs index 8325fbcf7205..9b52d49afc80 100644 --- a/crates/trie/parallel/src/storage_root_targets.rs +++ b/crates/trie/parallel/src/storage_root_targets.rs @@ -36,7 +36,6 @@ impl IntoIterator for StorageRootTargets { } } -#[cfg(feature = "parallel")] impl rayon::iter::IntoParallelIterator for StorageRootTargets { type Iter = rayon::collections::hash_map::IntoIter; type Item = (B256, PrefixSet); diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 12cf9ac1cb7b..d0f0fa092a77 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -44,6 +44,9 @@ triehash = { version = "0.8", optional = true } # `serde` feature serde = { workspace = true, optional = true } +# `serde-bincode-compat` feature +serde_with = { workspace = true, optional = true } + [dev-dependencies] # reth reth-chainspec.workspace = true @@ -63,10 +66,12 @@ tokio = { workspace = true, default-features = false, features = [ ] } serde_json.workspace = true criterion.workspace = true +bincode.workspace = true [features] metrics = ["reth-metrics", "dep:metrics"] serde = ["dep:serde"] +serde-bincode-compat = ["serde_with"] test-utils = ["triehash", "reth-trie-common/test-utils"] [[bench]] diff --git a/crates/trie/trie/benches/hash_post_state.rs b/crates/trie/trie/benches/hash_post_state.rs index 49759f14a969..6e913ef78a3c 100644 --- a/crates/trie/trie/benches/hash_post_state.rs +++ b/crates/trie/trie/benches/hash_post_state.rs @@ -1,10 +1,9 @@ #![allow(missing_docs, unreachable_pub)] -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use reth_trie::{HashedPostState, HashedStorage}; use revm::db::{states::BundleBuilder, BundleAccount}; -use std::collections::HashMap; pub fn hash_post_state(c: &mut Criterion) { let mut group = c.benchmark_group("Hash Post State"); @@ -68,7 +67,7 @@ fn generate_test_data(size: usize) -> HashMap { let mut bundle_builder = BundleBuilder::default(); for (address, storage) in state { - bundle_builder = bundle_builder.state_storage(address, storage); + bundle_builder = bundle_builder.state_storage(address, storage.into_iter().collect()); } let bundle_state = bundle_builder.build(); diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index 317ec3655400..bb568ae8b8cf 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -63,6 +63,17 @@ pub mod stats; // re-export for convenience pub use reth_trie_common::*; +/// Bincode-compatible serde implementations for trie 
types. +/// +/// `bincode` crate allows for more efficient serialization of trie types, because it allows +/// non-string map keys. +/// +/// Read more: +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub mod serde_bincode_compat { + pub use super::updates::serde_bincode_compat as updates; +} + /// Trie calculation metrics. #[cfg(feature = "metrics")] pub mod metrics; diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index 8b9d2f9d09fb..95d9505218bf 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -1,18 +1,21 @@ use crate::{ hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, node_iter::{TrieElement, TrieNodeIter}, - prefix_set::TriePrefixSetsMut, + prefix_set::{PrefixSetMut, TriePrefixSetsMut}, trie_cursor::TrieCursorFactory, walker::TrieWalker, HashBuilder, Nibbles, }; -use alloy_primitives::{keccak256, Address, B256}; +use alloy_primitives::{ + keccak256, + map::{HashMap, HashSet}, + Address, B256, +}; use alloy_rlp::{BufMut, Encodable}; use reth_execution_errors::trie::StateProofError; use reth_trie_common::{ proof::ProofRetainer, AccountProof, MultiProof, StorageMultiProof, TrieAccount, }; -use std::collections::{HashMap, HashSet}; /// A struct for generating merkle proofs. /// @@ -27,18 +30,15 @@ pub struct Proof { hashed_cursor_factory: H, /// A set of prefix sets that have changes. prefix_sets: TriePrefixSetsMut, - /// Proof targets. - targets: HashMap>, } impl Proof { - /// Create a new [Proof] instance. + /// Create a new [`Proof`] instance. pub fn new(t: T, h: H) -> Self { Self { trie_cursor_factory: t, hashed_cursor_factory: h, prefix_sets: TriePrefixSetsMut::default(), - targets: HashMap::default(), } } @@ -48,7 +48,6 @@ impl Proof { trie_cursor_factory, hashed_cursor_factory: self.hashed_cursor_factory, prefix_sets: self.prefix_sets, - targets: self.targets, } } @@ -58,7 +57,6 @@ impl Proof { trie_cursor_factory: self.trie_cursor_factory, hashed_cursor_factory, prefix_sets: self.prefix_sets, - targets: self.targets, } } @@ -67,22 +65,11 @@ impl Proof { self.prefix_sets = prefix_sets; self } - - /// Set the target account and slots. - pub fn with_target(self, target: (B256, HashSet)) -> Self { - self.with_targets(HashMap::from([target])) - } - - /// Set the target accounts and slots. - pub fn with_targets(mut self, targets: HashMap>) -> Self { - self.targets = targets; - self - } } impl Proof where - T: TrieCursorFactory, + T: TrieCursorFactory + Clone, H: HashedCursorFactory + Clone, { /// Generate an account proof from intermediate nodes. @@ -92,23 +79,28 @@ where slots: &[B256], ) -> Result { Ok(self - .with_target((keccak256(address), slots.iter().map(keccak256).collect())) - .multiproof()? + .multiproof(HashMap::from_iter([( + keccak256(address), + slots.iter().map(keccak256).collect(), + )]))? .account_proof(address, slots)?) } /// Generate a state multiproof according to specified targets. - pub fn multiproof(&self) -> Result { + pub fn multiproof( + mut self, + mut targets: HashMap>, + ) -> Result { let hashed_account_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?; // Create the walker. 
let mut prefix_set = self.prefix_sets.account_prefix_set.clone(); - prefix_set.extend(self.targets.keys().map(Nibbles::unpack)); + prefix_set.extend(targets.keys().map(Nibbles::unpack)); let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); // Create a hash builder to rebuild the root node since it is not available in the database. - let retainer = ProofRetainer::from_iter(self.targets.keys().map(Nibbles::unpack)); + let retainer = ProofRetainer::from_iter(targets.keys().map(Nibbles::unpack)); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); let mut storages = HashMap::default(); @@ -120,7 +112,19 @@ where hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } TrieElement::Leaf(hashed_address, account) => { - let storage_multiproof = self.storage_multiproof(hashed_address)?; + let storage_prefix_set = self + .prefix_sets + .storage_prefix_sets + .remove(&hashed_address) + .unwrap_or_default(); + let proof_targets = targets.remove(&hashed_address).unwrap_or_default(); + let storage_multiproof = StorageProof::new_hashed( + self.trie_cursor_factory.clone(), + self.hashed_cursor_factory.clone(), + hashed_address, + ) + .with_prefix_set_mut(storage_prefix_set) + .storage_proof(proof_targets)?; // Encode account account_rlp.clear(); @@ -133,32 +137,69 @@ where } } let _ = hash_builder.root(); - Ok(MultiProof { account_subtree: hash_builder.take_proofs(), storages }) + Ok(MultiProof { account_subtree: hash_builder.take_proof_nodes(), storages }) } +} - /// Generate a storage multiproof according to specified targets. - pub fn storage_multiproof( - &self, - hashed_address: B256, +/// Generates storage merkle proofs. +#[derive(Debug)] +pub struct StorageProof { + /// The factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors. + hashed_cursor_factory: H, + /// The hashed address of an account. + hashed_address: B256, + /// The set of storage slot prefixes that have changed. + prefix_set: PrefixSetMut, +} + +impl StorageProof { + /// Create a new [`StorageProof`] instance. + pub fn new(t: T, h: H, address: Address) -> Self { + Self::new_hashed(t, h, keccak256(address)) + } + + /// Create a new [`StorageProof`] instance with hashed address. + pub fn new_hashed(t: T, h: H, hashed_address: B256) -> Self { + Self { + trie_cursor_factory: t, + hashed_cursor_factory: h, + hashed_address, + prefix_set: PrefixSetMut::default(), + } + } + + /// Set the changed prefixes. + pub fn with_prefix_set_mut(mut self, prefix_set: PrefixSetMut) -> Self { + self.prefix_set = prefix_set; + self + } +} + +impl StorageProof +where + T: TrieCursorFactory, + H: HashedCursorFactory, +{ + /// Generate storage proof. + pub fn storage_proof( + mut self, + targets: HashSet, ) -> Result { let mut hashed_storage_cursor = - self.hashed_cursor_factory.hashed_storage_cursor(hashed_address)?; + self.hashed_cursor_factory.hashed_storage_cursor(self.hashed_address)?; // short circuit on empty storage if hashed_storage_cursor.is_storage_empty()? 
{ - return Ok(StorageMultiProof::default()) + return Ok(StorageMultiProof::empty()) } - let target_nibbles = self - .targets - .get(&hashed_address) - .map_or(Vec::new(), |slots| slots.iter().map(Nibbles::unpack).collect()); + let target_nibbles = targets.into_iter().map(Nibbles::unpack).collect::>(); + self.prefix_set.extend(target_nibbles.clone()); - let mut prefix_set = - self.prefix_sets.storage_prefix_sets.get(&hashed_address).cloned().unwrap_or_default(); - prefix_set.extend(target_nibbles.clone()); - let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(hashed_address)?; - let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); + let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(self.hashed_address)?; + let walker = TrieWalker::new(trie_cursor, self.prefix_set.freeze()); let retainer = ProofRetainer::from_iter(target_nibbles); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); @@ -178,6 +219,6 @@ where } let root = hash_builder.root(); - Ok(StorageMultiProof { root, subtree: hash_builder.take_proofs() }) + Ok(StorageMultiProof { root, subtree: hash_builder.take_proof_nodes() }) } } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index d634f05f0f39..3b0af5cd879b 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -100,6 +100,36 @@ impl HashedPostState { self } + /// Returns `true` if the hashed state is empty. + pub fn is_empty(&self) -> bool { + self.accounts.is_empty() && self.storages.is_empty() + } + + /// Construct [`TriePrefixSetsMut`] from hashed post state. + /// The prefix sets contain the hashed account and storage keys that have been changed in the + /// post state. + pub fn construct_prefix_sets(&self) -> TriePrefixSetsMut { + // Populate account prefix set. + let mut account_prefix_set = PrefixSetMut::with_capacity(self.accounts.len()); + let mut destroyed_accounts = HashSet::default(); + for (hashed_address, account) in &self.accounts { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + + if account.is_none() { + destroyed_accounts.insert(*hashed_address); + } + } + + // Populate storage prefix sets. + let mut storage_prefix_sets = HashMap::with_capacity(self.storages.len()); + for (hashed_address, hashed_storage) in &self.storages { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + storage_prefix_sets.insert(*hashed_address, hashed_storage.construct_prefix_set()); + } + + TriePrefixSetsMut { account_prefix_set, storage_prefix_sets, destroyed_accounts } + } + /// Extend this hashed post state with contents of another. /// Entries in the second hashed post state take precedence. pub fn extend(&mut self, other: Self) { @@ -166,31 +196,6 @@ impl HashedPostState { HashedPostStateSorted { accounts, storages } } - - /// Construct [`TriePrefixSetsMut`] from hashed post state. - /// The prefix sets contain the hashed account and storage keys that have been changed in the - /// post state. - pub fn construct_prefix_sets(&self) -> TriePrefixSetsMut { - // Populate account prefix set. - let mut account_prefix_set = PrefixSetMut::with_capacity(self.accounts.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in &self.accounts { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - - if account.is_none() { - destroyed_accounts.insert(*hashed_address); - } - } - - // Populate storage prefix sets. 
- let mut storage_prefix_sets = HashMap::with_capacity(self.storages.len()); - for (hashed_address, hashed_storage) in &self.storages { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - storage_prefix_sets.insert(*hashed_address, hashed_storage.construct_prefix_set()); - } - - TriePrefixSetsMut { account_prefix_set, storage_prefix_sets, destroyed_accounts } - } } /// Representation of in-memory hashed storage. diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/trie/src/updates.rs index c499d7eefae5..a2d3d67363ea 100644 --- a/crates/trie/trie/src/updates.rs +++ b/crates/trie/trie/src/updates.rs @@ -1,16 +1,14 @@ use crate::{walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles}; use alloy_primitives::B256; -#[cfg(feature = "serde")] -use serde::{ser::SerializeMap, Serialize, Serializer}; use std::collections::{HashMap, HashSet}; /// The aggregation of trie updates. #[derive(PartialEq, Eq, Clone, Default, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct TrieUpdates { - #[cfg_attr(feature = "serde", serde(serialize_with = "serialize_nibbles_map"))] + #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))] pub(crate) account_nodes: HashMap, - #[cfg_attr(feature = "serde", serde(serialize_with = "serialize_nibbles_set"))] + #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))] pub(crate) removed_nodes: HashSet, pub(crate) storage_tries: HashMap, } @@ -117,10 +115,10 @@ pub struct StorageTrieUpdates { /// Flag indicating whether the trie was deleted. pub(crate) is_deleted: bool, /// Collection of updated storage trie nodes. - #[cfg_attr(feature = "serde", serde(serialize_with = "serialize_nibbles_map"))] + #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))] pub(crate) storage_nodes: HashMap, /// Collection of removed storage trie nodes. - #[cfg_attr(feature = "serde", serde(serialize_with = "serialize_nibbles_set"))] + #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))] pub(crate) removed_nodes: HashSet, } @@ -222,40 +220,118 @@ impl StorageTrieUpdates { } } -/// Serializes any [`HashSet`] that includes [`Nibbles`] elements, by using the hex-encoded packed -/// representation. +/// Serializes and deserializes any [`HashSet`] that includes [`Nibbles`] elements, by using the +/// hex-encoded packed representation. /// /// This also sorts the set before serializing. #[cfg(feature = "serde")] -fn serialize_nibbles_set(map: &HashSet, serializer: S) -> Result -where - S: Serializer, -{ - let mut storage_nodes = - Vec::from_iter(map.iter().map(|elem| reth_primitives::hex::encode(elem.pack()))); - storage_nodes.sort_unstable(); - storage_nodes.serialize(serializer) +mod serde_nibbles_set { + use std::collections::HashSet; + + use reth_trie_common::Nibbles; + use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; + + pub(super) fn serialize(map: &HashSet, serializer: S) -> Result + where + S: Serializer, + { + let mut storage_nodes = + Vec::from_iter(map.iter().map(|elem| alloy_primitives::hex::encode(elem.pack()))); + storage_nodes.sort_unstable(); + storage_nodes.serialize(serializer) + } + + pub(super) fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + Vec::::deserialize(deserializer)? 
+ .into_iter() + .map(|node| { + Ok(Nibbles::unpack( + alloy_primitives::hex::decode(node) + .map_err(|err| D::Error::custom(err.to_string()))?, + )) + }) + .collect::, _>>() + } } -/// Serializes any [`HashMap`] that uses [`Nibbles`] as keys, by using the hex-encoded packed -/// representation. +/// Serializes and deserializes any [`HashMap`] that uses [`Nibbles`] as keys, by using the +/// hex-encoded packed representation. /// /// This also sorts the map's keys before encoding and serializing. #[cfg(feature = "serde")] -fn serialize_nibbles_map(map: &HashMap, serializer: S) -> Result -where - S: Serializer, - T: Serialize, -{ - let mut map_serializer = serializer.serialize_map(Some(map.len()))?; - let mut storage_nodes = Vec::from_iter(map); - storage_nodes.sort_unstable_by(|a, b| a.0.cmp(b.0)); - for (k, v) in storage_nodes { - // pack, then hex encode the Nibbles - let packed = reth_primitives::hex::encode(k.pack()); - map_serializer.serialize_entry(&packed, &v)?; - } - map_serializer.end() +mod serde_nibbles_map { + use std::{collections::HashMap, marker::PhantomData}; + + use alloy_primitives::hex; + use reth_trie_common::Nibbles; + use serde::{ + de::{Error, MapAccess, Visitor}, + ser::SerializeMap, + Deserialize, Deserializer, Serialize, Serializer, + }; + + pub(super) fn serialize( + map: &HashMap, + serializer: S, + ) -> Result + where + S: Serializer, + T: Serialize, + { + let mut map_serializer = serializer.serialize_map(Some(map.len()))?; + let mut storage_nodes = Vec::from_iter(map); + storage_nodes.sort_unstable_by_key(|node| node.0); + for (k, v) in storage_nodes { + // pack, then hex encode the Nibbles + let packed = alloy_primitives::hex::encode(k.pack()); + map_serializer.serialize_entry(&packed, &v)?; + } + map_serializer.end() + } + + pub(super) fn deserialize<'de, D, T>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + T: Deserialize<'de>, + { + struct NibblesMapVisitor { + marker: PhantomData, + } + + impl<'de, T> Visitor<'de> for NibblesMapVisitor + where + T: Deserialize<'de>, + { + type Value = HashMap; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("a map with hex-encoded Nibbles keys") + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let mut result = HashMap::with_capacity(map.size_hint().unwrap_or(0)); + + while let Some((key, value)) = map.next_entry::()? { + let decoded_key = + hex::decode(&key).map_err(|err| Error::custom(err.to_string()))?; + + let nibbles = Nibbles::unpack(&decoded_key); + + result.insert(nibbles, value); + } + + Ok(result) + } + } + + deserializer.deserialize_map(NibblesMapVisitor { marker: PhantomData }) + } } /// Sorted trie updates used for lookups and insertions. @@ -320,38 +396,269 @@ fn exclude_empty_from_pair( iter.into_iter().filter(|(n, _)| !n.is_empty()) } +/// Bincode-compatible trie updates type serde implementations. +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub mod serde_bincode_compat { + use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + }; + + use alloy_primitives::B256; + use reth_trie_common::{BranchNodeCompact, Nibbles}; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{DeserializeAs, SerializeAs}; + + /// Bincode-compatible [`super::TrieUpdates`] serde implementation. 
+ /// + /// Intended to be used with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_trie::{serde_bincode_compat, updates::TrieUpdates}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + /// #[serde_as(as = "serde_bincode_compat::updates::TrieUpdates")] + /// trie_updates: TrieUpdates, + /// } + /// ``` + #[derive(Debug, Serialize, Deserialize)] + pub struct TrieUpdates<'a> { + account_nodes: Cow<'a, HashMap>, + removed_nodes: Cow<'a, HashSet>, + storage_tries: HashMap>, + } + + impl<'a> From<&'a super::TrieUpdates> for TrieUpdates<'a> { + fn from(value: &'a super::TrieUpdates) -> Self { + Self { + account_nodes: Cow::Borrowed(&value.account_nodes), + removed_nodes: Cow::Borrowed(&value.removed_nodes), + storage_tries: value.storage_tries.iter().map(|(k, v)| (*k, v.into())).collect(), + } + } + } + + impl<'a> From> for super::TrieUpdates { + fn from(value: TrieUpdates<'a>) -> Self { + Self { + account_nodes: value.account_nodes.into_owned(), + removed_nodes: value.removed_nodes.into_owned(), + storage_tries: value + .storage_tries + .into_iter() + .map(|(k, v)| (k, v.into())) + .collect(), + } + } + } + + impl<'a> SerializeAs for TrieUpdates<'a> { + fn serialize_as(source: &super::TrieUpdates, serializer: S) -> Result + where + S: Serializer, + { + TrieUpdates::from(source).serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::TrieUpdates> for TrieUpdates<'de> { + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + TrieUpdates::deserialize(deserializer).map(Into::into) + } + } + + /// Bincode-compatible [`super::StorageTrieUpdates`] serde implementation. + /// + /// Intended to be used with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_trie::{serde_bincode_compat, updates::StorageTrieUpdates}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + /// #[serde_as(as = "serde_bincode_compat::updates::StorageTrieUpdates")] + /// trie_updates: StorageTrieUpdates, + /// } + /// ``` + #[derive(Debug, Serialize, Deserialize)] + pub struct StorageTrieUpdates<'a> { + is_deleted: bool, + storage_nodes: Cow<'a, HashMap>, + removed_nodes: Cow<'a, HashSet>, + } + + impl<'a> From<&'a super::StorageTrieUpdates> for StorageTrieUpdates<'a> { + fn from(value: &'a super::StorageTrieUpdates) -> Self { + Self { + is_deleted: value.is_deleted, + storage_nodes: Cow::Borrowed(&value.storage_nodes), + removed_nodes: Cow::Borrowed(&value.removed_nodes), + } + } + } + + impl<'a> From> for super::StorageTrieUpdates { + fn from(value: StorageTrieUpdates<'a>) -> Self { + Self { + is_deleted: value.is_deleted, + storage_nodes: value.storage_nodes.into_owned(), + removed_nodes: value.removed_nodes.into_owned(), + } + } + } + + impl<'a> SerializeAs for StorageTrieUpdates<'a> { + fn serialize_as( + source: &super::StorageTrieUpdates, + serializer: S, + ) -> Result + where + S: Serializer, + { + StorageTrieUpdates::from(source).serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::StorageTrieUpdates> for StorageTrieUpdates<'de> { + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + StorageTrieUpdates::deserialize(deserializer).map(Into::into) + } + } + + #[cfg(test)] + mod tests { + use crate::updates::StorageTrieUpdates; + + use
super::super::{serde_bincode_compat, TrieUpdates}; + + use alloy_primitives::B256; + use reth_trie_common::{BranchNodeCompact, Nibbles}; + use serde::{Deserialize, Serialize}; + use serde_with::serde_as; + + #[test] + fn test_trie_updates_bincode_roundtrip() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + #[serde_as(as = "serde_bincode_compat::TrieUpdates")] + trie_updates: TrieUpdates, + } + + let mut data = Data { trie_updates: TrieUpdates::default() }; + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + + data.trie_updates.removed_nodes.insert(Nibbles::from_vec(vec![0x0b, 0x0e, 0x0e, 0x0f])); + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + + data.trie_updates.account_nodes.insert( + Nibbles::from_vec(vec![0x0d, 0x0e, 0x0a, 0x0d]), + BranchNodeCompact::default(), + ); + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + + data.trie_updates.storage_tries.insert(B256::default(), StorageTrieUpdates::default()); + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + + #[test] + fn test_storage_trie_updates_bincode_roundtrip() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + #[serde_as(as = "serde_bincode_compat::StorageTrieUpdates")] + trie_updates: StorageTrieUpdates, + } + + let mut data = Data { trie_updates: StorageTrieUpdates::default() }; + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + + data.trie_updates.removed_nodes.insert(Nibbles::from_vec(vec![0x0b, 0x0e, 0x0e, 0x0f])); + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + + data.trie_updates.storage_nodes.insert( + Nibbles::from_vec(vec![0x0d, 0x0e, 0x0a, 0x0d]), + BranchNodeCompact::default(), + ); + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + } +} + #[cfg(all(test, feature = "serde"))] mod tests { use super::*; #[test] - fn test_serialize_trie_updates_works() { + fn test_trie_updates_serde_roundtrip() { let mut default_updates = TrieUpdates::default(); - let _updates_string = serde_json::to_string(&default_updates).unwrap(); + let updates_serialized = serde_json::to_string(&default_updates).unwrap(); + let updates_deserialized: TrieUpdates = serde_json::from_str(&updates_serialized).unwrap(); + assert_eq!(updates_deserialized, default_updates); default_updates.removed_nodes.insert(Nibbles::from_vec(vec![0x0b, 0x0e, 0x0e, 0x0f])); - let _updates_string = serde_json::to_string(&default_updates).unwrap(); + let updates_serialized = serde_json::to_string(&default_updates).unwrap(); + let updates_deserialized: TrieUpdates = serde_json::from_str(&updates_serialized).unwrap(); + assert_eq!(updates_deserialized, default_updates); default_updates .account_nodes - .insert(Nibbles::from_vec(vec![0x0b, 0x0e, 0x0f]), BranchNodeCompact::default()); - let _updates_string = serde_json::to_string(&default_updates).unwrap(); + .insert(Nibbles::from_vec(vec![0x0d, 0x0e, 0x0a, 0x0d]), 
BranchNodeCompact::default()); + let updates_serialized = serde_json::to_string(&default_updates).unwrap(); + let updates_deserialized: TrieUpdates = serde_json::from_str(&updates_serialized).unwrap(); + assert_eq!(updates_deserialized, default_updates); default_updates.storage_tries.insert(B256::default(), StorageTrieUpdates::default()); - let _updates_string = serde_json::to_string(&default_updates).unwrap(); + let updates_serialized = serde_json::to_string(&default_updates).unwrap(); + let updates_deserialized: TrieUpdates = serde_json::from_str(&updates_serialized).unwrap(); + assert_eq!(updates_deserialized, default_updates); } #[test] - fn test_serialize_storage_trie_updates_works() { + fn test_storage_trie_updates_serde_roundtrip() { let mut default_updates = StorageTrieUpdates::default(); - let _updates_string = serde_json::to_string(&default_updates).unwrap(); + let updates_serialized = serde_json::to_string(&default_updates).unwrap(); + let updates_deserialized: StorageTrieUpdates = + serde_json::from_str(&updates_serialized).unwrap(); + assert_eq!(updates_deserialized, default_updates); default_updates.removed_nodes.insert(Nibbles::from_vec(vec![0x0b, 0x0e, 0x0e, 0x0f])); - let _updates_string = serde_json::to_string(&default_updates).unwrap(); + let updates_serialized = serde_json::to_string(&default_updates).unwrap(); + let updates_deserialized: StorageTrieUpdates = + serde_json::from_str(&updates_serialized).unwrap(); + assert_eq!(updates_deserialized, default_updates); default_updates .storage_nodes - .insert(Nibbles::from_vec(vec![0x0b, 0x0e, 0x0f]), BranchNodeCompact::default()); - let _updates_string = serde_json::to_string(&default_updates).unwrap(); + .insert(Nibbles::from_vec(vec![0x0d, 0x0e, 0x0a, 0x0d]), BranchNodeCompact::default()); + let updates_serialized = serde_json::to_string(&default_updates).unwrap(); + let updates_deserialized: StorageTrieUpdates = + serde_json::from_str(&updates_serialized).unwrap(); + assert_eq!(updates_deserialized, default_updates); } } diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 0d0839616021..b0fcfb021ae1 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -1,16 +1,24 @@ +use std::collections::BTreeMap; + use crate::{ - hashed_cursor::HashedCursorFactory, prefix_set::TriePrefixSetsMut, proof::Proof, - trie_cursor::TrieCursorFactory, HashedPostState, + hashed_cursor::HashedCursorFactory, + prefix_set::TriePrefixSetsMut, + proof::{Proof, StorageProof}, + trie_cursor::TrieCursorFactory, + HashedPostState, +}; +use alloy_primitives::{ + keccak256, + map::{HashMap, HashSet}, + Bytes, B256, }; -use alloy_primitives::{keccak256, Bytes, B256}; use alloy_rlp::{BufMut, Decodable, Encodable}; -use itertools::Either; -use reth_execution_errors::{StateProofError, TrieWitnessError}; +use itertools::{Either, Itertools}; +use reth_execution_errors::TrieWitnessError; use reth_primitives::constants::EMPTY_ROOT_HASH; use reth_trie_common::{ - BranchNode, HashBuilder, Nibbles, TrieAccount, TrieNode, CHILD_INDEX_RANGE, + BranchNode, HashBuilder, Nibbles, StorageMultiProof, TrieAccount, TrieNode, CHILD_INDEX_RANGE, }; -use std::collections::{BTreeMap, HashMap, HashSet}; /// State transition witness for the trie. 
#[derive(Debug)] @@ -78,6 +86,10 @@ where mut self, state: HashedPostState, ) -> Result<HashMap<B256, Bytes>, TrieWitnessError> { + if state.is_empty() { + return Ok(self.witness) + } + let proof_targets = HashMap::from_iter( state .accounts @@ -87,19 +99,21 @@ where (*hashed_address, storage.storage.keys().copied().collect()) })), ); + let mut account_multiproof = Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) .with_prefix_sets_mut(self.prefix_sets.clone()) - .with_targets(proof_targets.clone()) - .multiproof()?; + .multiproof(proof_targets.clone())?; // Attempt to compute state root from proofs and gather additional // information for the witness. let mut account_rlp = Vec::with_capacity(128); let mut account_trie_nodes = BTreeMap::default(); for (hashed_address, hashed_slots) in proof_targets { - let storage_multiproof = - account_multiproof.storages.remove(&hashed_address).unwrap_or_default(); + let storage_multiproof = account_multiproof + .storages + .remove(&hashed_address) + .unwrap_or_else(StorageMultiProof::empty); // Gather and record account trie nodes. let account = state @@ -115,24 +129,36 @@ where None }; let key = Nibbles::unpack(hashed_address); - let proof = account_multiproof.account_subtree.iter().filter(|e| key.starts_with(e.0)); - account_trie_nodes.extend(self.target_nodes(key.clone(), value, proof)?); + account_trie_nodes.extend( + self.target_nodes( + key.clone(), + value, + account_multiproof + .account_subtree + .matching_nodes_iter(&key) + .sorted_by(|a, b| a.0.cmp(b.0)), + )?, + ); // Gather and record storage trie nodes for this account. let mut storage_trie_nodes = BTreeMap::default(); let storage = state.storages.get(&hashed_address); for hashed_slot in hashed_slots { - let slot_key = Nibbles::unpack(hashed_slot); + let slot_nibbles = Nibbles::unpack(hashed_slot); let slot_value = storage .and_then(|s| s.storage.get(&hashed_slot)) .filter(|v| !v.is_zero()) .map(|v| alloy_rlp::encode_fixed_size(v).to_vec()); - let proof = storage_multiproof.subtree.iter().filter(|e| slot_key.starts_with(e.0)); - storage_trie_nodes.extend(self.target_nodes( - slot_key.clone(), - slot_value, - proof, - )?); + storage_trie_nodes.extend( + self.target_nodes( + slot_nibbles.clone(), + slot_value, + storage_multiproof + .subtree + .matching_nodes_iter(&slot_nibbles) + .sorted_by(|a, b| a.0.cmp(b.0)), + )?, + ); } Self::next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { @@ -140,19 +166,25 @@ where let mut padded_key = key.pack(); padded_key.resize(32, 0); let target_key = B256::from_slice(&padded_key); - let mut proof = Proof::new( + let storage_prefix_set = self + .prefix_sets + .storage_prefix_sets + .get(&hashed_address) + .cloned() + .unwrap_or_default(); + let proof = StorageProof::new_hashed( self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone(), + hashed_address, ) - .with_prefix_sets_mut(self.prefix_sets.clone()) - .with_target((hashed_address, HashSet::from([target_key]))) - .storage_multiproof(hashed_address)?; + .with_prefix_set_mut(storage_prefix_set) + .storage_proof(HashSet::from_iter([target_key]))?; // The subtree only contains the proof for a single target. let node = - proof.subtree.remove(&key).ok_or(TrieWitnessError::MissingTargetNode(key))?; + proof.subtree.get(&key).ok_or(TrieWitnessError::MissingTargetNode(key))?; self.witness.insert(keccak256(node.as_ref()), node.clone()); // record in witness - Ok(node) + Ok(node.clone()) })?; } @@ -160,19 +192,17 @@ where // Right pad the target with 0s.
let mut padded_key = key.pack(); padded_key.resize(32, 0); - let mut proof = + let targets = HashMap::from_iter([(B256::from_slice(&padded_key), HashSet::default())]); + let proof = Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) .with_prefix_sets_mut(self.prefix_sets.clone()) - .with_target((B256::from_slice(&padded_key), HashSet::default())) - .multiproof()?; + .multiproof(targets)?; // The subtree only contains the proof for a single target. - let node = proof - .account_subtree - .remove(&key) - .ok_or(TrieWitnessError::MissingTargetNode(key))?; + let node = + proof.account_subtree.get(&key).ok_or(TrieWitnessError::MissingTargetNode(key))?; self.witness.insert(keccak256(node.as_ref()), node.clone()); // record in witness - Ok(node) + Ok(node.clone()) })?; Ok(self.witness) @@ -185,9 +215,10 @@ where key: Nibbles, value: Option<Vec<u8>>, proof: impl IntoIterator<Item = (&'b Nibbles, &'b Bytes)>, - ) -> Result<BTreeMap<Nibbles, Either<B256, Vec<u8>>>, StateProofError> { + ) -> Result<BTreeMap<Nibbles, Either<B256, Vec<u8>>>, TrieWitnessError> { let mut trie_nodes = BTreeMap::default(); - for (path, encoded) in proof { + let mut proof_iter = proof.into_iter().enumerate().peekable(); + while let Some((idx, (path, encoded))) = proof_iter.next() { // Record the node in witness. self.witness.insert(keccak256(encoded.as_ref()), encoded.clone()); @@ -211,6 +242,11 @@ where trie_nodes.insert(next_path.clone(), Either::Right(leaf.value.clone())); } } + TrieNode::EmptyRoot => { + if idx != 0 || proof_iter.peek().is_some() { + return Err(TrieWitnessError::UnexpectedEmptyRoot(next_path)) + } + } }; } @@ -268,6 +304,9 @@ where TrieNode::Extension(ext) => { path.extend_from_slice(&ext.key); } + TrieNode::EmptyRoot => { + return Err(TrieWitnessError::UnexpectedEmptyRoot(path)) + } } } } diff --git a/examples/beacon-api-sidecar-fetcher/Cargo.toml b/examples/beacon-api-sidecar-fetcher/Cargo.toml index 80f5f726d96d..47a2a181f7e5 100644 --- a/examples/beacon-api-sidecar-fetcher/Cargo.toml +++ b/examples/beacon-api-sidecar-fetcher/Cargo.toml @@ -10,6 +10,7 @@ reth.workspace = true reth-node-ethereum.workspace = true alloy-rpc-types-beacon.workspace = true +alloy-primitives.workspace = true clap.workspace = true eyre.workspace = true diff --git a/examples/beacon-api-sidecar-fetcher/src/main.rs b/examples/beacon-api-sidecar-fetcher/src/main.rs index ff6659642705..7d8880ca185c 100644 --- a/examples/beacon-api-sidecar-fetcher/src/main.rs +++ b/examples/beacon-api-sidecar-fetcher/src/main.rs @@ -18,11 +18,12 @@ use std::{ net::{IpAddr, Ipv4Addr}, }; +use alloy_primitives::B256; use clap::Parser; use futures_util::{stream::FuturesUnordered, StreamExt}; use mined_sidecar::MinedSidecarStream; use reth::{ - args::utils::DefaultChainSpecParser, builder::NodeHandle, cli::Cli, primitives::B256, + args::utils::EthereumChainSpecParser, builder::NodeHandle, cli::Cli, providers::CanonStateSubscriptions, }; use reth_node_ethereum::EthereumNode; @@ -30,7 +31,7 @@ use reth_node_ethereum::EthereumNode; pub mod mined_sidecar; fn main() { - Cli::<DefaultChainSpecParser>::parse() + Cli::<EthereumChainSpecParser>::parse() .run(|builder, beacon_config| async move { // launch the node let NodeHandle { node, node_exit_future } = diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 04fbd5fbfbc0..cc761aa98a61 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -1,10 +1,11 @@ use crate::BeaconSidecarConfig; +use alloy_primitives::B256; use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle,
SidecarIterator}; use eyre::Result; use futures_util::{stream::FuturesUnordered, Future, Stream, StreamExt}; use reqwest::{Error, StatusCode}; use reth::{ - primitives::{BlobTransaction, SealedBlockWithSenders, B256}, + primitives::{BlobTransaction, SealedBlockWithSenders}, providers::CanonStateNotification, transaction_pool::{BlobStoreError, TransactionPoolExt}, }; @@ -115,7 +116,7 @@ where let block_metadata = BlockMetadata { block_hash: block.hash(), block_number: block.number, - gas_used: block.gas_used as u64, + gas_used: block.gas_used, }; actions_to_queue.push(BlobTransactionEvent::Mined(MinedBlob { transaction, @@ -194,7 +195,7 @@ where let block_metadata = BlockMetadata { block_hash: new.tip().block.hash(), block_number: new.tip().block.number, - gas_used: new.tip().block.gas_used as u64, + gas_used: new.tip().block.gas_used, }; BlobTransactionEvent::Reorged(ReorgedBlob { transaction_hash, @@ -267,7 +268,7 @@ async fn fetch_blobs_for_block( let block_metadata = BlockMetadata { block_hash: block.hash(), block_number: block.number, - gas_used: block.gas_used as u64, + gas_used: block.gas_used, }; BlobTransactionEvent::Mined(MinedBlob { transaction, block_metadata }) }) diff --git a/examples/beacon-api-sse/src/main.rs b/examples/beacon-api-sse/src/main.rs index 53078e5bc8ff..81535ef6140b 100644 --- a/examples/beacon-api-sse/src/main.rs +++ b/examples/beacon-api-sse/src/main.rs @@ -21,13 +21,13 @@ use alloy_rpc_types_beacon::events::PayloadAttributesEvent; use clap::Parser; use futures_util::stream::StreamExt; use mev_share_sse::{client::EventStream, EventClient}; -use reth::{args::utils::DefaultChainSpecParser, cli::Cli}; +use reth::{args::utils::EthereumChainSpecParser, cli::Cli}; use reth_node_ethereum::EthereumNode; use std::net::{IpAddr, Ipv4Addr}; use tracing::{info, warn}; fn main() { - Cli::<DefaultChainSpecParser>::parse() + Cli::<EthereumChainSpecParser>::parse() .run(|builder, args| async move { let handle = builder.node(EthereumNode::default()).launch().await?; diff --git a/examples/bsc-p2p/Cargo.toml b/examples/bsc-p2p/Cargo.toml index dde02080d135..fecbab7a8f93 100644 --- a/examples/bsc-p2p/Cargo.toml +++ b/examples/bsc-p2p/Cargo.toml @@ -16,9 +16,15 @@ reth-network-peers.workspace = true reth-primitives.workspace = true reth-tracing.workspace = true -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", +] } tokio.workspace = true tokio-stream.workspace = true serde_json.workspace = true + +alloy-primitives.workspace = true diff --git a/examples/bsc-p2p/src/chainspec.rs b/examples/bsc-p2p/src/chainspec.rs index 11c570233263..8a47a604e726 100644 --- a/examples/bsc-p2p/src/chainspec.rs +++ b/examples/bsc-p2p/src/chainspec.rs @@ -1,8 +1,8 @@ +use alloy_primitives::{b256, B256}; use reth_chainspec::{ once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, }; use reth_network_peers::NodeRecord; -use reth_primitives::{b256, B256}; use std::sync::Arc; diff --git a/examples/bsc-p2p/src/main.rs b/examples/bsc-p2p/src/main.rs index 7756728aa9df..e46ea4bec357 100644 --- a/examples/bsc-p2p/src/main.rs +++ b/examples/bsc-p2p/src/main.rs @@ -49,9 +49,8 @@ async fn main() { // The network configuration let mut net_cfg = NetworkConfig::builder(secret_key) - .chain_spec(bsc_chain_spec()) .listener_addr(local_addr) - .build_with_noop_provider() + .build_with_noop_provider(bsc_chain_spec()) .set_discovery_v4( Discv4ConfigBuilder::default() .add_boot_nodes(boot_nodes())
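The bsc-p2p hunk above captures the network builder change applied throughout these examples: the chain spec is no longer set via a separate `.chain_spec(..)` step, it is handed to `build_with_noop_provider` directly. A minimal sketch of the new flow, assuming the `reth_network`/`reth_chainspec` types these examples already use (the `build_net_cfg` wrapper is illustrative, not from the PR):

use std::sync::Arc;

use reth_chainspec::ChainSpec;
use reth_network::NetworkConfig;
use secp256k1::{rand, SecretKey};

// Build a network config against any chain spec (e.g. the example's
// `bsc_chain_spec()`); the spec now rides along with the provider call
// instead of a dedicated `.chain_spec(..)` builder step.
fn build_net_cfg(spec: Arc<ChainSpec>) {
    let secret_key = SecretKey::new(&mut rand::thread_rng());
    let _net_cfg = NetworkConfig::builder(secret_key).build_with_noop_provider(spec);
}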
diff --git a/examples/custom-dev-node/Cargo.toml b/examples/custom-dev-node/Cargo.toml index e4c5a9dab2ed..8ed277686f4e 100644 --- a/examples/custom-dev-node/Cargo.toml +++ b/examples/custom-dev-node/Cargo.toml @@ -10,7 +10,6 @@ license.workspace = true reth.workspace = true reth-chainspec.workspace = true reth-node-core.workspace = true -reth-primitives.workspace = true reth-node-ethereum = { workspace = true, features = ["test-utils"] } futures-util.workspace = true @@ -19,3 +18,4 @@ tokio.workspace = true serde_json.workspace = true alloy-genesis.workspace = true +alloy-primitives.workspace = true diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs index 93e5df6287c4..7fa44418c523 100644 --- a/examples/custom-dev-node/src/main.rs +++ b/examples/custom-dev-node/src/main.rs @@ -6,6 +6,7 @@ use std::sync::Arc; use alloy_genesis::Genesis; +use alloy_primitives::{b256, hex}; use futures_util::StreamExt; use reth::{ builder::{NodeBuilder, NodeHandle}, @@ -16,7 +17,6 @@ use reth::{ use reth_chainspec::ChainSpec; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; -use reth_primitives::{b256, hex}; #[tokio::main] async fn main() -> eyre::Result<()> { diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index 9a949b8367fd..f826451d2038 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -18,6 +18,7 @@ reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } +alloy-primitives.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 5e0503701ab9..213a156af8fd 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -23,6 +23,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use alloy_genesis::Genesis; +use alloy_primitives::{Address, B256}; use alloy_rpc_types::{ engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, @@ -63,7 +64,7 @@ use reth_payload_builder::{ EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderError, PayloadBuilderHandle, PayloadBuilderService, }; -use reth_primitives::{Address, Withdrawals, B256}; +use reth_primitives::Withdrawals; use reth_tracing::{RethTracer, Tracer}; /// A custom payload attributes type. 
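The example-crate churn above is one mechanical migration: primitive types and macros (`Address`, `B256`, `b256!`, `hex`, and friends) are imported from `alloy_primitives` directly rather than through `reth_primitives` re-exports, with `alloy-primitives.workspace = true` added to each manifest. A hedged before/after sketch of the pattern (the constant and function are illustrative, not from the PR; the hash is the Ethereum mainnet genesis hash):

// Before: pulled through reth_primitives re-exports.
// use reth_primitives::{b256, Address, B256};

// After: the same items come from alloy_primitives directly, while
// reth_primitives keeps only node-specific types such as Withdrawals.
use alloy_primitives::{b256, Address, B256};

const MAINNET_GENESIS: B256 =
    b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3");

fn beneficiary() -> Address {
    Address::ZERO
}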
diff --git a/examples/custom-evm/Cargo.toml b/examples/custom-evm/Cargo.toml index 7642dc80cf2f..53563ab9575b 100644 --- a/examples/custom-evm/Cargo.toml +++ b/examples/custom-evm/Cargo.toml @@ -15,6 +15,7 @@ reth-primitives.workspace = true reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true +alloy-primitives.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 3a93d85ad6fb..d931c3b275bf 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -3,17 +3,14 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use alloy_genesis::Genesis; +use alloy_primitives::{address, Address, Bytes, U256}; use reth::{ builder::{ components::{ExecutorBuilder, PayloadServiceBuilder}, BuilderContext, NodeBuilder, }, payload::{EthBuiltPayload, EthPayloadBuilderAttributes}, - primitives::{ - address, - revm_primitives::{Env, PrecompileResult}, - Bytes, - }, + primitives::revm_primitives::{Env, PrecompileResult}, revm::{ handler::register::EvmHandler, inspector_handle_register, @@ -38,7 +35,7 @@ use reth_node_ethereum::{ }; use reth_primitives::{ revm_primitives::{CfgEnvWithHandlerCfg, TxEnv}, - Address, Header, TransactionSigned, U256, + Header, TransactionSigned, }; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; diff --git a/examples/custom-inspector/Cargo.toml b/examples/custom-inspector/Cargo.toml index 101806276099..18629556c42f 100644 --- a/examples/custom-inspector/Cargo.toml +++ b/examples/custom-inspector/Cargo.toml @@ -8,7 +8,7 @@ license.workspace = true [dependencies] reth.workspace = true reth-node-ethereum.workspace = true -reth-rpc-types.workspace = true alloy-rpc-types.workspace = true clap = { workspace = true, features = ["derive"] } futures-util.workspace = true +alloy-primitives.workspace = true diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index 87f8dd7e95bf..700de274e681 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -10,14 +10,15 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_primitives::Address; use alloy_rpc_types::state::EvmOverrides; use clap::Parser; use futures_util::StreamExt; use reth::{ - args::utils::DefaultChainSpecParser, + args::utils::EthereumChainSpecParser, builder::NodeHandle, cli::Cli, - primitives::{Address, BlockNumberOrTag, IntoRecoveredTransaction}, + primitives::BlockNumberOrTag, revm::{ inspector_handle_register, interpreter::{Interpreter, OpCode}, @@ -29,7 +30,7 @@ use reth::{ use reth_node_ethereum::node::EthereumNode; fn main() { - Cli::<DefaultChainSpecParser>::parse() + Cli::<EthereumChainSpecParser>::parse() .run(|builder, args| async move { // launch the node let NodeHandle { node, node_exit_future } = diff --git a/examples/custom-payload-builder/Cargo.toml b/examples/custom-payload-builder/Cargo.toml index f10bd8058b64..1c160fe5ec87 100644 --- a/examples/custom-payload-builder/Cargo.toml +++ b/examples/custom-payload-builder/Cargo.toml @@ -15,6 +15,8 @@ reth-payload-builder.workspace = true reth-node-ethereum.workspace = true reth-ethereum-payload-builder.workspace = true +alloy-primitives.workspace = true + tracing.workspace = true futures-util.workspace = true eyre.workspace = true diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index 807cbf6a53bb..f5d64e41cd09 100644 ---
a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -1,4 +1,5 @@ use crate::job::EmptyBlockPayloadJob; +use alloy_primitives::Bytes; use reth::{ providers::{BlockReaderIdExt, BlockSource, StateProviderFactory}, tasks::TaskSpawner, @@ -7,7 +8,7 @@ use reth::{ use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig}; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator}; -use reth_primitives::{BlockNumberOrTag, Bytes}; +use reth_primitives::BlockNumberOrTag; use std::sync::Arc; /// The generator type that creates new jobs that build empty blocks. diff --git a/examples/custom-rlpx-subprotocol/Cargo.toml b/examples/custom-rlpx-subprotocol/Cargo.toml index d2d1caab6355..d59d16f35cfc 100644 --- a/examples/custom-rlpx-subprotocol/Cargo.toml +++ b/examples/custom-rlpx-subprotocol/Cargo.toml @@ -15,9 +15,9 @@ reth-network-api.workspace = true reth-node-ethereum.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-primitives.workspace = true -reth-rpc-types.workspace = true reth.workspace = true tokio-stream.workspace = true eyre.workspace = true rand.workspace = true tracing.workspace = true +alloy-primitives.workspace = true diff --git a/examples/custom-rlpx-subprotocol/src/main.rs b/examples/custom-rlpx-subprotocol/src/main.rs index 8dc95641e201..e16f71071c8c 100644 --- a/examples/custom-rlpx-subprotocol/src/main.rs +++ b/examples/custom-rlpx-subprotocol/src/main.rs @@ -19,7 +19,6 @@ use reth_network::{ }; use reth_network_api::{test_utils::PeersHandleProvider, NetworkInfo}; use reth_node_ethereum::EthereumNode; -use reth_provider::test_utils::NoopProvider; use subprotocol::{ connection::CustomCommand, protocol::{ @@ -51,7 +50,7 @@ fn main() -> eyre::Result<()> { .listener_addr(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0))) .disable_discovery() .add_rlpx_sub_protocol(custom_rlpx_handler_2.into_rlpx_sub_protocol()) - .build(NoopProvider::default()); + .build_with_noop_provider(node.chain_spec()); // spawn the second network instance let subnetwork = NetworkManager::new(net_cfg).await?; diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs index a6d835b70c26..6017871d2f99 100644 --- a/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs +++ b/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs @@ -1,7 +1,7 @@ use super::protocol::proto::{CustomRlpxProtoMessage, CustomRlpxProtoMessageKind}; +use alloy_primitives::bytes::BytesMut; use futures::{Stream, StreamExt}; use reth_eth_wire::multiplex::ProtocolConnection; -use reth_primitives::BytesMut; use std::{ pin::Pin, task::{ready, Context, Poll}, diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs index 8b179a447d9f..043d37c4f6ae 100644 --- a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs +++ b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs @@ -1,8 +1,8 @@ //! Simple RLPx Ping Pong protocol that also supports sending messages, //!
following [RLPx specs](https://github.com/ethereum/devp2p/blob/master/rlpx.md) +use alloy_primitives::bytes::{Buf, BufMut, BytesMut}; use reth_eth_wire::{protocol::Protocol, Capability}; -use reth_primitives::{Buf, BufMut, BytesMut}; #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml index c0fbe74e1874..0a7ef9bb6b29 100644 --- a/examples/db-access/Cargo.toml +++ b/examples/db-access/Cargo.toml @@ -11,11 +11,11 @@ reth-chainspec.workspace = true reth-db.workspace = true reth-primitives.workspace = true reth-provider.workspace = true -reth-rpc-types.workspace = true reth-node-ethereum.workspace = true reth-node-types.workspace = true alloy-rpc-types.workspace = true +alloy-primitives.workspace = true eyre.workspace = true diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 53b56b738c0a..ab018a0b07a6 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,8 +1,9 @@ +use alloy_primitives::{Address, Sealable, B256}; use alloy_rpc_types::{Filter, FilteredParams}; use reth_chainspec::ChainSpecBuilder; use reth_node_ethereum::EthereumNode; use reth_node_types::NodeTypesWithDBAdapter; -use reth_primitives::{alloy_primitives::Sealable, Address, SealedHeader, B256}; +use reth_primitives::SealedHeader; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, diff --git a/examples/node-custom-rpc/src/main.rs b/examples/node-custom-rpc/src/main.rs index 40a7beb7c96d..5aeecfd2915c 100644 --- a/examples/node-custom-rpc/src/main.rs +++ b/examples/node-custom-rpc/src/main.rs @@ -14,12 +14,12 @@ use clap::Parser; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth::{args::utils::DefaultChainSpecParser, cli::Cli}; +use reth::{args::utils::EthereumChainSpecParser, cli::Cli}; use reth_node_ethereum::EthereumNode; use reth_transaction_pool::TransactionPool; fn main() { - Cli::<DefaultChainSpecParser>::parse() + Cli::<EthereumChainSpecParser>::parse() .run(|builder, args| async move { let handle = builder .node(EthereumNode::default()) diff --git a/examples/polygon-p2p/Cargo.toml b/examples/polygon-p2p/Cargo.toml index b3a7af7506b4..bdf9a27ce560 100644 --- a/examples/polygon-p2p/Cargo.toml +++ b/examples/polygon-p2p/Cargo.toml @@ -8,7 +8,11 @@ license.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", +] } tokio.workspace = true reth-network.workspace = true reth-chainspec.workspace = true @@ -18,3 +22,4 @@ reth-tracing.workspace = true tokio-stream.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-discv4 = { workspace = true, features = ["test-utils"] } +alloy-primitives.workspace = true diff --git a/examples/polygon-p2p/src/chain_cfg.rs b/examples/polygon-p2p/src/chain_cfg.rs index eabcfb2e71db..84bfac8f209f 100644 --- a/examples/polygon-p2p/src/chain_cfg.rs +++ b/examples/polygon-p2p/src/chain_cfg.rs @@ -1,8 +1,9 @@ +use alloy_primitives::{b256, B256}; use reth_chainspec::{ once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, }; use reth_discv4::NodeRecord; -use reth_primitives::{b256, Head, B256}; +use reth_primitives::Head; use std::sync::Arc; diff
--git a/examples/polygon-p2p/src/main.rs b/examples/polygon-p2p/src/main.rs index d93c92cebb02..6078ae14cb85 100644 --- a/examples/polygon-p2p/src/main.rs +++ b/examples/polygon-p2p/src/main.rs @@ -14,7 +14,6 @@ use reth_discv4::Discv4ConfigBuilder; use reth_network::{ config::NetworkMode, NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager, }; -use reth_provider::test_utils::NoopProvider; use reth_tracing::{ tracing::info, tracing_subscriber::filter::LevelFilter, LayerInfo, LogFormat, RethTracer, Tracer, @@ -47,11 +46,10 @@ async fn main() { // The network configuration let net_cfg = NetworkConfig::builder(secret_key) - .chain_spec(polygon_chain_spec()) .set_head(head()) .network_mode(NetworkMode::Work) .listener_addr(local_addr) - .build(NoopProvider::default()); + .build_with_noop_provider(polygon_chain_spec()); // Set Discv4 lookup interval to 1 second let mut discv4_cfg = Discv4ConfigBuilder::default(); diff --git a/examples/stateful-precompile/Cargo.toml b/examples/stateful-precompile/Cargo.toml index 2ae4656eee86..47a784c36e14 100644 --- a/examples/stateful-precompile/Cargo.toml +++ b/examples/stateful-precompile/Cargo.toml @@ -14,6 +14,7 @@ reth-primitives.workspace = true reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true +alloy-primitives.workspace = true eyre.workspace = true parking_lot.workspace = true diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index 88ca2ac1a7bb..05a6fd86c935 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -3,14 +3,12 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use alloy_genesis::Genesis; +use alloy_primitives::{Address, Bytes, U256}; use parking_lot::RwLock; use reth::{ api::NextBlockEnvAttributes, builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, - primitives::{ - revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, - Address, Bytes, U256, - }, + primitives::revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, revm::{ handler::register::EvmHandler, inspector_handle_register, diff --git a/examples/txpool-tracing/Cargo.toml b/examples/txpool-tracing/Cargo.toml index 219292ee0686..38d0ad9409b6 100644 --- a/examples/txpool-tracing/Cargo.toml +++ b/examples/txpool-tracing/Cargo.toml @@ -11,3 +11,4 @@ reth-node-ethereum.workspace = true alloy-rpc-types-trace.workspace = true clap = { workspace = true, features = ["derive"] } futures-util.workspace = true +alloy-primitives.workspace = true diff --git a/examples/txpool-tracing/src/main.rs b/examples/txpool-tracing/src/main.rs index c9d27089d9ce..f8c2e19d203a 100644 --- a/examples/txpool-tracing/src/main.rs +++ b/examples/txpool-tracing/src/main.rs @@ -10,21 +10,18 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_primitives::Address; use alloy_rpc_types_trace::{parity::TraceType, tracerequest::TraceCallRequest}; use clap::Parser; use futures_util::StreamExt; use reth::{ - args::utils::DefaultChainSpecParser, - builder::NodeHandle, - cli::Cli, - primitives::{Address, IntoRecoveredTransaction}, - rpc::compat::transaction::transaction_to_call_request, - transaction_pool::TransactionPool, + args::utils::EthereumChainSpecParser, builder::NodeHandle, cli::Cli, + rpc::compat::transaction::transaction_to_call_request, transaction_pool::TransactionPool, }; use reth_node_ethereum::node::EthereumNode; fn 
main() { - Cli::<DefaultChainSpecParser>::parse() + Cli::<EthereumChainSpecParser>::parse() .run(|builder, args| async move { // launch the node let NodeHandle { node, node_exit_future } = diff --git a/fork.yaml b/fork.yaml index 1891d2583975..0ae42b86efad 100644 --- a/fork.yaml +++ b/fork.yaml @@ -4,7 +4,7 @@ footer: | base: name: reth url: https://github.com/paradigmxyz/reth - hash: 5d2867f2c5bd2aaa5673f31cf4c13b8f4f1e2c39 + hash: 4960b927bcf5b1ce1fffd88f76c77929110b9eb0 fork: name: scroll-reth url: https://github.com/scroll-tech/reth diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 8c8c3189a5d3..3f3df15363a1 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -1,7 +1,7 @@ //! Shared models for <https://github.com/ethereum/tests> use crate::{assert::assert_equal, Error}; -use alloy_primitives::{Address, Bloom, Bytes, B256, B64, U256}; +use alloy_primitives::{keccak256, Address, Bloom, Bytes, B256, B64, U256}; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_db::tables; use reth_db_api::{ @@ -9,8 +9,7 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, }; use reth_primitives::{ - keccak256, Account as RethAccount, Bytecode, Header as RethHeader, SealedHeader, StorageEntry, - Withdrawals, + Account as RethAccount, Bytecode, Header as RethHeader, SealedHeader, StorageEntry, Withdrawals, }; use serde::Deserialize; use std::{collections::BTreeMap, ops::Deref}; @@ -94,12 +93,12 @@ pub struct Header { impl From<Header>
for SealedHeader { fn from(value: Header) -> Self { let header = RethHeader { - base_fee_per_gas: value.base_fee_per_gas.map(|v| v.to::<u64>().into()), + base_fee_per_gas: value.base_fee_per_gas.map(|v| v.to::<u64>()), beneficiary: value.coinbase, difficulty: value.difficulty, extra_data: value.extra_data, - gas_limit: value.gas_limit.to::<u64>().into(), - gas_used: value.gas_used.to::<u64>().into(), + gas_limit: value.gas_limit.to::<u64>(), + gas_used: value.gas_used.to::<u64>(), mix_hash: value.mix_hash, nonce: u64::from_be_bytes(value.nonce.0).into(), number: value.number.to::<u64>(), @@ -111,8 +110,8 @@ impl From<Header>
for SealedHeader { parent_hash: value.parent_hash, logs_bloom: value.bloom, withdrawals_root: value.withdrawals_root, - blob_gas_used: value.blob_gas_used.map(|v| v.to::<u64>().into()), - excess_blob_gas: value.excess_blob_gas.map(|v| v.to::<u64>().into()), + blob_gas_used: value.blob_gas_used.map(|v| v.to::<u64>()), + excess_blob_gas: value.excess_blob_gas.map(|v| v.to::<u64>()), parent_beacon_block_root: value.parent_beacon_block_root, requests_root: value.requests_root, }; diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index af592e294c84..49a59ecf6ae3 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -17,6 +17,7 @@ reth-primitives = { workspace = true, features = ["secp256k1"] } alloy-eips.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 46930a5dd9ef..70d6cc02b655 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,5 +1,6 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. +use alloy_consensus::TxLegacy; use alloy_eips::{ eip6110::DepositRequest, eip7002::WithdrawalRequest, eip7251::ConsolidationRequest, }; @@ -10,7 +11,7 @@ use rand::{ }; use reth_primitives::{ proofs, sign_message, Account, BlockBody, Header, Log, Receipt, Request, Requests, SealedBlock, - SealedHeader, StorageEntry, Transaction, TransactionSigned, TxLegacy, Withdrawal, Withdrawals, + SealedHeader, StorageEntry, Transaction, TransactionSigned, Withdrawal, Withdrawals, }; use secp256k1::{Keypair, Secp256k1}; use std::{ @@ -220,11 +221,11 @@ pub fn random_block<R: Rng>(rng: &mut R, number: u64, block_params: BlockParams) let sealed = Header { parent_hash: block_params.parent.unwrap_or_default(), number, - gas_used: total_gas.into(), - gas_limit: total_gas.into(), + gas_used: total_gas, + gas_limit: total_gas, transactions_root, ommers_hash, - base_fee_per_gas: Some(rng.gen::<u64>().into()), + base_fee_per_gas: Some(rng.gen()), requests_root, withdrawals_root, ..Default::default() @@ -497,9 +498,10 @@ pub fn random_request<R: Rng>(rng: &mut R) -> Request { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::TxEip1559; use alloy_eips::eip2930::AccessList; - use alloy_primitives::Parity; - use reth_primitives::{hex, public_key_to_address, Signature, TxEip1559}; + use alloy_primitives::{hex, Parity}; + use reth_primitives::{public_key_to_address, Signature}; use std::str::FromStr; #[test] diff --git a/testing/testing-utils/src/genesis_allocator.rs b/testing/testing-utils/src/genesis_allocator.rs index 8a5adb300240..acf5e091cba3 100644 --- a/testing/testing-utils/src/genesis_allocator.rs +++ b/testing/testing-utils/src/genesis_allocator.rs @@ -18,7 +18,7 @@ use std::{ /// /// # Example /// ``` -/// # use reth_primitives::{Address, U256, hex, Bytes}; +/// # use alloy_primitives::{Address, U256, hex, Bytes}; /// # use reth_testing_utils::GenesisAllocator; /// # use std::str::FromStr; /// let mut allocator = GenesisAllocator::default();
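The ef-tests and generator hunks above all follow from the same underlying type change: `Header`'s gas and fee fields are plain `u64` again (with `base_fee_per_gas`, `blob_gas_used`, and `excess_blob_gas` as `Option<u64>`), so the widening `.into()` and narrowing `as u64` conversions disappear. A minimal sketch under that assumption (the `make_header` helper is illustrative, not from the PR):

use reth_primitives::Header;

// With u64 gas fields, values flow through without any conversion.
fn make_header(total_gas: u64) -> Header {
    Header {
        number: 1,
        gas_used: total_gas,       // previously `total_gas.into()`
        gas_limit: total_gas,      // previously `total_gas.into()`
        base_fee_per_gas: Some(7), // previously `Some(rng.gen::<u64>().into())`
        ..Default::default()
    }
}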