diff --git a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh index 477abc5d1740..0166f43c876a 100755 --- a/.github/assets/check_rv32imac.sh +++ b/.github/assets/check_rv32imac.sh @@ -8,6 +8,8 @@ crates_to_check=( reth-network-peers reth-trie-common reth-chainspec + reth-consensus + reth-consensus-common ## ethereum reth-ethereum-forks diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index d34ea5d95690..673a5f7fcef4 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -64,8 +64,10 @@ exclude_crates=( reth-stages-api # reth-provider, reth-prune reth-static-file # tokio reth-transaction-pool # c-kzg + reth-payload-util # reth-transaction-pool reth-trie-parallel # tokio reth-testing-utils + reth-optimism-txpool # reth-transaction-pool reth-scroll-cli # tokio reth-scroll-node # tokio reth-scroll # tokio @@ -74,6 +76,7 @@ exclude_crates=( reth-scroll-evm # tokio reth-scroll-rpc #tokio reth-scroll-engine-primitives # proptest + reth-scroll-payload # c-kzg ) # Array to hold the results diff --git a/.github/assets/hive/build_simulators.sh b/.github/assets/hive/build_simulators.sh index b33f4db4ee79..9d7c5bd1569f 100755 --- a/.github/assets/hive/build_simulators.sh +++ b/.github/assets/hive/build_simulators.sh @@ -11,7 +11,7 @@ go build . 
# Run each hive command in the background for each simulator and wait echo "Building images" -./hive -client reth --sim "pyspec" -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/eest" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/pectra-devnet-6%40v1.0.0/fixtures_pectra-devnet-6.tar.gz -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/engine" -sim.timelimit 1s || true & ./hive -client reth --sim "devp2p" -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/rpc-compat" -sim.timelimit 1s || true & @@ -26,7 +26,8 @@ docker save hive/hiveproxy:latest -o ../hive_assets/hiveproxy.tar & docker save hive/simulators/devp2p:latest -o ../hive_assets/devp2p.tar & docker save hive/simulators/ethereum/engine:latest -o ../hive_assets/engine.tar & docker save hive/simulators/ethereum/rpc-compat:latest -o ../hive_assets/rpc_compat.tar & -docker save hive/simulators/ethereum/pyspec:latest -o ../hive_assets/pyspec.tar & +docker save hive/simulators/ethereum/eest/consume-engine:latest -o ../hive_assets/eest_engine.tar & +docker save hive/simulators/ethereum/eest/consume-rlp:latest -o ../hive_assets/eest_rlp.tar & docker save hive/simulators/smoke/genesis:latest -o ../hive_assets/smoke_genesis.tar & docker save hive/simulators/smoke/network:latest -o ../hive_assets/smoke_network.tar & docker save hive/simulators/ethereum/sync:latest -o ../hive_assets/ethereum_sync.tar & diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index ae5a66f03bb0..ef2c94224ba4 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -15,8 +15,6 @@ rpc-compat: # https://github.com/paradigmxyz/reth/issues/13879 - eth_createAccessList/create-al-contract-eip1559 (reth) - - eth_getBlockByNumber/get-genesis (reth) - - eth_getTransactionReceipt/get-setcode-tx (reth) # https://github.com/paradigmxyz/reth/issues/8732 
engine-withdrawals: @@ -51,3 +49,9 @@ engine-cancun: - Invalid NewPayload, ExcessBlobGas, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) sync: [] + +# no fix: it’s too expensive to check whether the storage is empty on each creation +eest/consume-engine: + - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth +eest/consume-rlp: + - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test-zero_nonce]-reth diff --git a/.github/assets/hive/load_images.sh b/.github/assets/hive/load_images.sh index 05e1cb9905fa..37a2f82de545 100755 --- a/.github/assets/hive/load_images.sh +++ b/.github/assets/hive/load_images.sh @@ -11,6 +11,8 @@ IMAGES=( "/tmp/smoke_genesis.tar" "/tmp/smoke_network.tar" "/tmp/ethereum_sync.tar" + "/tmp/eest_engine.tar" + "/tmp/eest_rlp.tar" "/tmp/reth_image.tar" ) @@ -22,4 +24,4 @@ done wait -docker image ls -a \ No newline at end of file +docker image ls -a diff --git a/.github/assets/hive/run_simulator.sh b/.github/assets/hive/run_simulator.sh index 731c94c3f69b..cb4d8110dfa3 100755 --- a/.github/assets/hive/run_simulator.sh +++ b/.github/assets/hive/run_simulator.sh @@ -7,7 +7,7 @@ sim="${1}" limit="${2}" run_hive() { - hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 4 --client reth 2>&1 | tee /tmp/log || true + hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 8 --client reth 2>&1 | tee /tmp/log || true } check_log() { diff --git a/.github/assets/kurtosis_op_network_params.yaml b/.github/assets/kurtosis_op_network_params.yaml index 13b5b0bcd91d..12e6f67e504a 100644 --- a/.github/assets/kurtosis_op_network_params.yaml +++ b/.github/assets/kurtosis_op_network_params.yaml @@ -9,9 +9,13 @@ optimism_package: - participants: - el_type: op-geth cl_type: op-node + # https://github.com/ethpandaops/optimism-package/issues/157 + cl_image: 
"us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:a79e8cc06aa354511983fafcb6d71ab04cdfadbc" - el_type: op-reth el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" cl_type: op-node + # https://github.com/ethpandaops/optimism-package/issues/157 + cl_image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:a79e8cc06aa354511983fafcb6d71ab04cdfadbc" batcher_params: extra_params: - "--throttle-interval=0" diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 8a96d6a38348..38e7be6841cf 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -114,40 +114,38 @@ jobs: - eth_syncing # debug_ rpc methods - debug_ - # Pyspec cancun jobs - # TODO: uncomment when https://github.com/ethereum/hive/issues/1147 is fixed - #- sim: pyspec - # include: [cancun/eip4844] - #- sim: pyspec - # include: [cancun/eip4788] - #- sim: pyspec - # include: [cancun/eip6780] - #- sim: pyspec - # include: [cancun/eip5656] - #- sim: pyspec - # include: [cancun/eip1153] - #- sim: pyspec - # include: [cancun/eip7516] - # Pyspec shanghai jobs - #- sim: pyspec - # include: [shanghai/eip3651] - #- sim: pyspec - # include: [shanghai/eip3855] - #- sim: pyspec - # include: [shanghai/eip3860] - #- sim: pyspec - # include: [shanghai/eip4895] - # Pyspec merge and earlier jobs - #- sim: pyspec - # include: [merge/] - #- sim: pyspec - # include: [berlin/] - #- sim: pyspec - # include: [istanbul/] - #- sim: pyspec - # include: [homestead/] - #- sim: pyspec - # include: [frontier/] + + # consume-engine + - sim: ethereum/eest/consume-engine + limit: tests/prague + - sim: ethereum/eest/consume-engine + limit: tests/cancun + - sim: ethereum/eest/consume-engine + limit: tests/shanghai + - sim: ethereum/eest/consume-engine + limit: tests/berlin + - sim: ethereum/eest/consume-engine + limit: tests/istanbul + - sim: ethereum/eest/consume-engine + limit: tests/homestead + - sim: ethereum/eest/consume-engine + limit: tests/frontier + + # consume-rlp + - sim: 
ethereum/eest/consume-rlp + limit: tests/prague + - sim: ethereum/eest/consume-rlp + limit: tests/cancun + - sim: ethereum/eest/consume-rlp + limit: tests/shanghai + - sim: ethereum/eest/consume-rlp + limit: tests/berlin + - sim: ethereum/eest/consume-rlp + limit: tests/istanbul + - sim: ethereum/eest/consume-rlp + limit: tests/homestead + - sim: ethereum/eest/consume-rlp + limit: tests/frontier needs: - prepare-reth - prepare-hive diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 30100557905f..10521fd3a310 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -34,7 +34,6 @@ jobs: - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@clippy with: - toolchain: nightly-2025-01-16 components: clippy - uses: Swatinem/rust-cache@v2 with: @@ -57,7 +56,6 @@ jobs: - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly with: - toolchain: nightly-2025-01-16 components: clippy - uses: Swatinem/rust-cache@v2 with: @@ -191,11 +189,11 @@ jobs: cargo udeps --workspace --lib --examples --tests --benches --all-features --locked \ --exclude reth-optimism-cli --exclude reth-optimism-consensus --exclude reth-optimism-payload-builder \ --exclude reth-optimism-node --exclude reth-optimism-evm --exclude reth-optimism-node --exclude reth-optimism-rpc \ - --exclude op-reth --exclude "example-*" --exclude reth --exclude reth-payload-primitives \ + --exclude reth-optimism-txpool --exclude op-reth --exclude "example-*" --exclude reth --exclude reth-payload-primitives \ --exclude reth-e2e-test-utils --exclude reth-ethereum-payload-builder --exclude reth-exex-test-utils \ --exclude reth-node-ethereum --exclude reth-scroll-cli --exclude reth-scroll-evm \ --exclude reth-scroll-node --exclude scroll-reth --exclude reth-scroll-rpc --exclude reth-scroll-trie \ - --exclude reth-scroll-engine-primitives --exclude scroll-alloy-provider + --exclude reth-scroll-engine-primitives --exclude reth-scroll-payload --exclude scroll-alloy-provider 
book: name: book diff --git a/Cargo.lock b/Cargo.lock index fca1fbb44730..57ebbdfe65a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -62,7 +62,7 @@ dependencies = [ "getrandom 0.2.15", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.1.58" +version = "0.1.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0d6c784abf2e061139798d51299da278fc8f02d7b7546662b898d9b22ab5e9" +checksum = "4d37bc62b68c056e3742265ab73c73d413d07357909e0e4ea1e95453066a7469" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -176,9 +176,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc9138f4f0912793642d453523c3116bd5d9e11de73b70177aa7cb3e94b98ad2" +checksum = "7f2d547eba3f2d331b0e08f64a24e202f66d4f291e2a3e0073914c0e1400ced3" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -189,7 +189,7 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.26", + "winnow", ] [[package]] @@ -273,9 +273,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24acd2f5ba97c7a320e67217274bc81fe3c3174b8e6144ec875d9d54e760e278" +checksum = "d62cf1b25f5a50ca2d329b0b4aeb0a0dedeaf225ad3c5099d83b1a4c4616186e" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -354,9 +354,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec878088ec6283ce1e90d280316aadd3d6ce3de06ff63d68953c855e7e447e92" +checksum = "bc1360603efdfba91151e623f13a4f4d3dc4af4adc1cbd90bf37c81e84db4c77" dependencies = [ "alloy-rlp", "arbitrary", @@ -377,7 
+377,7 @@ dependencies = [ "proptest-derive", "rand 0.8.5", "ruint", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "serde", "sha3", "tiny-keccak", @@ -408,7 +408,7 @@ dependencies = [ "async-stream", "async-trait", "auto_impl", - "dashmap", + "dashmap 6.1.0", "futures", "futures-utils-wasm", "lru 0.13.0", @@ -462,7 +462,7 @@ checksum = "a40e1ef334153322fd878d07e86af7a529bcb86b2439525920a88eba87bcf943" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -694,23 +694,23 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d039d267aa5cbb7732fa6ce1fd9b5e9e29368f580f80ba9d7a8450c794de4b2" +checksum = "13f28f2131dc3a7b8e2cda882758ad4d5231ca26281b9861d4b18c700713e2da" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620ae5eee30ee7216a38027dec34e0585c55099f827f92f50d11e3d2d3a4a954" +checksum = "1ee2da033256a3b27131c030933eab0460a709fbcc4d4bd57bf9a5650b2441c5" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -719,41 +719,41 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad9f7d057e00f8c5994e4ff4492b76532c51ead39353aa2ed63f8c50c0f4d52e" +checksum = "4e9d9918b0abb632818bf27e2dfb86b209be8433baacf22100b190bbc0904bd4" dependencies = [ "const-hex", "dunce", "heck", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.19" +version = 
"0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74e60b084fe1aef8acecda2743ff2d93c18ff3eb67a2d3b12f62582a1e66ef5e" +checksum = "a971129d242338d92009470a2f750d3b2630bc5da00a40a94d51f5d456b5712f" dependencies = [ "serde", - "winnow 0.6.26", + "winnow", ] [[package]] name = "alloy-sol-types" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1382302752cd751efd275f4d6ef65877ddf61e0e6f5ac84ef4302b79a33a31a" +checksum = "75f306fc801b3aa2e3c4785b7b5252ec8b19f77b30e3b75babfd23849c81bd8c" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -841,9 +841,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6917c79e837aa7b77b7a6dae9f89cbe15313ac161c4d3cfaf8909ef21f3d22d8" +checksum = "d95a94854e420f07e962f7807485856cde359ab99ab6413883e15235ad996e8b" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -947,7 +947,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -1145,7 +1145,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -1181,7 +1181,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -1192,7 +1192,7 @@ checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -1214,9 +1214,9 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "aurora-engine-modexp" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0aef7712851e524f35fbbb74fa6599c5cd8692056a1c36f9ca0d2001b670e7e5" +checksum = "518bc5745a6264b5fd7b09dffb9667e400ee9e2bbe18555fac75e1fe9afa0df9" dependencies = [ "hex", "num", @@ -1230,7 +1230,7 @@ checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -1336,7 +1336,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -1460,7 +1460,7 @@ dependencies = [ "boa_string", "indexmap 2.7.1", "num-bigint", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", ] [[package]] @@ -1480,7 +1480,7 @@ dependencies = [ "boa_string", "bytemuck", "cfg-if", - "dashmap", + "dashmap 6.1.0", "fast-float2", "hashbrown 0.15.2", "icu_normalizer", @@ -1496,7 +1496,7 @@ dependencies = [ "portable-atomic", "rand 0.8.5", "regress", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "ryu-js", "serde", "serde_json", @@ -1533,7 +1533,7 @@ dependencies = [ "indexmap 2.7.1", "once_cell", "phf", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "static_assertions", ] @@ -1545,7 +1545,7 @@ checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", "synstructure", ] @@ -1565,7 +1565,7 @@ dependencies = [ "num-bigint", "num-traits", "regress", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", ] [[package]] @@ -1582,7 +1582,7 @@ checksum = "7debc13fbf7997bf38bf8e9b20f1ad5e2a7d27a900e1f6039fe244ce30f589b5" dependencies = [ "fast-float2", "paste", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "sptr", "static_assertions", ] @@ -1650,6 +1650,12 @@ version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" +[[package]] +name = "bytecount" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" + [[package]] name = "bytemuck" version = "1.21.0" @@ -1667,7 +1673,7 @@ checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -1678,9 +1684,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" dependencies = [ "serde", ] @@ -1718,6 +1724,19 @@ dependencies = [ "serde", ] +[[package]] +name = "cargo_metadata" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.25", + "serde", + "serde_json", +] + [[package]] name = "cargo_metadata" version = "0.18.1" @@ -1755,9 +1774,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.11" +version = "1.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4730490333d58093109dc02c23174c3f4d490998c3fed3cc8e82d57afedb9cf" +checksum = "c7777341816418c02e033934a09f20dc0ccaf65a5201ef8a450ae0105a573fda" dependencies = [ "jobserver", "libc", @@ -1856,9 +1875,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.27" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "769b0145982b4b48713e01ec42d61614425f27b7058bda7180a3a41f30104796" +checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" dependencies = [ "clap_builder", "clap_derive", @@ -1878,14 +1897,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.24" +version = "4.5.28" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -2006,13 +2025,12 @@ dependencies = [ [[package]] name = "comfy-table" -version = "7.1.3" +version = "7.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24f165e7b643266ea80cb858aed492ad9280e3e05ce24d4a99d7d7b889b6a4d9" +checksum = "4a65ebfec4fb190b6f90e944a817d60499ee0744e582530e2c9900a22e591d9a" dependencies = [ "crossterm", - "strum", - "strum_macros", + "unicode-segmentation", "unicode-width 0.2.0", ] @@ -2079,6 +2097,26 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "const_format" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + [[package]] name = "constant_time_eq" version = "0.3.1" @@ -2367,7 +2405,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -2391,7 +2429,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -2402,7 +2440,20 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.97", + 
"syn 2.0.98", +] + +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", ] [[package]] @@ -2443,7 +2494,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1145d32e826a7748b69ee8fc62d3e6355ff7f1051df53141e7048162fc90481b" dependencies = [ "data-encoding", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -2511,7 +2562,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -2532,7 +2583,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", "unicode-xid", ] @@ -2646,7 +2697,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -2792,7 +2843,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -2803,7 +2854,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -2823,7 +2874,7 @@ checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -2842,6 +2893,15 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "error-chain" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +dependencies = [ + "version_check", +] + [[package]] name = "ethereum_serde_utils" version = "0.7.0" @@ -2857,9 +2917,9 @@ dependencies = [ [[package]] name = 
"ethereum_ssz" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862e41ea8eea7508f70cfd8cd560f0c34bb0af37c719a8e06c2672f0f031d8e5" +checksum = "86da3096d1304f5f28476ce383005385459afeaf0eea08592b65ddbc9b258d16" dependencies = [ "alloy-primitives", "ethereum_serde_utils", @@ -2872,14 +2932,14 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d31ecf6640112f61dc34b4d8359c081102969af0edd18381fed2052f6db6a410" +checksum = "d832a5c38eba0e7ad92592f7a22d693954637fbb332b4f669590d66a5c3183e5" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -3007,6 +3067,7 @@ dependencies = [ "eyre", "reth", "reth-chainspec", + "reth-ethereum-payload-builder", "reth-evm", "reth-evm-ethereum", "reth-node-api", @@ -3141,6 +3202,7 @@ dependencies = [ name = "example-network-txpool" version = "0.0.0" dependencies = [ + "alloy-consensus", "eyre", "reth-network", "reth-provider", @@ -3454,7 +3516,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -3759,9 +3821,9 @@ dependencies = [ [[package]] name = "hickory-proto" -version = "0.25.0-alpha.4" +version = "0.25.0-alpha.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d063c0692ee669aa6d261988aa19ca5510f1cc40e4f211024f50c888499a35d7" +checksum = "1d00147af6310f4392a31680db52a3ed45a2e0f68eb18e8c3fe5537ecc96d9e2" dependencies = [ "async-recursion", "async-trait", @@ -3774,7 +3836,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.8.5", + "rand 0.9.0", "serde", "thiserror 2.0.11", "tinyvec", @@ -3785,9 +3847,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.25.0-alpha.4" +version = "0.25.0-alpha.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "42bc352e4412fb657e795f79b4efcf2bd60b59ee5ca0187f3554194cd1107a27" +checksum = "5762f69ebdbd4ddb2e975cd24690bf21fe6b2604039189c26acddbc427f12887" dependencies = [ "cfg-if", "futures-util", @@ -3796,7 +3858,7 @@ dependencies = [ "moka", "once_cell", "parking_lot", - "rand 0.8.5", + "rand 0.9.0", "resolv-conf", "serde", "smallvec", @@ -4147,7 +4209,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -4204,7 +4266,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -4325,7 +4387,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -4538,7 +4600,7 @@ dependencies = [ "parking_lot", "pin-project", "rand 0.8.5", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "serde", "serde_json", "thiserror 1.0.69", @@ -4583,7 +4645,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -4651,11 +4713,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "9.3.0" +version = "9.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" +checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "js-sys", "pem", "ring", @@ -4981,7 +5043,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -5068,6 +5130,21 @@ dependencies = [ "unicase", ] +[[package]] +name = "mini-moka" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c325dfab65f261f386debee8b0969da215b3fa0037e74c8a1234db7ba986d803" +dependencies = [ + 
"crossbeam-channel", + "crossbeam-utils", + "dashmap 5.5.3", + "skeptic", + "smallvec", + "tagptr", + "triomphe", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -5370,7 +5447,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -5407,9 +5484,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.2" +version = "1.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" dependencies = [ "critical-section", "portable-atomic", @@ -5423,9 +5500,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36e8e67b41afd338096ca31f24c5e7800797858b963490be2e8971d17d733f49" +checksum = "1363dd2454f473e2a2a6ee5eda585ecf94209319e35529bd703ddc5072798eb4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5441,15 +5518,15 @@ dependencies = [ [[package]] name = "op-alloy-flz" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5402b85f0fa22712960498c3a123875d7b53da21928b24a8f8667babb268c4c" +checksum = "3419796a04a6bcc4752b9ba26b89d6ba4d46d7b69476ddcac3692aa44a558574" [[package]] name = "op-alloy-network" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b062f0c57e8b910f9ec1222b6fba2dddd7e33fb7b139a68556a7390d40b03624" +checksum = "9fc2e8a7501b1e9c0e83bde80b5b9866bf4418caceaf64d919bd608b66ecaa16" dependencies = [ "alloy-consensus", "alloy-network", @@ -5462,9 +5539,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.10.0" +version = "0.10.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e5288739ee87d1fbf1c7e09e991ead1f86f01aea63bf91e8530cde4faa7713" +checksum = "cd3f3b1b0decc0be5d766144df2b8297b22fada5b4a1bcbafa76f0aec7ac5d24" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -5472,9 +5549,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06d55acda15bd273d1d9a052536cf46c3520f597ac9884376c9c56241f2f5e9b" +checksum = "77979abf2b8b2be8996da3b434b09d770edbcceccd59251c3373ef553e743cf0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5490,9 +5567,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6735ec88e69917c0cb0a3ee60ad7e5cd14dcb905a6ca55fbc7004b72c5e20" +checksum = "b2e4fe1929b0e39130da37cb975c98d70418904ba7991a061799ad971dbd09fe" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5582,30 +5659,32 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "c9fde3d0718baf5bc92f577d652001da0f8d54cd03a7974e118d04fc888dc23d" dependencies = [ "arbitrary", "arrayvec", "bitvec", "byte-slice-cast", "bytes", + "const_format", "impl-trait-for-tuples", "parity-scale-codec-derive", + "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "581c837bb6b9541ce7faa9377c20616e4fb7650f6b0f68bc93c827ee504fb7b3" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.98", ] [[package]] @@ 
-5720,7 +5799,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -5734,22 +5813,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" +checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" +checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -5885,7 +5964,7 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -5905,7 +5984,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -5956,7 +6035,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -6054,7 +6133,18 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", +] + +[[package]] +name = "pulldown-cmark" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" +dependencies = [ + "bitflags 2.8.0", + "memchr", + "unicase", ] [[package]] @@ -6106,7 +6196,7 @@ 
dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "socket2", "thiserror 2.0.11", @@ -6124,7 +6214,7 @@ dependencies = [ "getrandom 0.2.15", "rand 0.8.5", "ring", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "rustls-pki-types", "slab", @@ -6188,6 +6278,17 @@ dependencies = [ "serde", ] +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.0", + "zerocopy 0.8.17", +] + [[package]] name = "rand_chacha" version = "0.2.2" @@ -6208,6 +6309,16 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.0", +] + [[package]] name = "rand_core" version = "0.5.1" @@ -6226,6 +6337,16 @@ dependencies = [ "getrandom 0.2.15", ] +[[package]] +name = "rand_core" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +dependencies = [ + "getrandom 0.3.1", + "zerocopy 0.8.17", +] + [[package]] name = "rand_hc" version = "0.2.0" @@ -6475,6 +6596,7 @@ dependencies = [ "reth-errors", "reth-ethereum-cli", "reth-ethereum-payload-builder", + "reth-ethereum-primitives", "reth-evm", "reth-execution-types", "reth-exex", @@ -6537,7 +6659,6 @@ dependencies = [ "reth-provider", "reth-revm", "reth-tasks", - "reth-transaction-pool", "revm", "tokio", "tracing", @@ -6570,7 +6691,6 @@ dependencies = [ "reth-node-core", "reth-primitives", "reth-primitives-traits", - "reth-rpc-types-compat", "reth-tracing", "serde", "thiserror 2.0.11", @@ -6768,7 +6888,7 @@ dependencies = [ "proc-macro2", "quote", 
"similar-asserts", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -6865,7 +6985,7 @@ dependencies = [ "reth-storage-errors", "reth-tracing", "reth-trie-common", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "serde", "serde_json", "strum", @@ -7167,7 +7287,6 @@ dependencies = [ "reth-node-types", "reth-optimism-chainspec", "reth-payload-builder", - "reth-payload-builder-primitives", "reth-payload-primitives", "reth-provider", "reth-prune", @@ -7187,7 +7306,9 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "auto_impl", "futures", + "reth-chain-state", "reth-errors", "reth-execution-types", "reth-payload-builder-primitives", @@ -7243,7 +7364,7 @@ dependencies = [ "derive_more", "futures", "metrics", - "moka", + "mini-moka", "proptest", "rand 0.8.5", "rayon", @@ -7257,11 +7378,11 @@ dependencies = [ "reth-ethereum-engine-primitives", "reth-ethereum-primitives", "reth-evm", + "reth-evm-ethereum", "reth-exex-types", "reth-metrics", "reth-network-p2p", "reth-payload-builder", - "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives-traits", "reth-provider", @@ -7448,7 +7569,7 @@ dependencies = [ "auto_impl", "dyn-clone", "once_cell", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "serde", ] @@ -7576,7 +7697,6 @@ dependencies = [ "alloy-rlp", "nybbles", "reth-consensus", - "reth-prune-types", "reth-storage-errors", "thiserror 2.0.11", ] @@ -7658,6 +7778,7 @@ dependencies = [ "reth-consensus", "reth-db", "reth-db-common", + "reth-ethereum-payload-builder", "reth-evm", "reth-execution-types", "reth-exex", @@ -7759,7 +7880,7 @@ dependencies = [ "bitflags 2.8.0", "byteorder", "codspeed-criterion-compat", - "dashmap", + "dashmap 6.1.0", "derive_more", "indexmap 2.7.1", "parking_lot", @@ -7861,7 +7982,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "reth-transaction-pool", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "schnellru", "secp256k1", "serde", @@ -7911,9 +8032,9 @@ dependencies = [ "parking_lot", 
"reth-consensus", "reth-eth-wire-types", + "reth-ethereum-primitives", "reth-network-peers", "reth-network-types", - "reth-primitives", "reth-primitives-traits", "reth-storage-errors", "tokio", @@ -7973,6 +8094,7 @@ version = "1.1.5" dependencies = [ "alloy-rpc-types-engine", "eyre", + "reth-basic-payload-builder", "reth-consensus", "reth-db-api", "reth-engine-primitives", @@ -7980,10 +8102,12 @@ dependencies = [ "reth-network-api", "reth-node-core", "reth-node-types", + "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", "reth-provider", "reth-tasks", + "reth-tokio-util", "reth-transaction-pool", ] @@ -8001,6 +8125,7 @@ dependencies = [ "futures", "jsonrpsee", "rayon", + "reth-basic-payload-builder", "reth-chain-state", "reth-chainspec", "reth-cli-util", @@ -8117,7 +8242,6 @@ dependencies = [ "eyre", "futures", "rand 0.8.5", - "reth-basic-payload-builder", "reth-chainspec", "reth-consensus", "reth-db", @@ -8133,12 +8257,15 @@ dependencies = [ "reth-node-api", "reth-node-builder", "reth-node-core", - "reth-payload-builder", "reth-payload-primitives", "reth-provider", "reth-revm", "reth-rpc", + "reth-rpc-api", + "reth-rpc-builder", "reth-rpc-eth-api", + "reth-rpc-eth-types", + "reth-rpc-server-types", "reth-tasks", "reth-tracing", "reth-transaction-pool", @@ -8348,6 +8475,7 @@ dependencies = [ "alloy-network", "alloy-primitives", "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "alloy-signer-local", "clap", "derive_more", @@ -8376,6 +8504,7 @@ dependencies = [ "reth-optimism-payload-builder", "reth-optimism-primitives", "reth-optimism-rpc", + "reth-optimism-txpool", "reth-payload-builder", "reth-payload-util", "reth-payload-validator", @@ -8516,6 +8645,31 @@ dependencies = [ "reth-stages-types", ] +[[package]] +name = "reth-optimism-txpool" +version = "1.1.5" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rpc-types-eth", + "c-kzg", + "derive_more", + "op-alloy-consensus", + "op-alloy-flz", + 
"parking_lot", + "reth-chainspec", + "reth-optimism-chainspec", + "reth-optimism-evm", + "reth-optimism-forks", + "reth-optimism-primitives", + "reth-primitives-traits", + "reth-provider", + "reth-storage-api", + "reth-transaction-pool", + "revm", +] + [[package]] name = "reth-payload-builder" version = "1.1.5" @@ -8523,7 +8677,6 @@ dependencies = [ "alloy-consensus", "alloy-primitives", "alloy-rpc-types", - "async-trait", "futures-util", "metrics", "reth-chain-state", @@ -8543,8 +8696,6 @@ dependencies = [ name = "reth-payload-builder-primitives" version = "1.1.5" dependencies = [ - "alloy-rpc-types-engine", - "async-trait", "pin-project", "reth-payload-primitives", "tokio", @@ -8560,6 +8711,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", "assert_matches", + "auto_impl", "op-alloy-rpc-types-engine", "reth-chain-state", "reth-chainspec", @@ -8577,7 +8729,7 @@ version = "1.1.5" dependencies = [ "alloy-consensus", "alloy-primitives", - "reth-primitives", + "reth-transaction-pool", ] [[package]] @@ -8586,6 +8738,7 @@ version = "1.1.5" dependencies = [ "alloy-rpc-types", "reth-chainspec", + "reth-engine-primitives", "reth-primitives", "reth-primitives-traits", ] @@ -8660,7 +8813,7 @@ dependencies = [ "alloy-rpc-types-engine", "assert_matches", "auto_impl", - "dashmap", + "dashmap 6.1.0", "eyre", "itertools 0.13.0", "metrics", @@ -8725,7 +8878,7 @@ dependencies = [ "reth-testing-utils", "reth-tokio-util", "reth-tracing", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "thiserror 2.0.11", "tokio", "tracing", @@ -8758,7 +8911,8 @@ dependencies = [ "alloy-eips", "alloy-primitives", "reth-ethereum-forks", - "reth-primitives", + "reth-ethereum-primitives", + "reth-primitives-traits", "reth-storage-api", "reth-storage-errors", "reth-trie", @@ -8811,6 +8965,7 @@ dependencies = [ "reth-network-api", "reth-network-peers", "reth-network-types", + "reth-node-api", "reth-primitives", "reth-primitives-traits", "reth-provider", @@ -8842,6 +8997,7 @@ name = 
"reth-rpc-api" version = "1.1.5" dependencies = [ "alloy-eips", + "alloy-genesis", "alloy-json-rpc", "alloy-primitives", "alloy-rpc-types", @@ -8949,19 +9105,17 @@ dependencies = [ "reth-chainspec", "reth-engine-primitives", "reth-ethereum-engine-primitives", + "reth-ethereum-primitives", "reth-metrics", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", - "reth-primitives", "reth-primitives-traits", "reth-provider", "reth-rpc-api", - "reth-rpc-types-compat", "reth-storage-api", "reth-tasks", "reth-testing-utils", - "reth-tokio-util", "reth-transaction-pool", "serde", "thiserror 2.0.11", @@ -9092,7 +9246,6 @@ name = "reth-rpc-types-compat" version = "1.1.5" dependencies = [ "alloy-consensus", - "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -9238,7 +9391,6 @@ dependencies = [ "reth-node-api", "reth-node-builder", "reth-node-types", - "reth-payload-builder", "reth-primitives", "reth-primitives-traits", "reth-provider", @@ -9264,8 +9416,13 @@ name = "reth-scroll-payload" version = "1.1.5" dependencies = [ "futures-util", + "reth-basic-payload-builder", "reth-payload-builder", "reth-payload-primitives", + "reth-payload-util", + "reth-primitives-traits", + "reth-scroll-engine-primitives", + "reth-transaction-pool", ] [[package]] @@ -9510,7 +9667,6 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "derive_more", - "reth-fs-util", "reth-primitives-traits", "reth-prune-types", "reth-static-file-types", @@ -9598,7 +9754,6 @@ dependencies = [ "reth-execution-types", "reth-fs-util", "reth-metrics", - "reth-payload-util", "reth-primitives", "reth-primitives-traits", "reth-provider", @@ -9607,7 +9762,7 @@ dependencies = [ "reth-tracing", "revm-interpreter", "revm-primitives", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "schnellru", "serde", "serde_json", @@ -9634,7 +9789,6 @@ dependencies = [ "metrics", "proptest", "proptest-arbitrary-interop", - "rayon", "reth-ethereum-primitives", 
"reth-execution-errors", "reth-metrics", @@ -9671,8 +9825,10 @@ dependencies = [ "plain_hasher", "proptest", "proptest-arbitrary-interop", + "rayon", "reth-codecs", "reth-primitives-traits", + "revm", "serde", "serde_json", "serde_with", @@ -9991,7 +10147,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.97", + "syn 2.0.98", "unicode-ident", ] @@ -10040,9 +10196,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" dependencies = [ "rand 0.8.5", ] @@ -10221,9 +10377,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.3.0" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28e1c91382686d21b5ac7959341fcb9780fa7c03773646995a87c950fa7be640" +checksum = "ea091f6cac2595aa38993f04f4ee692ed43757035c36e67c180b6828356385b1" dependencies = [ "sdd", ] @@ -10376,9 +10532,9 @@ dependencies = [ [[package]] name = "sdd" -version = "3.0.5" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478f121bb72bbf63c52c93011ea1791dca40140dfe13f8336c4c5ac952c33aa9" +checksum = "b07779b9b918cc05650cb30f404d4d7835d26df37c235eded8a6832e2fb82cca" [[package]] name = "sec1" @@ -10508,7 +10664,7 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -10583,7 +10739,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -10616,7 +10772,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] 
[[package]] @@ -10777,6 +10933,21 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +[[package]] +name = "skeptic" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +dependencies = [ + "bytecount", + "cargo_metadata 0.14.2", + "error-chain", + "glob", + "pulldown-cmark", + "tempfile", + "walkdir", +] + [[package]] name = "sketches-ddsketch" version = "0.3.0" @@ -10810,18 +10981,18 @@ checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" [[package]] name = "snmalloc-rs" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d43ff92911d7d9705d1c0203300a3edfd00d16c8b8b0c27c56f9407a3f31e7a6" +checksum = "eb317153089fdfa4d8a2eec059d40a5a23c3bde43995ea23b19121c3f621e74a" dependencies = [ "snmalloc-sys", ] [[package]] name = "snmalloc-sys" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "954e1f984860770475196be81a547ed1517d34fcb8a15cb87bdb37cff3353230" +checksum = "065fea53d32bb77bc36cca466cb191f2e5216ebfd0ed360b1d64889ee6e559ea" dependencies = [ "cc", "cmake", @@ -10926,7 +11097,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -10950,9 +11121,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.13.3" +version = "12.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13a4dfe4bbeef59c1f32fc7524ae7c95b9e1de5e79a43ce1604e181081d71b0c" +checksum = "b6189977df1d6ec30c920647919d76f29fb8d8f25e8952e835b0fcda25e8f792" dependencies = [ "debugid", "memmap2", @@ -10962,9 +11133,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" 
-version = "12.13.3" +version = "12.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cf6a95abff97de4d7ff3473f33cacd38f1ddccad5c1feab435d6760300e3b6" +checksum = "d234917f7986498e7f62061438cee724bafb483fe84cfbe2486f68dce48240d7" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -10984,9 +11155,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.97" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dabd04e3b9a8c3c03d5e743f5ef5e1207befc9de704d477f7198cc28049763e" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -10995,14 +11166,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84e4d83a0a6704561302b917a932484e1cae2d8c6354c64be8b7bac1c1fe057" +checksum = "b7f6a4b9002584ea56d0a19713b65da44cbbf6070aca9ae0360577cba5c4db68" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -11022,7 +11193,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -11083,7 +11254,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0bef5dd380747bd7b6e636a8032a24aa34fcecaf843e59fc97d299681922e86" dependencies = [ "bincode", - "cargo_metadata", + "cargo_metadata 0.18.1", "serde", ] @@ -11100,7 +11271,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -11148,7 +11319,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -11159,7 +11330,7 @@ checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -11316,7 +11487,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -11374,9 +11545,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" dependencies = [ "serde", "serde_spanned", @@ -11403,7 +11574,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.7.0", + "winnow", ] [[package]] @@ -11517,7 +11688,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -11637,6 +11808,12 @@ dependencies = [ "rlp", ] +[[package]] +name = "triomphe" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85" + [[package]] name = "try-lock" version = "0.2.5" @@ -11812,11 +11989,11 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.12.1" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" +checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.3.1", ] [[package]] @@ -11832,7 +12009,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" dependencies = [ "anyhow", - "cargo_metadata", + "cargo_metadata 0.18.1", "cfg-if", "regex", "rustversion", @@ 
-11853,14 +12030,14 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] name = "wait-timeout" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" dependencies = [ "libc", ] @@ -11933,7 +12110,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", "wasm-bindgen-shared", ] @@ -11968,7 +12145,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -12137,7 +12314,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -12148,7 +12325,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -12159,7 +12336,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -12170,7 +12347,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -12362,18 +12539,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e90edd2ac1aa278a5c4599b1d89cf03074b610800f866d4026dc199d7929a28" -dependencies = [ - "memchr", -] - -[[package]] -name = "winnow" -version = "0.7.0" +version = 
"0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e49d2d35d3fad69b39b94139037ecfb4f359f08958b9c11e7315ce770462419" +checksum = "86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" dependencies = [ "memchr", ] @@ -12463,7 +12631,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", "synstructure", ] @@ -12474,7 +12642,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa91407dacce3a68c56de03abe2760159582b846c6a4acd2f456618087f12713" +dependencies = [ + "zerocopy-derive 0.8.17", ] [[package]] @@ -12485,7 +12662,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06718a168365cad3d5ff0bb133aad346959a2074bd4a85c121255a11304a8626" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", ] [[package]] @@ -12505,7 +12693,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", "synstructure", ] @@ -12526,7 +12714,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] @@ -12548,7 +12736,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.97", + "syn 2.0.98", ] [[package]] diff 
--git a/Cargo.toml b/Cargo.toml index 594e14646ba2..56e94bc851a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,6 +76,7 @@ members = [ "crates/optimism/primitives/", "crates/optimism/rpc/", "crates/optimism/storage", + "crates/optimism/txpool/", "crates/payload/basic/", "crates/payload/builder/", "crates/payload/builder-primitives/", @@ -396,6 +397,7 @@ reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-optimism-primitives = { path = "crates/optimism/primitives" } reth-optimism-rpc = { path = "crates/optimism/rpc" } reth-optimism-storage = { path = "crates/optimism/storage" } +reth-optimism-txpool = { path = "crates/optimism/txpool" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-builder-primitives = { path = "crates/payload/builder-primitives" } reth-payload-primitives = { path = "crates/payload/primitives" } @@ -419,7 +421,7 @@ reth-rpc-server-types = { path = "crates/rpc/rpc-server-types" } reth-rpc-types-compat = { path = "crates/rpc/rpc-types-compat" } reth-stages = { path = "crates/stages/stages" } reth-stages-api = { path = "crates/stages/api" } -reth-stages-types = { path = "crates/stages/types" } +reth-stages-types = { path = "crates/stages/types", default-features = false } reth-static-file = { path = "crates/static-file/static-file" } reth-static-file-types = { path = "crates/static-file/types" } reth-storage-api = { path = "crates/storage/storage-api" } @@ -553,7 +555,7 @@ tracing-appender = "0.2" url = { version = "2.3", default-features = false } zstd = "0.13" byteorder = "1" -moka = "0.12" +mini-moka = "0.10" # metrics metrics = "0.24.0" diff --git a/DockerfileOp b/DockerfileOp new file mode 100644 index 000000000000..dbfb0ce2d243 --- /dev/null +++ b/DockerfileOp @@ -0,0 +1,43 @@ +FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef +WORKDIR /app + +LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth +LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" + +RUN apt-get 
update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config + +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +COPY . . + +ARG BUILD_PROFILE=release +ENV BUILD_PROFILE=$BUILD_PROFILE + +ARG RUSTFLAGS="" +ENV RUSTFLAGS="$RUSTFLAGS" + +RUN cargo chef cook --profile $BUILD_PROFILE --features "optimism" --recipe-path recipe.json --manifest-path /app/crates/optimism/bin/Cargo.toml + +COPY . . +RUN cargo build --profile $BUILD_PROFILE --bin op-reth --features "optimism" --manifest-path /app/crates/optimism/bin/Cargo.toml + +RUN ls -la /app/target/$BUILD_PROFILE/op-reth +RUN cp /app/target/$BUILD_PROFILE/op-reth /app/op-reth + +FROM ubuntu:22.04 AS runtime + +RUN apt-get update && \ + apt-get install -y ca-certificates libssl-dev pkg-config strace && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /app +COPY --from=builder /app/op-reth /usr/local/bin/ +RUN chmod +x /usr/local/bin/op-reth +COPY LICENSE-* ./ + +EXPOSE 30303 30303/udp 9001 8545 8546 7545 8551 +ENTRYPOINT ["/usr/local/bin/op-reth"] diff --git a/LICENSE-APACHE b/LICENSE-APACHE index e154ad895ce6..df75cd177678 100644 --- a/LICENSE-APACHE +++ b/LICENSE-APACHE @@ -186,7 +186,7 @@ APPENDIX: How to apply the Apache License to your work. same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2022-2024 Reth Contributors +Copyright 2022-2025 Reth Contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 940c61bc80dc..097a2541adb1 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -18,7 +18,6 @@ reth-cli-runner.workspace = true reth-cli-util.workspace = true reth-node-core.workspace = true reth-node-api.workspace = true -reth-rpc-types-compat.workspace = true reth-primitives = { workspace = true, features = ["alloy-compat"] } reth-primitives-traits.workspace = true reth-tracing.workspace = true diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index 972ececabccd..37f8aae01f72 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -83,7 +83,7 @@ impl Command { let (payload, _) = ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()); - debug!(?block_number, "Sending payload",); + debug!(target: "reth-bench", ?block_number, "Sending payload",); // construct fcu to call let forkchoice_state = ForkchoiceState { diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 5904d648dd69..327829442b62 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -70,6 +70,7 @@ impl Command { let block_number = payload.block_number(); debug!( + target: "reth-bench", number=?payload.block_number(), "Sending payload to engine", ); diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs index 7f13bb4d3de8..a2c8d7336610 100644 --- a/bin/reth-bench/src/valid_payload.rs +++ b/bin/reth-bench/src/valid_payload.rs @@ -119,6 +119,11 @@ where ); panic!("Invalid newPayloadV3: {status:?}"); } + if status.is_syncing() { + return Err(alloy_json_rpc::RpcError::UnsupportedFeature( + "invalid range: no canonical state found for parent of requested block", + )) + } status = self .new_payload_v3(payload.clone(), versioned_hashes.clone(), 
parent_beacon_block_root) .await?; @@ -144,6 +149,11 @@ where ); panic!("Invalid forkchoiceUpdatedV1: {status:?}"); } + if status.is_syncing() { + return Err(alloy_json_rpc::RpcError::UnsupportedFeature( + "invalid range: no canonical state found for parent of requested block", + )) + } status = self.fork_choice_updated_v1(fork_choice_state, payload_attributes.clone()).await?; } @@ -169,6 +179,11 @@ where ); panic!("Invalid forkchoiceUpdatedV2: {status:?}"); } + if status.is_syncing() { + return Err(alloy_json_rpc::RpcError::UnsupportedFeature( + "invalid range: no canonical state found for parent of requested block", + )) + } status = self.fork_choice_updated_v2(fork_choice_state, payload_attributes.clone()).await?; } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 511f3189c67e..460c3c268fa7 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -57,6 +57,7 @@ reth-trie-db = { workspace = true, features = ["metrics"] } reth-node-api.workspace = true reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true +reth-ethereum-primitives.workspace = true reth-node-ethereum = { workspace = true, features = ["js-tracer"] } reth-node-builder.workspace = true reth-node-events.workspace = true diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index a146a88b724d..0593c0f17bd4 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -236,7 +236,7 @@ pub enum Commands { Config(config_cmd::Command), /// Various debug routines #[command(name = "debug")] - Debug(debug_cmd::Command), + Debug(Box>), /// Scripts for node recovery #[command(name = "recover")] Recover(recover::Command), diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 5764a8e16cf3..479d1bb666cd 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -9,9 +9,7 @@ use alloy_rlp::Decodable; use 
alloy_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; use clap::Parser; use eyre::Context; -use reth_basic_payload_builder::{ - BuildArguments, BuildOutcome, Cancelled, PayloadBuilder, PayloadConfig, -}; +use reth_basic_payload_builder::{BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig}; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; @@ -19,22 +17,25 @@ use reth_cli_runner::CliContext; use reth_consensus::{Consensus, FullConsensus}; use reth_errors::{ConsensusError, RethResult}; use reth_ethereum_payload_builder::EthereumBuilderConfig; +use reth_ethereum_primitives::{EthPrimitives, Transaction, TransactionSigned}; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes}; use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthExecutorProvider}; -use reth_primitives::{ - transaction::SignedTransactionIntoRecoveredExt, EthPrimitives, SealedBlock, SealedHeader, - Transaction, TransactionSigned, +use reth_primitives_traits::{ + transaction::signed::SignedTransactionIntoRecoveredExt, Block as _, SealedBlock, SealedHeader, + SignedTransaction, }; -use reth_primitives_traits::{Block as _, SignedTransaction}; use reth_provider::{ providers::{BlockchainProvider, ProviderNodeTypes}, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::{cached::CachedReads, database::StateProviderDatabase, primitives::KzgSettings}; +use reth_revm::{ + cached::CachedReads, cancelled::CancelOnDrop, database::StateProviderDatabase, + primitives::KzgSettings, +}; use reth_stages::StageId; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, @@ 
-134,16 +135,11 @@ impl> Command { let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; let blob_store = InMemoryBlobStore::default(); - let validator = - TransactionValidationTaskExecutor::eth_builder(provider_factory.chain_spec()) - .with_head_timestamp(best_block.timestamp) - .kzg_settings(self.kzg_settings()?) - .with_additional_tasks(1) - .build_with_tasks( - blockchain_db.clone(), - ctx.task_executor.clone(), - blob_store.clone(), - ); + let validator = TransactionValidationTaskExecutor::eth_builder(blockchain_db.clone()) + .with_head_timestamp(best_block.timestamp) + .kzg_settings(self.kzg_settings()?) + .with_additional_tasks(1) + .build_with_tasks(ctx.task_executor.clone(), blob_store.clone()); let transaction_pool = reth_transaction_pool::Pool::eth_pool( validator, @@ -218,15 +214,15 @@ impl> Command { ); let args = BuildArguments::new( - blockchain_db.clone(), - transaction_pool, CachedReads::default(), payload_config, - Cancelled::default(), + CancelOnDrop::default(), None, ); let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( + blockchain_db.clone(), + transaction_pool, EthEvmConfig::new(provider_factory.chain_spec()), EthereumBuilderConfig::new(Default::default()), ); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 8011ba86a90a..bcf79ebf3380 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -18,6 +18,7 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_errors::ConsensusError; +use reth_ethereum_primitives::EthPrimitives; use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; @@ -25,7 +26,6 @@ use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; use reth_node_api::NodeTypesWithDBAdapter; use reth_node_ethereum::{consensus::EthBeaconConsensus, 
EthExecutorProvider}; use reth_node_events::node::NodeEvent; -use reth_primitives::EthPrimitives; use reth_provider::{ providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 30ba4ec9070f..ce85aaed1ab6 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -15,14 +15,14 @@ use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, Environ use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; -use reth_errors::BlockValidationError; +use reth_ethereum_primitives::EthPrimitives; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_node_api::NodePrimitives; use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider}; -use reth_primitives::{EthPrimitives, SealedBlock}; +use reth_primitives_traits::SealedBlock; use reth_provider::{ providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory, HashedPostStateProvider, HashingWriter, LatestStateProviderRef, OriginalValuesKnown, @@ -62,9 +62,9 @@ impl> Command { N: ProviderNodeTypes< ChainSpec = C::ChainSpec, Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - BlockHeader = reth_primitives::Header, + Block = reth_ethereum_primitives::Block, + Receipt = reth_ethereum_primitives::Receipt, + BlockHeader = alloy_consensus::Header, >, >, >( @@ -165,9 +165,7 @@ impl> Command { let provider_rw = provider_factory.database_provider_rw()?; // Insert block, state and hashes - provider_rw.insert_historical_block( - block.clone().try_recover().map_err(|_| BlockValidationError::SenderRecoveryError)?, - )?; + 
provider_rw.insert_historical_block(block.clone().try_recover()?)?; provider_rw.write_state( &execution_outcome, OriginalValuesKnown::No, diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index d7f3516500a6..aa94afc0678d 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -13,13 +13,13 @@ use reth_config::Config; use reth_consensus::{Consensus, ConsensusError}; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; +use reth_ethereum_primitives::EthPrimitives; use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; use reth_node_api::{BlockTy, NodePrimitives}; use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider}; -use reth_primitives::EthPrimitives; use reth_provider::{ providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, @@ -61,9 +61,9 @@ impl> Command { N: ProviderNodeTypes< ChainSpec = C::ChainSpec, Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - BlockHeader = reth_primitives::Header, + Block = reth_ethereum_primitives::Block, + Receipt = reth_ethereum_primitives::Receipt, + BlockHeader = alloy_consensus::Header, >, >, >( diff --git a/bin/reth/src/commands/debug_cmd/mod.rs b/bin/reth/src/commands/debug_cmd/mod.rs index 26077a1274fb..d4b03fd81c4c 100644 --- a/bin/reth/src/commands/debug_cmd/mod.rs +++ b/bin/reth/src/commands/debug_cmd/mod.rs @@ -5,8 +5,8 @@ use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::CliNodeTypes; use reth_cli_runner::CliContext; +use reth_ethereum_primitives::EthPrimitives; use reth_node_ethereum::EthEngineTypes; -use 
reth_primitives::EthPrimitives; mod build_block; mod execution; diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index b813c07e3696..021e772228ab 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -381,7 +381,7 @@ RPC State Cache: [default: 2000] - --rpc-cache.max-envs + --rpc-cache.max-headers Max number of headers in cache [default: 1000] @@ -443,6 +443,16 @@ TxPool: [default: 20] + --txpool.blobpool-max-count + Max number of transaction in the blobpool + + [default: 10000] + + --txpool.blobpool-max-size + Max size of the blobpool in megabytes + + [default: 20] + --txpool.max-account-slots Max number of executable transaction slots guaranteed per account diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index cb72b9313c0f..d2caa867dbb1 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -83,173 +83,6 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout -Networking: - -d, --disable-discovery - Disable the discovery service - - --disable-dns-discovery - Disable the DNS discovery - - --disable-discv4-discovery - Disable Discv4 discovery - - --enable-discv5-discovery - Enable Discv5 discovery - - --disable-nat - Disable Nat discovery - - --discovery.addr - The UDP address to use for devp2p peer discovery version 4 - - [default: 0.0.0.0] - - --discovery.port - The UDP port to use for devp2p peer discovery version 4 - - [default: 30303] - - --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 - - --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 - - --discovery.v5.port - The UDP IPv4 port to use for devp2p peer discovery version 5. 
Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set - - [default: 9200] - - --discovery.v5.port.ipv6 - The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set - - [default: 9200] - - --discovery.v5.lookup-interval - The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - - [default: 60] - - --discovery.v5.bootstrap.lookup-interval - The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - - --discovery.v5.bootstrap.lookup-countdown - The number of times to carry out boost lookup queries at bootstrap - - [default: 100] - - --trusted-peers - Comma separated enode URLs of trusted peers for P2P connections. - - --trusted-peers enode://abcd@192.168.0.1:30303 - - --trusted-only - Connect to or accept from trusted peers only - - --bootnodes - Comma separated enode URLs for P2P discovery bootstrap. - - Will fall back to a network-specific default if not specified. - - --dns-retries - Amount of DNS resolution requests retries to perform when peering - - [default: 0] - - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - - --identity - Custom node identity - - [default: reth/-/] - - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. - - --no-persist-peers - Do not persist peers. - - --nat - NAT resolution method (any|none|upnp|publicip|extip:\) - - [default: any] - - --addr - Network listening address - - [default: 0.0.0.0] - - --port - Network listening port - - [default: 30303] - - --max-outbound-peers - Maximum number of outbound requests. default: 100 - - --max-inbound-peers - Maximum number of inbound requests. 
default: 30 - - --max-tx-reqs - Max concurrent `GetPooledTransactions` requests. - - [default: 130] - - --max-tx-reqs-peer - Max concurrent `GetPooledTransactions` requests per peer. - - [default: 1] - - --max-seen-tx-history - Max number of seen transactions to remember per peer. - - Default is 320 transaction hashes. - - [default: 320] - - --max-pending-imports - Max number of transactions to import concurrently. - - [default: 4096] - - --pooled-tx-response-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions - to pack in one response. - Spec'd at 2MiB. - - [default: 2097152] - - --pooled-tx-pack-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions to - request in one request. - - Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see `RLPx` specs). This allows a node to request a specific size - response. - - By default, nodes request only 128 KiB worth of transactions, but should a peer request - more, up to 2 MiB, a node will answer with more than 128 KiB. - - Default is 128 KiB. - - [default: 131072] - - --max-tx-pending-fetch - Max capacity of cache of hashes for transactions pending fetch. - - [default: 25600] - - --net-if.experimental - Name of network interface used to communicate with peers. - - If flag is set, but no value is passed, the default interface for docker `eth0` is tried. - --offline If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index c7423930c8b2..e9b2123bce28 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -31,7 +31,7 @@ pub enum Subcommands { /// assuming that all the data can be held in memory. 
It is not recommended /// to run a stage for really large block ranges if your computer does not have /// a lot of memory to store all the data. - Run(run::Command), + Run(Box>), /// Drop a stage's tables from the database. Drop(drop::Command), /// Dumps a stage from a range into a new database. diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index fc6b9250df64..a8f1cebea47a 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -12,7 +12,6 @@ use reth_db::DatabaseEnv; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; -use reth_node_core::args::NetworkArgs; use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, StorageLocation, @@ -33,9 +32,6 @@ pub struct Command { #[command(flatten)] env: EnvironmentArgs, - #[command(flatten)] - network: NetworkArgs, - #[command(subcommand)] command: Subcommands, diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index f845d2a66130..c51d5063e51d 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -124,19 +124,19 @@ where // We want to control our repeated keys let mut seen_keys = HashSet::new(); - let strat_values = proptest::collection::vec(arb::(), 100..300).no_shrink().boxed(); + let start_values = proptest::collection::vec(arb::(), 100..300).no_shrink().boxed(); - let strat_keys = arb::().no_shrink().boxed(); + let start_keys = arb::().no_shrink().boxed(); while rows.len() < per_table { - let key: T::Key = strat_keys.new_tree(runner).map_err(|e| eyre::eyre!("{e}"))?.current(); + let key: T::Key = start_keys.new_tree(runner).map_err(|e| eyre::eyre!("{e}"))?.current(); 
if !seen_keys.insert(key.clone()) { continue } let mut values: Vec = - strat_values.new_tree(runner).map_err(|e| eyre::eyre!("{e}"))?.current(); + start_values.new_tree(runner).map_err(|e| eyre::eyre!("{e}"))?.current(); values.sort(); diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index a186b8407a8a..d7e7f3c4efbc 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # reth reth-network-types = { workspace = true, features = ["serde"] } -reth-prune-types.workspace = true -reth-stages-types.workspace = true +reth-prune-types = { workspace = true, features = ["serde"] } +reth-stages-types = { workspace = true, features = ["serde"] } # serde serde.workspace = true diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 63aaf1bd19a6..0c43f1fa8a64 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -60,11 +60,13 @@ where node: FullNode, attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes + 'static, ) -> eyre::Result { - let builder = node.payload_builder.clone(); - Ok(Self { inner: node.clone(), - payload: PayloadTestContext::new(builder, attributes_generator).await?, + payload: PayloadTestContext::new( + node.payload_builder_handle.clone(), + attributes_generator, + ) + .await?, network: NetworkTestContext::new(node.network.clone()), engine_api: EngineApiTestContext { chain_spec: node.chain_spec(), @@ -264,8 +266,6 @@ where /// Sends FCU and waits for the node to sync to the given block. 
pub async fn sync_to(&self, block: BlockHash) -> eyre::Result<()> { - self.engine_api.update_forkchoice(block, block).await?; - let start = std::time::Instant::now(); while self @@ -275,6 +275,7 @@ where .is_none_or(|h| h.hash() != block) { tokio::time::sleep(std::time::Duration::from_millis(100)).await; + self.engine_api.update_forkchoice(block, block).await?; assert!(start.elapsed() <= std::time::Duration::from_secs(10), "timed out"); } diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 858e311cacb2..f98e85f1700e 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -1,7 +1,7 @@ use futures_util::StreamExt; use reth_node_api::BlockBody; use reth_payload_builder::{PayloadBuilderHandle, PayloadId}; -use reth_payload_builder_primitives::{Events, PayloadBuilder}; +use reth_payload_builder_primitives::Events; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadTypes}; use tokio_stream::wrappers::BroadcastStream; diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index 73185b8b1b0a..dc0a1fd6fdbc 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -19,7 +19,6 @@ reth-node-types.workspace = true reth-evm.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true -reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 48cf735b87e2..484a1eeae9a5 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -7,7 +7,6 @@ use eyre::OptionExt; use futures_util::{stream::Fuse, StreamExt}; use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_builder_primitives::PayloadBuilder; use 
reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, PayloadAttributesBuilder, PayloadKind, PayloadTypes, }; @@ -208,13 +207,8 @@ where let block = payload.block(); let (tx, rx) = oneshot::channel(); - let (payload, sidecar) = EngineT::block_to_payload(payload.block().clone()); - self.to_engine.send(BeaconEngineMessage::NewPayload { - payload, - // todo: prague support - sidecar, - tx, - })?; + let payload = EngineT::block_to_payload(payload.block().clone()); + self.to_engine.send(BeaconEngineMessage::NewPayload { payload, tx })?; let res = rx.await??; diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 77b61c8221a9..dd6a644acb8b 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -29,8 +29,8 @@ use reth_engine_tree::{ persistence::PersistenceHandle, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; -use reth_evm::execute::BlockExecutorProvider; -use reth_node_types::BlockTy; +use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; +use reth_node_types::{BlockTy, HeaderTy, TxTy}; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes}; use reth_provider::{ @@ -65,7 +65,7 @@ where { /// Constructor for [`LocalEngineService`]. #[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new( consensus: Arc>, executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, @@ -80,10 +80,12 @@ where from_engine: EngineMessageStream, mode: MiningMode, payload_attributes_builder: B, + evm_config: C, ) -> Self where B: PayloadAttributesBuilder<::PayloadAttributes>, V: EngineValidator>, + C: ConfigureEvm
, Transaction = TxTy>, { let chain_spec = provider.chain_spec(); let engine_kind = @@ -93,18 +95,20 @@ where PersistenceHandle::::spawn_service(provider, pruner, sync_metrics_tx); let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( - blockchain_db.clone(), - executor_factory, - consensus, - payload_validator, - persistence_handle, - payload_builder.clone(), - canonical_in_memory_state, - tree_config, - invalid_block_hook, - engine_kind, - ); + let (to_tree_tx, from_tree) = + EngineApiTreeHandler::::spawn_new( + blockchain_db.clone(), + executor_factory, + consensus, + payload_validator, + persistence_handle, + payload_builder.clone(), + canonical_in_memory_state, + tree_config, + invalid_block_hook, + engine_kind, + evm_config, + ); let handler = EngineApiRequestHandler::new(to_tree_tx, from_tree); diff --git a/crates/engine/primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml index 5bee3436a47e..0a4cc06c40ba 100644 --- a/crates/engine/primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -17,6 +17,7 @@ reth-payload-primitives.workspace = true reth-payload-builder-primitives.workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true +reth-chain-state.workspace = true reth-trie.workspace = true reth-errors.workspace = true @@ -31,6 +32,7 @@ tokio = { workspace = true, features = ["sync"] } futures.workspace = true # misc +auto_impl.workspace = true serde.workspace = true thiserror.workspace = true diff --git a/crates/engine/primitives/src/event.rs b/crates/engine/primitives/src/event.rs index 1afed370d1bf..f4471dd8c892 100644 --- a/crates/engine/primitives/src/event.rs +++ b/crates/engine/primitives/src/event.rs @@ -1,7 +1,7 @@ //! Events emitted by the beacon consensus engine. 
use crate::ForkchoiceStatus; -use alloc::{boxed::Box, sync::Arc}; +use alloc::boxed::Box; use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; @@ -9,7 +9,8 @@ use core::{ fmt::{Display, Formatter, Result}, time::Duration, }; -use reth_primitives::{EthPrimitives, SealedBlock}; +use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_primitives::EthPrimitives; use reth_primitives_traits::{NodePrimitives, SealedHeader}; /// Events emitted by the consensus engine. @@ -18,9 +19,9 @@ pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. - ForkBlockAdded(Arc>, Duration), + ForkBlockAdded(ExecutedBlockWithTrieUpdates, Duration), /// A block was added to the canonical chain, and the elapsed time validating the block - CanonicalBlockAdded(Arc>, Duration), + CanonicalBlockAdded(ExecutedBlockWithTrieUpdates, Duration), /// A canonical chain was committed, and the elapsed time committing the data CanonicalChainCommitted(Box>, Duration), /// The consensus engine is involved in live sync, and has specific progress @@ -48,10 +49,14 @@ where write!(f, "ForkchoiceUpdated({state:?}, {status:?})") } Self::ForkBlockAdded(block, duration) => { - write!(f, "ForkBlockAdded({:?}, {duration:?})", block.num_hash()) + write!(f, "ForkBlockAdded({:?}, {duration:?})", block.recovered_block.num_hash()) } Self::CanonicalBlockAdded(block, duration) => { - write!(f, "CanonicalBlockAdded({:?}, {duration:?})", block.num_hash()) + write!( + f, + "CanonicalBlockAdded({:?}, {duration:?})", + block.recovered_block.num_hash() + ) } Self::CanonicalChainCommitted(block, duration) => { write!(f, "CanonicalChainCommitted({:?}, {duration:?})", block.num_hash()) diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index edf4896ad4a0..394d731bb84b 100644 --- 
a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -11,13 +11,21 @@ extern crate alloc; -use reth_payload_primitives::{BuiltPayload, PayloadAttributes}; -mod error; - -use core::fmt; - use alloy_consensus::BlockHeader; -use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; +use alloy_eips::{eip7685::Requests, Decodable2718}; +use alloy_primitives::B256; +use alloy_rpc_types_engine::{ExecutionPayloadSidecar, PayloadError}; +use core::fmt::{self, Debug}; +use reth_payload_primitives::{ + validate_execution_requests, BuiltPayload, EngineApiMessageVersion, + EngineObjectValidationError, InvalidPayloadAttributesError, PayloadAttributes, + PayloadOrAttributes, PayloadTypes, +}; +use reth_primitives::{NodePrimitives, SealedBlock}; +use reth_primitives_traits::Block; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; + +mod error; pub use error::*; mod forkchoice; @@ -32,15 +40,68 @@ pub use event::*; mod invalid_block_hook; pub use invalid_block_hook::InvalidBlockHook; -use reth_payload_primitives::{ - validate_execution_requests, EngineApiMessageVersion, EngineObjectValidationError, - InvalidPayloadAttributesError, PayloadOrAttributes, PayloadTypes, -}; -use reth_primitives::{NodePrimitives, SealedBlock}; -use reth_primitives_traits::Block; -use serde::{de::DeserializeOwned, ser::Serialize}; +/// Struct aggregating [`alloy_rpc_types_engine::ExecutionPayload`] and [`ExecutionPayloadSidecar`] +/// and encapsulating complete payload supplied for execution. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ExecutionData { + /// Execution payload. + pub payload: alloy_rpc_types_engine::ExecutionPayload, + /// Additional fork-specific fields. + pub sidecar: ExecutionPayloadSidecar, +} + +impl ExecutionData { + /// Creates new instance of [`ExecutionData`]. 
+ pub const fn new( + payload: alloy_rpc_types_engine::ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Self { + Self { payload, sidecar } + } + + /// Tries to create a new unsealed block from the given payload and payload sidecar. + /// + /// Performs additional validation of `extra_data` and `base_fee_per_gas` fields. + /// + /// # Note + /// + /// The log bloom is assumed to be validated during serialization. + /// + /// See + pub fn try_into_block( + self, + ) -> Result, PayloadError> { + self.payload.try_into_block_with_sidecar(&self.sidecar) + } +} -use alloy_eips::eip7685::Requests; +/// An execution payload. +pub trait ExecutionPayload: + Serialize + DeserializeOwned + Debug + Clone + Send + Sync + 'static +{ + /// Returns the parent hash of the block. + fn parent_hash(&self) -> B256; + + /// Returns the hash of the block. + fn block_hash(&self) -> B256; + + /// Returns the number of the block. + fn block_number(&self) -> u64; +} + +impl ExecutionPayload for ExecutionData { + fn parent_hash(&self) -> B256 { + self.payload.parent_hash() + } + + fn block_hash(&self) -> B256 { + self.payload.block_hash() + } + + fn block_number(&self) -> u64 { + self.payload.block_number() + } +} /// This type defines the versioned types of the engine API. /// @@ -88,20 +149,26 @@ pub trait EngineTypes: + Send + Sync + 'static; + /// Execution data. + type ExecutionData: ExecutionPayload; /// Converts a [`BuiltPayload`] into an [`ExecutionPayload`] and [`ExecutionPayloadSidecar`]. fn block_to_payload( block: SealedBlock< <::Primitives as NodePrimitives>::Block, >, - ) -> (ExecutionPayload, ExecutionPayloadSidecar); + ) -> Self::ExecutionData; } /// Type that validates an [`ExecutionPayload`]. +#[auto_impl::auto_impl(&, Arc)] pub trait PayloadValidator: fmt::Debug + Send + Sync + Unpin + 'static { /// The block type used by the engine. type Block: Block; + /// The execution payload type used by the engine. 
+ type ExecutionData; + /// Ensures that the given payload does not violate any consensus rules that concern the block's /// layout. /// @@ -112,13 +179,14 @@ pub trait PayloadValidator: fmt::Debug + Send + Sync + Unpin + 'static { /// engine-API specification. fn ensure_well_formed_payload( &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, + payload: Self::ExecutionData, ) -> Result, PayloadError>; } /// Type that validates the payloads processed by the engine. -pub trait EngineValidator: PayloadValidator { +pub trait EngineValidator: + PayloadValidator +{ /// Validates the execution requests according to [EIP-7685](https://eips.ethereum.org/EIPS/eip-7685). fn validate_execution_requests( &self, diff --git a/crates/engine/primitives/src/message.rs b/crates/engine/primitives/src/message.rs index d055d4e424fa..cc90733524ce 100644 --- a/crates/engine/primitives/src/message.rs +++ b/crates/engine/primitives/src/message.rs @@ -1,10 +1,10 @@ use crate::{ error::BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, EngineApiMessageVersion, - EngineTypes, ForkchoiceStatus, + EngineTypes, ExecutionPayload, ForkchoiceStatus, }; use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadSidecar, ForkChoiceUpdateResult, ForkchoiceState, - ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, + ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, + PayloadStatus, PayloadStatusEnum, }; use core::{ fmt::{self, Display}, @@ -145,10 +145,7 @@ pub enum BeaconEngineMessage { /// Message with new payload. NewPayload { /// The execution payload received by Engine API. - payload: ExecutionPayload, - /// The execution payload sidecar with additional version-specific fields received by - /// engine API. - sidecar: ExecutionPayloadSidecar, + payload: Engine::ExecutionData, /// The sender for returning payload status result. 
tx: oneshot::Sender>, }, @@ -220,11 +217,10 @@ where /// See also pub async fn new_payload( &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, + payload: Engine::ExecutionData, ) -> Result { let (tx, rx) = oneshot::channel(); - let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, sidecar, tx }); + let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, tx }); rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? } diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 81c5ca5ad79a..f6b791e744c0 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -14,9 +14,9 @@ pub use reth_engine_tree::{ chain::{ChainEvent, ChainOrchestrator}, engine::EngineApiEvent, }; -use reth_evm::execute::BlockExecutorProvider; +use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_p2p::BlockClient; -use reth_node_types::{BlockTy, NodeTypes, NodeTypesWithEngine}; +use reth_node_types::{BlockTy, HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::EthPrimitives; use reth_provider::{ @@ -73,7 +73,7 @@ where { /// Constructor for `EngineService`. #[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new( consensus: Arc>, executor_factory: E, chain_spec: Arc, @@ -89,9 +89,11 @@ where tree_config: TreeConfig, invalid_block_hook: Box>, sync_metrics_tx: MetricEventsSender, + evm_config: C, ) -> Self where V: EngineValidator>, + C: ConfigureEvm
, Transaction = TxTy>, { let engine_kind = if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; @@ -103,18 +105,20 @@ where let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( - blockchain_db, - executor_factory, - consensus, - payload_validator, - persistence_handle, - payload_builder, - canonical_in_memory_state, - tree_config, - invalid_block_hook, - engine_kind, - ); + let (to_tree_tx, from_tree) = + EngineApiTreeHandler::::spawn_new( + blockchain_db, + executor_factory, + consensus, + payload_validator, + persistence_handle, + payload_builder, + canonical_in_memory_state, + tree_config, + invalid_block_hook, + engine_kind, + evm_config, + ); let engine_handler = EngineApiRequestHandler::new(to_tree_tx, from_tree); let handler = EngineHandler::new(engine_handler, downloader, incoming_requests); @@ -160,7 +164,7 @@ mod tests { use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; - use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_evm_ethereum::{execute::EthExecutorProvider, EthEvmConfig}; use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::SealedHeader; @@ -200,6 +204,7 @@ mod tests { let engine_payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs); let pruner = Pruner::new_with_factory(provider_factory.clone(), vec![], 0, 0, None, rx); + let evm_config = EthEvmConfig::new(chain_spec.clone()); let (sync_metrics_tx, _sync_metrics_rx) = unbounded_channel(); let (tx, _rx) = unbounded_channel(); @@ -219,6 +224,7 @@ mod tests { TreeConfig::default(), Box::new(NoopInvalidBlockHook::default()), sync_metrics_tx, + evm_config, ); } } 
diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index b6c93f88c630..58919d42e60c 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -20,7 +20,6 @@ reth-engine-primitives.workspace = true reth-errors.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true -reth-payload-builder-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-primitives-traits.workspace = true @@ -48,7 +47,7 @@ revm-primitives.workspace = true futures.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } -moka = { workspace = true, features = ["sync"] } +mini-moka = { workspace = true, features = ["sync"] } # metrics metrics.workspace = true @@ -72,6 +71,7 @@ reth-chain-state = { workspace = true, features = ["test-utils"] } reth-chainspec.workspace = true reth-ethereum-engine-primitives.workspace = true reth-ethereum-consensus.workspace = true +reth-evm-ethereum.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-exex-types.workspace = true reth-network-p2p = { workspace = true, features = ["test-utils"] } diff --git a/crates/engine/tree/src/tree/block_buffer.rs b/crates/engine/tree/src/tree/block_buffer.rs index 58a4c6b362c4..4a0fede28f78 100644 --- a/crates/engine/tree/src/tree/block_buffer.rs +++ b/crates/engine/tree/src/tree/block_buffer.rs @@ -17,7 +17,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; /// Note: Buffer is limited by number of blocks that it can contain and eviction of the block /// is done by last recently used block. #[derive(Debug)] -pub(super) struct BlockBuffer { +pub struct BlockBuffer { /// All blocks in the buffer stored by their block hash. 
pub(crate) blocks: HashMap>, /// Map of any parent block hash (even the ones not currently in the buffer) @@ -38,7 +38,7 @@ pub(super) struct BlockBuffer { impl BlockBuffer { /// Create new buffer with max limit of blocks - pub(super) fn new(limit: u32) -> Self { + pub fn new(limit: u32) -> Self { Self { blocks: Default::default(), parent_to_child: Default::default(), @@ -49,12 +49,12 @@ impl BlockBuffer { } /// Return reference to the requested block. - pub(super) fn block(&self, hash: &BlockHash) -> Option<&RecoveredBlock> { + pub fn block(&self, hash: &BlockHash) -> Option<&RecoveredBlock> { self.blocks.get(hash) } /// Return a reference to the lowest ancestor of the given block in the buffer. - pub(super) fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&RecoveredBlock> { + pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&RecoveredBlock> { let mut current_block = self.blocks.get(hash)?; while let Some(parent) = self.blocks.get(¤t_block.parent_hash()) { current_block = parent; @@ -63,7 +63,7 @@ impl BlockBuffer { } /// Insert a correct block inside the buffer. - pub(super) fn insert_block(&mut self, block: RecoveredBlock) { + pub fn insert_block(&mut self, block: RecoveredBlock) { let hash = block.hash(); self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash); @@ -98,7 +98,7 @@ impl BlockBuffer { /// /// Note: that order of returned blocks is important and the blocks with lower block number /// in the chain will come first so that they can be executed in the correct order. - pub(super) fn remove_block_with_children( + pub fn remove_block_with_children( &mut self, parent_hash: &BlockHash, ) -> Vec> { @@ -112,7 +112,7 @@ impl BlockBuffer { } /// Discard all blocks that precede block number from the buffer. 
- pub(super) fn remove_old_blocks(&mut self, block_number: BlockNumber) { + pub fn remove_old_blocks(&mut self, block_number: BlockNumber) { let mut block_hashes_to_remove = Vec::new(); // discard all blocks that are before the finalized number. diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index aaa284d5a099..ebe26ee5809b 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -1,7 +1,7 @@ //! Implements a state provider that has a shared cache in front of it. use alloy_primitives::{map::B256HashMap, Address, StorageKey, StorageValue, B256}; use metrics::Gauge; -use moka::sync::CacheBuilder; +use mini_moka::sync::CacheBuilder; use reth_errors::ProviderResult; use reth_metrics::Metrics; use reth_primitives_traits::{Account, Bytecode}; @@ -9,13 +9,17 @@ use reth_provider::{ AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; +use reth_revm::db::BundleState; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; use revm_primitives::map::DefaultHashBuilder; +use std::time::{Duration, Instant}; +use tracing::{debug, trace}; -type Cache = moka::sync::Cache; +pub(crate) type Cache = + mini_moka::sync::Cache; /// A wrapper of a state provider and a shared cache. pub(crate) struct CachedStateProvider { @@ -44,6 +48,73 @@ where } } +impl CachedStateProvider { + /// Creates a new [`SavedCache`] from the given state updates and executed block hash. + /// + /// This does not update the code cache, because no changes are required to the code cache on + /// state change. 
+ /// + /// NOTE: Consumers should ensure that these caches are not in use by a state provider for a + /// previous block - otherwise, this update will cause that state provider to contain future + /// state, which would be incorrect. + pub(crate) fn save_cache( + self, + executed_block_hash: B256, + state_updates: &BundleState, + ) -> Result { + let Self { caches, metrics, state_provider: _ } = self; + let start = Instant::now(); + + for (addr, account) in &state_updates.state { + // If the account was not modified, as in not changed and not destroyed, then we have + // nothing to do w.r.t. this particular account and can move on + if account.status.is_not_modified() { + continue + } + + // if the account was destroyed, invalidate from the account / storage caches + if account.was_destroyed() { + // invalidate the account cache entry if destroyed + caches.account_cache.invalidate(addr); + + caches.invalidate_account_storage(addr); + continue + } + + // if we have an account that was modified, but it has a `None` account info, some wild + // error has occurred because this state should be unrepresentable. An account with + // `None` current info, should be destroyed. 
+ let Some(ref account_info) = account.info else { + trace!(target: "engine::caching", ?account, "Account with None account info found in state updates"); + return Err(()) + }; + + // insert will update if present, so we just use the new account info as the new value + // for the account cache + caches.account_cache.insert(*addr, Some(Account::from(account_info))); + + // now we iterate over all storage and make updates to the cached storage values + for (storage_key, slot) in &account.storage { + // we convert the storage key from U256 to B256 because that is how it's represented + // in the cache + caches.insert_storage(*addr, (*storage_key).into(), Some(slot.present_value)); + } + } + + // set metrics + metrics.storage_cache_size.set(caches.total_storage_slots() as f64); + metrics.account_cache_size.set(caches.account_cache.entry_count() as f64); + metrics.code_cache_size.set(caches.code_cache.entry_count() as f64); + + debug!(target: "engine::caching", update_latency=?start.elapsed(), "Updated state caches"); + + // create a saved cache with the executed block hash, same metrics, and updated caches + let saved_cache = SavedCache { hash: executed_block_hash, caches, metrics }; + + Ok(saved_cache) + } +} + /// Metrics for the cached state provider, showing hits / misses for each cache #[derive(Metrics, Clone)] #[metrics(scope = "sync.caching")] @@ -54,17 +125,35 @@ pub(crate) struct CachedStateMetrics { /// Code cache misses code_cache_misses: Gauge, + /// Code cache size + /// + /// NOTE: this uses the moka caches' `entry_count`, NOT the `weighted_size` method to calculate + /// size. + code_cache_size: Gauge, + /// Storage cache hits storage_cache_hits: Gauge, /// Storage cache misses storage_cache_misses: Gauge, + /// Storage cache size + /// + /// NOTE: this uses the moka caches' `entry_count`, NOT the `weighted_size` method to calculate + /// size. 
+ storage_cache_size: Gauge, + /// Account cache hits account_cache_hits: Gauge, /// Account cache misses account_cache_misses: Gauge, + + /// Account cache size + /// + /// NOTE: this uses the moka caches' `entry_count`, NOT the `weighted_size` method to calculate + /// size. + account_cache_size: Gauge, } impl CachedStateMetrics { @@ -112,7 +201,7 @@ impl StateProvider for CachedStateProvider { account: Address, storage_key: StorageKey, ) -> ProviderResult> { - if let Some(res) = self.caches.storage_cache.get(&(account, storage_key)) { + if let Some(res) = self.caches.get_storage(&account, &storage_key) { self.metrics.storage_cache_hits.increment(1); return Ok(res) } @@ -120,7 +209,7 @@ impl StateProvider for CachedStateProvider { self.metrics.storage_cache_misses.increment(1); let final_res = self.state_provider.storage(account, storage_key)?; - self.caches.storage_cache.insert((account, storage_key), final_res); + self.caches.insert_storage(account, storage_key, final_res); Ok(final_res) } @@ -243,13 +332,48 @@ pub(crate) struct ProviderCaches { /// The cache for bytecode code_cache: Cache>, - /// The cache for storage - storage_cache: Cache<(Address, StorageKey), Option>, + /// The cache for storage, organized hierarchically by account + storage_cache: Cache, /// The cache for basic accounts account_cache: Cache>, } +impl ProviderCaches { + /// Get storage value from hierarchical cache + pub(crate) fn get_storage( + &self, + address: &Address, + key: &StorageKey, + ) -> Option> { + self.storage_cache.get(address).and_then(|account_cache| account_cache.get_storage(key)) + } + + /// Insert storage value into hierarchical cache + pub(crate) fn insert_storage( + &self, + address: Address, + key: StorageKey, + value: Option, + ) { + let account_cache = self.storage_cache.get(&address).unwrap_or_default(); + + account_cache.insert_storage(key, value); + + self.storage_cache.insert(address, account_cache); + } + + /// Invalidate storage for specific account + 
pub(crate) fn invalidate_account_storage(&self, address: &Address) { + self.storage_cache.invalidate(address); + } + + /// Returns the total number of storage slots cached across all accounts + pub(crate) fn total_storage_slots(&self) -> usize { + self.storage_cache.iter().map(|addr| addr.len()).sum() + } +} + /// A builder for [`ProviderCaches`]. #[derive(Debug)] pub(crate) struct ProviderCacheBuilder { @@ -266,23 +390,252 @@ pub(crate) struct ProviderCacheBuilder { impl ProviderCacheBuilder { /// Build a [`ProviderCaches`] struct, so that provider caches can be easily cloned. pub(crate) fn build_caches(self) -> ProviderCaches { - ProviderCaches { - code_cache: CacheBuilder::new(self.code_cache_size) - .build_with_hasher(DefaultHashBuilder::default()), - storage_cache: CacheBuilder::new(self.storage_cache_size) - .build_with_hasher(DefaultHashBuilder::default()), - account_cache: CacheBuilder::new(self.account_cache_size) - .build_with_hasher(DefaultHashBuilder::default()), - } + // TODO: the total cache size could be a CLI configuration parameter. 
+ const TOTAL_CACHE_SIZE: u64 = 4 * 1024 * 1024 * 1024; // 4GB + let storage_cache_size = (TOTAL_CACHE_SIZE * 8888) / 10000; // 88.88% of total + let account_cache_size = (TOTAL_CACHE_SIZE * 556) / 10000; // 5.56% of total + let code_cache_size = (TOTAL_CACHE_SIZE * 556) / 10000; // 5.56% of total + + const EXPIRY_TIME: Duration = Duration::from_secs(7200); // 2 hours + const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour + + let storage_cache = CacheBuilder::new(self.storage_cache_size) + .weigher(|_key: &Address, value: &AccountStorageCache| -> u32 { + // values based on results from measure_storage_cache_overhead test + let base_weight = 39_000; + let slots_weight = value.len() * 218; + (base_weight + slots_weight) as u32 + }) + .max_capacity(storage_cache_size) + .time_to_live(EXPIRY_TIME) + .time_to_idle(TIME_TO_IDLE) + .build_with_hasher(DefaultHashBuilder::default()); + + let account_cache = CacheBuilder::new(self.account_cache_size) + .weigher(|_key: &Address, value: &Option| -> u32 { + match value { + Some(account) => { + let mut weight = 40; + if account.nonce != 0 { + weight += 32; + } + if !account.balance.is_zero() { + weight += 32; + } + if account.bytecode_hash.is_some() { + weight += 33; // size of Option + } else { + weight += 8; // size of None variant + } + weight as u32 + } + None => 8, // size of None variant + } + }) + .max_capacity(account_cache_size) + .time_to_live(EXPIRY_TIME) + .time_to_idle(TIME_TO_IDLE) + .build_with_hasher(DefaultHashBuilder::default()); + + let code_cache = CacheBuilder::new(self.code_cache_size) + .weigher(|_key: &B256, value: &Option| -> u32 { + match value { + Some(bytecode) => { + // base weight + actual bytecode size + (40 + bytecode.len()) as u32 + } + None => 8, // size of None variant + } + }) + .max_capacity(code_cache_size) + .time_to_live(EXPIRY_TIME) + .time_to_idle(TIME_TO_IDLE) + .build_with_hasher(DefaultHashBuilder::default()); + + ProviderCaches { code_cache, storage_cache, 
account_cache } } } impl Default for ProviderCacheBuilder { fn default() -> Self { - // moka caches have been benchmarked up to 800k entries, so we just use 1M, optimizing for - // hitrate over memory consumption. + // With weigher and max_capacity in place, these numbers represent + // the maximum number of entries that can be stored, not the actual + // memory usage which is controlled by max_capacity. // - // See: https://github.com/moka-rs/moka/wiki#admission-and-eviction-policies - Self { code_cache_size: 1000000, storage_cache_size: 1000000, account_cache_size: 1000000 } + // Code cache: up to 10M entries but limited to 0.5GB + // Storage cache: up to 10M accounts but limited to 8GB + // Account cache: up to 10M accounts but limited to 0.5GB + Self { + code_cache_size: 10_000_000, + storage_cache_size: 10_000_000, + account_cache_size: 10_000_000, + } + } +} + +/// A saved cache that has been used for executing a specific block, which has been updated for its +/// execution. +#[derive(Debug)] +pub(crate) struct SavedCache { + /// The hash of the block these caches were used to execute. + hash: B256, + + /// The caches used for the provider. + caches: ProviderCaches, + + /// Metrics for the cached state provider + metrics: CachedStateMetrics, +} + +impl SavedCache { + /// Returns the hash for this cache + pub(crate) const fn executed_block_hash(&self) -> B256 { + self.hash + } + + /// Splits the cache into its caches and metrics, consuming it. 
+ pub(crate) fn split(self) -> (ProviderCaches, CachedStateMetrics) { + (self.caches, self.metrics) + } +} + +/// Cache for an account's storage slots +#[derive(Debug, Clone)] +pub(crate) struct AccountStorageCache { + /// The storage slots for this account + slots: Cache>, +} + +impl AccountStorageCache { + /// Create a new [`AccountStorageCache`] + pub(crate) fn new(max_slots: u64) -> Self { + Self { + slots: CacheBuilder::new(max_slots).build_with_hasher(DefaultHashBuilder::default()), + } + } + + /// Get a storage value + pub(crate) fn get_storage(&self, key: &StorageKey) -> Option> { + self.slots.get(key) + } + + /// Insert a storage value + pub(crate) fn insert_storage(&self, key: StorageKey, value: Option) { + self.slots.insert(key, value); + } + + /// Returns the number of slots in the cache + pub(crate) fn len(&self) -> usize { + self.slots.entry_count() as usize + } +} + +impl Default for AccountStorageCache { + fn default() -> Self { + // With weigher and max_capacity in place, this number represents + // the maximum number of entries that can be stored, not the actual + // memory usage which is controlled by storage cache's max_capacity. 
+ Self::new(1_000_000) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::Rng; + use std::mem::size_of; + + mod tracking_allocator { + use std::{ + alloc::{GlobalAlloc, Layout, System}, + sync::atomic::{AtomicUsize, Ordering}, + }; + + #[derive(Debug)] + pub(crate) struct TrackingAllocator { + allocated: AtomicUsize, + total_allocated: AtomicUsize, + inner: System, + } + + impl TrackingAllocator { + pub(crate) const fn new() -> Self { + Self { + allocated: AtomicUsize::new(0), + total_allocated: AtomicUsize::new(0), + inner: System, + } + } + + pub(crate) fn reset(&self) { + self.allocated.store(0, Ordering::SeqCst); + self.total_allocated.store(0, Ordering::SeqCst); + } + + pub(crate) fn total_allocated(&self) -> usize { + self.total_allocated.load(Ordering::SeqCst) + } + } + + unsafe impl GlobalAlloc for TrackingAllocator { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let ret = self.inner.alloc(layout); + if !ret.is_null() { + self.allocated.fetch_add(layout.size(), Ordering::SeqCst); + self.total_allocated.fetch_add(layout.size(), Ordering::SeqCst); + } + ret + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + self.allocated.fetch_sub(layout.size(), Ordering::SeqCst); + self.inner.dealloc(ptr, layout) + } + } + } + + use tracking_allocator::TrackingAllocator; + + #[global_allocator] + static ALLOCATOR: TrackingAllocator = TrackingAllocator::new(); + + fn measure_allocation(f: F) -> (usize, T) + where + F: FnOnce() -> T, + { + ALLOCATOR.reset(); + let result = f(); + let total = ALLOCATOR.total_allocated(); + (total, result) + } + + #[test] + fn measure_storage_cache_overhead() { + let (base_overhead, cache) = measure_allocation(|| AccountStorageCache::new(1000)); + println!("Base AccountStorageCache overhead: {} bytes", base_overhead); + let mut rng = rand::thread_rng(); + + let key = StorageKey::random(); + let value = StorageValue::from(rng.gen::()); + let (first_slot, _) = measure_allocation(|| { + 
cache.insert_storage(key, Some(value)); + }); + println!("First slot insertion overhead: {} bytes", first_slot); + + const TOTAL_SLOTS: usize = 10_000; + let (test_slots, _) = measure_allocation(|| { + for _ in 0..TOTAL_SLOTS { + let key = StorageKey::random(); + let value = StorageValue::from(rng.gen::()); + cache.insert_storage(key, Some(value)); + } + }); + println!("Average overhead over {} slots: {} bytes", TOTAL_SLOTS, test_slots / TOTAL_SLOTS); + + println!("\nTheoretical sizes:"); + println!("StorageKey size: {} bytes", size_of::()); + println!("StorageValue size: {} bytes", size_of::()); + println!("Option size: {} bytes", size_of::>()); + println!("Option size: {} bytes", size_of::>()); } } diff --git a/crates/engine/tree/src/tree/error.rs b/crates/engine/tree/src/tree/error.rs index 73c4259b5d21..e0523a94ade2 100644 --- a/crates/engine/tree/src/tree/error.rs +++ b/crates/engine/tree/src/tree/error.rs @@ -189,10 +189,3 @@ pub enum InsertBlockValidationError { #[error(transparent)] Validation(#[from] BlockValidationError), } - -impl InsertBlockValidationError { - /// Returns true if this is a block pre merge error. - pub const fn is_block_pre_merge(&self) -> bool { - matches!(self, Self::Validation(BlockValidationError::BlockPreMerge { .. })) - } -} diff --git a/crates/engine/tree/src/tree/invalid_headers.rs b/crates/engine/tree/src/tree/invalid_headers.rs index 40303355a839..d349901a1985 100644 --- a/crates/engine/tree/src/tree/invalid_headers.rs +++ b/crates/engine/tree/src/tree/invalid_headers.rs @@ -16,7 +16,7 @@ const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; /// Keeps track of invalid headers. #[derive(Debug)] -pub(super) struct InvalidHeaderCache { +pub struct InvalidHeaderCache { /// This maps a header hash to a reference to its invalid ancestor. headers: LruMap, /// Metrics for the cache. @@ -25,7 +25,7 @@ pub(super) struct InvalidHeaderCache { impl InvalidHeaderCache { /// Invalid header cache constructor. 
- pub(super) fn new(max_length: u32) -> Self { + pub fn new(max_length: u32) -> Self { Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } } @@ -37,7 +37,7 @@ impl InvalidHeaderCache { /// /// If this is called, the hit count for the entry is incremented. /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub(super) fn get(&mut self, hash: &B256) -> Option { + pub fn get(&mut self, hash: &B256) -> Option { { let entry = self.headers.get(hash)?; entry.hit_count += 1; @@ -52,7 +52,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid block into the cache, with a given invalid ancestor. - pub(super) fn insert_with_invalid_ancestor( + pub fn insert_with_invalid_ancestor( &mut self, header_hash: B256, invalid_ancestor: BlockWithParent, @@ -68,7 +68,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid ancestor into the map. - pub(super) fn insert(&mut self, invalid_ancestor: BlockWithParent) { + pub fn insert(&mut self, invalid_ancestor: BlockWithParent) { if self.get(&invalid_ancestor.block.hash).is_none() { warn!(target: "consensus::engine", ?invalid_ancestor, "Bad block with hash"); self.insert_entry(invalid_ancestor.block.hash, invalid_ancestor); diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 34f86f921f27..839b6c92095c 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -16,6 +16,8 @@ pub(crate) struct EngineApiMetrics { pub(crate) block_validation: BlockValidationMetrics, /// A copy of legacy blockchain tree metrics, to be replaced when we replace the old tree pub(crate) tree: TreeMetrics, + /// Metrics for transaction prewarming threads + pub(crate) prewarm: PrewarmThreadMetrics, } /// Metrics for the entire blockchain tree @@ -69,11 +71,7 @@ pub(crate) struct BlockValidationMetrics { pub(crate) state_root_duration: Gauge, /// Trie input computation duration pub(crate) trie_input_duration: 
Gauge, - /// Prewarm spawn duration - #[allow(dead_code)] - pub(crate) prewarm_spawn_duration: Gauge, /// Cache saving duration - #[allow(dead_code)] pub(crate) cache_saving_duration: Gauge, /// State root config creation duration pub(crate) state_root_config_duration: Gauge, @@ -89,10 +87,30 @@ impl BlockValidationMetrics { } } +/// Metrics for prewarming threads +#[derive(Metrics, Clone)] +#[metrics(scope = "sync.prewarm")] +pub(crate) struct PrewarmThreadMetrics { + /// Prewarm thread spawn duration + pub(crate) spawn_duration: Gauge, + /// A histogram of the prewarm thread spawn duration + pub(crate) spawn_duration_histogram: Histogram, + /// The number of transactions in the block + pub(crate) transactions: Gauge, + /// A histogram of the number of transactions in the block + pub(crate) transactions_histogram: Histogram, + /// A histogram of total runtime durations for prewarm threads + pub(crate) total_runtime: Histogram, + /// A histogram of execution durations for prewarm threads + pub(crate) execution_duration: Histogram, + /// A histogram for total prefetch targets in prewarm threads + pub(crate) prefetch_storage_targets: Histogram, +} + /// Metrics for the blockchain tree block buffer #[derive(Metrics)] #[metrics(scope = "blockchain_tree.block_buffer")] -pub(super) struct BlockBufferMetrics { +pub(crate) struct BlockBufferMetrics { /// Total blocks in the block buffer pub blocks: Gauge, } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index f7fb2d7c6ba6..60a2ac317787 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -8,18 +8,19 @@ use crate::{ metrics::EngineApiMetrics, }, }; -use alloy_consensus::BlockHeader; +use alloy_consensus::{transaction::Recovered, BlockHeader}; use alloy_eips::BlockNumHash; use alloy_primitives::{ - map::{HashMap, HashSet}, + keccak256, + map::{B256Set, HashMap, HashSet}, BlockNumber, B256, U256, }; use alloy_rpc_types_engine::{ - ExecutionPayload, 
ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, - PayloadValidationError, + ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; -use block_buffer::BlockBuffer; +use cached_state::{ProviderCaches, SavedCache}; use error::{InsertBlockError, InsertBlockErrorKind, InsertBlockFatalError}; +use metrics::PrewarmThreadMetrics; use persistence_state::CurrentPersistenceAction; use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, @@ -29,31 +30,37 @@ use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; pub use reth_engine_primitives::InvalidBlockHook; use reth_engine_primitives::{ BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconOnNewPayloadError, EngineTypes, - EngineValidator, ForkchoiceStateTracker, OnForkChoiceUpdated, + EngineValidator, ExecutionPayload, ForkchoiceStateTracker, OnForkChoiceUpdated, }; use reth_errors::{ConsensusError, ProviderResult}; use reth_ethereum_primitives::EthPrimitives; use reth_evm::{ execute::BlockExecutorProvider, system_calls::{NoopHook, OnStateHook}, + ConfigureEvm, Evm, TransactionEnv, }; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::{EngineApiMessageVersion, PayloadBuilderAttributes}; -use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::{ + Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, SignedTransaction, +}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, StateCommitmentProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, }; -use reth_revm::database::StateProviderDatabase; +use reth_revm::{cancelled::ManualCancel, database::StateProviderDatabase}; use reth_stages_api::ControlFlow; use 
reth_trie::{ - trie_cursor::InMemoryTrieCursorFactory, updates::TrieUpdates, HashedPostState, TrieInput, + trie_cursor::InMemoryTrieCursorFactory, updates::TrieUpdates, HashedPostState, + MultiProofTargets, TrieInput, }; use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; -use root::{StateRootComputeOutcome, StateRootConfig, StateRootHandle, StateRootTask}; +use revm_primitives::ResultAndState; +use root::{ + StateRootComputeOutcome, StateRootConfig, StateRootHandle, StateRootMessage, StateRootTask, +}; use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, @@ -61,7 +68,7 @@ use std::{ ops::Bound, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, - Arc, + Arc, RwLock, }, time::{Duration, Instant}, }; @@ -82,12 +89,11 @@ mod persistence_state; pub mod root; mod trie_updates; -use crate::tree::{ - config::MIN_BLOCKS_FOR_PIPELINE_RUN, error::AdvancePersistenceError, - invalid_headers::InvalidHeaderCache, -}; +use crate::tree::{config::MIN_BLOCKS_FOR_PIPELINE_RUN, error::AdvancePersistenceError}; +pub use block_buffer::BlockBuffer; pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; +pub use invalid_headers::InvalidHeaderCache; pub use persistence_state::PersistenceState; use trie_updates::compare_trie_updates; @@ -432,6 +438,39 @@ impl TreeState { } } +/// A builder for creating state providers that can be used across threads. +#[derive(Clone, Debug)] +pub struct StateProviderBuilder { + /// The provider factory used to create providers. + provider_factory: P, + /// The historical block hash to fetch state from. + historical: B256, + /// The blocks that form the chain from historical to target. + blocks: Vec>, +} + +impl StateProviderBuilder { + /// Creates a new state provider from the provider factory, historical block hash and blocks. 
+ fn new( + provider_factory: P, + historical: B256, + blocks: Vec>, + ) -> Self { + Self { provider_factory, historical, blocks } + } +} + +impl StateProviderBuilder +where + P: BlockReader + StateProviderFactory + StateReader + StateCommitmentProvider + Clone, +{ + /// Creates a new state provider from this builder. + pub fn build(&self) -> ProviderResult { + let historical = self.provider_factory.state_by_block_hash(self.historical)?; + Ok(Box::new(MemoryOverlayStateProvider::new(historical, self.blocks.clone()))) + } +} + /// Tracks the state of the engine api internals. /// /// This type is not shareable. @@ -517,13 +556,14 @@ pub enum TreeAction { /// /// This type is responsible for processing engine API requests, maintaining the canonical state and /// emitting events. -pub struct EngineApiTreeHandler +pub struct EngineApiTreeHandler where N: NodePrimitives, T: EngineTypes, { provider: P, executor_provider: E, + evm_config: C, consensus: Arc>, payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. @@ -561,18 +601,21 @@ where invalid_block_hook: Box>, /// The engine API variant of this handler engine_kind: EngineApiKind, - /// state root task thread pool - state_root_task_pool: Arc, + /// The most recent cache used for execution. 
+ most_recent_cache: Option, + /// Thread pool used for the state root task and prewarming + thread_pool: Arc, } -impl std::fmt::Debug - for EngineApiTreeHandler +impl std::fmt::Debug + for EngineApiTreeHandler where N: NodePrimitives, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EngineApiTreeHandler") .field("provider", &self.provider) + .field("evm_config", &self.evm_config) .field("executor_provider", &self.executor_provider) .field("consensus", &self.consensus) .field("payload_validator", &self.payload_validator) @@ -591,7 +634,7 @@ where } } -impl EngineApiTreeHandler +impl EngineApiTreeHandler where N: NodePrimitives, P: DatabaseProviderFactory @@ -605,6 +648,7 @@ where

::Provider: BlockReader, E: BlockExecutorProvider, + C: ConfigureEvm

, T: EngineTypes, V: EngineValidator, { @@ -623,12 +667,13 @@ where payload_builder: PayloadBuilderHandle, config: TreeConfig, engine_kind: EngineApiKind, + evm_config: C, ) -> Self { let (incoming_tx, incoming) = std::sync::mpsc::channel(); let num_threads = root::thread_pool_size(); - let state_root_task_pool = Arc::new( + let thread_pool = Arc::new( rayon::ThreadPoolBuilder::new() .num_threads(num_threads) .thread_name(|i| format!("srt-worker-{}", i)) @@ -639,6 +684,7 @@ where Self { provider, executor_provider, + evm_config, consensus, payload_validator, incoming, @@ -654,7 +700,8 @@ where incoming_tx, invalid_block_hook: Box::new(NoopInvalidBlockHook), engine_kind, - state_root_task_pool, + most_recent_cache: None, + thread_pool, } } @@ -680,6 +727,7 @@ where config: TreeConfig, invalid_block_hook: Box>, kind: EngineApiKind, + evm_config: C, ) -> (Sender, N::Block>>, UnboundedReceiver>) { let best_block_number = provider.best_block_number().unwrap_or(0); @@ -711,6 +759,7 @@ where payload_builder, config, kind, + evm_config, ); task.set_invalid_block_hook(invalid_block_hook); let incoming = task.incoming_tx.clone(); @@ -789,8 +838,8 @@ where /// When the Consensus layer receives a new block via the consensus gossip protocol, /// the transactions in the block are sent to the execution layer in the form of a - /// [`ExecutionPayload`]. The Execution layer executes the transactions and validates the - /// state in the block header, then passes validation data back to Consensus layer, that + /// [`EngineTypes::ExecutionData`]. The Execution layer executes the transactions and validates + /// the state in the block header, then passes validation data back to Consensus layer, that /// adds the block to the head of its own blockchain and attests to it. The block is then /// broadcast over the consensus p2p network in the form of a "Beacon block". 
/// @@ -802,8 +851,7 @@ where #[instrument(level = "trace", skip_all, fields(block_hash = %payload.block_hash(), block_num = %payload.block_number(),), target = "engine::tree")] fn on_new_payload( &mut self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, + payload: T::ExecutionData, ) -> Result, InsertBlockFatalError> { trace!(target: "engine::tree", "invoked new payload"); self.metrics.engine.new_payload_messages.increment(1); @@ -834,7 +882,7 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. let parent_hash = payload.parent_hash(); - let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { + let block = match self.payload_validator.ensure_well_formed_payload(payload) { Ok(block) => block, Err(error) => { error!(target: "engine::tree", %error, "Invalid payload"); @@ -1343,14 +1391,10 @@ where self.canonical_in_memory_state.set_pending_block(block.clone()); } - let sealed_block = Arc::new(block.sealed_block().clone()); - self.state.tree_state.insert_executed(block); + self.state.tree_state.insert_executed(block.clone()); self.metrics.engine.inserted_already_executed_blocks.increment(1); self.emit_event(EngineApiEvent::BeaconConsensus( - BeaconConsensusEngineEvent::CanonicalBlockAdded( - sealed_block, - now.elapsed(), - ), + BeaconConsensusEngineEvent::CanonicalBlockAdded(block, now.elapsed()), )); } EngineApiRequest::Beacon(request) => { @@ -1390,8 +1434,8 @@ where error!(target: "engine::tree", "Failed to send event: {err:?}"); } } - BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { - let output = self.on_new_payload(payload, sidecar); + BeaconEngineMessage::NewPayload { payload, tx } => { + let output = self.on_new_payload(payload); if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { BeaconOnNewPayloadError::Internal(Box::new(e)) @@ -2279,6 +2323,19 @@ where Ok(None) } + /// This fetches the most recent saved cache, using the hash of the 
block we are trying to + /// execute on top of. + /// + /// If the hash does not match the saved cache's hash, then the only saved cache doesn't contain + /// state useful for this block's execution, and we return `None`. + /// + /// If there is no cache saved, this returns `None`. + /// + /// This `take`s the cache, to avoid cloning the entire cache. + fn take_latest_cache(&mut self, parent_hash: B256) -> Option { + self.most_recent_cache.take_if(|cache| cache.executed_block_hash() == parent_hash) + } + fn insert_block_without_senders( &mut self, block: SealedBlock, @@ -2347,19 +2404,6 @@ where return Err(e.into()) } - // Use cached state provider before executing, this does nothing currently, will be used in - // prewarming - let caches = ProviderCacheBuilder::default().build_caches(); - let cache_metrics = CachedStateMetrics::zeroed(); - let state_provider = - CachedStateProvider::new_with_caches(state_provider, caches, cache_metrics); - - trace!(target: "engine::tree", block=?block_num_hash, "Executing block"); - - let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); - - let sealed_block = Arc::new(block.clone_sealed_block()); - // We only run the parallel state root if we are currently persisting blocks that are all // ancestors of the one we are executing. 
If we're committing ancestor blocks, then: any // trie updates being committed are a subset of the in-memory trie updates collected before @@ -2370,7 +2414,10 @@ where let is_descendant_of_persisting_blocks = self.is_descendant_of_persisting_blocks(block.header()); - let (state_root_handle, state_root_task_config, state_hook) = + // Atomic bool for letting the prewarm tasks know when to stop + let cancel_execution = ManualCancel::default(); + + let (state_root_handle, state_root_task_config, state_root_sender, state_hook) = if is_descendant_of_persisting_blocks && self.config.use_state_root_task() { let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; @@ -2397,21 +2444,83 @@ where .state_root_config_duration .set(config_elapsed.as_secs_f64()); - let state_root_task = StateRootTask::new( - state_root_config.clone(), - self.state_root_task_pool.clone(), - ); + let state_root_task = + StateRootTask::new(state_root_config.clone(), self.thread_pool.clone()); + let state_root_sender = state_root_task.state_root_message_sender(); let state_hook = Box::new(state_root_task.state_hook()) as Box; - (Some(state_root_task.spawn()), Some(state_root_config), state_hook) + ( + Some(state_root_task.spawn()), + Some(state_root_config), + Some(state_root_sender), + state_hook, + ) } else { - (None, None, Box::new(NoopHook::default()) as Box) + (None, None, None, Box::new(NoopHook::default()) as Box) }; + let (caches, cache_metrics) = + if let Some(cache) = self.take_latest_cache(block.parent_hash()) { + cache.split() + } else { + (ProviderCacheBuilder::default().build_caches(), CachedStateMetrics::zeroed()) + }; + + // Use cached state provider before executing, used in execution after prewarming threads + // complete + let state_provider = CachedStateProvider::new_with_caches( + state_provider, + caches.clone(), + cache_metrics.clone(), + ); + + // This prevents caches from being saved without all prewarm execution tasks being completed + let 
prewarm_task_lock = Arc::new(RwLock::new(())); + + if self.config.use_caching_and_prewarming() { + debug!(target: "engine::tree", "Spawning prewarm threads"); + let prewarm_start = Instant::now(); + let prewarm_metrics = self.metrics.prewarm.clone(); + + // Prewarm transactions + for (tx_idx, tx) in block.transactions_recovered().enumerate() { + let state_root_sender = state_root_sender.clone(); + + let start = Instant::now(); + self.prewarm_transaction( + block.header().clone(), + tx.cloned(), + caches.clone(), + cache_metrics.clone(), + state_root_sender, + cancel_execution.clone(), + prewarm_task_lock.clone(), + prewarm_metrics.clone(), + )?; + let elapsed = start.elapsed(); + debug!(target: "engine::tree", ?tx_idx, elapsed = ?elapsed, "Spawned transaction prewarm"); + } + + prewarm_metrics.transactions.set(block.transaction_count() as f64); + prewarm_metrics.transactions_histogram.record(block.transaction_count() as f64); + + drop(state_root_sender); + let elapsed = prewarm_start.elapsed(); + debug!(target: "engine::tree", ?elapsed, "Done spawning prewarm threads"); + + self.metrics.prewarm.spawn_duration.set(elapsed); + self.metrics.prewarm.spawn_duration_histogram.record(elapsed); + } + trace!(target: "engine::tree", block=?block_num_hash, "Executing block"); + + let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); let execution_start = Instant::now(); let output = self.metrics.executor.execute_metered(executor, &block, state_hook)?; let execution_time = execution_start.elapsed(); trace!(target: "engine::tree", elapsed = ?execution_time, number=?block_num_hash.number, "Executed block"); + // Ensure that prewarm tasks don't send proof messages after state root sender is dropped + cancel_execution.cancel(); + if let Err(err) = self.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&output.receipts, &output.requests), @@ -2442,7 +2551,7 @@ where self.handle_state_root_result( state_root_handle, 
state_root_config, - sealed_block.as_ref(), + block.sealed_block(), &hashed_state, &state_provider, root_time, @@ -2498,6 +2607,23 @@ where self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); + if self.config.use_caching_and_prewarming() { + let save_cache_start = Instant::now(); + // this is the only place / thread a writer is acquired, so we would have already + // crashed if we had a poisoned rwlock + // + // we use a lock here and in prewarming, so we do not save the cache if a prewarm task + // is still running, since it would update the cache with stale data. It's unlikely that + // prewarm tasks are still running at this point however + drop(prewarm_task_lock.write().unwrap()); + // apply state updates to cache and save it (if saving was successful) + self.most_recent_cache = state_provider.save_cache(block.hash(), &output.state).ok(); + let elapsed = save_cache_start.elapsed(); + + // record how long it took to save caches + self.metrics.block_validation.cache_saving_duration.set(elapsed.as_secs_f64()); + } + let executed: ExecutedBlockWithTrieUpdates = ExecutedBlockWithTrieUpdates { block: ExecutedBlock { recovered_block: Arc::new(block), @@ -2514,15 +2640,15 @@ where self.canonical_in_memory_state.set_pending_block(executed.clone()); } - self.state.tree_state.insert_executed(executed); + self.state.tree_state.insert_executed(executed.clone()); self.metrics.engine.executed_blocks.set(self.state.tree_state.block_count() as f64); // emit insert event let elapsed = start.elapsed(); let engine_event = if self.is_fork(block_num_hash.hash)? 
{ - BeaconConsensusEngineEvent::ForkBlockAdded(sealed_block, elapsed) + BeaconConsensusEngineEvent::ForkBlockAdded(executed, elapsed) } else { - BeaconConsensusEngineEvent::CanonicalBlockAdded(sealed_block, elapsed) + BeaconConsensusEngineEvent::CanonicalBlockAdded(executed, elapsed) }; self.emit_event(EngineApiEvent::BeaconConsensus(engine_event)); @@ -2580,6 +2706,124 @@ where Ok(input) } + /// Runs execution for a single transaction, spawning it in the prewarm threadpool. + #[allow(clippy::too_many_arguments)] + fn prewarm_transaction( + &self, + block: N::BlockHeader, + tx: Recovered, + caches: ProviderCaches, + cache_metrics: CachedStateMetrics, + state_root_sender: Option>, + cancel_execution: ManualCancel, + task_finished: Arc>, + metrics: PrewarmThreadMetrics, + ) -> Result<(), InsertBlockErrorKind> { + // Get the builder once, outside the thread + let Some(state_provider_builder) = self.state_provider_builder(block.parent_hash())? else { + trace!(target: "engine::tree", parent=%block.parent_hash(), "Could not get state provider builder for prewarm"); + return Ok(()) + }; + + // clone and copy info required for execution + let evm_config = self.evm_config.clone(); + + // spawn task executing the individual tx + self.thread_pool.spawn(move || { + let thread_start = Instant::now(); + let in_progress = task_finished.read().unwrap(); + + // Create the state provider inside the thread + let state_provider = match state_provider_builder.build() { + Ok(provider) => provider, + Err(err) => { + trace!(target: "engine::tree", %err, "Failed to build state provider in prewarm thread"); + return + } + }; + + // Use the caches to create a new provider with caching + let state_provider = CachedStateProvider::new_with_caches( + state_provider, + caches, + cache_metrics, + ); + + let state_provider = StateProviderDatabase::new(&state_provider); + + // create a new executor and disable nonce checks in the env + let mut evm = evm_config.evm_for_block(state_provider, &block); 
+ + // create the tx env and reset nonce + let mut tx_env = evm_config.tx_env(&tx, tx.signer()); + tx_env.unset_nonce(); + + // exit early if execution is done + if cancel_execution.is_cancelled() { + return + } + + let execution_start = Instant::now(); + let ResultAndState { state, .. } = match evm.transact(tx_env) { + Ok(res) => res, + Err(err) => { + trace!(target: "engine::tree", %err, tx_hash=%tx.tx_hash(), sender=%tx.signer(), "Error when executing prewarm transaction"); + return + } + }; + metrics.execution_duration.record(execution_start.elapsed()); + + // execution no longer in progress, so we can drop the lock + drop(in_progress); + + // if execution is finished there is no point to sending proof targets + if cancel_execution.is_cancelled() { + return + } + + let Some(state_root_sender) = state_root_sender else { + return + }; + + let mut targets = MultiProofTargets::default(); + for (addr, account) in state { + // if account was not touched, do not fetch for it + if !account.is_touched() { + continue + } + + let mut storage_set = B256Set::default(); + for (key, slot) in account.storage { + // do nothing if unchanged + if !slot.is_changed() { + continue + } + + storage_set.insert(keccak256(B256::new(key.to_be_bytes()))); + } + + targets.insert(keccak256(addr), storage_set); + } + + let storage_targets = targets.values().map(|slots| slots.len()).sum::(); + debug!( + target: "engine::tree", + tx_hash = ?tx.tx_hash(), + targets = targets.len(), + storage_targets, + "Prefetching proofs for a transaction" + ); + metrics.prefetch_storage_targets.record(storage_targets as f64); + + let _ = state_root_sender.send(StateRootMessage::PrefetchProofs(targets)); + + // record final metrics + metrics.total_runtime.record(thread_start.elapsed()); + }); + + Ok(()) + } + /// Handles an error that occurred while inserting a block. /// /// If this is a validation error this will mark the block as invalid. 
@@ -2599,12 +2843,7 @@ where // invalid headers cache and `Ok` with [PayloadStatusEnum::Invalid] is // returned. warn!(target: "engine::tree", invalid_hash=?block.hash(), invalid_number=?block.number(), %validation_err, "Invalid block error on new payload"); - let latest_valid_hash = if validation_err.is_block_pre_merge() { - // zero hash must be returned if block is pre-merge - Some(B256::ZERO) - } else { - self.latest_valid_hash_for_invalid_payload(block.parent_hash())? - }; + let latest_valid_hash = self.latest_valid_hash_for_invalid_payload(block.parent_hash())?; // keep track of the invalid header self.state.invalid_headers.insert(block.block_with_parent()); @@ -2887,6 +3126,35 @@ where ); Ok(()) } + + /// Returns a builder for creating state providers for the given hash. + /// + /// This is an optimization for parallel execution contexts where we want to avoid + /// creating state providers in the critical path. + pub fn state_provider_builder( + &self, + hash: B256, + ) -> ProviderResult>> + where + P: BlockReader + StateProviderFactory + StateReader + StateCommitmentProvider + Clone, + { + if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(hash) { + debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory, creating provider builder"); + // the block leads back to the canonical chain + return Ok(Some(StateProviderBuilder::new(self.provider.clone(), historical, blocks))) + } + + // Check if the block is persisted + if let Some(header) = self.provider.header(&hash)? 
{ + debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); + // For persisted blocks, we create a builder that will fetch state directly from the + // database + return Ok(Some(StateProviderBuilder::new(self.provider.clone(), hash, vec![]))) + } + + debug!(target: "engine::tree", %hash, "no canonical state found for block"); + Ok(None) + } } /// Block inclusion can be valid, accepted, or invalid. Invalid blocks are returned as an error @@ -2932,11 +3200,12 @@ mod tests { use assert_matches::assert_matches; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; - use reth_engine_primitives::ForkchoiceStatus; + use reth_engine_primitives::{ExecutionData, ForkchoiceStatus}; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_ethereum_primitives::{Block, EthPrimitives}; use reth_evm::test_utils::MockExecutorProvider; + use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::Block as _; use reth_provider::test_utils::MockEthProvider; use reth_trie::{updates::TrieUpdates, HashedPostState}; @@ -3007,6 +3276,7 @@ mod tests { MockExecutorProvider, EthEngineTypes, EthereumEngineValidator, + EthEvmConfig, >, to_tree_tx: Sender, Block>>, from_tree_rx: UnboundedReceiver, @@ -3053,6 +3323,8 @@ mod tests { let (to_payload_service, _payload_command_rx) = unbounded_channel(); let payload_builder = PayloadBuilderHandle::new(to_payload_service); + let evm_config = EthEvmConfig::new(chain_spec.clone()); + let tree = EngineApiTreeHandler::new( provider.clone(), executor_provider.clone(), @@ -3066,6 +3338,7 @@ mod tests { payload_builder, TreeConfig::default(), EngineApiKind::Ethereum, + evm_config, ); let block_builder = TestBlockBuilder::default().with_chain_spec((*chain_spec).clone()); @@ -3212,13 +3485,13 @@ mod tests { 
&block.clone_sealed_block().into_block(), ); self.tree - .on_new_payload( - payload.into(), - ExecutionPayloadSidecar::v3(CancunPayloadFields { + .on_new_payload(ExecutionData { + payload: payload.into(), + sidecar: ExecutionPayloadSidecar::v3(CancunPayloadFields { parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), versioned_hashes: vec![], }), - ) + }) .unwrap(); } @@ -3266,9 +3539,9 @@ mod tests { let event = self.from_tree_rx.recv().await.unwrap(); match event { EngineApiEvent::BeaconConsensus( - BeaconConsensusEngineEvent::CanonicalBlockAdded(block, _), + BeaconConsensusEngineEvent::CanonicalBlockAdded(executed, _), ) => { - assert_eq!(block.hash(), expected_hash); + assert_eq!(executed.recovered_block.hash(), expected_hash); } _ => panic!("Unexpected event: {:#?}", event), } @@ -3278,10 +3551,10 @@ mod tests { let event = self.from_tree_rx.recv().await.unwrap(); match event { EngineApiEvent::BeaconConsensus(BeaconConsensusEngineEvent::ForkBlockAdded( - block, + executed, _, )) => { - assert_eq!(block.hash(), expected_hash); + assert_eq!(executed.recovered_block.hash(), expected_hash); } _ => panic!("Unexpected event: {:#?}", event), } @@ -3483,7 +3756,10 @@ mod tests { let outcome = test_harness .tree - .on_new_payload(payload.into(), ExecutionPayloadSidecar::none()) + .on_new_payload(ExecutionData { + payload: payload.into(), + sidecar: ExecutionPayloadSidecar::none(), + }) .unwrap(); assert!(outcome.outcome.is_syncing()); @@ -3528,8 +3804,10 @@ mod tests { .tree .on_engine_message(FromEngine::Request( BeaconEngineMessage::NewPayload { - payload: payload.clone().into(), - sidecar: ExecutionPayloadSidecar::none(), + payload: ExecutionData { + payload: payload.clone().into(), + sidecar: ExecutionPayloadSidecar::none(), + }, tx, } .into(), diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 7d454124bdf9..f63c2633ff42 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs 
@@ -146,6 +146,13 @@ pub enum StateRootMessage { PrefetchProofs(MultiProofTargets), /// New state update from transaction execution StateUpdate(EvmState), + /// Empty proof for a specific state update + EmptyProof { + /// The index of this proof in the sequence of state updates + sequence_number: u64, + /// The state update that was used to calculate the proof + state: HashedPostState, + }, /// Proof calculation completed for a specific state update ProofCalculated(Box), /// Error during proof calculation @@ -172,20 +179,10 @@ pub struct ProofCalculated { sequence_number: u64, /// Sparse trie update update: SparseTrieUpdate, - /// The source of the proof fetch, whether it was requested as a prefetch or as a result of a - /// state update. - source: ProofFetchSource, /// The time taken to calculate the proof. elapsed: Duration, } -impl ProofCalculated { - /// Returns true if the proof was calculated as a result of a state update. - pub(crate) const fn is_from_state_update(&self) -> bool { - matches!(self.source, ProofFetchSource::StateUpdate) - } -} - /// Whether or not a proof was fetched due to a state update, or due to a prefetch command. #[derive(Debug)] pub enum ProofFetchSource { @@ -318,7 +315,6 @@ struct MultiproofInput { proof_targets: MultiProofTargets, proof_sequence_number: u64, state_root_message_sender: Sender, - source: ProofFetchSource, } /// Manages concurrent multiproof calculations. @@ -339,12 +335,8 @@ struct MultiproofManager { impl MultiproofManager where - Factory: DatabaseProviderFactory - + StateCommitmentProvider - + Clone - + Send - + Sync - + 'static, + Factory: + DatabaseProviderFactory + StateCommitmentProvider + Clone + 'static, { /// Creates a new [`MultiproofManager`]. fn new(thread_pool: Arc, thread_pool_size: usize) -> Self { @@ -362,6 +354,19 @@ where /// Spawns a new multiproof calculation or enqueues it for later if /// `max_concurrent` are already inflight. 
fn spawn_or_queue(&mut self, input: MultiproofInput) { + // If there are no proof targets, we can just send an empty multiproof back immediately + if input.proof_targets.is_empty() { + debug!( + sequence_number = input.proof_sequence_number, + "No proof targets, sending empty multiproof back immediately" + ); + let _ = input.state_root_message_sender.send(StateRootMessage::EmptyProof { + sequence_number: input.proof_sequence_number, + state: input.hashed_state_update, + }); + return + } + if self.inflight >= self.max_concurrent { self.pending.push_back(input); return; @@ -388,7 +393,6 @@ where proof_targets, proof_sequence_number, state_root_message_sender, - source, } = input; let thread_pool = self.thread_pool.clone(); @@ -419,7 +423,6 @@ where targets: proof_targets, multiproof: proof, }, - source, elapsed, }), )); @@ -482,17 +485,14 @@ pub struct StateRootTask { thread_pool: Arc, /// Manages calculation of multiproofs. multiproof_manager: MultiproofManager, + /// State root task metrics metrics: StateRootTaskMetrics, } impl StateRootTask where - Factory: DatabaseProviderFactory - + StateCommitmentProvider - + Clone - + Send - + Sync - + 'static, + Factory: + DatabaseProviderFactory + StateCommitmentProvider + Clone + 'static, { /// Creates a new state root task with the unified message channel pub fn new(config: StateRootConfig, thread_pool: Arc) -> Self { @@ -509,6 +509,11 @@ where } } + /// Returns a [`Sender`] that can be used to send arbitrary [`StateRootMessage`]s to this task. + pub fn state_root_message_sender(&self) -> Sender { + self.tx.clone() + } + /// Returns a [`StateHookSender`] that can be used to send state updates to this task. pub fn state_hook_sender(&self) -> StateHookSender { StateHookSender::new(self.tx.clone()) @@ -570,18 +575,68 @@ where /// Handles request for proof prefetch. 
fn on_prefetch_proof(&mut self, targets: MultiProofTargets) { - extend_multi_proof_targets_ref(&mut self.fetched_proof_targets, &targets); + let proof_targets = self.get_prefetch_proof_targets(targets); + extend_multi_proof_targets_ref(&mut self.fetched_proof_targets, &proof_targets); self.multiproof_manager.spawn_or_queue(MultiproofInput { config: self.config.clone(), hashed_state_update: Default::default(), - proof_targets: targets, + proof_targets, proof_sequence_number: self.proof_sequencer.next_sequence(), state_root_message_sender: self.tx.clone(), - source: ProofFetchSource::Prefetch, }); } + /// Calls `get_proof_targets` with existing proof targets for prefetching. + fn get_prefetch_proof_targets(&self, mut targets: MultiProofTargets) -> MultiProofTargets { + // Here we want to filter out any targets that are already fetched + // + // This means we need to remove any storage slots that have already been fetched + let mut duplicates = 0; + + // First remove all storage targets that are subsets of already fetched storage slots + targets.retain(|hashed_address, target_storage| { + let keep = self + .fetched_proof_targets + .get(hashed_address) + // do NOT remove if None, because that means the account has not been fetched yet + .is_none_or(|fetched_storage| { + // remove if a subset + !target_storage.is_subset(fetched_storage) + }); + + if !keep { + duplicates += target_storage.len(); + } + + keep + }); + + // For all non-subset remaining targets, we have to calculate the difference + for (hashed_address, target_storage) in &mut targets { + let Some(fetched_storage) = self.fetched_proof_targets.get(hashed_address) else { + // this means the account has not been fetched yet, so we must fetch everything + // associated with this account + continue + }; + + let prev_target_storage_len = target_storage.len(); + + // keep only the storage slots that have not been fetched yet + // + // we already removed subsets, so this should only remove duplicates + 
target_storage.retain(|slot| !fetched_storage.contains(slot)); + + duplicates += prev_target_storage_len - target_storage.len(); + } + + if duplicates > 0 { + trace!(target: "engine::root", duplicates, "Removed duplicate prefetch proof targets"); + } + + targets + } + /// Handles state updates. /// /// Returns proof targets derived from the state update. @@ -596,7 +651,6 @@ where proof_targets, proof_sequence_number, state_root_message_sender: self.tx.clone(), - source: ProofFetchSource::StateUpdate, }); } @@ -619,9 +673,51 @@ where } } + /// Starts the main loop that handles all incoming messages, fetches proofs, applies them to the + /// sparse trie, updates the sparse trie, and eventually returns the state root. + /// + /// The lifecycle is the following: + /// 1. Either [`StateRootMessage::PrefetchProofs`] or [`StateRootMessage::StateUpdate`] is + /// received from the engine. + /// * For [`StateRootMessage::StateUpdate`], the state update is hashed with + /// [`evm_state_to_hashed_post_state`], and then (proof targets)[`MultiProofTargets`] are + /// extracted with [`get_proof_targets`]. + /// * For both messages, proof targets are deduplicated according to `fetched_proof_targets`, + /// so that the proofs for accounts and storage slots that were already fetched are not + /// requested again. + /// 2. Using the proof targets, a new multiproof is calculated using + /// [`MultiproofManager::spawn_or_queue`]. + /// * If the list of proof targets is empty, the [`StateRootMessage::EmptyProof`] message is + /// sent back to this task along with the original state update. + /// * Otherwise, the multiproof is calculated and the [`StateRootMessage::ProofCalculated`] + /// message is sent back to this task along with the resulting multiproof, proof targets + /// and original state update. + /// 3. Either [`StateRootMessage::EmptyProof`] or [`StateRootMessage::ProofCalculated`] is + /// received. + /// * The multiproof is added to the (proof sequencer)[`ProofSequencer`]. 
+ /// * If the proof sequencer has a contiguous sequence of multiproofs in the same order as + /// state updates arrived (i.e. transaction order), such sequence is returned. + /// 4. Once there's a sequence of contiguous multiproofs along with the proof targets and state + /// updates associated with them, a [`SparseTrieUpdate`] is generated and sent to the sparse + /// trie task that's running in [`run_sparse_trie`]. + /// * Sparse trie task reveals the multiproof, updates the sparse trie, computes storage trie + /// roots, and calculates RLP nodes of the state trie below + /// [`SPARSE_TRIE_INCREMENTAL_LEVEL`]. + /// 5. Steps above are repeated until this task receives a + /// [`StateRootMessage::FinishedStateUpdates`]. + /// * Once this message is received, on every [`StateRootMessage::EmptyProof`] and + /// [`StateRootMessage::ProofCalculated`] message, we check if there are any proofs are + /// currently being calculated, or if there are any pending proofs in the proof sequencer + /// left to be revealed using [`check_end_condition`]. + /// * If there are none left, we drop the sparse trie task sender channel, and it signals + /// [`run_sparse_trie`] to calculate the state root of the full state trie, and send it + /// back to this task via [`StateRootMessage::RootCalculated`] message. + /// 6. On [`StateRootMessage::RootCalculated`] message, the loop exits and the the state root is + /// returned. 
fn run(mut self, sparse_trie_tx: Sender) -> StateRootResult { let mut sparse_trie_tx = Some(sparse_trie_tx); + let mut prefetch_proofs_received = 0; let mut updates_received = 0; let mut proofs_processed = 0; @@ -638,9 +734,12 @@ where Ok(message) => match message { StateRootMessage::PrefetchProofs(targets) => { trace!(target: "engine::root", "processing StateRootMessage::PrefetchProofs"); + prefetch_proofs_received += 1; debug!( target: "engine::root", - len = targets.len(), + targets = targets.len(), + storage_targets = targets.values().map(|slots| slots.len()).sum::(), + total_prefetches = prefetch_proofs_received, "Prefetching proofs" ); self.on_prefetch_proof(targets); @@ -667,24 +766,59 @@ where trace!(target: "engine::root", "processing StateRootMessage::FinishedStateUpdates"); updates_finished = true; - let all_proofs_received = proofs_processed >= updates_received; - let no_pending = !self.proof_sequencer.has_pending(); - if all_proofs_received && no_pending { - // drop the sender + if check_end_condition(CheckEndConditionParams { + proofs_processed, + updates_received, + prefetch_proofs_received, + updates_finished, + proof_sequencer: &self.proof_sequencer, + }) { sparse_trie_tx.take(); debug!( target: "engine::root", - total_updates = updates_received, - total_proofs = proofs_processed, "State updates finished and all proofs processed, ending calculation" ); + }; + } + StateRootMessage::EmptyProof { sequence_number, state } => { + trace!(target: "engine::root", "processing StateRootMessage::EmptyProof"); + + proofs_processed += 1; + + if let Some(combined_update) = self.on_proof( + sequence_number, + SparseTrieUpdate { + state, + targets: MultiProofTargets::default(), + multiproof: MultiProof::default(), + }, + ) { + let _ = sparse_trie_tx + .as_ref() + .expect("tx not dropped") + .send(combined_update); } + + if check_end_condition(CheckEndConditionParams { + proofs_processed, + updates_received, + prefetch_proofs_received, + updates_finished, + 
proof_sequencer: &self.proof_sequencer, + }) { + sparse_trie_tx.take(); + debug!( + target: "engine::root", + "State updates finished and all proofs processed, ending calculation" + ); + }; } StateRootMessage::ProofCalculated(proof_calculated) => { trace!(target: "engine::root", "processing StateRootMessage::ProofCalculated"); - if proof_calculated.is_from_state_update() { - proofs_processed += 1; - } + + // we increment proofs_processed for both state updates and prefetches, + // because both are used for the root termination condition. + proofs_processed += 1; self.metrics .proof_calculation_duration_histogram @@ -719,18 +853,19 @@ where .send(combined_update); } - let all_proofs_received = proofs_processed >= updates_received; - let no_pending = !self.proof_sequencer.has_pending(); - if all_proofs_received && no_pending && updates_finished { - // drop the sender + if check_end_condition(CheckEndConditionParams { + proofs_processed, + updates_received, + prefetch_proofs_received, + updates_finished, + proof_sequencer: &self.proof_sequencer, + }) { sparse_trie_tx.take(); debug!( target: "engine::root", - total_updates = updates_received, - total_proofs = proofs_processed, - "All proofs processed, ending calculation" + "State updates finished and all proofs processed, ending calculation" ); - } + }; } StateRootMessage::RootCalculated { state_root, trie_updates, iterations } => { trace!(target: "engine::root", "processing StateRootMessage::RootCalculated"); @@ -748,8 +883,10 @@ where "All proofs processed, ending calculation" ); - self.metrics.state_updates_received_histogram.record(updates_received); - self.metrics.proofs_processed_histogram.record(proofs_processed); + self.metrics + .state_updates_received_histogram + .record(updates_received as f64); + self.metrics.proofs_processed_histogram.record(proofs_processed as f64); self.metrics.state_root_iterations_histogram.record(iterations as f64); return Ok(StateRootComputeOutcome { @@ -786,8 +923,43 @@ where } } 
+/// Convenience params struct to pass to [`check_end_condition`]. +struct CheckEndConditionParams<'a> { + proofs_processed: u64, + updates_received: u64, + prefetch_proofs_received: u64, + updates_finished: bool, + proof_sequencer: &'a ProofSequencer, +} + +// Returns true if all state updates finished and all profs processed. +fn check_end_condition( + CheckEndConditionParams { + proofs_processed, + updates_received, + prefetch_proofs_received, + updates_finished, + proof_sequencer, + }: CheckEndConditionParams<'_>, +) -> bool { + let all_proofs_received = proofs_processed >= updates_received + prefetch_proofs_received; + let no_pending = !proof_sequencer.has_pending(); + debug!( + target: "engine::root", + proofs_processed, + updates_received, + prefetch_proofs_received, + no_pending, + updates_finished, + "Checking end condition" + ); + all_proofs_received && no_pending && updates_finished +} + /// Listen to incoming sparse trie updates and update the sparse trie. -/// Returns final state root, trie updates and the number of update iterations. +/// +/// Once the updates receiver channel is dropped, returns final state root, trie updates and the +/// number of update iterations. 
fn run_sparse_trie( config: StateRootConfig, metrics: StateRootTaskMetrics, @@ -986,6 +1158,7 @@ fn extend_multi_proof_targets_ref(targets: &mut MultiProofTargets, other: &Multi mod tests { #![allow(clippy::needless_update)] use super::*; + use alloy_primitives::map::B256Set; use reth_primitives_traits::{Account as RethAccount, StorageEntry}; use reth_provider::{ providers::ConsistentDbView, test_utils::create_test_provider_factory, HashingWriter, @@ -1054,6 +1227,42 @@ mod tests { updates } + fn create_state_root_config(factory: F, input: TrieInput) -> StateRootConfig + where + F: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + 'static, + { + let consistent_view = ConsistentDbView::new(factory, None); + let nodes_sorted = Arc::new(input.nodes.clone().into_sorted()); + let state_sorted = Arc::new(input.state.clone().into_sorted()); + let prefix_sets = Arc::new(input.prefix_sets); + + StateRootConfig { consistent_view, nodes_sorted, state_sorted, prefix_sets } + } + + fn create_test_state_root_task(factory: F) -> StateRootTask + where + F: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + 'static, + { + let num_threads = thread_pool_size(); + + let thread_pool = rayon::ThreadPoolBuilder::new() + .num_threads(num_threads) + .thread_name(|i| format!("test-worker-{}", i)) + .build() + .expect("Failed to create test proof worker thread pool"); + + let thread_pool = Arc::new(thread_pool); + let config = create_state_root_config(factory, TrieInput::default()); + + StateRootTask::new(config, thread_pool) + } + #[test] fn test_state_root_task() { reth_tracing::init_test_tracing(); @@ -1384,4 +1593,85 @@ mod tests { assert!(target_slots.contains(&slot1)); assert!(target_slots.contains(&slot2)); } + + #[test] + fn test_get_prefetch_proof_targets_no_duplicates() { + let test_provider_factory = create_test_provider_factory(); + let mut test_state_root_task = create_test_state_root_task(test_provider_factory); + + // populate some targets + let 
mut targets = MultiProofTargets::default(); + let addr1 = B256::random(); + let addr2 = B256::random(); + let slot1 = B256::random(); + let slot2 = B256::random(); + targets.insert(addr1, vec![slot1].into_iter().collect()); + targets.insert(addr2, vec![slot2].into_iter().collect()); + + let prefetch_proof_targets = + test_state_root_task.get_prefetch_proof_targets(targets.clone()); + + // check that the prefetch proof targets are the same because there are no fetched proof + // targets yet + assert_eq!(prefetch_proof_targets, targets); + + // add a different addr and slot to fetched proof targets + let addr3 = B256::random(); + let slot3 = B256::random(); + test_state_root_task.fetched_proof_targets.insert(addr3, vec![slot3].into_iter().collect()); + + let prefetch_proof_targets = + test_state_root_task.get_prefetch_proof_targets(targets.clone()); + + // check that the prefetch proof targets are the same because the fetched proof targets + // don't overlap with the prefetch targets + assert_eq!(prefetch_proof_targets, targets); + } + + #[test] + fn test_get_prefetch_proof_targets_remove_subset() { + let test_provider_factory = create_test_provider_factory(); + let mut test_state_root_task = create_test_state_root_task(test_provider_factory); + + // populate some targets + let mut targets = MultiProofTargets::default(); + let addr1 = B256::random(); + let addr2 = B256::random(); + let slot1 = B256::random(); + let slot2 = B256::random(); + targets.insert(addr1, vec![slot1].into_iter().collect()); + targets.insert(addr2, vec![slot2].into_iter().collect()); + + // add a subset of the first target to fetched proof targets + test_state_root_task.fetched_proof_targets.insert(addr1, vec![slot1].into_iter().collect()); + + let prefetch_proof_targets = + test_state_root_task.get_prefetch_proof_targets(targets.clone()); + + // check that the prefetch proof targets do not include the subset + assert_eq!(prefetch_proof_targets.len(), 1); + 
assert!(!prefetch_proof_targets.contains_key(&addr1)); + assert!(prefetch_proof_targets.contains_key(&addr2)); + + // now add one more slot to the prefetch targets + let slot3 = B256::random(); + targets.get_mut(&addr1).unwrap().insert(slot3); + + let prefetch_proof_targets = + test_state_root_task.get_prefetch_proof_targets(targets.clone()); + + // check that the prefetch proof targets do not include the subset + // but include the new slot + assert_eq!(prefetch_proof_targets.len(), 2); + assert!(prefetch_proof_targets.contains_key(&addr1)); + assert_eq!( + *prefetch_proof_targets.get(&addr1).unwrap(), + vec![slot3].into_iter().collect::() + ); + assert!(prefetch_proof_targets.contains_key(&addr2)); + assert_eq!( + *prefetch_proof_targets.get(&addr2).unwrap(), + vec![slot2].into_iter().collect::() + ); + } } diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs index efed83159b31..641994814658 100644 --- a/crates/engine/util/src/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -1,8 +1,8 @@ //! Stores engine API messages to disk for later inspection and replay. -use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState}; +use alloy_rpc_types_engine::ForkchoiceState; use futures::{Stream, StreamExt}; -use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes, ExecutionPayload}; use reth_fs_util as fs; use serde::{Deserialize, Serialize}; use std::{ @@ -17,21 +17,19 @@ use tracing::*; /// A message from the engine API that has been stored to disk. #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub enum StoredEngineApiMessage { +pub enum StoredEngineApiMessage { /// The on-disk representation of an `engine_forkchoiceUpdated` method call. ForkchoiceUpdated { /// The [`ForkchoiceState`] sent in the persisted call. 
state: ForkchoiceState, /// The payload attributes sent in the persisted call, if any. - payload_attrs: Option, + payload_attrs: Option, }, /// The on-disk representation of an `engine_newPayload` method call. NewPayload { - /// The [`ExecutionPayload`] sent in the persisted call. - payload: ExecutionPayload, - /// The execution payload sidecar with additional version-specific fields received by - /// engine API. - sidecar: ExecutionPayloadSidecar, + /// The [`EngineTypes::ExecutionData`] sent in the persisted call. + #[serde(flatten)] + payload: EngineT::ExecutionData, }, } @@ -72,22 +70,19 @@ impl EngineMessageStore { let filename = format!("{}-fcu-{}.json", timestamp, state.head_block_hash); fs::write( self.path.join(filename), - serde_json::to_vec(&StoredEngineApiMessage::ForkchoiceUpdated { + serde_json::to_vec(&StoredEngineApiMessage::::ForkchoiceUpdated { state: *state, payload_attrs: payload_attrs.clone(), })?, )?; } - BeaconEngineMessage::NewPayload { payload, sidecar, tx: _tx } => { + BeaconEngineMessage::NewPayload { payload, tx: _tx } => { let filename = format!("{}-new_payload-{}.json", timestamp, payload.block_hash()); fs::write( self.path.join(filename), - serde_json::to_vec( - &StoredEngineApiMessage::::NewPayload { - payload: payload.clone(), - sidecar: sidecar.clone(), - }, - )?, + serde_json::to_vec(&StoredEngineApiMessage::::NewPayload { + payload: payload.clone(), + })?, )?; } // noop diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 7e4a09f75e9e..4a521b1ab8c9 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -8,7 +8,8 @@ use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use itertools::Either; use reth_chainspec::EthChainSpec; use reth_engine_primitives::{ - BeaconEngineMessage, BeaconOnNewPayloadError, EngineTypes, OnForkChoiceUpdated, + BeaconEngineMessage, BeaconOnNewPayloadError, EngineTypes, ExecutionData, + ExecutionPayload as _, 
OnForkChoiceUpdated, }; use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult}; use reth_ethereum_forks::EthereumHardforks; @@ -104,7 +105,7 @@ impl EngineReorg Stream for EngineReorg where S: Stream>, - Engine: EngineTypes, + Engine: EngineTypes, Provider: BlockReader + StateProviderFactory, Evm: ConfigureEvm
, Spec: EthChainSpec + EthereumHardforks, @@ -147,7 +148,7 @@ where let next = ready!(this.stream.poll_next_unpin(cx)); let item = match (next, &this.last_forkchoice_state) { ( - Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }), + Some(BeaconEngineMessage::NewPayload { payload, tx }), Some(last_forkchoice_state), ) if this.forkchoice_states_forwarded > this.frequency && // Only enter reorg state if new payload attaches to current head. @@ -162,13 +163,12 @@ where // forkchoice state. We will rely on CL to reorg us back to canonical chain. // TODO: This is an expensive blocking operation, ideally it's spawned as a task // so that the stream could yield the control back. - let (reorg_payload, reorg_sidecar) = match create_reorg_head( + let reorg_payload = match create_reorg_head( this.provider, this.evm_config, this.payload_validator, *this.depth, payload.clone(), - sidecar.clone(), ) { Ok(result) => result, Err(error) => { @@ -177,7 +177,6 @@ where // the next one return Poll::Ready(Some(BeaconEngineMessage::NewPayload { payload, - sidecar, tx, })) } @@ -197,11 +196,10 @@ where let queue = VecDeque::from([ // Current payload - BeaconEngineMessage::NewPayload { payload, sidecar, tx }, + BeaconEngineMessage::NewPayload { payload, tx }, // Reorg payload BeaconEngineMessage::NewPayload { payload: reorg_payload, - sidecar: reorg_sidecar, tx: reorg_payload_tx, }, // Reorg forkchoice state @@ -248,9 +246,8 @@ fn create_reorg_head( evm_config: &Evm, payload_validator: &ExecutionPayloadValidator, mut depth: usize, - next_payload: ExecutionPayload, - next_sidecar: ExecutionPayloadSidecar, -) -> RethResult<(ExecutionPayload, ExecutionPayloadSidecar)> + next_payload: ExecutionData, +) -> RethResult where Provider: BlockReader + StateProviderFactory, Evm: ConfigureEvm
, @@ -259,9 +256,8 @@ where let chain_spec = payload_validator.chain_spec(); // Ensure next payload is valid. - let next_block = payload_validator - .ensure_well_formed_payload(next_payload, next_sidecar) - .map_err(RethError::msg)?; + let next_block = + payload_validator.ensure_well_formed_payload(next_payload).map_err(RethError::msg)?; // Fetch reorg target block depending on its depth and its parent. let mut previous_hash = next_block.parent_hash; @@ -318,9 +314,8 @@ where } // Configure the environment for the block. - let tx_recovered = tx.try_clone_into_recovered().map_err(|_| { - BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError) - })?; + let tx_recovered = + tx.try_clone_into_recovered().map_err(|_| ProviderError::SenderRecoveryError)?; let tx_env = evm_config.tx_env(&tx_recovered, tx_recovered.signer()); let exec_result = match evm.transact(tx_env) { Ok(result) => result, @@ -419,10 +414,14 @@ where } .seal_slow(); - Ok(( - ExecutionPayload::from_block_unchecked(reorg_block.hash(), &reorg_block.into_block()).0, + Ok(ExecutionData { + payload: ExecutionPayload::from_block_unchecked( + reorg_block.hash(), + &reorg_block.into_block(), + ) + .0, // todo(onbjerg): how do we support execution requests? 
- reorg_target + sidecar: reorg_target .header .parent_beacon_block_root .map(|root| { @@ -432,5 +431,5 @@ where }) }) .unwrap_or_else(ExecutionPayloadSidecar::none), - )) + }) } diff --git a/crates/engine/util/src/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs index ea89bdf6d106..73b099f7b5f4 100644 --- a/crates/engine/util/src/skip_new_payload.rs +++ b/crates/engine/util/src/skip_new_payload.rs @@ -2,7 +2,7 @@ use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum}; use futures::{Stream, StreamExt}; -use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes, ExecutionPayload}; use std::{ pin::Pin, task::{ready, Context, Poll}, @@ -40,14 +40,14 @@ where loop { let next = ready!(this.stream.poll_next_unpin(cx)); let item = match next { - Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }) => { + Some(BeaconEngineMessage::NewPayload { payload, tx }) => { if this.skipped < this.threshold { *this.skipped += 1; tracing::warn!( target: "engine::stream::skip_new_payload", block_number = payload.block_number(), block_hash = %payload.block_hash(), - ?sidecar, + ?payload, threshold=this.threshold, skipped=this.skipped, "Skipping new payload" ); @@ -55,7 +55,7 @@ where continue } *this.skipped = 0; - Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }) + Some(BeaconEngineMessage::NewPayload { payload, tx }) } next => next, }; diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index ce05f128a6bf..f2bfd6027379 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -13,14 +13,14 @@ extern crate alloc; mod payload; use alloc::sync::Arc; -use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; +use alloy_rpc_types_engine::{ExecutionPayload, PayloadError}; pub use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, 
ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, }; pub use payload::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_chainspec::ChainSpec; -use reth_engine_primitives::{EngineTypes, EngineValidator, PayloadValidator}; +use reth_engine_primitives::{EngineTypes, EngineValidator, ExecutionData, PayloadValidator}; use reth_payload_primitives::{ validate_version_specific_fields, BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, @@ -50,6 +50,7 @@ where + TryInto + TryInto, { + type ExecutionData = ExecutionData; type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; @@ -59,8 +60,10 @@ where block: SealedBlock< <::Primitives as NodePrimitives>::Block, >, - ) -> (ExecutionPayload, ExecutionPayloadSidecar) { - ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()) + ) -> ExecutionData { + let (payload, sidecar) = + ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()); + ExecutionData { payload, sidecar } } } @@ -97,19 +100,19 @@ impl EthereumEngineValidator { impl PayloadValidator for EthereumEngineValidator { type Block = Block; + type ExecutionData = ExecutionData; fn ensure_well_formed_payload( &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, + payload: ExecutionData, ) -> Result { - self.inner.ensure_well_formed_payload(payload, sidecar) + self.inner.ensure_well_formed_payload(payload) } } impl EngineValidator for EthereumEngineValidator where - Types: EngineTypes, + Types: EngineTypes, { fn validate_version_specific_fields( &self, diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index df4091f0592b..4225699e996c 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ 
b/crates/ethereum/engine-primitives/src/payload.rs @@ -100,22 +100,6 @@ impl BuiltPayload for EthBuiltPayload { } } -impl BuiltPayload for &EthBuiltPayload { - type Primitives = EthPrimitives; - - fn block(&self) -> &SealedBlock { - (**self).block() - } - - fn fees(&self) -> U256 { - (**self).fees() - } - - fn requests(&self) -> Option { - self.requests.clone() - } -} - // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: EthBuiltPayload) -> Self { diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index f782b6b1c41c..51537e92d4b0 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -122,8 +122,7 @@ where block: &RecoveredBlock, ) -> Result<(), Self::Error> { // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = - (*self.chain_spec).is_spurious_dragon_active_at_block(block.number()); + let state_clear_flag = self.chain_spec.is_spurious_dragon_active_at_block(block.number()); self.state.set_state_clear_flag(state_clear_flag); let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); @@ -142,8 +141,8 @@ where let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body().transaction_count()); for (sender, transaction) in block.transactions_with_sender() { - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. + // The sum of the transaction's gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block's gasLimit. let block_available_gas = block.gas_limit() - cumulative_gas_used; if transaction.gas_limit() > block_available_gas { return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { @@ -171,19 +170,14 @@ where cumulative_gas_used += result.gas_used(); // Push transaction changeset and calculate header bloom filter for receipt. 
- receipts.push( - #[allow(clippy::needless_update)] // side-effect of optimism fields - Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs(), - ..Default::default() - }, - ); + receipts.push(Receipt { + tx_type: transaction.tx_type(), + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs(), + }); } Ok(ExecuteOutput { receipts, gas_used: cumulative_gas_used }) } diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index ac2fea59e200..a2cb79a451e1 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -21,7 +21,7 @@ use alloc::{sync::Arc, vec::Vec}; use alloy_consensus::{BlockHeader, Header}; use alloy_primitives::{Address, U256}; use core::{convert::Infallible, fmt::Debug}; -use reth_chainspec::{ChainSpec, EthChainSpec}; +use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_evm::{env::EvmEnv, ConfigureEvm, ConfigureEvmEnv, Database, Evm, NextBlockEnvAttributes}; use reth_primitives::TransactionSigned; use reth_primitives_traits::transaction::execute::FillTxEnv; @@ -130,6 +130,11 @@ impl EthEvmConfig { Self { chain_spec } } + /// Creates a new Ethereum EVM configuration for the ethereum mainnet. + pub fn mainnet() -> Self { + Self::new(MAINNET.clone()) + } + /// Returns the chain spec associated with this configuration. 
pub const fn chain_spec(&self) -> &Arc { &self.chain_spec diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index f8d3542e4d86..61492f246ca8 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -12,9 +12,7 @@ workspace = true [dependencies] # reth -reth-payload-builder.workspace = true reth-ethereum-engine-primitives.workspace = true -reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true reth-ethereum-consensus.workspace = true reth-ethereum-primitives.workspace = true @@ -27,10 +25,14 @@ reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-consensus.workspace = true reth-rpc.workspace = true +reth-rpc-builder.workspace = true +reth-rpc-api.workspace = true +reth-rpc-server-types.workspace = true reth-node-api.workspace = true reth-chainspec.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-trie-db.workspace = true +reth-rpc-eth-types.workspace = true # revm with required ethereum features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -75,7 +77,6 @@ test-utils = [ "reth-chainspec/test-utils", "reth-consensus/test-utils", "reth-network/test-utils", - "reth-payload-builder/test-utils", "reth-ethereum-primitives/test-utils", "reth-revm/test-utils", "reth-db/test-utils", diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index cbac35022848..40ac74deedb0 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -10,26 +10,31 @@ use reth_ethereum_engine_primitives::{ EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, }; use reth_ethereum_primitives::{EthPrimitives, PooledTransaction}; -use reth_evm::execute::BasicBlockExecutorProvider; +use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_network::{EthNetworkPrimitives, NetworkHandle, PeersInfo}; -use 
reth_node_api::{AddOnsContext, FullNodeComponents, TxTy}; +use reth_node_api::{AddOnsContext, FullNodeComponents, NodeAddOns, TxTy}; use reth_node_builder::{ components::{ ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, PoolBuilder, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - rpc::{EngineValidatorBuilder, RpcAddOns}, + rpc::{EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcAddOns, RpcHandle}, BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadTypes, }; use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions, EthStorage}; -use reth_rpc::EthApi; +use reth_rpc::{eth::core::EthApiFor, ValidationApi}; +use reth_rpc_api::servers::BlockSubmissionValidationApiServer; +use reth_rpc_builder::config::RethRpcServerConfig; +use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; +use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ blobstore::DiskFileBlobStore, EthTransactionPool, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; +use revm::primitives::TxEnv; use std::sync::Arc; /// Type configuration for a regular Ethereum node. @@ -87,10 +92,10 @@ impl EthereumNode { /// use reth_db::open_db_read_only; /// use reth_node_ethereum::EthereumNode; /// use reth_provider::providers::StaticFileProvider; - /// use std::{path::Path, sync::Arc}; + /// use std::sync::Arc; /// /// let factory = EthereumNode::provider_factory_builder() - /// .db(Arc::new(open_db_read_only(Path::new("db"), Default::default()).unwrap())) + /// .db(Arc::new(open_db_read_only("db", Default::default()).unwrap())) /// .chainspec(ChainSpecBuilder::mainnet().build().into()) /// .static_file(StaticFileProvider::read_only("db/static_files", false).unwrap()) /// .build_provider_factory(); @@ -112,16 +117,92 @@ impl NodeTypesWithEngine for EthereumNode { } /// Add-ons w.r.t. l1 ethereum. 
-pub type EthereumAddOns = RpcAddOns< - N, - EthApi< - ::Provider, - ::Pool, - NetworkHandle, - ::Evm, +#[derive(Debug)] +pub struct EthereumAddOns { + inner: RpcAddOns, EthereumEngineValidatorBuilder>, +} + +impl Default for EthereumAddOns { + fn default() -> Self { + Self { inner: Default::default() } + } +} + +impl NodeAddOns for EthereumAddOns +where + N: FullNodeComponents< + Types: NodeTypesWithEngine< + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Engine = EthEngineTypes, + >, + Evm: ConfigureEvm, + >, + EthApiError: FromEvmError, +{ + type Handle = RpcHandle>; + + async fn launch_add_ons( + self, + ctx: reth_node_api::AddOnsContext<'_, N>, + ) -> eyre::Result { + let validation_api = ValidationApi::new( + ctx.node.provider().clone(), + Arc::new(ctx.node.consensus().clone()), + ctx.node.block_executor().clone(), + ctx.config.rpc.flashbots_config(), + Box::new(ctx.node.task_executor().clone()), + Arc::new(EthereumEngineValidator::new(ctx.config.chain.clone())), + ); + + self.inner + .launch_add_ons_with(ctx, move |modules, _| { + modules.merge_if_module_configured( + RethRpcModule::Flashbots, + validation_api.into_rpc(), + )?; + + Ok(()) + }) + .await + } +} + +impl RethRpcAddOns for EthereumAddOns +where + N: FullNodeComponents< + Types: NodeTypesWithEngine< + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Engine = EthEngineTypes, + >, + Evm: ConfigureEvm, >, - EthereumEngineValidatorBuilder, ->; + EthApiError: FromEvmError, +{ + type EthApi = EthApiFor; + + fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks { + self.inner.hooks_mut() + } +} + +impl EngineValidatorAddOn for EthereumAddOns +where + N: FullNodeComponents< + Types: NodeTypesWithEngine< + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Engine = EthEngineTypes, + >, + >, +{ + type Validator = EthereumEngineValidator; + + async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + EthereumEngineValidatorBuilder::default().build(ctx).await 
+ } +} impl Node for EthereumNode where @@ -196,16 +277,12 @@ where let data_dir = ctx.config().datadir(); let pool_config = ctx.pool_config(); let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; - let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec()) + let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) .with_local_transactions_config(pool_config.local_transactions_config.clone()) .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) - .build_with_tasks( - ctx.provider().clone(), - ctx.task_executor().clone(), - blob_store.clone(), - ); + .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); let transaction_pool = reth_transaction_pool::Pool::eth_pool(validator, blob_store, pool_config); diff --git a/crates/ethereum/node/src/payload.rs b/crates/ethereum/node/src/payload.rs index 7bfb62cdd168..ad156bf52fc3 100644 --- a/crates/ethereum/node/src/payload.rs +++ b/crates/ethereum/node/src/payload.rs @@ -1,6 +1,5 @@ //! Payload component configuration for the Ethereum node. -use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::ChainSpec; use reth_ethereum_engine_primitives::{ EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, @@ -13,8 +12,6 @@ use reth_node_api::{FullNodeTypes, NodeTypesWithEngine, PrimitivesTy, TxTy}; use reth_node_builder::{ components::PayloadServiceBuilder, BuilderContext, PayloadBuilderConfig, PayloadTypes, }; -use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_provider::CanonStateSubscriptions; use reth_transaction_pool::{PoolTransaction, TransactionPool}; /// A basic ethereum payload service. 
@@ -23,13 +20,16 @@ use reth_transaction_pool::{PoolTransaction, TransactionPool}; pub struct EthereumPayloadBuilder; impl EthereumPayloadBuilder { - /// A helper method initializing [`PayloadBuilderService`] with the given EVM config. - pub fn spawn( - self, + /// A helper method initializing [`reth_ethereum_payload_builder::EthereumPayloadBuilder`] with + /// the given EVM config. + pub fn build( + &self, evm_config: Evm, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> + ) -> eyre::Result< + reth_ethereum_payload_builder::EthereumPayloadBuilder, + > where Types: NodeTypesWithEngine, Node: FullNodeTypes, @@ -44,29 +44,12 @@ impl EthereumPayloadBuilder { >, { let conf = ctx.payload_builder_config(); - let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( - evm_config, - EthereumBuilderConfig::new(conf.extra_data_bytes()).with_gas_limit(conf.gas_limit()), - ); - - let payload_job_config = BasicPayloadJobGeneratorConfig::default() - .interval(conf.interval()) - .deadline(conf.deadline()) - .max_payload_tasks(conf.max_payload_tasks()); - - let payload_generator = BasicPayloadJobGenerator::with_builder( + Ok(reth_ethereum_payload_builder::EthereumPayloadBuilder::new( ctx.provider().clone(), pool, - ctx.task_executor().clone(), - payload_job_config, - payload_builder, - ); - let (payload_service, payload_builder) = - PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream()); - - ctx.task_executor().spawn_critical("payload builder service", Box::pin(payload_service)); - - Ok(payload_builder) + evm_config, + EthereumBuilderConfig::new(conf.extra_data_bytes()).with_gas_limit(conf.gas_limit()), + )) } } @@ -83,11 +66,14 @@ where PayloadBuilderAttributes = EthPayloadBuilderAttributes, >, { - async fn spawn_payload_service( - self, + type PayloadBuilder = + reth_ethereum_payload_builder::EthereumPayloadBuilder; + + async fn build_payload_builder( + &self, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> { - 
self.spawn(EthEvmConfig::new(ctx.chain_spec()), ctx, pool) + ) -> eyre::Result { + self.build(EthEvmConfig::new(ctx.chain_spec()), ctx, pool) } } diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index 9acc5f126b7b..b9781390e57f 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -135,3 +135,46 @@ async fn test_long_reorg() -> eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn test_reorg_through_backfill() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::thread_rng().gen(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {:?}", seed); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, _) = + setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; + + let mut first_node = nodes.pop().unwrap(); + let mut second_node = nodes.pop().unwrap(); + + let first_provider = ProviderBuilder::new().on_http(first_node.rpc_url()); + + // Advance first node 100 blocks and finalize the chain. + advance_with_random_transactions(&mut first_node, 100, &mut rng, true).await?; + + // Sync second node to 20th block. 
+ let head = first_provider.get_block_by_number(20.into(), false.into()).await?.unwrap(); + second_node.sync_to(head.header.hash).await?; + + // Produce an unfinalized fork chain with 5 blocks + second_node.payload.timestamp = head.header.timestamp; + advance_with_random_transactions(&mut second_node, 5, &mut rng, false).await?; + + // Now reorg second node to the finalized canonical head + let head = first_provider.get_block_by_number(100.into(), false.into()).await?.unwrap(); + second_node.sync_to(head.header.hash).await?; + + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index 362002c03b2c..d33dc36501ab 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -11,10 +11,10 @@ use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use rand::{seq::SliceRandom, Rng}; use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType, TmpDB}; +use reth_ethereum_engine_primitives::EthPayloadBuilderAttributes; use reth_ethereum_primitives::TxType; use reth_node_api::NodeTypesWithDBAdapter; use reth_node_ethereum::EthereumNode; -use reth_payload_builder::EthPayloadBuilderAttributes; use reth_provider::FullProvider; use revm::primitives::{AccessListItem, Authorization}; diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index c4e689ed6e60..7f0d770c12e4 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -10,9 +10,7 @@ #![allow(clippy::useless_let_if_seq)] use alloy_consensus::{BlockHeader, Header, Transaction, Typed2718, EMPTY_OMMER_ROOT_HASH}; -use alloy_eips::{ - eip4844::MAX_DATA_GAS_PER_BLOCK, eip6110, eip7685::Requests, merge::BEACON_NONCE, -}; +use alloy_eips::{eip4844::DATA_GAS_PER_BLOB, eip6110, eip7685::Requests, merge::BEACON_NONCE}; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, 
PayloadBuilder, @@ -39,8 +37,8 @@ use reth_primitives_traits::{ use reth_revm::database::StateProviderDatabase; use reth_storage_api::StateProviderFactory; use reth_transaction_pool::{ - error::InvalidPoolTransactionError, noop::NoopTransactionPool, BestTransactions, - BestTransactionsAttributes, PoolTransaction, TransactionPool, ValidPoolTransaction, + error::InvalidPoolTransactionError, BestTransactions, BestTransactionsAttributes, + PoolTransaction, TransactionPool, ValidPoolTransaction, }; use revm::{ db::{states::bundle_state::BundleRetention, State}, @@ -52,6 +50,7 @@ use tracing::{debug, trace, warn}; mod config; pub use config::*; +use reth_transaction_pool::error::Eip4844PoolTransactionError; type BestTransactionsIter = Box< dyn BestTransactions::Transaction>>>, @@ -59,21 +58,30 @@ type BestTransactionsIter = Box< /// Ethereum payload builder #[derive(Debug, Clone, PartialEq, Eq)] -pub struct EthereumPayloadBuilder { +pub struct EthereumPayloadBuilder { + /// Client providing access to node state. + client: Client, + /// Transaction pool. + pool: Pool, /// The type responsible for creating the evm. evm_config: EvmConfig, /// Payload builder configuration. builder_config: EthereumBuilderConfig, } -impl EthereumPayloadBuilder { +impl EthereumPayloadBuilder { /// `EthereumPayloadBuilder` constructor. - pub const fn new(evm_config: EvmConfig, builder_config: EthereumBuilderConfig) -> Self { - Self { evm_config, builder_config } + pub const fn new( + client: Client, + pool: Pool, + evm_config: EvmConfig, + builder_config: EthereumBuilderConfig, + ) -> Self { + Self { client, pool, evm_config, builder_config } } } -impl EthereumPayloadBuilder +impl EthereumPayloadBuilder where EvmConfig: ConfigureEvm
, { @@ -95,10 +103,10 @@ where } // Default implementation of [PayloadBuilder] for unit type -impl PayloadBuilder for EthereumPayloadBuilder +impl PayloadBuilder for EthereumPayloadBuilder where EvmConfig: ConfigureEvm
, - Client: StateProviderFactory + ChainSpecProvider, + Client: StateProviderFactory + ChainSpecProvider + Clone, Pool: TransactionPool>, { type Attributes = EthPayloadBuilderAttributes; @@ -106,49 +114,41 @@ where fn try_build( &self, - args: BuildArguments, + args: BuildArguments, ) -> Result, PayloadBuilderError> { let evm_env = self .evm_env(&args.config, &args.config.parent_header) .map_err(PayloadBuilderError::other)?; - let pool = args.pool.clone(); default_ethereum_payload( self.evm_config.clone(), + self.client.clone(), + self.pool.clone(), self.builder_config.clone(), args, evm_env, - |attributes| pool.best_transactions_with_attributes(attributes), + |attributes| self.pool.best_transactions_with_attributes(attributes), ) } fn build_empty_payload( &self, - client: &Client, config: PayloadConfig, ) -> Result { - let args = BuildArguments::new( - client, - // we use defaults here because for the empty payload we don't need to execute anything - NoopTransactionPool::default(), - Default::default(), - config, - Default::default(), - None, - ); + let args = BuildArguments::new(Default::default(), config, Default::default(), None); let evm_env = self .evm_env(&args.config, &args.config.parent_header) .map_err(PayloadBuilderError::other)?; - let pool = args.pool.clone(); - default_ethereum_payload( self.evm_config.clone(), + self.client.clone(), + self.pool.clone(), self.builder_config.clone(), args, evm_env, - |attributes| pool.best_transactions_with_attributes(attributes), + |attributes| self.pool.best_transactions_with_attributes(attributes), )? .into_payload() .ok_or_else(|| PayloadBuilderError::MissingPayload) @@ -161,10 +161,12 @@ where /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. 
#[inline] -pub fn default_ethereum_payload( +pub fn default_ethereum_payload( evm_config: EvmConfig, + client: Client, + pool: Pool, builder_config: EthereumBuilderConfig, - args: BuildArguments, + args: BuildArguments, evm_env: EvmEnv, best_txs: F, ) -> Result, PayloadBuilderError> @@ -174,7 +176,7 @@ where Pool: TransactionPool>, F: FnOnce(BestTransactionsAttributes) -> BestTransactionsIter, { - let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; + let BuildArguments { mut cached_reads, config, cancel, best_payload } = args; let chain_spec = client.chain_spec(); let state_provider = client.state_by_block_hash(config.parent_header.hash())?; @@ -185,7 +187,6 @@ where debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; - let mut sum_blob_gas_used = 0; let block_gas_limit: u64 = evm_env.block_env.gas_limit.to::(); let base_fee = evm_env.block_env.basefee.to::(); @@ -228,6 +229,11 @@ where let mut evm = evm_config.evm_with_env(&mut db, evm_env); let mut receipts = Vec::new(); + let mut block_blob_count = 0; + let blob_params = chain_spec.blob_params_at_timestamp(attributes.timestamp); + let max_blob_count = + blob_params.as_ref().map(|params| params.max_blob_count).unwrap_or_default(); + while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this transaction if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { @@ -252,18 +258,21 @@ where // There's only limited amount of blob space available per block, so we need to check if // the EIP-4844 can still fit in the block if let Some(blob_tx) = tx.as_eip4844() { - let tx_blob_gas = blob_tx.blob_gas(); - if sum_blob_gas_used + tx_blob_gas > MAX_DATA_GAS_PER_BLOCK { + let tx_blob_count = blob_tx.blob_versioned_hashes.len() as u64; + + if block_blob_count + tx_blob_count > max_blob_count { // we can't fit this 
_blob_ transaction into the block, so we mark it as // invalid, which removes its dependent transactions from // the iterator. This is similar to the gas limit condition // for regular transactions above. - trace!(target: "payload_builder", tx=?tx.hash(), ?sum_blob_gas_used, ?tx_blob_gas, "skipping blob transaction because it would exceed the max data gas per block"); + trace!(target: "payload_builder", tx=?tx.hash(), ?block_blob_count, "skipping blob transaction because it would exceed the max blob count per block"); best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::ExceedsGasLimit( - tx_blob_gas, - MAX_DATA_GAS_PER_BLOCK, + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::TooManyEip4844Blobs { + have: block_blob_count + tx_blob_count, + permitted: max_blob_count, + }, ), ); continue @@ -303,11 +312,10 @@ where // add to the total blob gas used if the transaction successfully executed if let Some(blob_tx) = tx.as_eip4844() { - let tx_blob_gas = blob_tx.blob_gas(); - sum_blob_gas_used += tx_blob_gas; + block_blob_count += blob_tx.blob_versioned_hashes.len() as u64; - // if we've reached the max data gas per block, we can skip blob txs entirely - if sum_blob_gas_used == MAX_DATA_GAS_PER_BLOCK { + // if we've reached the max blob count, we can skip blob txs entirely + if block_blob_count == max_blob_count { best_txs.skip_blobs(); } } @@ -418,16 +426,14 @@ where .map_err(PayloadBuilderError::other)?; excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_header.timestamp) { - parent_header.maybe_next_block_excess_blob_gas( - chain_spec.blob_params_at_timestamp(attributes.timestamp), - ) + parent_header.maybe_next_block_excess_blob_gas(blob_params) } else { // for the first post-fork block, both parent.blob_gas_used and // parent.excess_blob_gas are evaluated as 0 Some(alloy_eips::eip7840::BlobParams::cancun().next_block_excess_blob_gas(0, 0)) }; - blob_gas_used = Some(sum_blob_gas_used); + blob_gas_used = 
Some(block_blob_count * DATA_GAS_PER_BLOB); } let header = Header { diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index 70ae29ae20a5..3d783353c55b 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -14,7 +14,6 @@ workspace = true # reth reth-consensus.workspace = true reth-storage-errors.workspace = true -reth-prune-types.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index ffe853d427d8..09154c355fc3 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -18,7 +18,6 @@ use alloc::{ use alloy_eips::BlockNumHash; use alloy_primitives::B256; use reth_consensus::ConsensusError; -use reth_prune_types::PruneSegmentError; use reth_storage_errors::provider::ProviderError; use thiserror::Error; @@ -36,9 +35,6 @@ pub enum BlockValidationError { /// The EVM error. 
error: Box, }, - /// Error when recovering the sender for a transaction - #[error("failed to recover sender for transaction")] - SenderRecoveryError, /// Error when incrementing balance in post execution #[error("incrementing balance in post execution failed")] IncrementBalanceFailed, @@ -55,18 +51,6 @@ pub enum BlockValidationError { /// The available block gas block_available_gas: u64, }, - /// Error for pre-merge block - #[error("block {hash} is pre merge")] - BlockPreMerge { - /// The hash of the block - hash: B256, - }, - /// Error for missing total difficulty - #[error("missing total difficulty for block {hash}")] - MissingTotalDifficulty { - /// The hash of the block - hash: B256, - }, /// Error for EIP-4788 when parent beacon block root is missing #[error("EIP-4788 parent beacon block root missing for active Cancun block")] MissingParentBeaconBlockRoot, @@ -172,9 +156,6 @@ impl From for BlockExecutionError { /// Internal (i.e., not validation or consensus related) `BlockExecutor` Errors #[derive(Error, Debug)] pub enum InternalBlockExecutionError { - /// Pruning error, transparently wrapping [`PruneSegmentError`] - #[error(transparent)] - Pruning(#[from] PruneSegmentError), /// Error when appending chain on fork is not possible #[error( "appending chain on fork (other_chain_fork:?) is not possible as the tip is {chain_tip:?}" @@ -185,12 +166,12 @@ pub enum InternalBlockExecutionError { /// The fork on the other chain other_chain_fork: Box, }, - /// Error when fetching latest block state. + /// Error when fetching data from the db. 
#[error(transparent)] - LatestBlock(#[from] ProviderError), + Provider(#[from] ProviderError), /// Arbitrary Block Executor Errors #[error(transparent)] - Other(Box), + Other(Box), } impl InternalBlockExecutionError { @@ -206,4 +187,51 @@ impl InternalBlockExecutionError { pub fn msg(msg: impl core::fmt::Display) -> Self { Self::Other(msg.to_string().into()) } + + /// Returns the arbitrary error if it is [`InternalBlockExecutionError::Other`] + pub fn as_other(&self) -> Option<&(dyn core::error::Error + Send + Sync + 'static)> { + match self { + Self::Other(err) => Some(&**err), + _ => None, + } + } + + /// Attempts to downcast the [`InternalBlockExecutionError::Other`] variant to a concrete type + pub fn downcast(self) -> Result, Self> { + match self { + Self::Other(err) => err.downcast().map_err(Self::Other), + err => Err(err), + } + } + + /// Returns a reference to the [`InternalBlockExecutionError::Other`] value if this type is a + /// [`InternalBlockExecutionError::Other`] of that type. Returns None otherwise. + pub fn downcast_other(&self) -> Option<&T> { + let other = self.as_other()?; + other.downcast_ref() + } + + /// Returns true if the this type is a [`InternalBlockExecutionError::Other`] of that error + /// type. Returns false otherwise. 
+ pub fn is_other(&self) -> bool { + self.as_other().map(|err| err.is::()).unwrap_or(false) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[derive(thiserror::Error, Debug)] + #[error("err")] + struct E; + + #[test] + fn other_downcast() { + let err = InternalBlockExecutionError::other(E); + assert!(err.is_other::()); + + assert!(err.downcast_other::().is_some()); + assert!(err.downcast::().is_ok()); + } } diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 40f1e30a288b..fa754e159cc1 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -78,6 +78,7 @@ serde = [ "rand/serde", "secp256k1/serde", "reth-primitives-traits/serde", + "reth-prune-types/serde", ] scroll = [ "reth-node-core/scroll", diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index 1bb82e97f7db..ec8f9e397c41 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -20,18 +20,19 @@ reth-db-common.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-execution-types.workspace = true reth-exex.workspace = true +reth-payload-builder.workspace = true reth-network.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-node-ethereum.workspace = true -reth-payload-builder.workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-trie-db.workspace = true +reth-ethereum-payload-builder.workspace = true ## alloy alloy-eips.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index b29353d15b95..0ffdecf0a5ca 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -24,6 +24,7 @@ use reth_db::{ 
DatabaseEnv, }; use reth_db_common::init::init_genesis; +use reth_ethereum_payload_builder::EthereumBuilderConfig; use reth_evm::test_utils::MockExecutorProvider; use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal}; @@ -287,7 +288,14 @@ pub async fn test_exex_context_with_chain_spec( let task_executor = tasks.executor(); tasks.executor().spawn(network_manager); - let (_, payload_builder) = NoopPayloadBuilderService::::new(); + let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( + provider.clone(), + transaction_pool.clone(), + evm_config.clone(), + EthereumBuilderConfig::new(Default::default()), + ); + + let (_, payload_builder_handle) = NoopPayloadBuilderService::::new(); let components = NodeAdapter::, _> { components: Components { @@ -297,6 +305,7 @@ pub async fn test_exex_context_with_chain_spec( consensus, network, payload_builder, + payload_builder_handle, }, task_executor, provider, diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 95cbe1fad45e..898be5f3cef4 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -31,7 +31,7 @@ use std::{ task::{ready, Context, Poll}, }; use thiserror::Error; -use tracing::{error, trace}; +use tracing::{debug, error, trace}; /// A heuristic that is used to determine the number of requests that should be prepared for a peer. /// This should ensure that there are always requests lined up for peers to handle while the @@ -203,6 +203,16 @@ where self.queued_validated_headers.last().or(self.lowest_validated_header.as_ref()) } + /// Resets the request trackers and clears the sync target. + /// + /// This ensures the downloader will restart after a new sync target has been set. 
+ fn reset(&mut self) { + debug!(target: "downloaders::headers", "Resetting headers downloader"); + self.next_request_block_number = 0; + self.next_chain_tip_block_number = 0; + self.sync_target.take(); + } + /// Validate that the received header matches the expected sync target. fn validate_sync_target( &self, @@ -294,11 +304,23 @@ where // If the header is valid on its own, but not against its parent, we return it as // detached head error. + // In stage sync this will trigger an unwind because this means that the local head + // is not part of the chain the sync target is on. In other words, the downloader was + // unable to connect the sync target with the local head because the sync target and + // the local head are on different chains. if let Err(error) = self.consensus.validate_header_against_parent(&*last_header, head) { + let local_head = head.clone(); // Replace the last header with a detached variant error!(target: "downloaders::headers", %error, number = last_header.number(), hash = ?last_header.hash(), "Header cannot be attached to known canonical chain"); + + // Reset trackers so that we can start over the next time the sync target is + // updated. + // The expected event flow when that happens is that the node will unwind the local + // chain and restart the downloader. 
+ self.reset(); + return Err(HeadersDownloaderError::DetachedHead { - local_head: Box::new(head.clone()), + local_head: Box::new(local_head), header: Box::new(last_header.clone()), error: Box::new(error), } @@ -674,6 +696,11 @@ where // headers are sorted high to low self.queued_validated_headers.pop(); } + trace!( + target: "downloaders::headers", + head=?head.num_hash(), + "Updating local head" + ); // update the local head self.local_head = Some(head); } @@ -681,6 +708,12 @@ where /// If the given target is different from the current target, we need to update the sync target fn update_sync_target(&mut self, target: SyncTarget) { let current_tip = self.sync_target.as_ref().and_then(|t| t.hash()); + trace!( + target: "downloaders::headers", + sync_target=?target, + current_tip=?current_tip, + "Updating sync target" + ); match target { SyncTarget::Tip(tip) => { if Some(tip) != current_tip { diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index 5253ca565062..d3522c07f8d8 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -176,6 +176,7 @@ impl Future for SpawnedDownloader { } /// Commands delegated to the spawned [`HeaderDownloader`] +#[derive(Debug)] enum DownloaderUpdates { UpdateSyncGap(SealedHeader, SyncTarget), UpdateLocalHead(SealedHeader), diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index 85d3f0cdb193..fa2efa593f5c 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -789,6 +789,8 @@ impl PeersManager { } /// Connects a peer and its address with the given kind. 
+ /// + /// Note: This is invoked on demand via an external command received by the manager pub(crate) fn add_and_connect_kind( &mut self, peer_id: PeerId, @@ -807,6 +809,7 @@ impl PeersManager { peer.state = PeerConnectionState::PendingOut; peer.fork_id = fork_id; entry.insert(peer); + self.connection_info.inc_pending_out(); self.queued_actions .push_back(PeerAction::Connect { peer_id, remote_addr: addr.tcp() }); } @@ -2786,4 +2789,14 @@ mod tests { peers.on_active_session_gracefully_closed(peer); assert_eq!(peers.connection_info.num_inbound, 0); } + + #[tokio::test] + async fn test_add_pending_onnect() { + let peer = PeerId::random(); + let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); + let mut peers = PeersManager::default(); + peers.add_and_connect(peer, PeerAddr::from_tcp(socket_addr), None); + assert_eq!(peers.peers.get(&peer).unwrap().state, PeerConnectionState::PendingOut); + assert_eq!(peers.connection_info.num_pending_out, 1); + } } diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 1e4bb174508a..06414c3b8798 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -10,7 +10,7 @@ use crate::{ }; use futures::{FutureExt, StreamExt}; use pin_project::pin_project; -use reth_chainspec::{ChainSpecProvider, Hardforks, MAINNET}; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks, Hardforks}; use reth_eth_wire::{ protocol::Protocol, DisconnectReason, EthNetworkPrimitives, HelloMessageWithProtocols, }; @@ -176,7 +176,12 @@ where impl Testnet where - C: StateProviderFactory + BlockReaderIdExt + HeaderProvider + Clone + 'static, + C: ChainSpecProvider + + StateProviderFactory + + BlockReaderIdExt + + HeaderProvider + + Clone + + 'static, Pool: TransactionPool, { /// Installs an eth pool on each peer @@ -185,7 +190,6 @@ where let blob_store = InMemoryBlobStore::default(); let pool = 
TransactionValidationTaskExecutor::eth( peer.client.clone(), - MAINNET.clone(), blob_store.clone(), TokioTaskExecutor::default(), ); @@ -206,7 +210,6 @@ where let blob_store = InMemoryBlobStore::default(); let pool = TransactionValidationTaskExecutor::eth( peer.client.clone(), - MAINNET.clone(), blob_store.clone(), TokioTaskExecutor::default(), ); @@ -762,4 +765,24 @@ impl NetworkEventStream { _ => None, } } + + /// Awaits the next event for a peer added. + pub async fn peer_added(&mut self) -> Option { + let peer_id = match self.inner.next().await { + Some(NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id))) => peer_id, + _ => return None, + }; + + Some(peer_id) + } + + /// Awaits the next event for a peer removed. + pub async fn peer_removed(&mut self) -> Option { + let peer_id = match self.inner.next().await { + Some(NetworkEvent::Peer(PeerEvent::PeerRemoved(peer_id))) => peer_id, + _ => return None, + }; + + Some(peer_id) + } } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 5b53fd2e26d6..5cd84af86530 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1710,7 +1710,7 @@ impl PooledTransactionsHashesBuilder { Self::Eth68(msg) => { msg.hashes.push(*pooled_tx.hash()); msg.sizes.push(pooled_tx.encoded_length()); - msg.types.push(pooled_tx.transaction.tx_type()); + msg.types.push(pooled_tx.transaction.ty()); } } } diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index b54f2400fbee..feac0cc516de 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -4,7 +4,7 @@ use alloy_node_bindings::Geth; use alloy_primitives::map::HashSet; use alloy_provider::{ext::AdminApi, ProviderBuilder}; use futures::StreamExt; -use reth_chainspec::MAINNET; +use reth_chainspec::{MAINNET, SEPOLIA}; use reth_discv4::Discv4Config; use reth_eth_wire::{DisconnectReason, EthNetworkPrimitives, 
HeadersDirection}; use reth_net_banlist::BanList; @@ -856,3 +856,41 @@ async fn test_disconnect_then_connect() { let peer = listener0.next_session_established().await.unwrap(); assert_eq!(peer, *handle1.peer_id()); } + +#[tokio::test(flavor = "multi_thread")] +async fn test_connect_peer_in_different_network_should_fail() { + reth_tracing::init_test_tracing(); + + // peer in mainnet. + let peer = new_random_peer(10, vec![]).await; + let peer_handle = peer.handle().clone(); + tokio::task::spawn(peer); + + // peer in sepolia. + let secret_key = SecretKey::new(&mut rand::thread_rng()); + // If the remote disconnect first, then we would not get a fatal protocol error. So set + // max_backoff_count to 0 to speed up the removal of the peer. + let peers_config = PeersConfig::default().with_max_backoff_count(0); + let config = NetworkConfigBuilder::eth(secret_key) + .listener_port(0) + .disable_discovery() + .peer_config(peers_config) + .build_with_noop_provider(SEPOLIA.clone()); + + let network = NetworkManager::new(config).await.unwrap(); + let handle = network.handle().clone(); + tokio::task::spawn(network); + + // create networkeventstream to get the next session event easily. 
+ let events = handle.event_listener(); + + let mut event_stream = NetworkEventStream::new(events); + + handle.add_peer(*peer_handle.peer_id(), peer_handle.local_addr()); + + let added_peer_id = event_stream.peer_added().await.unwrap(); + assert_eq!(added_peer_id, *peer_handle.peer_id()); + + let removed_peer_id = event_stream.peer_removed().await.unwrap(); + assert_eq!(removed_peer_id, *peer_handle.peer_id()); +} diff --git a/crates/net/network/tests/it/session.rs b/crates/net/network/tests/it/session.rs index 53ab457eb0ce..5ab305e5746c 100644 --- a/crates/net/network/tests/it/session.rs +++ b/crates/net/network/tests/it/session.rs @@ -3,7 +3,7 @@ use futures::StreamExt; use reth_eth_wire::EthVersion; use reth_network::{ - test_utils::{PeerConfig, Testnet}, + test_utils::{NetworkEventStream, PeerConfig, Testnet}, NetworkEvent, NetworkEventListenerProvider, }; use reth_network_api::{ @@ -86,3 +86,40 @@ async fn test_session_established_with_different_capability() { handle.terminate().await; } + +#[tokio::test(flavor = "multi_thread")] +async fn test_capability_version_mismatch() { + reth_tracing::init_test_tracing(); + + let mut net = Testnet::create(0).await; + + let p0 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth66.into())); + net.add_peer_with_config(p0).await.unwrap(); + + let p1 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth67.into())); + net.add_peer_with_config(p1).await.unwrap(); + + net.for_each(|peer| assert_eq!(0, peer.num_peers())); + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); + let handle1 = handles.next().unwrap(); + drop(handles); + + let handle = net.spawn(); + + let events = handle0.event_listener(); + + let mut event_stream = NetworkEventStream::new(events); + + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + + let added_peer_id = event_stream.peer_added().await.unwrap(); + assert_eq!(added_peer_id, *handle1.peer_id()); + + // peer with 
mismatched capability version should fail to connect and be removed. + let removed_peer_id = event_stream.peer_removed().await.unwrap(); + assert_eq!(removed_peer_id, *handle1.peer_id()); + + handle.terminate().await; +} diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index eb49c24f4b44..da208d48b8ae 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-ethereum-primitives.workspace = true reth-primitives-traits.workspace = true reth-eth-wire-types.workspace = true reth-consensus.workspace = true @@ -48,12 +48,13 @@ test-utils = [ "reth-consensus/test-utils", "parking_lot", "reth-network-types/test-utils", - "reth-primitives/test-utils", + "reth-ethereum-primitives/test-utils", "reth-primitives-traits/test-utils", + "alloy-primitives/rand", ] std = [ "reth-consensus/std", - "reth-primitives/std", + "reth-ethereum-primitives/std", "alloy-eips/std", "alloy-primitives/std", "reth-primitives-traits/std", diff --git a/crates/net/p2p/src/bodies/client.rs b/crates/net/p2p/src/bodies/client.rs index b31954ff1a00..c97b9ab5385e 100644 --- a/crates/net/p2p/src/bodies/client.rs +++ b/crates/net/p2p/src/bodies/client.rs @@ -9,7 +9,7 @@ use futures::{Future, FutureExt}; use reth_primitives_traits::BlockBody; /// The bodies future type -pub type BodiesFut = +pub type BodiesFut = Pin>> + Send + Sync>>; /// A client capable of downloading block bodies. 
diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index d53ca32eb339..20287a4b4502 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -1,7 +1,6 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, U256}; -use reth_primitives::{SealedBlock, SealedHeader}; -use reth_primitives_traits::{Block, InMemorySize}; +use reth_primitives_traits::{Block, InMemorySize, SealedBlock, SealedHeader}; /// The block response #[derive(PartialEq, Eq, Debug, Clone)] pub enum BlockResponse { diff --git a/crates/net/p2p/src/error.rs b/crates/net/p2p/src/error.rs index db765a9ab41b..d650763b4eb4 100644 --- a/crates/net/p2p/src/error.rs +++ b/crates/net/p2p/src/error.rs @@ -8,7 +8,7 @@ use derive_more::{Display, Error}; use reth_consensus::ConsensusError; use reth_network_peers::WithPeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{GotExpected, GotExpectedBoxed}; +use reth_primitives_traits::{GotExpected, GotExpectedBoxed}; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; use tokio::sync::{mpsc, oneshot}; diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index c8b5154cd40e..f8c09dbc7196 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -10,7 +10,7 @@ use alloy_primitives::{Sealable, B256}; use reth_consensus::{Consensus, ConsensusError}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::WithPeerId; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives_traits::{SealedBlock, SealedHeader}; use std::{ cmp::Reverse, collections::{HashMap, VecDeque}, @@ -644,7 +644,7 @@ enum RangeResponseResult { #[cfg(test)] mod tests { - use reth_primitives::BlockBody; + use reth_ethereum_primitives::BlockBody; use super::*; use crate::test_utils::TestFullBlockClient; diff --git a/crates/net/p2p/src/headers/downloader.rs 
b/crates/net/p2p/src/headers/downloader.rs index 1dc2f691af38..e2bf5891a374 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -23,7 +23,7 @@ pub trait HeaderDownloader: /// The header type being downloaded. type Header: Sealable + Debug + Send + Sync + Unpin + 'static; - /// Updates the gap to sync which ranges from local head to the sync target + /// Updates the gap to sync which ranges from local head to the sync target. /// /// See also [`HeaderDownloader::update_sync_target`] and /// [`HeaderDownloader::update_local_head`] @@ -35,7 +35,7 @@ pub trait HeaderDownloader: /// Updates the block number of the local database fn update_local_head(&mut self, head: SealedHeader); - /// Updates the target we want to sync to + /// Updates the target we want to sync to. fn update_sync_target(&mut self, target: SyncTarget); /// Sets the headers batch size that the Stream should return. diff --git a/crates/net/p2p/src/headers/error.rs b/crates/net/p2p/src/headers/error.rs index 5adf016c7d11..bc9b09194c50 100644 --- a/crates/net/p2p/src/headers/error.rs +++ b/crates/net/p2p/src/headers/error.rs @@ -1,7 +1,7 @@ use alloy_primitives::Sealable; use derive_more::{Display, Error}; use reth_consensus::ConsensusError; -use reth_primitives::SealedHeader; +use reth_primitives_traits::SealedHeader; /// Header downloader result pub type HeadersDownloaderResult = Result>; diff --git a/crates/net/p2p/src/lib.rs b/crates/net/p2p/src/lib.rs index bef537bdcf3d..0c5e1e326465 100644 --- a/crates/net/p2p/src/lib.rs +++ b/crates/net/p2p/src/lib.rs @@ -61,6 +61,6 @@ pub trait BlockClient: } /// The [`BlockClient`] providing Ethereum block parts. 
-pub trait EthBlockClient: BlockClient {} +pub trait EthBlockClient: BlockClient {} -impl EthBlockClient for T where T: BlockClient {} +impl EthBlockClient for T where T: BlockClient {} diff --git a/crates/net/p2p/src/sync.rs b/crates/net/p2p/src/sync.rs index 94d40dac0a3f..c7c43befc2ad 100644 --- a/crates/net/p2p/src/sync.rs +++ b/crates/net/p2p/src/sync.rs @@ -1,6 +1,6 @@ //! Traits used when interacting with the sync status of the network. -use reth_primitives::Head; +use alloy_eips::eip2124::Head; /// A type that provides information about whether the node is currently syncing and the network is /// currently serving syncing related requests. diff --git a/crates/net/p2p/src/test_utils/bodies.rs b/crates/net/p2p/src/test_utils/bodies.rs index a51ca1ea07fd..7570756d0fd2 100644 --- a/crates/net/p2p/src/test_utils/bodies.rs +++ b/crates/net/p2p/src/test_utils/bodies.rs @@ -6,8 +6,8 @@ use crate::{ }; use alloy_primitives::B256; use futures::FutureExt; +use reth_ethereum_primitives::BlockBody; use reth_network_peers::PeerId; -use reth_primitives::BlockBody; use std::fmt::{Debug, Formatter}; use tokio::sync::oneshot; diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs index 2165ddbf56bf..60bcaad4eb0e 100644 --- a/crates/net/p2p/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -11,8 +11,9 @@ use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::B256; use parking_lot::Mutex; use reth_eth_wire_types::HeadersDirection; +use reth_ethereum_primitives::{Block, BlockBody}; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; +use reth_primitives_traits::{SealedBlock, SealedHeader}; use std::{collections::HashMap, sync::Arc}; /// A headers+bodies client implementation that does nothing. @@ -131,7 +132,7 @@ impl TestFullBlockClient { } /// Get the block with the highest block number. 
- pub fn highest_block(&self) -> Option { + pub fn highest_block(&self) -> Option> { self.headers.lock().iter().max_by_key(|(_, header)| header.number).and_then( |(hash, header)| { self.bodies.lock().get(hash).map(|body| { @@ -246,5 +247,5 @@ impl BodiesClient for TestFullBlockClient { } impl BlockClient for TestFullBlockClient { - type Block = reth_primitives::Block; + type Block = reth_ethereum_primitives::Block; } diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index 15adc3bedef8..3ac441dec5bd 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -15,7 +15,7 @@ use futures::{Future, FutureExt, Stream, StreamExt}; use reth_consensus::{test_utils::TestConsensus, HeaderValidator}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::SealedHeader; +use reth_primitives_traits::SealedHeader; use std::{ fmt, pin::Pin, diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index f63f9a42c03a..59c477e04634 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -12,18 +12,21 @@ workspace = true [dependencies] # reth +reth-basic-payload-builder.workspace = true reth-db-api.workspace = true reth-consensus.workspace = true reth-evm.workspace = true reth-provider.workspace = true reth-engine-primitives.workspace = true reth-transaction-pool.workspace = true +reth-payload-builder.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-network-api.workspace = true reth-node-types.workspace = true reth-node-core.workspace = true +reth-tokio-util.workspace = true alloy-rpc-types-engine.workspace = true diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 088729dd8f50..4ef4dccc1bbe 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,16 +1,19 @@ //! 
Traits for configuring a node. +use crate::PayloadTypes; use alloy_rpc_types_engine::JwtSecret; +use reth_basic_payload_builder::PayloadBuilder; use reth_consensus::{ConsensusError, FullConsensus}; use reth_db_api::{database_metrics::DatabaseMetrics, Database}; -use reth_engine_primitives::BeaconConsensusEngineHandle; +use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconConsensusEngineHandle}; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvmFor}; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine, TxTy}; -use reth_payload_builder_primitives::PayloadBuilder; +use reth_payload_builder::PayloadBuilderHandle; use reth_provider::FullProvider; use reth_tasks::TaskExecutor; +use reth_tokio_util::EventSender; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{future::Future, marker::PhantomData}; @@ -42,6 +45,23 @@ where type Provider = Provider; } +/// Helper trait to bound [`PayloadBuilder`] to the node's engine types. +pub trait PayloadBuilderFor: + PayloadBuilder< + Attributes = ::PayloadBuilderAttributes, + BuiltPayload = ::BuiltPayload, +> +{ +} + +impl PayloadBuilderFor for T where + T: PayloadBuilder< + Attributes = ::PayloadBuilderAttributes, + BuiltPayload = ::BuiltPayload, + > +{ +} + /// Encapsulates all types and components of the node. pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// The transaction pool of the node. @@ -63,8 +83,7 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Network: FullNetwork; /// Builds new blocks. - type PayloadBuilder: PayloadBuilder::Engine> - + Clone; + type PayloadBuilder: PayloadBuilderFor; /// Returns the transaction pool of the node. 
fn pool(&self) -> &Self::Pool; @@ -81,9 +100,15 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// Returns the handle to the network fn network(&self) -> &Self::Network; - /// Returns the handle to the payload builder service. + /// Returns the configured payload builder. fn payload_builder(&self) -> &Self::PayloadBuilder; + /// Returns the handle to the payload builder service handling payload building requests from + /// the engine. + fn payload_builder_handle( + &self, + ) -> &PayloadBuilderHandle<::Engine>; + /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; @@ -101,6 +126,8 @@ pub struct AddOnsContext<'a, N: FullNodeComponents> { /// Handle to the beacon consensus engine. pub beacon_engine_handle: BeaconConsensusEngineHandle<::Engine>, + /// Notification channel for engine API events + pub engine_events: EventSender::Primitives>>, /// JWT secret for the node. pub jwt_secret: JwtSecret, } diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index a731aafeb0cb..2775d722bf71 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -54,6 +54,7 @@ reth-tasks.workspace = true reth-tokio-util.workspace = true reth-tracing.workspace = true reth-transaction-pool.workspace = true +reth-basic-payload-builder.workspace = true ## ethereum alloy-consensus.workspace = true diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 235e76a25584..10d3f47f9c22 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -565,6 +565,7 @@ where .with_persistence_threshold(builder.config.engine.persistence_threshold) .with_memory_block_buffer_target(builder.config.engine.memory_block_buffer_target) .with_state_root_task(builder.config.engine.state_root_task_enabled) + .with_caching_and_prewarming(builder.config.engine.caching_and_prewarming_enabled) .with_always_compare_trie_updates( 
builder.config.engine.state_root_task_compare_updates, ); diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 0587003480c9..a07caccbd882 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -119,6 +119,14 @@ impl> FullNodeComponents for NodeAdapter< self.components.payload_builder() } + fn payload_builder_handle( + &self, + ) -> &reth_payload_builder::PayloadBuilderHandle< + ::Engine, + > { + self.components.payload_builder_handle() + } + fn provider(&self) -> &Self::Provider { &self.provider } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index a9576d8c1882..29e4e866e97c 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -10,11 +10,12 @@ use crate::{ use reth_consensus::{ConsensusError, FullConsensus}; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvmFor}; use reth_network::NetworkPrimitives; -use reth_node_api::{BlockTy, BodyTy, HeaderTy, NodeTypesWithEngine, PrimitivesTy, TxTy}; -use reth_payload_builder::PayloadBuilderHandle; +use reth_node_api::{BlockTy, BodyTy, HeaderTy, PrimitivesTy, TxTy}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{future::Future, marker::PhantomData}; +use super::PayloadBuilderFor; + /// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. /// /// This type is stateful and captures the configuration of the node's components. 
@@ -324,6 +325,7 @@ where ExecB::EVM, ExecB::Executor, ConsB::Consensus, + PayloadB::PayloadBuilder, >; async fn build_components( @@ -332,7 +334,7 @@ where ) -> eyre::Result { let Self { pool_builder, - payload_builder, + payload_builder: payload_builder_builder, network_builder, executor_builder: evm_builder, consensus_builder, @@ -342,7 +344,10 @@ where let (evm_config, executor) = evm_builder.build_evm(context).await?; let pool = pool_builder.build_pool(context).await?; let network = network_builder.build_network(context, pool.clone()).await?; - let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; + let payload_builder = + payload_builder_builder.build_payload_builder(context, pool.clone()).await?; + let payload_builder_handle = payload_builder_builder + .spawn_payload_builder_service(context, payload_builder.clone())?; let consensus = consensus_builder.build_consensus(context).await?; Ok(Components { @@ -350,6 +355,7 @@ where evm_config, network, payload_builder, + payload_builder_handle, executor, consensus, }) @@ -380,10 +386,7 @@ impl Default for ComponentsBuilder<(), (), (), (), (), ()> { /// A type that's responsible for building the components of the node. pub trait NodeComponentsBuilder: Send { /// The components for the node with the given types - type Components: NodeComponents< - Node, - PayloadBuilder = PayloadBuilderHandle<::Engine>, - >; + type Components: NodeComponents; /// Consumes the type and returns the created components. 
fn build_components( @@ -392,7 +395,7 @@ pub trait NodeComponentsBuilder: Send { ) -> impl Future> + Send; } -impl NodeComponentsBuilder for F +impl NodeComponentsBuilder for F where N: NetworkPrimitives< BlockHeader = HeaderTy, @@ -401,7 +404,8 @@ where >, Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future>> + Send, + Fut: Future>> + + Send, Pool: TransactionPool>> + Unpin + 'static, @@ -409,8 +413,9 @@ where Executor: BlockExecutorProvider>, Cons: FullConsensus, Error = ConsensusError> + Clone + Unpin + 'static, + Payload: PayloadBuilderFor + Unpin + 'static, { - type Components = Components; + type Components = Components; fn build_components( self, diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 538427289def..e2ebdc0589af 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -21,6 +21,7 @@ pub use network::*; pub use payload::*; pub use pool::*; use reth_network_p2p::BlockClient; +use reth_payload_builder::PayloadBuilderHandle; use crate::{ConfigureEvm, FullNodeTypes}; use reth_consensus::{ConsensusError, FullConsensus}; @@ -28,9 +29,9 @@ use reth_evm::{execute::BlockExecutorProvider, ConfigureEvmFor}; use reth_network::{NetworkHandle, NetworkPrimitives}; use reth_network_api::FullNetwork; use reth_node_api::{ - BlockTy, BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine, PayloadBuilder, PrimitivesTy, TxTy, + BlockTy, BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine, PayloadBuilderFor, PrimitivesTy, + TxTy, }; -use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::{PoolTransaction, TransactionPool}; /// An abstraction over the components of a node, consisting of: @@ -58,8 +59,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati type Network: FullNetwork>>; /// Builds new blocks. 
- type PayloadBuilder: PayloadBuilder::Engine> - + Clone; + type PayloadBuilder: PayloadBuilderFor + Clone + Unpin + 'static; /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -76,15 +76,29 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati /// Returns the handle to the network fn network(&self) -> &Self::Network; - /// Returns the handle to the payload builder service. + /// Returns the payload builder that knows how to build blocks. fn payload_builder(&self) -> &Self::PayloadBuilder; + + /// Returns the handle to the payload builder service handling payload building requests from + /// the engine. + fn payload_builder_handle( + &self, + ) -> &PayloadBuilderHandle<::Engine>; } /// All the components of the node. /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct Components { +pub struct Components< + Node: FullNodeTypes, + N: NetworkPrimitives, + Pool, + EVM, + Executor, + Consensus, + Payload, +> { /// The transaction pool of the node. pub transaction_pool: Pool, /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. @@ -95,19 +109,21 @@ pub struct Components, + /// The payload builder. + pub payload_builder: Payload, /// The handle to the payload builder service. 
- pub payload_builder: PayloadBuilderHandle<::Engine>, + pub payload_builder_handle: PayloadBuilderHandle<::Engine>, } -impl NodeComponents - for Components +impl NodeComponents + for Components where + Node: FullNodeTypes, N: NetworkPrimitives< BlockHeader = HeaderTy, BlockBody = BodyTy, Block = BlockTy, >, - Node: FullNodeTypes, Pool: TransactionPool>> + Unpin + 'static, @@ -115,13 +131,14 @@ where Executor: BlockExecutorProvider>, Cons: FullConsensus, Error = ConsensusError> + Clone + Unpin + 'static, + Payload: PayloadBuilderFor + Clone + Unpin + 'static, { type Pool = Pool; type Evm = EVM; type Executor = Executor; type Consensus = Cons; type Network = NetworkHandle; - type PayloadBuilder = PayloadBuilderHandle<::Engine>; + type PayloadBuilder = Payload; fn pool(&self) -> &Self::Pool { &self.transaction_pool @@ -146,16 +163,24 @@ where fn payload_builder(&self) -> &Self::PayloadBuilder { &self.payload_builder } + + fn payload_builder_handle( + &self, + ) -> &PayloadBuilderHandle<::Engine> { + &self.payload_builder_handle + } } -impl Clone for Components +impl Clone + for Components where N: NetworkPrimitives, Node: FullNodeTypes, Pool: TransactionPool, - EVM: ConfigureEvm
, Transaction = TxTy>, + EVM: ConfigureEvm, Executor: BlockExecutorProvider, Cons: Clone, + Payload: Clone, { fn clone(&self) -> Self { Self { @@ -165,6 +190,7 @@ where consensus: self.consensus.clone(), network: self.network.clone(), payload_builder: self.payload_builder.clone(), + payload_builder_handle: self.payload_builder_handle.clone(), } } } diff --git a/crates/node/builder/src/components/payload.rs b/crates/node/builder/src/components/payload.rs index 0efad9ba5c80..5fc7102cc70c 100644 --- a/crates/node/builder/src/components/payload.rs +++ b/crates/node/builder/src/components/payload.rs @@ -1,45 +1,73 @@ //! Payload service component for the node builder. -use std::future::Future; - -use reth_node_api::NodeTypesWithEngine; -use reth_payload_builder::PayloadBuilderHandle; +use crate::{BuilderContext, FullNodeTypes, NodeTypesWithEngine}; +use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_chain_state::CanonStateSubscriptions; +use reth_node_api::PayloadBuilderFor; +use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_transaction_pool::TransactionPool; - -use crate::{BuilderContext, FullNodeTypes}; +use std::future::Future; /// A type that knows how to spawn the payload service. -pub trait PayloadServiceBuilder: Send { +pub trait PayloadServiceBuilder: Send + Sized { + /// Payload builder implementation. + type PayloadBuilder: PayloadBuilderFor + Unpin + 'static; + /// Spawns the payload service and returns the handle to it. /// /// The [`BuilderContext`] is provided to allow access to the node's configuration. - fn spawn_payload_service( - self, + fn build_payload_builder( + &self, ctx: &BuilderContext, pool: Pool, - ) -> impl Future< - Output = eyre::Result::Engine>>, - > + Send; + ) -> impl Future> + Send; + + /// Spawns the [`PayloadBuilderService`] and returns the handle to it for use by the engine. 
+ /// + /// We provide default implementation via [`BasicPayloadJobGenerator`] but it can be overridden + /// for custom job orchestration logic, + fn spawn_payload_builder_service( + self, + ctx: &BuilderContext, + payload_builder: Self::PayloadBuilder, + ) -> eyre::Result::Engine>> { + let conf = ctx.config().builder.clone(); + + let payload_job_config = BasicPayloadJobGeneratorConfig::default() + .interval(conf.interval) + .deadline(conf.deadline) + .max_payload_tasks(conf.max_payload_tasks); + + let payload_generator = BasicPayloadJobGenerator::with_builder( + ctx.provider().clone(), + ctx.task_executor().clone(), + payload_job_config, + payload_builder, + ); + let (payload_service, payload_service_handle) = + PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream()); + + ctx.task_executor().spawn_critical("payload builder service", Box::pin(payload_service)); + + Ok(payload_service_handle) + } } -impl PayloadServiceBuilder for F +impl PayloadServiceBuilder for F where Node: FullNodeTypes, Pool: TransactionPool, F: Fn(&BuilderContext, Pool) -> Fut + Send, - Fut: Future< - Output = eyre::Result< - PayloadBuilderHandle<::Engine>, - >, - > + Send, + Fut: Future> + Send, + Builder: PayloadBuilderFor + Unpin + 'static, { - fn spawn_payload_service( - self, + type PayloadBuilder = Builder; + + fn build_payload_builder( + &self, ctx: &BuilderContext, pool: Pool, - ) -> impl Future< - Output = eyre::Result::Engine>>, - > + Send { + ) -> impl Future> + Send { self(ctx, pool) } } diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 9622cca0b6bc..3e699b913632 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -17,7 +17,7 @@ use reth_network::{NetworkSyncUpdater, SyncState}; use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ BeaconConsensusEngineHandle, BuiltPayload, FullNodeTypes, NodeTypesWithDBAdapter, - 
NodeTypesWithEngine, PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, + NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -198,6 +198,7 @@ where info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); let event_sender = EventSender::default(); + let beacon_engine_handle = BeaconConsensusEngineHandle::new(consensus_engine_tx.clone()); // extract the jwt secret from the args if possible @@ -208,6 +209,7 @@ where config: ctx.node_config(), beacon_engine_handle: beacon_engine_handle.clone(), jwt_secret, + engine_events: event_sender.clone(), }; let engine_payload_validator = add_ons.engine_validator(&add_ons_ctx).await?; @@ -218,7 +220,7 @@ where ctx.provider_factory().clone(), ctx.blockchain_db().clone(), pruner, - ctx.components().payload_builder().clone(), + ctx.components().payload_builder_handle().clone(), engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, @@ -227,6 +229,7 @@ where Box::pin(consensus_engine_stream), ctx.dev_mining_mode(ctx.components().pool()), LocalPayloadAttributesBuilder::new(ctx.chain_spec()), + ctx.components().evm_config().clone(), ); Either::Left(eth_service) @@ -242,11 +245,12 @@ where ctx.provider_factory().clone(), ctx.blockchain_db().clone(), pruner, - ctx.components().payload_builder().clone(), + ctx.components().payload_builder_handle().clone(), engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), + ctx.components().evm_config().clone(), ); Either::Right(eth_service) @@ -268,6 +272,7 @@ where pruner_events.map(Into::into), static_file_producer_events.map(Into::into), ); + ctx.task_executor().spawn_critical( "events task", node::handle_events( @@ -277,7 +282,7 @@ where ), ); - let RpcHandle { rpc_server_handles, rpc_registry } = + let RpcHandle { rpc_server_handles, rpc_registry, engine_events, beacon_engine_handle } = 
add_ons.launch_add_ons(add_ons_ctx).await?; // TODO: migrate to devmode with https://github.com/paradigmxyz/reth/issues/10104 @@ -315,7 +320,7 @@ where let network_handle = ctx.components().network().clone(); let mut built_payloads = ctx .components() - .payload_builder() + .payload_builder_handle() .subscribe() .await .map_err(|e| eyre::eyre!("Failed to subscribe to payload builder events: {:?}", e))? @@ -397,10 +402,16 @@ where network: ctx.components().network().clone(), provider: ctx.node_adapter().provider.clone(), payload_builder: ctx.components().payload_builder().clone(), + payload_builder_handle: ctx.components().payload_builder_handle().clone(), task_executor: ctx.task_executor().clone(), config: ctx.node_config().clone(), data_dir: ctx.data_dir().clone(), - add_ons_handle: RpcHandle { rpc_server_handles, rpc_registry }, + add_ons_handle: RpcHandle { + rpc_server_handles, + rpc_registry, + engine_events, + beacon_engine_handle, + }, }; // Notify on node started on_node_started.on_event(FullNode::clone(&full_node))?; diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index ce7d12fee3d3..d2e6ae8ae49f 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -1,5 +1,6 @@ // re-export the node api types pub use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}; +use reth_payload_builder::PayloadBuilderHandle; use std::{ marker::PhantomData, @@ -12,7 +13,6 @@ use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, }; -use reth_payload_builder::PayloadBuilderHandle; use reth_provider::ChainSpecProvider; use reth_rpc_api::EngineApiClient; use reth_rpc_builder::{auth::AuthServerHandle, RpcServerHandle}; @@ -117,8 +117,10 @@ pub struct FullNode> { pub network: Node::Network, /// Provider to interact with the node's database pub provider: Node::Provider, + /// Node's configured payload builder. 
+ pub payload_builder: Node::PayloadBuilder, /// Handle to the node's payload builder service. - pub payload_builder: PayloadBuilderHandle<::Engine>, + pub payload_builder_handle: PayloadBuilderHandle<::Engine>, /// Task executor for the node. pub task_executor: TaskExecutor, /// The initial node config. @@ -138,6 +140,7 @@ impl> Clone for FullNode { /// Handles to launched servers. pub rpc_server_handles: RethRpcServerHandles, /// Configured RPC modules. pub rpc_registry: RpcRegistry, + /// Notification channel for engine API events + /// + /// Caution: This is a multi-producer, multi-consumer broadcast and allows grants access to + /// dispatch events + pub engine_events: + EventSender::Primitives>>, + /// Handle to the beacon consensus engine. + pub beacon_engine_handle: + BeaconConsensusEngineHandle<::Engine>, +} + +impl Clone for RpcHandle { + fn clone(&self) -> Self { + Self { + rpc_server_handles: self.rpc_server_handles.clone(), + rpc_registry: self.rpc_registry.clone(), + engine_events: self.engine_events.clone(), + beacon_engine_handle: self.beacon_engine_handle.clone(), + } + } } impl Deref for RpcHandle { @@ -401,7 +418,9 @@ where impl RpcAddOns where - N: FullNodeComponents, + N: FullNodeComponents< + Types: NodeTypesWithEngine>, + >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners @@ -422,7 +441,7 @@ where let Self { eth_api_builder, engine_validator_builder, hooks, _pd: _ } = self; let engine_validator = engine_validator_builder.build(&ctx).await?; - let AddOnsContext { node, config, beacon_engine_handle, jwt_secret } = ctx; + let AddOnsContext { node, config, beacon_engine_handle, jwt_secret, engine_events } = ctx; let client = ClientVersionV1 { code: CLIENT_CODE, @@ -434,8 +453,8 @@ where let engine_api = EngineApi::new( node.provider().clone(), config.chain.clone(), - beacon_engine_handle, - PayloadStore::new(node.payload_builder().clone()), + beacon_engine_handle.clone(), + PayloadStore::new(node.payload_builder_handle().clone()), 
node.pool().clone(), Box::new(node.task_executor().clone()), client, @@ -456,12 +475,7 @@ where .with_evm_config(node.evm_config().clone()) .with_block_executor(node.block_executor().clone()) .with_consensus(node.consensus().clone()) - .build_with_auth_server( - module_config, - engine_api, - eth_api_builder, - Arc::new(engine_validator), - ); + .build_with_auth_server(module_config, engine_api, eth_api_builder); // in dev mode we generate 20 random dev-signer accounts if config.dev.dev { @@ -522,13 +536,20 @@ where on_rpc_started.on_rpc_started(ctx, handles.clone())?; - Ok(RpcHandle { rpc_server_handles: handles, rpc_registry: registry }) + Ok(RpcHandle { + rpc_server_handles: handles, + rpc_registry: registry, + engine_events, + beacon_engine_handle, + }) } } impl NodeAddOns for RpcAddOns where - N: FullNodeComponents, + N: FullNodeComponents< + Types: NodeTypesWithEngine>, + >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners @@ -572,12 +593,8 @@ pub trait EthApiBuilder: 'static { fn build(ctx: &EthApiBuilderCtx) -> Self; } -impl< - N: FullNodeComponents< - Provider: ChainSpecProvider, - Types: NodeTypes, - >, - > EthApiBuilder for EthApi +impl> EthApiBuilder + for EthApi { fn build(ctx: &EthApiBuilderCtx) -> Self { Self::with_spawner(ctx) diff --git a/crates/node/core/src/args/rpc_state_cache.rs b/crates/node/core/src/args/rpc_state_cache.rs index b140d47b5fed..5c74813ed836 100644 --- a/crates/node/core/src/args/rpc_state_cache.rs +++ b/crates/node/core/src/args/rpc_state_cache.rs @@ -24,7 +24,8 @@ pub struct RpcStateCacheArgs { /// Max number of headers in cache. 
#[arg( - long = "rpc-cache.max-envs", + long = "rpc-cache.max-headers", + alias = "rpc-cache.max-envs", default_value_t = DEFAULT_HEADER_CACHE_MAX_LEN, )] pub max_headers: u32, diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 2b69d6317052..94e12135a233 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -38,6 +38,13 @@ pub struct TxPoolArgs { #[arg(long = "txpool.queued-max-size", alias = "txpool.queued_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] pub queued_max_size: usize, + /// Max number of transaction in the blobpool + #[arg(long = "txpool.blobpool-max-count", alias = "txpool.blobpool_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + pub blobpool_max_count: usize, + /// Max size of the blobpool in megabytes. + #[arg(long = "txpool.blobpool-max-size", alias = "txpool.blobpool_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + pub blobpool_max_size: usize, + /// Max number of executable transaction slots guaranteed per account #[arg(long = "txpool.max-account-slots", alias = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] pub max_account_slots: usize, @@ -102,6 +109,8 @@ impl Default for TxPoolArgs { basefee_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, queued_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, queued_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, + blobpool_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + blobpool_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, price_bump: DEFAULT_PRICE_BUMP, minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, @@ -142,8 +151,8 @@ impl RethTransactionPoolConfig for TxPoolArgs { max_size: self.queued_max_size.saturating_mul(1024 * 1024), }, blob_limit: SubPoolLimit { - max_txs: self.queued_max_count, - max_size: self.queued_max_size.saturating_mul(1024 * 1024), + max_txs: self.blobpool_max_count, + 
max_size: self.blobpool_max_size.saturating_mul(1024 * 1024), }, max_account_slots: self.max_account_slots, price_bumps: PriceBumpConfig { diff --git a/crates/node/core/src/cli/config.rs b/crates/node/core/src/cli/config.rs index 370c6e67dd2a..6c34defb4396 100644 --- a/crates/node/core/src/cli/config.rs +++ b/crates/node/core/src/cli/config.rs @@ -55,7 +55,7 @@ impl RethNetworkConfig for reth_network::NetworkManager } fn secret_key(&self) -> secp256k1::SecretKey { - self.secret_key() + Self::secret_key(self) } } diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 00817b6a8834..9c3ba2fc4dad 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -250,7 +250,8 @@ impl NodeState { } } } - BeaconConsensusEngineEvent::CanonicalBlockAdded(block, elapsed) => { + BeaconConsensusEngineEvent::CanonicalBlockAdded(executed, elapsed) => { + let block = executed.sealed_block(); info!( number=block.number(), hash=?block.hash(), @@ -272,7 +273,8 @@ impl NodeState { info!(number=head.number(), hash=?head.hash(), ?elapsed, "Canonical chain committed"); } - BeaconConsensusEngineEvent::ForkBlockAdded(block, elapsed) => { + BeaconConsensusEngineEvent::ForkBlockAdded(executed, elapsed) => { + let block = executed.sealed_block(); info!(number=block.number(), hash=?block.hash(), ?elapsed, "Block added to fork chain"); } } diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 2095bbd53c36..3387d06c25af 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -19,7 +19,7 @@ mod op_sepolia; use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; -use alloy_consensus::{BlockHeader, Header}; +use alloy_consensus::Header; use alloy_eips::eip7840::BlobParams; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; @@ -28,7 +28,6 @@ pub use base_sepolia::BASE_SEPOLIA; use derive_more::{Constructor, Deref, Display, From, Into}; pub use 
dev::OP_DEV; pub use op::OP_MAINNET; -use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; pub use op_sepolia::OP_SEPOLIA; use reth_chainspec::{ BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, EthChainSpec, @@ -195,55 +194,6 @@ impl OpChainSpec { pub fn from_genesis(genesis: Genesis) -> Self { genesis.into() } - - /// Extracts the Holocene 1599 parameters from the encoded extra data from the parent header. - /// - /// Caution: Caller must ensure that holocene is active in the parent header. - /// - /// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) - pub fn decode_holocene_base_fee( - &self, - parent: &H, - timestamp: u64, - ) -> Result { - let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; - let base_fee = if elasticity == 0 && denominator == 0 { - parent - .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp)) - .unwrap_or_default() - } else { - let base_fee_params = BaseFeeParams::new(denominator as u128, elasticity as u128); - parent.next_block_base_fee(base_fee_params).unwrap_or_default() - }; - Ok(base_fee) - } - - /// Read from parent to determine the base fee for the next block - /// - /// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) - pub fn next_block_base_fee( - &self, - parent: &H, - timestamp: u64, - ) -> Result { - // > if Holocene is active in parent_header.timestamp, then the parameters from - // > parent_header.extraData are used. - let is_holocene_activated = - self.inner.is_fork_active_at_timestamp(OpHardfork::Holocene, parent.timestamp()); - - // If we are in the Holocene, we need to use the base fee params - // from the parent block's extra data. 
- // Else, use the base fee params (default values) from chainspec - if is_holocene_activated { - Ok(U256::from(self.decode_holocene_base_fee(parent, timestamp)?)) - } else { - Ok(U256::from( - parent - .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp)) - .unwrap_or_default(), - )) - } - } } impl EthChainSpec for OpChainSpec { @@ -483,10 +433,8 @@ impl OpGenesisInfo { #[cfg(test)] mod tests { - use std::sync::Arc; - use alloy_genesis::{ChainConfig, Genesis}; - use alloy_primitives::{b256, hex, Bytes}; + use alloy_primitives::b256; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head}; use reth_optimism_forks::{OpHardfork, OpHardforks}; @@ -1026,105 +974,6 @@ mod tests { assert_eq!(expected_hardforks.len(), hardforks.len()); } - #[test] - fn test_get_base_fee_pre_holocene() { - let op_chain_spec = &BASE_SEPOLIA; - let parent = Header { - base_fee_per_gas: Some(1), - gas_used: 15763614, - gas_limit: 144000000, - ..Default::default() - }; - let base_fee = op_chain_spec.next_block_base_fee(&parent, 0); - assert_eq!( - base_fee.unwrap(), - U256::from( - parent - .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) - .unwrap_or_default() - ) - ); - } - - fn holocene_chainspec() -> Arc { - let mut hardforks = OpHardfork::base_sepolia(); - hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); - Arc::new(OpChainSpec { - inner: ChainSpec { - chain: BASE_SEPOLIA.inner.chain, - genesis: BASE_SEPOLIA.inner.genesis.clone(), - genesis_hash: BASE_SEPOLIA.inner.genesis_hash.clone(), - paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks, - base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(), - prune_delete_limit: 10000, - ..Default::default() - }, - }) - } - - #[test] - fn test_get_base_fee_holocene_extra_data_not_set() { - let op_chain_spec = holocene_chainspec(); - let parent = Header { - 
base_fee_per_gas: Some(1), - gas_used: 15763614, - gas_limit: 144000000, - timestamp: 1800000003, - extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]), - ..Default::default() - }; - let base_fee = op_chain_spec.next_block_base_fee(&parent, 1800000005); - assert_eq!( - base_fee.unwrap(), - U256::from( - parent - .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) - .unwrap_or_default() - ) - ); - } - - #[test] - fn test_get_base_fee_holocene_extra_data_set() { - let op_chain_spec = holocene_chainspec(); - let parent = Header { - base_fee_per_gas: Some(1), - gas_used: 15763614, - gas_limit: 144000000, - extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]), - timestamp: 1800000003, - ..Default::default() - }; - - let base_fee = op_chain_spec.next_block_base_fee(&parent, 1800000005); - assert_eq!( - base_fee.unwrap(), - U256::from( - parent - .next_block_base_fee(BaseFeeParams::new(0x00000008, 0x00000008)) - .unwrap_or_default() - ) - ); - } - - // - #[test] - fn test_get_base_fee_holocene_extra_data_set_base_sepolia() { - let op_chain_spec = BASE_SEPOLIA.clone(); - let parent = Header { - base_fee_per_gas: Some(507), - gas_used: 4847634, - gas_limit: 60000000, - extra_data: hex!("00000000fa0000000a").into(), - timestamp: 1735315544, - ..Default::default() - }; - - let base_fee = op_chain_spec.next_block_base_fee(&parent, 1735315546).unwrap(); - assert_eq!(base_fee, U256::from(507)); - } - #[test] fn json_genesis() { let geth_genesis = r#" diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index d42ab0b068ef..7cf18d048a28 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -30,12 +30,12 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-trie.workspace = true +op-alloy-consensus.workspace = true tracing.workspace = true [dev-dependencies] alloy-primitives.workspace = true -op-alloy-consensus.workspace = 
true reth-optimism-chainspec.workspace = true [features] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index ecb88049cb3d..01ac0b4eeb4c 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -13,6 +13,8 @@ extern crate alloc; +use core::fmt::Debug; + use alloc::sync::Arc; use alloy_consensus::{BlockHeader as _, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{B64, U256}; @@ -26,45 +28,50 @@ use reth_consensus_common::validation::{ validate_body_against_header, validate_cancun_gas, validate_header_base_fee, validate_header_extra_data, validate_header_gas, validate_shanghai_withdrawals, }; -use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardforks; -use reth_optimism_primitives::{OpBlock, OpPrimitives, OpReceipt}; -use reth_primitives::{GotExpected, RecoveredBlock, SealedHeader}; +use reth_optimism_primitives::DepositReceipt; +use reth_primitives::{GotExpected, NodePrimitives, RecoveredBlock, SealedHeader}; mod proof; pub use proof::calculate_receipt_root_no_memo_optimism; use reth_primitives_traits::{Block, BlockBody, BlockHeader, SealedBlock}; mod validation; -pub use validation::validate_block_post_execution; +pub use validation::{ + decode_holocene_base_fee, next_block_base_fee, validate_block_post_execution, +}; /// Optimism consensus implementation. /// /// Provides basic checks as outlined in the execution specs. 
#[derive(Debug, Clone, PartialEq, Eq)] -pub struct OpBeaconConsensus { +pub struct OpBeaconConsensus { /// Configuration - chain_spec: Arc, + chain_spec: Arc, } -impl OpBeaconConsensus { +impl OpBeaconConsensus { /// Create a new instance of [`OpBeaconConsensus`] - pub const fn new(chain_spec: Arc) -> Self { + pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } } -impl FullConsensus for OpBeaconConsensus { +impl> + FullConsensus for OpBeaconConsensus +{ fn validate_block_post_execution( &self, - block: &RecoveredBlock, - input: PostExecutionInput<'_, OpReceipt>, + block: &RecoveredBlock, + input: PostExecutionInput<'_, N::Receipt>, ) -> Result<(), ConsensusError> { validate_block_post_execution(block.header(), &self.chain_spec, input.receipts) } } -impl Consensus for OpBeaconConsensus { +impl Consensus + for OpBeaconConsensus +{ type Error = ConsensusError; fn validate_body_against_header( @@ -106,7 +113,9 @@ impl Consensus for OpBeaconConsensus { } } -impl HeaderValidator for OpBeaconConsensus { +impl HeaderValidator + for OpBeaconConsensus +{ fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validate_header_gas(header.header())?; validate_header_base_fee(header.header(), &self.chain_spec) @@ -130,10 +139,9 @@ impl HeaderValidator for OpBeaconConsensus { if self.chain_spec.is_holocene_active_at_timestamp(parent.timestamp()) { let header_base_fee = header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; - let expected_base_fee = self - .chain_spec - .decode_holocene_base_fee(parent.header(), header.timestamp()) - .map_err(|_| ConsensusError::BaseFeeMissing)?; + let expected_base_fee = + decode_holocene_base_fee(&self.chain_spec, parent.header(), header.timestamp()) + .map_err(|_| ConsensusError::BaseFeeMissing)?; if expected_base_fee != header_base_fee { return Err(ConsensusError::BaseFeeDiff(GotExpected { expected: expected_base_fee, diff --git a/crates/optimism/consensus/src/proof.rs 
b/crates/optimism/consensus/src/proof.rs index 0b1e6c74f9f9..d473539e13f1 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -1,19 +1,17 @@ //! Helper function for Receipt root calculation for Optimism hardforks. use alloc::vec::Vec; -use alloy_consensus::TxReceipt; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_trie::root::ordered_trie_root_with_encoder; -use reth_chainspec::ChainSpec; -use reth_optimism_forks::{OpHardfork, OpHardforks}; -use reth_optimism_primitives::{DepositReceipt, OpReceipt}; +use reth_optimism_forks::OpHardforks; +use reth_optimism_primitives::DepositReceipt; use reth_primitives::ReceiptWithBloom; /// Calculates the receipt root for a header. pub(crate) fn calculate_receipt_root_optimism( receipts: &[ReceiptWithBloom], - chain_spec: &ChainSpec, + chain_spec: impl OpHardforks, timestamp: u64, ) -> B256 { // There is a minor bug in op-geth and op-erigon where in the Regolith hardfork, @@ -21,8 +19,8 @@ pub(crate) fn calculate_receipt_root_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. - if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) + if chain_spec.is_regolith_active_at_timestamp(timestamp) && + !chain_spec.is_canyon_active_at_timestamp(timestamp) { let receipts = receipts .iter() @@ -41,11 +39,11 @@ pub(crate) fn calculate_receipt_root_optimism( ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_2718(buf)) } -/// Calculates the receipt root for a header for the reference type of [`OpReceipt`]. +/// Calculates the receipt root for a header for the reference type of an OP receipt. /// /// NOTE: Prefer calculate receipt root optimism if you have log blooms memoized. 
-pub fn calculate_receipt_root_no_memo_optimism( - receipts: &[OpReceipt], +pub fn calculate_receipt_root_no_memo_optimism( + receipts: &[R], chain_spec: impl OpHardforks, timestamp: u64, ) -> B256 { @@ -61,8 +59,8 @@ pub fn calculate_receipt_root_no_memo_optimism( .iter() .map(|r| { let mut r = (*r).clone(); - if let OpReceipt::Deposit(r) = &mut r { - r.deposit_nonce = None; + if let Some(receipt) = r.as_deposit_receipt_mut() { + receipt.deposit_nonce = None; } r }) @@ -85,6 +83,7 @@ mod tests { use alloy_primitives::{b256, bloom, hex, Address, Bloom, Bytes, Log, LogData}; use op_alloy_consensus::OpDepositReceipt; use reth_optimism_chainspec::BASE_SEPOLIA; + use reth_optimism_primitives::OpReceipt; use reth_primitives::ReceiptWithBloom; /// Tests that the receipt root is computed correctly for the regolith block. diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index 51d0745dd4d0..4343a1b51c82 100644 --- a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -2,8 +2,10 @@ use crate::proof::calculate_receipt_root_optimism; use alloc::vec::Vec; use alloy_consensus::{BlockHeader, TxReceipt}; use alloy_primitives::{Bloom, B256}; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; +use reth_chainspec::{BaseFeeParams, EthChainSpec}; use reth_consensus::ConsensusError; +use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::DepositReceipt; use reth_primitives::{gas_spent_by_transactions, GotExpected}; @@ -13,7 +15,7 @@ use reth_primitives::{gas_spent_by_transactions, GotExpected}; /// - Compares the gas used in the block header to the actual gas usage after execution pub fn validate_block_post_execution( header: impl BlockHeader, - chain_spec: &ChainSpec, + chain_spec: impl OpHardforks, receipts: &[R], ) -> Result<(), ConsensusError> { // Before Byzantium, receipts contained state root 
that would mean that expensive @@ -21,7 +23,7 @@ // transaction This was replaced with is_success flag. // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 if chain_spec.is_byzantium_active_at_block(header.number()) { - if let Err(error) = verify_receipts( + if let Err(error) = verify_receipts_optimism( header.receipts_root(), header.logs_bloom(), receipts, @@ -47,11 +49,11 @@ } /// Verify the calculated receipts root against the expected receipts root. -fn verify_receipts( +fn verify_receipts_optimism( expected_receipts_root: B256, expected_logs_bloom: Bloom, receipts: &[R], - chain_spec: &ChainSpec, + chain_spec: impl OpHardforks, timestamp: u64, ) -> Result<(), ConsensusError> { // Calculate receipts root. @@ -94,3 +96,145 @@ fn compare_receipts_root_and_logs_bloom( Ok(()) } + +/// Extracts the Holocene 1559 parameters from the encoded extra data from the parent header. +/// +/// Caution: Caller must ensure that holocene is active in the parent header. 
+/// +/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) +pub fn decode_holocene_base_fee( + chain_spec: impl EthChainSpec + OpHardforks, + parent: impl BlockHeader, + timestamp: u64, +) -> Result { + let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; + let base_fee_params = if elasticity == 0 && denominator == 0 { + chain_spec.base_fee_params_at_timestamp(timestamp) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128) + }; + + Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default()) +} + +/// Read from parent to determine the base fee for the next block +/// +/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) +pub fn next_block_base_fee( + chain_spec: impl EthChainSpec + OpHardforks, + parent: impl BlockHeader, + timestamp: u64, +) -> Result { + // If we are in the Holocene, we need to use the base fee params + // from the parent block's extra data. + // Else, use the base fee params (default values) from chainspec + if chain_spec.is_holocene_active_at_timestamp(parent.timestamp()) { + Ok(decode_holocene_base_fee(chain_spec, parent, timestamp)?) 
+ } else { + Ok(parent + .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) + .unwrap_or_default()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::Header; + use alloy_primitives::{hex, Bytes, U256}; + use reth_chainspec::{ChainSpec, ForkCondition, Hardfork}; + use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; + use reth_optimism_forks::OpHardfork; + use std::sync::Arc; + + fn holocene_chainspec() -> Arc { + let mut hardforks = OpHardfork::base_sepolia(); + hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + Arc::new(OpChainSpec { + inner: ChainSpec { + chain: BASE_SEPOLIA.inner.chain, + genesis: BASE_SEPOLIA.inner.genesis.clone(), + genesis_hash: BASE_SEPOLIA.inner.genesis_hash.clone(), + paris_block_and_final_difficulty: Some((0, U256::from(0))), + hardforks, + base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(), + prune_delete_limit: 10000, + ..Default::default() + }, + }) + } + + #[test] + fn test_get_base_fee_pre_holocene() { + let op_chain_spec = BASE_SEPOLIA.clone(); + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + ..Default::default() + }; + let base_fee = next_block_base_fee(&op_chain_spec, &parent, 0); + assert_eq!( + base_fee.unwrap(), + parent + .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) + .unwrap_or_default() + ); + } + + #[test] + fn test_get_base_fee_holocene_extra_data_not_set() { + let op_chain_spec = holocene_chainspec(); + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + timestamp: 1800000003, + extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]), + ..Default::default() + }; + let base_fee = next_block_base_fee(&op_chain_spec, &parent, 1800000005); + assert_eq!( + base_fee.unwrap(), + parent + .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) + .unwrap_or_default() + ); + } + + #[test] + fn 
test_get_base_fee_holocene_extra_data_set() { + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]), + timestamp: 1800000003, + ..Default::default() + }; + + let base_fee = next_block_base_fee(holocene_chainspec(), &parent, 1800000005); + assert_eq!( + base_fee.unwrap(), + parent + .next_block_base_fee(BaseFeeParams::new(0x00000008, 0x00000008)) + .unwrap_or_default() + ); + } + + // + #[test] + fn test_get_base_fee_holocene_extra_data_set_base_sepolia() { + let parent = Header { + base_fee_per_gas: Some(507), + gas_used: 4847634, + gas_limit: 60000000, + extra_data: hex!("00000000fa0000000a").into(), + timestamp: 1735315544, + ..Default::default() + }; + + let base_fee = next_block_base_fee(&*BASE_SEPOLIA, &parent, 1735315546).unwrap(); + assert_eq!(base_fee, 507); + } +} diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index 4544aea891e1..770791dba68c 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -1,10 +1,9 @@ use alloy_consensus::Header; -use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OpHardfork; +use reth_optimism_forks::OpHardforks; /// Map the latest active hardfork at the given header to a revm /// [`SpecId`](revm_primitives::SpecId). -pub fn revm_spec(chain_spec: &OpChainSpec, header: &Header) -> revm_primitives::SpecId { +pub fn revm_spec(chain_spec: impl OpHardforks, header: &Header) -> revm_primitives::SpecId { revm_spec_by_timestamp_after_bedrock(chain_spec, header.timestamp) } @@ -15,22 +14,22 @@ pub fn revm_spec(chain_spec: &OpChainSpec, header: &Header) -> revm_primitives:: /// This is only intended to be used after the Bedrock, when hardforks are activated by /// timestamp. 
pub fn revm_spec_by_timestamp_after_bedrock( - chain_spec: &OpChainSpec, + chain_spec: impl OpHardforks, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.fork(OpHardfork::Isthmus).active_at_timestamp(timestamp) { + if chain_spec.is_isthmus_active_at_timestamp(timestamp) { revm_primitives::ISTHMUS - } else if chain_spec.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) { + } else if chain_spec.is_holocene_active_at_timestamp(timestamp) { revm_primitives::HOLOCENE - } else if chain_spec.fork(OpHardfork::Granite).active_at_timestamp(timestamp) { + } else if chain_spec.is_granite_active_at_timestamp(timestamp) { revm_primitives::GRANITE - } else if chain_spec.fork(OpHardfork::Fjord).active_at_timestamp(timestamp) { + } else if chain_spec.is_fjord_active_at_timestamp(timestamp) { revm_primitives::FJORD - } else if chain_spec.fork(OpHardfork::Ecotone).active_at_timestamp(timestamp) { + } else if chain_spec.is_ecotone_active_at_timestamp(timestamp) { revm_primitives::ECOTONE - } else if chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) { + } else if chain_spec.is_canyon_active_at_timestamp(timestamp) { revm_primitives::CANYON - } else if chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) { + } else if chain_spec.is_regolith_active_at_timestamp(timestamp) { revm_primitives::REGOLITH } else { revm_primitives::BEDROCK @@ -51,35 +50,35 @@ mod tests { f(cs).build() } assert_eq!( - revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.isthmus_activated()), 0), + revm_spec_by_timestamp_after_bedrock(op_cs(|cs| cs.isthmus_activated()), 0), revm_primitives::ISTHMUS ); assert_eq!( - revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.holocene_activated()), 0), + revm_spec_by_timestamp_after_bedrock(op_cs(|cs| cs.holocene_activated()), 0), revm_primitives::HOLOCENE ); assert_eq!( - revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.granite_activated()), 0), + revm_spec_by_timestamp_after_bedrock(op_cs(|cs| 
cs.granite_activated()), 0), revm_primitives::GRANITE ); assert_eq!( - revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.fjord_activated()), 0), + revm_spec_by_timestamp_after_bedrock(op_cs(|cs| cs.fjord_activated()), 0), revm_primitives::FJORD ); assert_eq!( - revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.ecotone_activated()), 0), + revm_spec_by_timestamp_after_bedrock(op_cs(|cs| cs.ecotone_activated()), 0), revm_primitives::ECOTONE ); assert_eq!( - revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.canyon_activated()), 0), + revm_spec_by_timestamp_after_bedrock(op_cs(|cs| cs.canyon_activated()), 0), revm_primitives::CANYON ); assert_eq!( - revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.bedrock_activated()), 0), + revm_spec_by_timestamp_after_bedrock(op_cs(|cs| cs.bedrock_activated()), 0), revm_primitives::BEDROCK ); assert_eq!( - revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.regolith_activated()), 0), + revm_spec_by_timestamp_after_bedrock(op_cs(|cs| cs.regolith_activated()), 0), revm_primitives::REGOLITH ); } @@ -92,35 +91,35 @@ mod tests { f(cs).build() } assert_eq!( - revm_spec(&op_cs(|cs| cs.isthmus_activated()), &Default::default()), + revm_spec(op_cs(|cs| cs.isthmus_activated()), &Default::default()), revm_primitives::ISTHMUS ); assert_eq!( - revm_spec(&op_cs(|cs| cs.holocene_activated()), &Default::default()), + revm_spec(op_cs(|cs| cs.holocene_activated()), &Default::default()), revm_primitives::HOLOCENE ); assert_eq!( - revm_spec(&op_cs(|cs| cs.granite_activated()), &Default::default()), + revm_spec(op_cs(|cs| cs.granite_activated()), &Default::default()), revm_primitives::GRANITE ); assert_eq!( - revm_spec(&op_cs(|cs| cs.fjord_activated()), &Default::default()), + revm_spec(op_cs(|cs| cs.fjord_activated()), &Default::default()), revm_primitives::FJORD ); assert_eq!( - revm_spec(&op_cs(|cs| cs.ecotone_activated()), &Default::default()), + revm_spec(op_cs(|cs| cs.ecotone_activated()), &Default::default()), revm_primitives::ECOTONE ); 
assert_eq!( - revm_spec(&op_cs(|cs| cs.canyon_activated()), &Default::default()), + revm_spec(op_cs(|cs| cs.canyon_activated()), &Default::default()), revm_primitives::CANYON ); assert_eq!( - revm_spec(&op_cs(|cs| cs.bedrock_activated()), &Default::default()), + revm_spec(op_cs(|cs| cs.bedrock_activated()), &Default::default()), revm_primitives::BEDROCK ); assert_eq!( - revm_spec(&op_cs(|cs| cs.regolith_activated()), &Default::default()), + revm_spec(op_cs(|cs| cs.regolith_activated()), &Default::default()), revm_primitives::REGOLITH ); } diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index 461f8c11e4fb..c5175ee5bd45 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -1,17 +1,54 @@ //! Error types for the Optimism EVM module. -use alloc::string::String; use reth_evm::execute::BlockExecutionError; +/// L1 Block Info specific errors +#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] +pub enum L1BlockInfoError { + /// Could not find L1 block info transaction in the L2 block + #[error("could not find l1 block info tx in the L2 block")] + MissingTransaction, + /// Invalid L1 block info transaction calldata + #[error("invalid l1 block info transaction calldata in the L2 block")] + InvalidCalldata, + /// Unexpected L1 block info transaction calldata length + #[error("unexpected l1 block info tx calldata length found")] + UnexpectedCalldataLength, + /// Base fee conversion error + #[error("could not convert l1 base fee")] + BaseFeeConversion, + /// Fee overhead conversion error + #[error("could not convert l1 fee overhead")] + FeeOverheadConversion, + /// Fee scalar conversion error + #[error("could not convert l1 fee scalar")] + FeeScalarConversion, + /// Base Fee Scalar conversion error + #[error("could not convert base fee scalar")] + BaseFeeScalarConversion, + /// Blob base fee conversion error + #[error("could not convert l1 blob base fee")] + BlobBaseFeeConversion, + /// Blob base fee 
scalar conversion error + #[error("could not convert l1 blob base fee scalar")] + BlobBaseFeeScalarConversion, + /// Operator fee scalar conversion error + #[error("could not convert operator fee scalar")] + OperatorFeeScalarConversion, + /// Operator fee constant conversion error + #[error("could not convert operator fee constant")] + OperatorFeeConstantConversion, + /// Optimism hardforks not active + #[error("Optimism hardforks are not active")] + HardforksNotActive, +} + /// Optimism Block Executor Errors #[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] pub enum OpBlockExecutionError { /// Error when trying to parse L1 block info - #[error("could not get L1 block info from L2 block: {message}")] - L1BlockInfoError { - /// The inner error message - message: String, - }, + #[error(transparent)] + L1BlockInfo(#[from] L1BlockInfoError), /// Thrown when force deploy of create2deployer code fails. #[error("failed to force create2deployer account code")] ForceCreate2DeployerFail, diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 69bd395729de..d7e0d9df87d8 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -8,7 +8,6 @@ use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::{BlockHeader, Eip658Value, Receipt, Transaction as _}; use alloy_eips::eip7685::Requests; use op_alloy_consensus::OpDepositReceipt; -use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_evm::{ execute::{ @@ -21,10 +20,8 @@ use reth_evm::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; -use reth_optimism_forks::OpHardfork; -use reth_optimism_primitives::{ - transaction::signed::OpTransaction, DepositReceipt, OpPrimitives, OpReceipt, -}; +use reth_optimism_forks::OpHardforks; +use reth_optimism_primitives::{transaction::signed::OpTransaction, DepositReceipt, OpPrimitives}; use reth_primitives::{NodePrimitives, 
RecoveredBlock}; use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_revm::State; @@ -33,9 +30,13 @@ use tracing::trace; /// Factory for [`OpExecutionStrategy`]. #[derive(Debug, Clone)] -pub struct OpExecutionStrategyFactory { +pub struct OpExecutionStrategyFactory< + N: NodePrimitives = OpPrimitives, + ChainSpec = OpChainSpec, + EvmConfig = OpEvmConfig, +> { /// The chainspec - chain_spec: Arc, + chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, /// Receipt builder. @@ -53,10 +54,10 @@ impl OpExecutionStrategyFactory { } } -impl OpExecutionStrategyFactory { +impl OpExecutionStrategyFactory { /// Creates a new executor strategy factory. pub fn new( - chain_spec: Arc, + chain_spec: Arc, evm_config: EvmConfig, receipt_builder: impl OpReceiptBuilder, ) -> Self { @@ -64,17 +65,15 @@ impl OpExecutionStrategyFactory { } } -impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory +impl BlockExecutionStrategyFactory + for OpExecutionStrategyFactory where - N: NodePrimitives< - BlockHeader = alloy_consensus::Header, - Receipt = OpReceipt, - SignedTx: OpTransaction, - >, + N: NodePrimitives, + ChainSpec: OpHardforks + Clone + Unpin + Sync + Send + 'static, EvmConfig: ConfigureEvmFor + Clone + Unpin + Sync + Send + 'static, { type Primitives = N; - type Strategy = OpExecutionStrategy; + type Strategy = OpExecutionStrategy; fn create_strategy(&self, db: DB) -> Self::Strategy where @@ -93,31 +92,32 @@ where /// Block execution strategy for Optimism. #[allow(missing_debug_implementations)] -pub struct OpExecutionStrategy +pub struct OpExecutionStrategy where EvmConfig: Clone, { /// The chainspec - chain_spec: Arc, + chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, /// Current state for block execution. state: State, /// Utility to call system smart contracts. - system_caller: SystemCaller, + system_caller: SystemCaller, /// Receipt builder. 
receipt_builder: Arc>, } -impl OpExecutionStrategy +impl OpExecutionStrategy where N: NodePrimitives, + ChainSpec: OpHardforks, EvmConfig: Clone, { /// Creates a new [`OpExecutionStrategy`] pub fn new( state: State, - chain_spec: Arc, + chain_spec: Arc, evm_config: EvmConfig, receipt_builder: Arc>, ) -> Self { @@ -126,14 +126,12 @@ where } } -impl BlockExecutionStrategy for OpExecutionStrategy +impl BlockExecutionStrategy + for OpExecutionStrategy where DB: Database, - N: NodePrimitives< - BlockHeader = alloy_consensus::Header, - SignedTx: OpTransaction, - Receipt: DepositReceipt, - >, + N: NodePrimitives, + ChainSpec: OpHardforks, EvmConfig: ConfigureEvmFor, { type DB = DB; @@ -152,9 +150,9 @@ where let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); self.system_caller.apply_beacon_root_contract_call( - block.header().timestamp, - block.header().number, - block.header().parent_beacon_block_root, + block.header().timestamp(), + block.header().number(), + block.header().parent_beacon_block_root(), &mut evm, )?; @@ -162,7 +160,7 @@ where // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. 
- ensure_create2_deployer(self.chain_spec.clone(), block.header().timestamp, evm.db_mut()) + ensure_create2_deployer(self.chain_spec.clone(), block.header().timestamp(), evm.db_mut()) .map_err(|_| OpBlockExecutionError::ForceCreate2DeployerFail)?; Ok(()) @@ -174,8 +172,7 @@ where ) -> Result, Self::Error> { let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); - let is_regolith = - self.chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(block.timestamp()); + let is_regolith = self.chain_spec.is_regolith_active_at_timestamp(block.timestamp()); let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body().transaction_count()); @@ -232,7 +229,6 @@ where receipts.push( match self.receipt_builder.build_receipt(ReceiptBuilderCtx { - header: block.header(), tx: transaction, result, cumulative_gas_used, @@ -256,10 +252,8 @@ where // this is only set for post-Canyon deposit // transactions. deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec.is_fork_active_at_timestamp( - OpHardfork::Canyon, - block.header().timestamp, - )) + self.chain_spec + .is_canyon_active_at_timestamp(block.timestamp())) .then_some(1), }) } @@ -305,7 +299,7 @@ where receipts: &[N::Receipt], _requests: &Requests, ) -> Result<(), ConsensusError> { - validate_block_post_execution(block.header(), &self.chain_spec.clone(), receipts) + validate_block_post_execution(block.header(), self.chain_spec.clone(), receipts) } } @@ -334,7 +328,7 @@ mod tests { use reth_chainspec::MIN_TRANSACTION_GAS; use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; use reth_optimism_chainspec::OpChainSpecBuilder; - use reth_optimism_primitives::OpTransactionSigned; + use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; use reth_primitives::{Account, Block, BlockBody}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, diff --git a/crates/optimism/evm/src/l1.rs 
b/crates/optimism/evm/src/l1.rs index 9b8658357327..d2c482640c09 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,13 +1,10 @@ //! Optimism-specific implementation and utilities for the executor -use crate::OpBlockExecutionError; -use alloc::{string::ToString, sync::Arc}; +use crate::{error::L1BlockInfoError, OpBlockExecutionError}; use alloy_consensus::Transaction; use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256}; -use reth_chainspec::ChainSpec; use reth_execution_errors::BlockExecutionError; -use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OpHardfork; +use reth_optimism_forks::OpHardforks; use reth_primitives_traits::BlockBody; use revm::{ primitives::{Bytecode, HashMap, SpecId}, @@ -28,15 +25,18 @@ const CREATE_2_DEPLOYER_BYTECODE: [u8; 1584] = hex!("608060405260043610610043576 /// The function selector of the "setL1BlockValuesEcotone" function in the `L1Block` contract. const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); +/// The function selector of the "setL1BlockValuesIsthmus" function in the `L1Block` contract. +const L1_BLOCK_ISTHMUS_SELECTOR: [u8; 4] = hex!("098999be"); + /// Extracts the [`L1BlockInfo`] from the L2 block. The L1 info transaction is always the first /// transaction in the L2 block. /// /// Returns an error if the L1 info transaction is not found, if the block is empty. 
pub fn extract_l1_info(body: &B) -> Result { - let l1_info_tx = - body.transactions().first().ok_or_else(|| OpBlockExecutionError::L1BlockInfoError { - message: "could not find l1 block info tx in the L2 block".to_string(), - })?; + let l1_info_tx = body + .transactions() + .first() + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::MissingTransaction))?; extract_l1_info_from_tx(l1_info_tx) } @@ -49,9 +49,7 @@ pub fn extract_l1_info_from_tx( ) -> Result { let l1_info_tx_data = tx.input(); if l1_info_tx_data.len() < 4 { - return Err(OpBlockExecutionError::L1BlockInfoError { - message: "invalid l1 block info transaction calldata in the L2 block".to_string(), - }) + return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::InvalidCalldata)); } parse_l1_info(l1_info_tx_data) @@ -70,7 +68,9 @@ pub fn parse_l1_info(input: &[u8]) -> Result // If the first 4 bytes of the calldata are the L1BlockInfoEcotone selector, then we parse the // calldata as an Ecotone hardfork L1BlockInfo transaction. Otherwise, we parse it as a // Bedrock hardfork L1BlockInfo transaction. 
- if input[0..4] == L1_BLOCK_ECOTONE_SELECTOR { + if input[0..4] == L1_BLOCK_ISTHMUS_SELECTOR { + parse_l1_info_tx_isthmus(input[4..].as_ref()) + } else if input[0..4] == L1_BLOCK_ECOTONE_SELECTOR { parse_l1_info_tx_ecotone(input[4..].as_ref()) } else { parse_l1_info_tx_bedrock(input[4..].as_ref()) @@ -90,26 +90,15 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { if data.len() != 160 { - return Err(OpBlockExecutionError::L1BlockInfoError { - message: "unexpected l1 block info tx calldata length found".to_string(), - }) + return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::UnexpectedCalldataLength)); } // https://github.com/ethereum-optimism/op-geth/blob/60038121c7571a59875ff9ed7679c48c9f73405d/core/types/rollup_cost.go#L317-L328 @@ -155,25 +142,75 @@ pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result Result { + if data.len() != 172 { + return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::UnexpectedCalldataLength)); + } + + // https://github.com/ethereum-optimism/op-geth/blob/60038121c7571a59875ff9ed7679c48c9f73405d/core/types/rollup_cost.go#L317-L328 + // + // data layout assumed for Ecotone: + // offset type varname + // 0 + // 4 uint32 _basefeeScalar (start offset in this scope) + // 8 uint32 _blobBaseFeeScalar + // 12 uint64 _sequenceNumber, + // 20 uint64 _timestamp, + // 28 uint64 _l1BlockNumber + // 36 uint256 _basefee, + // 68 uint256 _blobBaseFee, + // 100 bytes32 _hash, + // 132 bytes32 _batcherHash, + // 164 uint32 _operatorFeeScalar + // 168 uint64 _operatorFeeConstant + + let l1_base_fee_scalar = U256::try_from_be_slice(&data[..4]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeScalarConversion))?; + let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[4..8]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeScalarConversion) })?; - let l1_base_fee = U256::try_from_be_slice(&data[32..64]).ok_or_else(|| 
{ - OpBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 blob base fee".to_string(), - } + let l1_base_fee = U256::try_from_be_slice(&data[32..64]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeConversion))?; + let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeConversion))?; + let operator_fee_scalar = U256::try_from_be_slice(&data[160..164]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeScalarConversion) })?; - let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96]).ok_or_else(|| { - OpBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 blob base fee".to_string(), - } + let operator_fee_constant = U256::try_from_be_slice(&data[164..172]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeConstantConversion) })?; let mut l1block = L1BlockInfo::default(); @@ -181,24 +218,27 @@ pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result Result; @@ -206,13 +246,14 @@ pub trait RethL1BlockInfo { /// Computes the data gas cost for an L2 transaction. /// /// ### Takes - /// - `chain_spec`: The [`ChainSpec`] for the node. + /// - `chain_spec`: The chain spec for the node. /// - `timestamp`: The timestamp of the current block. /// - `input`: The calldata of the transaction. 
fn l1_data_gas( &self, - chain_spec: &ChainSpec, + chain_spec: impl OpHardforks, timestamp: u64, + block_number: u64, input: &[u8], ) -> Result; } @@ -220,49 +261,49 @@ pub trait RethL1BlockInfo { impl RethL1BlockInfo for L1BlockInfo { fn l1_tx_data_fee( &mut self, - chain_spec: &ChainSpec, + chain_spec: impl OpHardforks, timestamp: u64, + block_number: u64, input: &[u8], is_deposit: bool, ) -> Result { if is_deposit { - return Ok(U256::ZERO) + return Ok(U256::ZERO); } - let spec_id = if chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, timestamp) { + let spec_id = if chain_spec.is_fjord_active_at_timestamp(timestamp) { SpecId::FJORD - } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, timestamp) { + } else if chain_spec.is_ecotone_active_at_timestamp(timestamp) { SpecId::ECOTONE - } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) { + } else if chain_spec.is_regolith_active_at_timestamp(timestamp) { SpecId::REGOLITH - } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Bedrock, timestamp) { + } else if chain_spec.is_bedrock_active_at_block(block_number) { SpecId::BEDROCK } else { - return Err(OpBlockExecutionError::L1BlockInfoError { - message: "Optimism hardforks are not active".to_string(), - } - .into()) + return Err( + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::HardforksNotActive).into() + ); }; Ok(self.calculate_tx_l1_cost(input, spec_id)) } fn l1_data_gas( &self, - chain_spec: &ChainSpec, + chain_spec: impl OpHardforks, timestamp: u64, + block_number: u64, input: &[u8], ) -> Result { - let spec_id = if chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, timestamp) { + let spec_id = if chain_spec.is_fjord_active_at_timestamp(timestamp) { SpecId::FJORD - } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) { + } else if chain_spec.is_regolith_active_at_timestamp(timestamp) { SpecId::REGOLITH - } else if 
chain_spec.is_fork_active_at_timestamp(OpHardfork::Bedrock, timestamp) { + } else if chain_spec.is_bedrock_active_at_block(block_number) { SpecId::BEDROCK } else { - return Err(OpBlockExecutionError::L1BlockInfoError { - message: "Optimism hardforks are not active".to_string(), - } - .into()) + return Err( + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::HardforksNotActive).into() + ); }; Ok(self.data_gas(input, spec_id)) } @@ -272,7 +313,7 @@ impl RethL1BlockInfo for L1BlockInfo { /// deployer contract. This is done by directly setting the code of the create2 deployer account /// prior to executing any transactions on the timestamp activation of the fork. pub fn ensure_create2_deployer( - chain_spec: Arc, + chain_spec: impl OpHardforks, timestamp: u64, db: &mut revm::State, ) -> Result<(), DB::Error> @@ -282,8 +323,8 @@ where // If the canyon hardfork is active at the current timestamp, and it was not active at the // previous block timestamp (heuristically, block time is not perfectly constant at 2s), and the // chain is an optimism chain, then we need to force-deploy the create2 deployer contract. 
- if chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) && - !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp.saturating_sub(2)) + if chain_spec.is_canyon_active_at_timestamp(timestamp) && + !chain_spec.is_canyon_active_at_timestamp(timestamp.saturating_sub(2)) { trace!(target: "evm", "Forcing create2 deployer contract deployment on Canyon transition"); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index cb103f2bb76d..74820e5ea25b 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -21,6 +21,8 @@ use op_alloy_consensus::EIP1559ParamError; use reth_chainspec::EthChainSpec; use reth_evm::{env::EvmEnv, ConfigureEvm, ConfigureEvmEnv, Database, Evm, NextBlockEnvAttributes}; use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_consensus::next_block_base_fee; +use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::OpTransactionSigned; use reth_primitives_traits::FillTxEnv; use reth_revm::{ @@ -127,24 +129,30 @@ impl Evm for OpEvm<'_, EXT, DB> { } /// Optimism-related EVM configuration. -#[derive(Debug, Clone)] -pub struct OpEvmConfig { - chain_spec: Arc, +#[derive(Debug)] +pub struct OpEvmConfig { + chain_spec: Arc, } -impl OpEvmConfig { +impl Clone for OpEvmConfig { + fn clone(&self) -> Self { + Self { chain_spec: self.chain_spec.clone() } + } +} + +impl OpEvmConfig { /// Creates a new [`OpEvmConfig`] with the given chain spec. - pub const fn new(chain_spec: Arc) -> Self { + pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } /// Returns the chain spec associated with this configuration. 
- pub const fn chain_spec(&self) -> &Arc { + pub const fn chain_spec(&self) -> &Arc { &self.chain_spec } } -impl ConfigureEvmEnv for OpEvmConfig { +impl ConfigureEvmEnv for OpEvmConfig { type Header = Header; type Transaction = OpTransactionSigned; type Error = EIP1559ParamError; @@ -209,7 +217,11 @@ impl ConfigureEvmEnv for OpEvmConfig { prevrandao: Some(attributes.prev_randao), gas_limit: U256::from(attributes.gas_limit), // calculate basefee based on parent block's gas usage - basefee: self.chain_spec.next_block_base_fee(parent, attributes.timestamp)?, + basefee: U256::from(next_block_base_fee( + &self.chain_spec, + parent, + attributes.timestamp, + )?), // calculate excess gas based on parent block's blob gas usage blob_excess_gas_and_price, }; @@ -226,7 +238,7 @@ impl ConfigureEvmEnv for OpEvmConfig { } } -impl ConfigureEvm for OpEvmConfig { +impl ConfigureEvm for OpEvmConfig { type Evm<'a, DB: Database + 'a, I: 'a> = OpEvm<'a, I, DB>; type EvmError = EVMError; type HaltReason = HaltReason; diff --git a/crates/optimism/evm/src/receipts.rs b/crates/optimism/evm/src/receipts.rs index a2f6228d29f2..2ced9c7b7c6c 100644 --- a/crates/optimism/evm/src/receipts.rs +++ b/crates/optimism/evm/src/receipts.rs @@ -1,4 +1,4 @@ -use alloy_consensus::{Eip658Value, Header, Receipt}; +use alloy_consensus::{Eip658Value, Receipt}; use core::fmt; use op_alloy_consensus::{OpDepositReceipt, OpTxType}; use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; @@ -7,8 +7,6 @@ use revm_primitives::ExecutionResult; /// Context for building a receipt. #[derive(Debug)] pub struct ReceiptBuilderCtx<'a, T> { - /// Block header. - pub header: &'a Header, /// Transaction pub tx: &'a T, /// Result of transaction execution. 
diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index a8c4f30c2cf0..7c520558930a 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -40,6 +40,7 @@ reth-tasks = { workspace = true, optional = true } reth-optimism-payload-builder.workspace = true reth-optimism-evm.workspace = true reth-optimism-rpc.workspace = true +reth-optimism-txpool.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-forks.workspace = true @@ -55,6 +56,7 @@ op-alloy-consensus.workspace = true op-alloy-rpc-types-engine.workspace = true op-alloy-flz.workspace = true alloy-rpc-types-engine.workspace = true +alloy-rpc-types-eth.workspace = true alloy-consensus.workspace = true # misc @@ -101,6 +103,7 @@ optimism = [ "reth-optimism-node/optimism", "reth-node-core/optimism", "reth-optimism-primitives/optimism", + "reth-optimism-txpool/optimism", ] asm-keccak = [ "reth-primitives/asm-keccak", diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 640ec84a0b68..9f646929bf56 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -1,6 +1,5 @@ use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadEnvelopeV2, ExecutionPayloadSidecar, ExecutionPayloadV1, - PayloadError, + ExecutionPayload, ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadError, }; use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpPayloadAttributes, @@ -12,13 +11,13 @@ use reth_node_api::{ EngineObjectValidationError, MessageValidationKind, PayloadOrAttributes, PayloadTypes, VersionSpecificValidationError, }, - validate_version_specific_fields, BuiltPayload, EngineTypes, EngineValidator, NodePrimitives, - PayloadValidator, + validate_version_specific_fields, BuiltPayload, EngineTypes, EngineValidator, ExecutionData, + NodePrimitives, PayloadValidator, }; use 
reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::{OpHardfork, OpHardforks}; use reth_optimism_payload_builder::{OpBuiltPayload, OpPayloadBuilderAttributes}; -use reth_optimism_primitives::OpBlock; +use reth_optimism_primitives::{OpBlock, OpPrimitives}; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::SealedBlock; use std::sync::Arc; @@ -48,25 +47,28 @@ where type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadEnvelopeV3 = OpExecutionPayloadEnvelopeV3; type ExecutionPayloadEnvelopeV4 = OpExecutionPayloadEnvelopeV4; + type ExecutionData = ExecutionData; fn block_to_payload( block: SealedBlock< <::Primitives as NodePrimitives>::Block, >, - ) -> (ExecutionPayload, ExecutionPayloadSidecar) { - ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()) + ) -> ExecutionData { + let (payload, sidecar) = + ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()); + ExecutionData { payload, sidecar } } } /// A default payload type for [`OpEngineTypes`] #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] -pub struct OpPayloadTypes; +pub struct OpPayloadTypes(core::marker::PhantomData); -impl PayloadTypes for OpPayloadTypes { - type BuiltPayload = OpBuiltPayload; +impl PayloadTypes for OpPayloadTypes { + type BuiltPayload = OpBuiltPayload; type PayloadAttributes = OpPayloadAttributes; - type PayloadBuilderAttributes = OpPayloadBuilderAttributes; + type PayloadBuilderAttributes = OpPayloadBuilderAttributes; } /// Validator for Optimism engine API. 
@@ -91,19 +93,19 @@ impl OpEngineValidator { impl PayloadValidator for OpEngineValidator { type Block = OpBlock; + type ExecutionData = ExecutionData; fn ensure_well_formed_payload( &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, + payload: ExecutionData, ) -> Result, PayloadError> { - self.inner.ensure_well_formed_payload(payload, sidecar) + self.inner.ensure_well_formed_payload(payload) } } impl EngineValidator for OpEngineValidator where - Types: EngineTypes, + Types: EngineTypes, { fn validate_execution_requests( &self, diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 2f3d5e5c082d..a782e11ed0d6 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -24,7 +24,7 @@ pub use engine::OpEngineTypes; pub mod node; pub use node::{OpNetworkPrimitives, OpNode}; -pub mod txpool; +pub use reth_optimism_txpool as txpool; /// Helpers for running test node instances. #[cfg(feature = "test-utils")] diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 3fa2ac6852b3..835cc9a22226 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -7,13 +7,14 @@ use crate::{ OpEngineTypes, }; use op_alloy_consensus::OpPooledTransaction; -use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::{EthChainSpec, Hardforks}; use reth_evm::{ execute::BasicBlockExecutorProvider, ConfigureEvm, ConfigureEvmEnv, ConfigureEvmFor, }; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, NetworkPrimitives, PeersInfo}; -use reth_node_api::{AddOnsContext, FullNodeComponents, NodeAddOns, PrimitivesTy, TxTy}; +use reth_node_api::{ + AddOnsContext, FullNodeComponents, NodeAddOns, NodePrimitives, PrimitivesTy, TxTy, +}; use reth_node_builder::{ components::{ ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, @@ -21,29 +22,29 @@ use reth_node_builder::{ }, 
node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, rpc::{EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcAddOns, RpcHandle}, - BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, + BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; -use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; +use reth_optimism_evm::{BasicOpReceiptBuilder, OpEvmConfig, OpExecutionStrategyFactory}; +use reth_optimism_forks::OpHardforks; use reth_optimism_payload_builder::{ builder::OpPayloadTransactions, config::{OpBuilderConfig, OpDAConfig}, }; -use reth_optimism_primitives::{OpPrimitives, OpReceipt, OpTransactionSigned}; +use reth_optimism_primitives::{DepositReceipt, OpPrimitives, OpReceipt, OpTransactionSigned}; use reth_optimism_rpc::{ miner::{MinerApiExtServer, OpMinerExtApi}, witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, OpEthApi, OpEthApiError, SequencerClient, }; -use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_provider::{CanonStateSubscriptions, EthStorage}; +use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions, EthStorage}; use reth_rpc_eth_types::error::FromEvmError; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolTransaction, TransactionPool, - TransactionValidationTaskExecutor, + blobstore::DiskFileBlobStore, CoinbaseTipOrdering, EthPoolTransaction, PoolTransaction, + TransactionPool, TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; use revm::primitives::TxEnv; @@ -114,6 +115,40 @@ impl OpNode { .executor(OpExecutorBuilder::default()) .consensus(OpConsensusBuilder::default()) } + + /// Instantiates the [`ProviderFactoryBuilder`] for an opstack node. 
+ /// + /// # Open a Providerfactory in read-only mode from a datadir + /// + /// See also: [`ProviderFactoryBuilder`] and + /// [`ReadOnlyConfig`](reth_provider::providers::ReadOnlyConfig). + /// + /// ```no_run + /// use reth_optimism_chainspec::BASE_MAINNET; + /// use reth_optimism_node::OpNode; + /// + /// let factory = + /// OpNode::provider_factory_builder().open_read_only(BASE_MAINNET.clone(), "datadir").unwrap(); + /// ``` + /// + /// # Open a Providerfactory manually with with all required components + /// + /// ```no_run + /// use reth_db::open_db_read_only; + /// use reth_optimism_chainspec::OpChainSpecBuilder; + /// use reth_optimism_node::OpNode; + /// use reth_provider::providers::StaticFileProvider; + /// use std::sync::Arc; + /// + /// let factory = OpNode::provider_factory_builder() + /// .db(Arc::new(open_db_read_only("db", Default::default()).unwrap())) + /// .chainspec(OpChainSpecBuilder::base_mainnet().build().into()) + /// .static_file(StaticFileProvider::read_only("db/static_files", false).unwrap()) + /// .build_provider_factory(); + /// ``` + pub fn provider_factory_builder() -> ProviderFactoryBuilder { + ProviderFactoryBuilder::default() + } } impl Node for OpNode @@ -205,11 +240,18 @@ where ctx: reth_node_api::AddOnsContext<'_, N>, ) -> eyre::Result { let Self { rpc_add_ons, da_config } = self; + + let builder = reth_optimism_payload_builder::OpPayloadBuilder::new( + ctx.node.pool().clone(), + ctx.node.provider().clone(), + ctx.node.evm_config().clone(), + BasicOpReceiptBuilder::default(), + ); // install additional OP specific rpc methods let debug_ext = OpDebugWitnessApi::new( ctx.node.provider().clone(), - ctx.node.evm_config().clone(), Box::new(ctx.node.task_executor().clone()), + builder, ); let miner_ext = OpMinerExtApi::new(da_config); @@ -344,41 +386,48 @@ where /// /// This contains various settings that can be configured and take precedence over the node's /// config. 
-#[derive(Debug, Default, Clone)] -pub struct OpPoolBuilder { +#[derive(Debug, Clone)] +pub struct OpPoolBuilder { /// Enforced overrides that are applied to the pool config. pub pool_config_overrides: PoolBuilderConfigOverrides, + /// Marker for the pooled transaction type. + _pd: core::marker::PhantomData, +} + +impl Default for OpPoolBuilder { + fn default() -> Self { + Self { pool_config_overrides: Default::default(), _pd: Default::default() } + } } -impl PoolBuilder for OpPoolBuilder +impl PoolBuilder for OpPoolBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, + T: EthPoolTransaction>, { - type Pool = OpTransactionPool; + type Pool = OpTransactionPool; async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { - let Self { pool_config_overrides } = self; + let Self { pool_config_overrides, .. } = self; let data_dir = ctx.config().datadir(); let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; - let validator = TransactionValidationTaskExecutor::eth_builder(Arc::new( - ctx.chain_spec().inner.clone(), - )) - .no_eip4844() - .with_head_timestamp(ctx.head().timestamp) - .kzg_settings(ctx.kzg_settings()?) - .with_additional_tasks( - pool_config_overrides - .additional_validation_tasks - .unwrap_or_else(|| ctx.config().txpool.additional_validation_tasks), - ) - .build_with_tasks(ctx.provider().clone(), ctx.task_executor().clone(), blob_store.clone()) - .map(|validator| { - OpTransactionValidator::new(validator) - // In --dev mode we can't require gas fees because we're unable to decode - // the L1 block info - .require_l1_data_gas_fee(!ctx.config().dev.dev) - }); + let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) + .no_eip4844() + .with_head_timestamp(ctx.head().timestamp) + .kzg_settings(ctx.kzg_settings()?) 
+ .with_additional_tasks( + pool_config_overrides + .additional_validation_tasks + .unwrap_or_else(|| ctx.config().txpool.additional_validation_tasks), + ) + .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()) + .map(|validator| { + OpTransactionValidator::new(validator) + // In --dev mode we can't require gas fees because we're unable to decode + // the L1 block info + .require_l1_data_gas_fee(!ctx.config().dev.dev) + }); let transaction_pool = reth_transaction_pool::Pool::new( validator, @@ -460,27 +509,31 @@ impl OpPayloadBuilder { } } -impl OpPayloadBuilder -where - Txs: OpPayloadTransactions, -{ +impl OpPayloadBuilder { /// Configures the type responsible for yielding the transactions that should be included in the /// payload. - pub fn with_transactions( - self, - best_transactions: T, - ) -> OpPayloadBuilder { + pub fn with_transactions(self, best_transactions: T) -> OpPayloadBuilder { let Self { compute_pending_block, da_config, .. } = self; OpPayloadBuilder { compute_pending_block, best_transactions, da_config } } - /// A helper method to initialize [`PayloadBuilderService`] with the given EVM config. - pub fn spawn( - self, + /// A helper method to initialize [`reth_optimism_payload_builder::OpPayloadBuilder`] with the + /// given EVM config. 
+ #[expect(clippy::type_complexity)] + pub fn build( + &self, evm_config: Evm, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> + ) -> eyre::Result< + reth_optimism_payload_builder::OpPayloadBuilder< + Pool, + Node::Provider, + Evm, + PrimitivesTy, + Txs, + >, + > where Node: FullNodeTypes< Types: NodeTypesWithEngine< @@ -493,32 +546,17 @@ where + Unpin + 'static, Evm: ConfigureEvmFor>, + Txs: OpPayloadTransactions, { let payload_builder = reth_optimism_payload_builder::OpPayloadBuilder::with_builder_config( + pool, + ctx.provider().clone(), evm_config, - OpBuilderConfig { da_config: self.da_config }, + BasicOpReceiptBuilder::default(), + OpBuilderConfig { da_config: self.da_config.clone() }, ) - .with_transactions(self.best_transactions) + .with_transactions(self.best_transactions.clone()) .set_compute_pending_block(self.compute_pending_block); - let conf = ctx.payload_builder_config(); - - let payload_job_config = BasicPayloadJobGeneratorConfig::default() - .interval(conf.interval()) - .deadline(conf.deadline()) - .max_payload_tasks(conf.max_payload_tasks()); - - let payload_generator = BasicPayloadJobGenerator::with_builder( - ctx.provider().clone(), - pool, - ctx.task_executor().clone(), - payload_job_config, - payload_builder, - ); - let (payload_service, payload_builder) = - PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream()); - - ctx.task_executor().spawn_critical("payload builder service", Box::pin(payload_service)); - Ok(payload_builder) } } @@ -535,14 +573,22 @@ where Pool: TransactionPool>> + Unpin + 'static, - Txs: OpPayloadTransactions, + Txs: OpPayloadTransactions, { - async fn spawn_payload_service( - self, + type PayloadBuilder = reth_optimism_payload_builder::OpPayloadBuilder< + Pool, + Node::Provider, + OpEvmConfig, + PrimitivesTy, + Txs, + >; + + async fn build_payload_builder( + &self, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> { - self.spawn(OpEvmConfig::new(ctx.chain_spec()), ctx, pool) + ) 
-> eyre::Result { + self.build(OpEvmConfig::new(ctx.chain_spec()), ctx, pool) } } @@ -637,9 +683,14 @@ pub struct OpConsensusBuilder; impl ConsensusBuilder for OpConsensusBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypes< + ChainSpec: OpHardforks, + Primitives: NodePrimitives, + >, + >, { - type Consensus = Arc; + type Consensus = Arc::ChainSpec>>; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs deleted file mode 100644 index 31b1f6708bde..000000000000 --- a/crates/optimism/node/src/txpool.rs +++ /dev/null @@ -1,496 +0,0 @@ -//! OP transaction pool types -use alloy_consensus::{ - BlobTransactionSidecar, BlobTransactionValidationError, BlockHeader, Transaction, Typed2718, -}; -use alloy_eips::eip2718::Encodable2718; -use alloy_primitives::{Address, TxHash, TxKind, U256}; -use op_alloy_consensus::OpTypedTransaction; -use parking_lot::RwLock; -use reth_chainspec::ChainSpec; -use reth_node_api::{Block, BlockBody}; -use reth_optimism_evm::RethL1BlockInfo; -use reth_optimism_primitives::{OpBlock, OpTransactionSigned}; -use reth_primitives::{ - transaction::TransactionConversionError, GotExpected, InvalidTransactionError, Recovered, - SealedBlock, -}; -use reth_primitives_traits::SignedTransaction; -use reth_provider::{BlockReaderIdExt, StateProviderFactory}; -use reth_revm::L1BlockInfo; -use reth_transaction_pool::{ - CoinbaseTipOrdering, EthBlobTransactionSidecar, EthPoolTransaction, EthPooledTransaction, - EthTransactionValidator, Pool, PoolTransaction, TransactionOrigin, - TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, -}; -use revm::primitives::{AccessList, KzgSettings}; -use std::sync::{ - atomic::{AtomicU64, Ordering}, - Arc, OnceLock, -}; - -/// Type alias for default optimism transaction pool -pub type OpTransactionPool = Pool< - 
TransactionValidationTaskExecutor>, - CoinbaseTipOrdering, - S, ->; - -/// Pool transaction for OP. -/// -/// This type wraps the actual transaction and caches values that are frequently used by the pool. -/// For payload building this lazily tracks values that are required during payload building: -/// - Estimated compressed size of this transaction -#[derive(Debug, Clone, derive_more::Deref)] -pub struct OpPooledTransaction { - #[deref] - inner: EthPooledTransaction, - /// The estimated size of this transaction, lazily computed. - estimated_tx_compressed_size: OnceLock, -} - -impl OpPooledTransaction { - /// Create new instance of [Self]. - pub fn new(transaction: Recovered, encoded_length: usize) -> Self { - Self { - inner: EthPooledTransaction::new(transaction, encoded_length), - estimated_tx_compressed_size: Default::default(), - } - } - - /// Returns the estimated compressed size of a transaction in bytes scaled by 1e6. - /// This value is computed based on the following formula: - /// `max(minTransactionSize, intercept + fastlzCoef*fastlzSize)` - pub fn estimated_compressed_size(&self) -> u64 { - *self.estimated_tx_compressed_size.get_or_init(|| { - op_alloy_flz::tx_estimated_size_fjord(&self.inner.transaction().encoded_2718()) - }) - } -} - -impl From> for OpPooledTransaction { - fn from(tx: Recovered) -> Self { - let encoded_len = tx.encode_2718_len(); - let tx = tx.map_transaction(|tx| tx.into()); - Self { - inner: EthPooledTransaction::new(tx, encoded_len), - estimated_tx_compressed_size: Default::default(), - } - } -} - -impl TryFrom> for OpPooledTransaction { - type Error = TransactionConversionError; - - fn try_from(value: Recovered) -> Result { - let (tx, signer) = value.into_parts(); - let pooled: Recovered = - Recovered::new_unchecked(tx.try_into()?, signer); - Ok(pooled.into()) - } -} - -impl From for Recovered { - fn from(value: OpPooledTransaction) -> Self { - value.inner.transaction - } -} - -impl PoolTransaction for OpPooledTransaction { - 
type TryFromConsensusError = >>::Error; - type Consensus = OpTransactionSigned; - type Pooled = op_alloy_consensus::OpPooledTransaction; - - fn clone_into_consensus(&self) -> Recovered { - self.inner.transaction().clone() - } - - fn try_consensus_into_pooled( - tx: Recovered, - ) -> Result, Self::TryFromConsensusError> { - let (tx, signer) = tx.into_parts(); - Ok(Recovered::new_unchecked(tx.try_into()?, signer)) - } - - fn hash(&self) -> &TxHash { - self.inner.transaction.tx_hash() - } - - fn sender(&self) -> Address { - self.inner.transaction.signer() - } - - fn sender_ref(&self) -> &Address { - self.inner.transaction.signer_ref() - } - - fn nonce(&self) -> u64 { - self.inner.transaction.nonce() - } - - fn cost(&self) -> &U256 { - &self.inner.cost - } - - fn gas_limit(&self) -> u64 { - self.inner.transaction.gas_limit() - } - - fn max_fee_per_gas(&self) -> u128 { - self.inner.transaction.max_fee_per_gas() - } - - fn access_list(&self) -> Option<&AccessList> { - self.inner.transaction.access_list() - } - - fn max_priority_fee_per_gas(&self) -> Option { - self.inner.transaction.max_priority_fee_per_gas() - } - - fn max_fee_per_blob_gas(&self) -> Option { - self.inner.transaction.max_fee_per_blob_gas() - } - - fn effective_tip_per_gas(&self, base_fee: u64) -> Option { - self.inner.transaction.effective_tip_per_gas(base_fee) - } - - fn priority_fee_or_price(&self) -> u128 { - self.inner.transaction.priority_fee_or_price() - } - - fn kind(&self) -> TxKind { - self.inner.transaction.kind() - } - - fn is_create(&self) -> bool { - self.inner.transaction.is_create() - } - - fn input(&self) -> &[u8] { - self.inner.transaction.input() - } - - fn size(&self) -> usize { - self.inner.transaction.input().len() - } - - fn tx_type(&self) -> u8 { - self.inner.transaction.ty() - } - - fn encoded_length(&self) -> usize { - self.inner.encoded_length - } - - fn chain_id(&self) -> Option { - self.inner.transaction.chain_id() - } -} - -impl EthPoolTransaction for OpPooledTransaction { - 
fn take_blob(&mut self) -> EthBlobTransactionSidecar { - EthBlobTransactionSidecar::None - } - - fn blob_count(&self) -> usize { - 0 - } - - fn try_into_pooled_eip4844( - self, - _sidecar: Arc, - ) -> Option> { - None - } - - fn try_from_eip4844( - _tx: Recovered, - _sidecar: BlobTransactionSidecar, - ) -> Option { - None - } - - fn validate_blob( - &self, - _sidecar: &BlobTransactionSidecar, - _settings: &KzgSettings, - ) -> Result<(), BlobTransactionValidationError> { - Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())) - } - - fn authorization_count(&self) -> usize { - match self.inner.transaction.transaction() { - OpTypedTransaction::Eip7702(tx) => tx.authorization_list.len(), - _ => 0, - } - } -} - -/// Validator for Optimism transactions. -#[derive(Debug, Clone)] -pub struct OpTransactionValidator { - /// The type that performs the actual validation. - inner: EthTransactionValidator, - /// Additional block info required for validation. - block_info: Arc, - /// If true, ensure that the transaction's sender has enough balance to cover the L1 gas fee - /// derived from the tracked L1 block info that is extracted from the first transaction in the - /// L2 block. - require_l1_data_gas_fee: bool, -} - -impl OpTransactionValidator { - /// Returns the configured chain spec - pub fn chain_spec(&self) -> &Arc { - self.inner.chain_spec() - } - - /// Returns the configured client - pub fn client(&self) -> &Client { - self.inner.client() - } - - /// Returns the current block timestamp. - fn block_timestamp(&self) -> u64 { - self.block_info.timestamp.load(Ordering::Relaxed) - } - - /// Whether to ensure that the transaction's sender has enough balance to also cover the L1 gas - /// fee. - pub fn require_l1_data_gas_fee(self, require_l1_data_gas_fee: bool) -> Self { - Self { require_l1_data_gas_fee, ..self } - } - - /// Returns whether this validator also requires the transaction's sender to have enough balance - /// to cover the L1 gas fee. 
- pub const fn requires_l1_data_gas_fee(&self) -> bool { - self.require_l1_data_gas_fee - } -} - -impl OpTransactionValidator -where - Client: StateProviderFactory + BlockReaderIdExt, - Tx: EthPoolTransaction, -{ - /// Create a new [`OpTransactionValidator`]. - pub fn new(inner: EthTransactionValidator) -> Self { - let this = Self::with_block_info(inner, OpL1BlockInfo::default()); - if let Ok(Some(block)) = - this.inner.client().block_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest) - { - // genesis block has no txs, so we can't extract L1 info, we set the block info to empty - // so that we will accept txs into the pool before the first block - if block.header().number() == 0 { - this.block_info.timestamp.store(block.header().timestamp(), Ordering::Relaxed); - } else { - this.update_l1_block_info(block.header(), block.body().transactions().first()); - } - } - - this - } - - /// Create a new [`OpTransactionValidator`] with the given [`OpL1BlockInfo`]. - pub fn with_block_info( - inner: EthTransactionValidator, - block_info: OpL1BlockInfo, - ) -> Self { - Self { inner, block_info: Arc::new(block_info), require_l1_data_gas_fee: true } - } - - /// Update the L1 block info for the given header and system transaction, if any. - /// - /// Note: this supports optional system transaction, in case this is used in a dev setuo - pub fn update_l1_block_info(&self, header: &H, tx: Option<&T>) - where - H: BlockHeader, - T: Transaction, - { - self.block_info.timestamp.store(header.timestamp(), Ordering::Relaxed); - - if let Some(Ok(cost_addition)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { - *self.block_info.l1_block_info.write() = cost_addition; - } - } - - /// Validates a single transaction. - /// - /// See also [`TransactionValidator::validate_transaction`] - /// - /// This behaves the same as [`EthTransactionValidator::validate_one`], but in addition, ensures - /// that the account has enough balance to cover the L1 gas cost. 
- pub fn validate_one( - &self, - origin: TransactionOrigin, - transaction: Tx, - ) -> TransactionValidationOutcome { - if transaction.is_eip4844() { - return TransactionValidationOutcome::Invalid( - transaction, - InvalidTransactionError::TxTypeNotSupported.into(), - ) - } - - let outcome = self.inner.validate_one(origin, transaction); - - if !self.requires_l1_data_gas_fee() { - // no need to check L1 gas fee - return outcome - } - - // ensure that the account has enough balance to cover the L1 gas cost - if let TransactionValidationOutcome::Valid { - balance, - state_nonce, - transaction: valid_tx, - propagate, - } = outcome - { - let mut l1_block_info = self.block_info.l1_block_info.read().clone(); - - let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); - let tx = valid_tx.transaction().clone_into_consensus(); - tx.encode_2718(&mut encoded); - - let cost_addition = match l1_block_info.l1_tx_data_fee( - self.chain_spec(), - self.block_timestamp(), - &encoded, - false, - ) { - Ok(cost) => cost, - Err(err) => { - return TransactionValidationOutcome::Error(*valid_tx.hash(), Box::new(err)) - } - }; - let cost = valid_tx.transaction().cost().saturating_add(cost_addition); - - // Checks for max cost - if cost > balance { - return TransactionValidationOutcome::Invalid( - valid_tx.into_transaction(), - InvalidTransactionError::InsufficientFunds( - GotExpected { got: balance, expected: cost }.into(), - ) - .into(), - ) - } - - return TransactionValidationOutcome::Valid { - balance, - state_nonce, - transaction: valid_tx, - propagate, - } - } - - outcome - } - - /// Validates all given transactions. - /// - /// Returns all outcomes for the given transactions in the same order. 
- /// - /// See also [`Self::validate_one`] - pub fn validate_all( - &self, - transactions: Vec<(TransactionOrigin, Tx)>, - ) -> Vec> { - transactions.into_iter().map(|(origin, tx)| self.validate_one(origin, tx)).collect() - } -} - -impl TransactionValidator for OpTransactionValidator -where - Client: StateProviderFactory + BlockReaderIdExt, - Tx: EthPoolTransaction, -{ - type Transaction = Tx; - - async fn validate_transaction( - &self, - origin: TransactionOrigin, - transaction: Self::Transaction, - ) -> TransactionValidationOutcome { - self.validate_one(origin, transaction) - } - - async fn validate_transactions( - &self, - transactions: Vec<(TransactionOrigin, Self::Transaction)>, - ) -> Vec> { - self.validate_all(transactions) - } - - fn on_new_head_block(&self, new_tip_block: &SealedBlock) - where - B: Block, - { - self.inner.on_new_head_block(new_tip_block); - self.update_l1_block_info( - new_tip_block.header(), - new_tip_block.body().transactions().first(), - ); - } -} - -/// Tracks additional infos for the current block. -#[derive(Debug, Default)] -pub struct OpL1BlockInfo { - /// The current L1 block info. - l1_block_info: RwLock, - /// Current block timestamp. 
- timestamp: AtomicU64, -} - -#[cfg(test)] -mod tests { - use crate::txpool::{OpPooledTransaction, OpTransactionValidator}; - use alloy_eips::eip2718::Encodable2718; - use alloy_primitives::{PrimitiveSignature as Signature, TxKind, U256}; - use op_alloy_consensus::{OpTypedTransaction, TxDeposit}; - use reth_chainspec::MAINNET; - use reth_optimism_primitives::OpTransactionSigned; - use reth_primitives::Recovered; - use reth_provider::test_utils::MockEthProvider; - use reth_transaction_pool::{ - blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, TransactionOrigin, - TransactionValidationOutcome, - }; - #[test] - fn validate_optimism_transaction() { - let client = MockEthProvider::default(); - let validator = EthTransactionValidatorBuilder::new(MAINNET.clone()) - .no_shanghai() - .no_cancun() - .build(client, InMemoryBlobStore::default()); - let validator = OpTransactionValidator::new(validator); - - let origin = TransactionOrigin::External; - let signer = Default::default(); - let deposit_tx = OpTypedTransaction::Deposit(TxDeposit { - source_hash: Default::default(), - from: signer, - to: TxKind::Create, - mint: None, - value: U256::ZERO, - gas_limit: 0, - is_system_transaction: false, - input: Default::default(), - }); - let signature = Signature::test_signature(); - let signed_tx = OpTransactionSigned::new_unhashed(deposit_tx, signature); - let signed_recovered = Recovered::new_unchecked(signed_tx, signer); - let len = signed_recovered.encode_2718_len(); - let pooled_tx = OpPooledTransaction::new(signed_recovered, len); - let outcome = validator.validate_one(origin, pooled_tx); - - let err = match outcome { - TransactionValidationOutcome::Invalid(_, err) => err, - _ => panic!("Expected invalid transaction"), - }; - assert_eq!(err.to_string(), "transaction type not supported"); - } -} diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs index 95875a767366..b92466aaf762 100644 --- a/crates/optimism/node/src/utils.rs 
+++ b/crates/optimism/node/src/utils.rs @@ -7,6 +7,7 @@ use reth_e2e_test_utils::{ }; use reth_node_api::NodeTypesWithDBAdapter; use reth_optimism_chainspec::OpChainSpecBuilder; +use reth_optimism_primitives::OpTransactionSigned; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_provider::providers::BlockchainProvider; use reth_tasks::TaskManager; @@ -35,7 +36,7 @@ pub async fn advance_chain( length: usize, node: &mut OpNode, wallet: Arc>, -) -> eyre::Result> { +) -> eyre::Result)>> { node.advance(length as u64, |_| { let wallet = wallet.clone(); Box::pin(async move { @@ -53,7 +54,7 @@ pub async fn advance_chain( } /// Helper function to create a new eth payload attributes -pub fn optimism_payload_attributes(timestamp: u64) -> OpPayloadBuilderAttributes { +pub fn optimism_payload_attributes(timestamp: u64) -> OpPayloadBuilderAttributes { let attributes = PayloadAttributes { timestamp, prev_randao: B256::ZERO, diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index f110b44ee957..b1169c832061 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -1,6 +1,6 @@ //! Node builder test that customizes priority of transactions in the block. 
-use alloy_consensus::TxEip1559; +use alloy_consensus::{SignableTransaction, TxEip1559}; use alloy_genesis::Genesis; use alloy_network::TxSignerSync; use alloy_primitives::{Address, ChainId, TxKind}; @@ -22,16 +22,20 @@ use reth_optimism_node::{ OpAddOns, OpConsensusBuilder, OpExecutorBuilder, OpNetworkBuilder, OpPayloadBuilder, OpPoolBuilder, }, + txpool::OpPooledTransaction, utils::optimism_payload_attributes, OpEngineTypes, OpNode, }; use reth_optimism_payload_builder::builder::OpPayloadTransactions; -use reth_optimism_primitives::{OpPrimitives, OpTransactionSigned}; -use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; +use reth_optimism_primitives::OpPrimitives; +use reth_payload_util::{ + BestPayloadTransactions, PayloadTransactions, PayloadTransactionsChain, + PayloadTransactionsFixed, +}; use reth_primitives::Recovered; use reth_provider::providers::BlockchainProvider; use reth_tasks::TaskManager; -use reth_transaction_pool::{pool::BestPayloadTransactions, PoolTransaction}; +use reth_transaction_pool::PoolTransaction; use std::sync::Arc; use tokio::sync::Mutex; @@ -40,16 +44,14 @@ struct CustomTxPriority { chain_id: ChainId, } -impl OpPayloadTransactions for CustomTxPriority { +impl OpPayloadTransactions for CustomTxPriority { fn best_transactions( &self, pool: Pool, attr: reth_transaction_pool::BestTransactionsAttributes, - ) -> impl PayloadTransactions + ) -> impl PayloadTransactions where - Pool: reth_transaction_pool::TransactionPool< - Transaction: PoolTransaction, - >, + Pool: reth_transaction_pool::TransactionPool, { // Block composition: // 1. 
Best transactions from the pool (up to 250k gas) @@ -67,13 +69,12 @@ impl OpPayloadTransactions for CustomTxPriority { ..Default::default() }; let signature = sender.sign_transaction_sync(&mut end_of_block_tx).unwrap(); - let end_of_block_tx = Recovered::new_unchecked( - OpTransactionSigned::new_unhashed( - OpTypedTransaction::Eip1559(end_of_block_tx), - signature, + let end_of_block_tx = OpPooledTransaction::from_pooled(Recovered::new_unchecked( + op_alloy_consensus::OpPooledTransaction::Eip1559( + end_of_block_tx.into_signed(signature), ), sender.address(), - ); + )); PayloadTransactionsChain::new( BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)), diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index d7248c0b6355..6339e7876d85 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -4,45 +4,52 @@ use crate::{ config::{OpBuilderConfig, OpDAConfig}, error::OpPayloadBuilderError, payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, + OpPayloadPrimitives, +}; +use alloy_consensus::{ + constants::EMPTY_WITHDRAWALS, Eip658Value, Header, Transaction, Typed2718, + EMPTY_OMMER_ROOT_HASH, }; -use alloy_consensus::{Eip658Value, Header, Transaction, Typed2718, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip4895::Withdrawals, merge::BEACON_NONCE}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; -use op_alloy_consensus::{OpDepositReceipt, OpTxType}; +use op_alloy_consensus::OpDepositReceipt; use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_basic_payload_builder::*; use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates}; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_evm::{ - env::EvmEnv, system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, Database, Evm, + env::EvmEnv, system_calls::SystemCaller, 
ConfigureEvmEnv, ConfigureEvmFor, Database, Evm, EvmError, InvalidTxError, NextBlockEnvAttributes, }; use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; +use reth_optimism_evm::{OpReceiptBuilder, ReceiptBuilderCtx}; use reth_optimism_forks::OpHardforks; -use reth_optimism_primitives::{OpPrimitives, OpReceipt, OpTransactionSigned}; +use reth_optimism_primitives::{ + transaction::signed::OpTransaction, ADDRESS_L2_TO_L1_MESSAGE_PASSER, +}; use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; -use reth_payload_util::{NoopPayloadTransactions, PayloadTransactions}; +use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; use reth_primitives::{ - transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, SealedHeader, + transaction::SignedTransactionIntoRecoveredExt, BlockBody, NodePrimitives, SealedHeader, }; use reth_primitives_traits::{block::Block as _, proofs, RecoveredBlock}; use reth_provider::{ HashedPostStateProvider, ProviderError, StateProofProvider, StateProviderFactory, - StateRootProvider, + StateRootProvider, StorageRootProvider, }; -use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; -use reth_transaction_pool::{ - pool::BestPayloadTransactions, BestTransactionsAttributes, PoolTransaction, TransactionPool, +use reth_revm::{ + cancelled::CancelOnDrop, database::StateProviderDatabase, witness::ExecutionWitnessRecord, }; +use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; use revm::{ db::{states::bundle_state::BundleRetention, State}, - primitives::ResultAndState, + primitives::{ExecutionResult, ResultAndState}, DatabaseCommit, }; use std::{fmt::Display, sync::Arc}; @@ -50,34 +57,61 @@ use tracing::{debug, trace, warn}; /// Optimism's payload builder #[derive(Debug, Clone)] -pub 
struct OpPayloadBuilder { +pub struct OpPayloadBuilder { /// The rollup's compute pending block configuration option. // TODO(clabby): Implement this feature. pub compute_pending_block: bool, /// The type responsible for creating the evm. pub evm_config: EvmConfig, + /// Transaction pool. + pub pool: Pool, + /// Node client. + pub client: Client, /// Settings for the builder, e.g. DA settings. pub config: OpBuilderConfig, /// The type responsible for yielding the best transactions for the payload if mempool /// transactions are allowed. pub best_transactions: Txs, + /// Node primitive types. + pub receipt_builder: Arc>, } -impl OpPayloadBuilder { +impl OpPayloadBuilder { /// `OpPayloadBuilder` constructor. /// /// Configures the builder with the default settings. - pub fn new(evm_config: EvmConfig) -> Self { - Self::with_builder_config(evm_config, Default::default()) + pub fn new( + pool: Pool, + client: Client, + evm_config: EvmConfig, + receipt_builder: impl OpReceiptBuilder, + ) -> Self { + Self::with_builder_config(pool, client, evm_config, receipt_builder, Default::default()) } /// Configures the builder with the given [`OpBuilderConfig`]. - pub const fn with_builder_config(evm_config: EvmConfig, config: OpBuilderConfig) -> Self { - Self { compute_pending_block: true, evm_config, config, best_transactions: () } + pub fn with_builder_config( + pool: Pool, + client: Client, + evm_config: EvmConfig, + receipt_builder: impl OpReceiptBuilder, + config: OpBuilderConfig, + ) -> Self { + Self { + pool, + client, + compute_pending_block: true, + receipt_builder: Arc::new(receipt_builder), + evm_config, + config, + best_transactions: (), + } } } -impl OpPayloadBuilder { +impl + OpPayloadBuilder +{ /// Sets the rollup's compute pending block configuration option. 
pub const fn set_compute_pending_block(mut self, compute_pending_block: bool) -> Self { self.compute_pending_block = compute_pending_block; @@ -86,12 +120,22 @@ impl OpPayloadBuilder { /// Configures the type responsible for yielding the transactions that should be included in the /// payload. - pub fn with_transactions( + pub fn with_transactions( self, best_transactions: T, - ) -> OpPayloadBuilder { - let Self { compute_pending_block, evm_config, config, .. } = self; - OpPayloadBuilder { compute_pending_block, evm_config, best_transactions, config } + ) -> OpPayloadBuilder { + let Self { + pool, client, compute_pending_block, evm_config, config, receipt_builder, .. + } = self; + OpPayloadBuilder { + pool, + client, + compute_pending_block, + evm_config, + best_transactions, + config, + receipt_builder, + } } /// Enables the rollup's compute pending block configuration option. @@ -104,9 +148,13 @@ impl OpPayloadBuilder { self.compute_pending_block } } -impl OpPayloadBuilder + +impl OpPayloadBuilder where - EvmConfig: ConfigureEvm
, + Pool: TransactionPool>, + Client: StateProviderFactory + ChainSpecProvider, + N: OpPayloadPrimitives, + EvmConfig: ConfigureEvmFor, { /// Constructs an Optimism payload from the transactions sent via the /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in @@ -116,35 +164,34 @@ where /// Given build arguments including an Optimism client, transaction pool, /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. - fn build_payload<'a, Client, Pool, Txs>( + fn build_payload<'a, Txs>( &self, - args: BuildArguments, + args: BuildArguments, OpBuiltPayload>, best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a, - ) -> Result, PayloadBuilderError> + ) -> Result>, PayloadBuilderError> where - Client: StateProviderFactory + ChainSpecProvider, - Txs: PayloadTransactions, + Txs: PayloadTransactions>, { let evm_env = self .evm_env(&args.config.attributes, &args.config.parent_header) .map_err(PayloadBuilderError::other)?; - let BuildArguments { client, pool: _, mut cached_reads, config, cancel, best_payload } = - args; + let BuildArguments { mut cached_reads, config, cancel, best_payload } = args; let ctx = OpPayloadBuilderCtx { evm_config: self.evm_config.clone(), da_config: self.config.da_config.clone(), - chain_spec: client.chain_spec(), + chain_spec: self.client.chain_spec(), config, evm_env, cancel, best_payload, + receipt_builder: self.receipt_builder.clone(), }; let builder = OpBuilder::new(best); - let state_provider = client.state_by_block_hash(ctx.parent().hash())?; + let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?; let state = StateProviderDatabase::new(state_provider); if ctx.attributes().no_tx_pool { @@ -165,7 +212,7 @@ where /// (that has the `parent` as its parent). 
pub fn evm_env( &self, - attributes: &OpPayloadBuilderAttributes, + attributes: &OpPayloadBuilderAttributes, parent: &Header, ) -> Result, EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { @@ -178,62 +225,61 @@ where } /// Computes the witness for the payload. - pub fn payload_witness( + pub fn payload_witness( &self, - client: &Client, parent: SealedHeader, attributes: OpPayloadAttributes, - ) -> Result - where - Client: StateProviderFactory + ChainSpecProvider, - { + ) -> Result { let attributes = OpPayloadBuilderAttributes::try_new(parent.hash(), attributes, 3) .map_err(PayloadBuilderError::other)?; let evm_env = self.evm_env(&attributes, &parent).map_err(PayloadBuilderError::other)?; let config = PayloadConfig { parent_header: Arc::new(parent), attributes }; - let ctx = OpPayloadBuilderCtx { + let ctx: OpPayloadBuilderCtx = OpPayloadBuilderCtx { evm_config: self.evm_config.clone(), da_config: self.config.da_config.clone(), - chain_spec: client.chain_spec(), + chain_spec: self.client.chain_spec(), config, evm_env, cancel: Default::default(), best_payload: Default::default(), + receipt_builder: self.receipt_builder.clone(), }; - let state_provider = client.state_by_block_hash(ctx.parent().hash())?; + let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?; let state = StateProviderDatabase::new(state_provider); let mut state = State::builder().with_database(state).with_bundle_update().build(); - let builder = OpBuilder::new(|_| NoopPayloadTransactions::default()); + let builder = OpBuilder::new(|_| NoopPayloadTransactions::::default()); builder.witness(&mut state, &ctx) } } /// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. -impl PayloadBuilder for OpPayloadBuilder +impl PayloadBuilder + for OpPayloadBuilder where - Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool>, - EvmConfig: ConfigureEvm
, - Txs: OpPayloadTransactions, + Client: StateProviderFactory + ChainSpecProvider + Clone, + N: OpPayloadPrimitives, + Pool: TransactionPool>, + EvmConfig: ConfigureEvmFor, + Txs: OpPayloadTransactions, { - type Attributes = OpPayloadBuilderAttributes; - type BuiltPayload = OpBuiltPayload; + type Attributes = OpPayloadBuilderAttributes; + type BuiltPayload = OpBuiltPayload; fn try_build( &self, - args: BuildArguments, - ) -> Result, PayloadBuilderError> { - let pool = args.pool.clone(); + args: BuildArguments, + ) -> Result, PayloadBuilderError> { + let pool = self.pool.clone(); self.build_payload(args, |attrs| self.best_transactions.best_transactions(pool, attrs)) } fn on_missing_payload( &self, - _args: BuildArguments, + _args: BuildArguments, ) -> MissingPayloadBehaviour { // we want to await the job that's already in progress because that should be returned as // is, there's no benefit in racing another job @@ -244,19 +290,15 @@ where // system txs, hence on_missing_payload we return [MissingPayloadBehaviour::AwaitInProgress]. fn build_empty_payload( &self, - client: &Client, config: PayloadConfig, - ) -> Result { + ) -> Result { let args = BuildArguments { - client, config, - // we use defaults here because for the empty payload we don't need to execute anything - pool: (), cached_reads: Default::default(), cancel: Default::default(), best_payload: None, }; - self.build_payload(args, |_| NoopPayloadTransactions::default())? + self.build_payload(args, |_| NoopPayloadTransactions::::default())? .into_payload() .ok_or_else(|| PayloadBuilderError::MissingPayload) } @@ -290,19 +332,19 @@ impl<'a, Txs> OpBuilder<'a, Txs> { } } -impl OpBuilder<'_, Txs> -where - Txs: PayloadTransactions, -{ +impl OpBuilder<'_, Txs> { /// Executes the payload and returns the outcome. 
- pub fn execute( + pub fn execute( self, state: &mut State, - ctx: &OpPayloadBuilderCtx, - ) -> Result, PayloadBuilderError> + ctx: &OpPayloadBuilderCtx, + ) -> Result>, PayloadBuilderError> where - EvmConfig: ConfigureEvm
, - DB: Database, + N: OpPayloadPrimitives, + Txs: PayloadTransactions>, + EvmConfig: ConfigureEvmFor, + DB: Database + AsRef

, + P: StorageRootProvider, { let Self { best } = self; debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); @@ -330,25 +372,42 @@ where } } - let withdrawals_root = ctx.commit_withdrawals(state)?; - // merge all transitions into bundle state, this would apply the withdrawal balance changes // and 4788 contract call state.merge_transitions(BundleRetention::Reverts); - Ok(BuildOutcomeKind::Better { payload: ExecutedPayload { info, withdrawals_root } }) + let withdrawals_root = if ctx.is_isthmus_active() { + // withdrawals root field in block header is used for storage root of L2 predeploy + // `l2tol1-message-passer` + Some( + state + .database + .as_ref() + .storage_root(ADDRESS_L2_TO_L1_MESSAGE_PASSER, Default::default())?, + ) + } else if ctx.is_canyon_active() { + Some(EMPTY_WITHDRAWALS) + } else { + None + }; + + let payload = ExecutedPayload { info, withdrawals_root }; + + Ok(BuildOutcomeKind::Better { payload }) } /// Builds the payload on top of the state. - pub fn build( + pub fn build( self, mut state: State, - ctx: OpPayloadBuilderCtx, - ) -> Result, PayloadBuilderError> + ctx: OpPayloadBuilderCtx, + ) -> Result>, PayloadBuilderError> where - EvmConfig: ConfigureEvm

, + EvmConfig: ConfigureEvmFor, + N: OpPayloadPrimitives, + Txs: PayloadTransactions>, DB: Database + AsRef

, - P: StateRootProvider + HashedPostStateProvider, + P: StateRootProvider + HashedPostStateProvider + StorageRootProvider, { let ExecutedPayload { info, withdrawals_root } = match self.execute(&mut state, &ctx)? { BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, @@ -422,20 +481,20 @@ where }; // seal the block - let block = Block { + let block = N::Block::new( header, - body: BlockBody { + BlockBody { transactions: info.executed_transactions, ommers: vec![], withdrawals: ctx.withdrawals().cloned(), }, - }; + ); let sealed_block = Arc::new(block.seal_slow()); debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header(), "sealed built block"); // create the executed block data - let executed: ExecutedBlockWithTrieUpdates = ExecutedBlockWithTrieUpdates { + let executed: ExecutedBlockWithTrieUpdates = ExecutedBlockWithTrieUpdates { block: ExecutedBlock { recovered_block: Arc::new(RecoveredBlock::new_sealed( sealed_block.as_ref().clone(), @@ -449,7 +508,8 @@ where let no_tx_pool = ctx.attributes().no_tx_pool; - let payload = OpBuiltPayload::new(ctx.payload_id(), info.total_fees, executed); + let payload = + OpBuiltPayload::new(ctx.payload_id(), sealed_block, info.total_fees, Some(executed)); if no_tx_pool { // if `no_tx_pool` is set only transactions from the payload attributes will be included @@ -462,15 +522,17 @@ where } /// Builds the payload and returns its [`ExecutionWitness`] based on the state after execution. - pub fn witness( + pub fn witness( self, state: &mut State, - ctx: &OpPayloadBuilderCtx, + ctx: &OpPayloadBuilderCtx, ) -> Result where - EvmConfig: ConfigureEvm

, + EvmConfig: ConfigureEvmFor, + N: OpPayloadPrimitives, + Txs: PayloadTransactions>, DB: Database + AsRef

, - P: StateProofProvider, + P: StateProofProvider + StorageRootProvider, { let _ = self.execute(state, ctx)?; let ExecutionWitnessRecord { hashed_state, codes, keys } = @@ -481,48 +543,44 @@ where } /// A type that returns a the [`PayloadTransactions`] that should be included in the pool. -pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { +pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { /// Returns an iterator that yields the transaction in the order they should get included in the /// new payload. - fn best_transactions< - Pool: TransactionPool>, - >( + fn best_transactions>( &self, pool: Pool, attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions; + ) -> impl PayloadTransactions; } -impl OpPayloadTransactions for () { - fn best_transactions< - Pool: TransactionPool>, - >( +impl OpPayloadTransactions for () { + fn best_transactions>( &self, pool: Pool, attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions { + ) -> impl PayloadTransactions { BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)) } } /// Holds the state after execution #[derive(Debug)] -pub struct ExecutedPayload { +pub struct ExecutedPayload { /// Tracked execution info - pub info: ExecutionInfo, + pub info: ExecutionInfo, /// Withdrawal hash. pub withdrawals_root: Option, } /// This acts as the container for executed transactions and its byproducts (receipts, gas used) #[derive(Default, Debug)] -pub struct ExecutionInfo { +pub struct ExecutionInfo { /// All executed transactions (unrecovered). - pub executed_transactions: Vec, + pub executed_transactions: Vec, /// The recovered senders for the executed transactions. pub executed_senders: Vec

, /// The transaction receipts - pub receipts: Vec, + pub receipts: Vec, /// All gas used so far pub cumulative_gas_used: u64, /// Estimated DA size @@ -531,7 +589,7 @@ pub struct ExecutionInfo { pub total_fees: U256, } -impl ExecutionInfo { +impl ExecutionInfo { /// Create a new instance with allocated slots. pub fn with_capacity(capacity: usize) -> Self { Self { @@ -552,7 +610,7 @@ impl ExecutionInfo { /// maximum allowed DA limit per block. pub fn is_tx_over_limits( &self, - tx: &OpTransactionSigned, + tx: &N::SignedTx, block_gas_limit: u64, tx_data_limit: Option, block_data_limit: Option, @@ -573,7 +631,7 @@ impl ExecutionInfo { /// Container type that holds all necessities to build a new payload. #[derive(Debug)] -pub struct OpPayloadBuilderCtx { +pub struct OpPayloadBuilderCtx { /// The type that knows how to perform system calls and configure the evm. pub evm_config: EvmConfig, /// The DA config for the payload builder @@ -581,16 +639,18 @@ pub struct OpPayloadBuilderCtx { /// The chainspec pub chain_spec: Arc, /// How to build the payload. - pub config: PayloadConfig, + pub config: PayloadConfig>, /// Evm Settings pub evm_env: EvmEnv, /// Marker to check whether the job has been cancelled. - pub cancel: Cancelled, + pub cancel: CancelOnDrop, /// The currently best payload. - pub best_payload: Option, + pub best_payload: Option>, + /// Receipt builder. + pub receipt_builder: Arc>, } -impl OpPayloadBuilderCtx { +impl OpPayloadBuilderCtx { /// Returns the parent block the payload will be build on. #[allow(clippy::missing_const_for_fn)] pub fn parent(&self) -> &SealedHeader { @@ -598,7 +658,7 @@ impl OpPayloadBuilderCtx { } /// Returns the builder attributes. 
- pub const fn attributes(&self) -> &OpPayloadBuilderAttributes { + pub const fn attributes(&self) -> &OpPayloadBuilderAttributes { &self.config.attributes } @@ -692,24 +752,16 @@ impl OpPayloadBuilderCtx { self.chain_spec.is_holocene_active_at_timestamp(self.attributes().timestamp()) } + /// Returns true if isthmus is active for the payload. + pub fn is_isthmus_active(&self) -> bool { + self.chain_spec.is_isthmus_active_at_timestamp(self.attributes().timestamp()) + } + /// Returns true if the fees are higher than the previous payload. pub fn is_better_payload(&self, total_fees: U256) -> bool { is_better_payload(self.best_payload.as_ref(), total_fees) } - /// Commits the withdrawals from the payload attributes to the state. - pub fn commit_withdrawals(&self, db: &mut State) -> Result, ProviderError> - where - DB: Database, - { - commit_withdrawals( - db, - &self.chain_spec, - self.attributes().payload_attributes.timestamp, - &self.attributes().payload_attributes.withdrawals, - ) - } - /// Ensure that the create2deployer is force-deployed at the canyon transition. Optimism /// blocks will always have at least a single transaction in them (the L1 info transaction), /// so we can safely assume that this will always be triggered upon the transition and that @@ -731,9 +783,10 @@ impl OpPayloadBuilderCtx { } } -impl OpPayloadBuilderCtx +impl OpPayloadBuilderCtx where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvmFor, + N: OpPayloadPrimitives, { /// apply eip-4788 pre block contract call pub fn apply_pre_beacon_root_contract_call( @@ -762,11 +815,48 @@ where Ok(()) } + /// Constructs a receipt for the given transaction. + fn build_receipt( + &self, + info: &ExecutionInfo, + result: ExecutionResult, + deposit_nonce: Option, + tx: &N::SignedTx, + ) -> N::Receipt { + match self.receipt_builder.build_receipt(ReceiptBuilderCtx { + tx, + result, + cumulative_gas_used: info.cumulative_gas_used, + }) { + Ok(receipt) => receipt, + Err(ctx) => { + let receipt = alloy_consensus::Receipt { + // Success flag was added in `EIP-658: Embedding transaction status code + // in receipts`. + status: Eip658Value::Eip658(ctx.result.is_success()), + cumulative_gas_used: ctx.cumulative_gas_used, + logs: ctx.result.into_logs(), + }; + + self.receipt_builder.build_deposit_receipt(OpDepositReceipt { + inner: receipt, + deposit_nonce, + // The deposit receipt version was introduced in Canyon to indicate an + // update to how receipt hashes should be computed + // when set. The state transition process ensures + // this is only set for post-Canyon deposit + // transactions. + deposit_receipt_version: self.is_canyon_active().then_some(1), + }) + } + } + } + /// Executes all sequencer transactions that are included in the payload attributes. pub fn execute_sequencer_transactions( &self, db: &mut State, - ) -> Result + ) -> Result, PayloadBuilderError> where DB: Database, { @@ -794,11 +884,11 @@ where // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces // were not introduced in Bedrock. In addition, regular transactions don't have deposit // nonces, so we don't need to touch the DB for those. 
- let depositor = (self.is_regolith_active() && sequencer_tx.is_deposit()) + let depositor_nonce = (self.is_regolith_active() && sequencer_tx.is_deposit()) .then(|| { evm.db_mut() .load_cache_account(sequencer_tx.signer()) - .map(|acc| acc.account_info().unwrap_or_default()) + .map(|acc| acc.account_info().unwrap_or_default().nonce) }) .transpose() .map_err(|_| { @@ -829,28 +919,13 @@ where // add gas used by the transaction to cumulative gas used, before creating the receipt info.cumulative_gas_used += gas_used; - let receipt = alloy_consensus::Receipt { - status: Eip658Value::Eip658(result.is_success()), - cumulative_gas_used: info.cumulative_gas_used, - logs: result.into_logs().into_iter().collect(), - }; - // Push transaction changeset and calculate header bloom filter for receipt. - info.receipts.push(match sequencer_tx.tx_type() { - OpTxType::Legacy => OpReceipt::Legacy(receipt), - OpTxType::Eip2930 => OpReceipt::Eip2930(receipt), - OpTxType::Eip1559 => OpReceipt::Eip1559(receipt), - OpTxType::Eip7702 => OpReceipt::Eip7702(receipt), - OpTxType::Deposit => OpReceipt::Deposit(OpDepositReceipt { - inner: receipt, - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to - // how receipt hashes should be computed when set. The state - // transition process ensures this is only set for - // post-Canyon deposit transactions. - deposit_receipt_version: self.is_canyon_active().then_some(1), - }), - }); + info.receipts.push(self.build_receipt( + &info, + result, + depositor_nonce, + sequencer_tx.tx(), + )); // append sender and transaction to the respective lists info.executed_senders.push(sequencer_tx.signer()); @@ -865,9 +940,11 @@ where /// Returns `Ok(Some(())` if the job was cancelled. 
pub fn execute_best_transactions( &self, - info: &mut ExecutionInfo, + info: &mut ExecutionInfo, db: &mut State, - mut best_txs: impl PayloadTransactions, + mut best_txs: impl PayloadTransactions< + Transaction: PoolTransaction, + >, ) -> Result, PayloadBuilderError> where DB: Database, @@ -880,6 +957,7 @@ where let mut evm = self.evm_config.evm_with_env(&mut *db, self.evm_env.clone()); while let Some(tx) = best_txs.next(()) { + let tx = tx.into_consensus(); if info.is_tx_over_limits(tx.tx(), block_gas_limit, tx_da_limit, block_da_limit) { // we can't fit this transaction into the block, so we need to mark it as // invalid which also removes all dependent transaction from @@ -889,7 +967,7 @@ where } // A sequencer's block should never contain blob or deposit transactions from the pool. - if tx.is_eip4844() || tx.tx_type() == OpTxType::Deposit { + if tx.is_eip4844() || tx.is_deposit() { best_txs.mark_invalid(tx.signer(), tx.nonce()); continue } @@ -933,24 +1011,8 @@ where info.cumulative_gas_used += gas_used; info.cumulative_da_bytes_used += tx.length() as u64; - let receipt = alloy_consensus::Receipt { - status: Eip658Value::Eip658(result.is_success()), - cumulative_gas_used: info.cumulative_gas_used, - logs: result.into_logs().into_iter().collect(), - }; - // Push transaction changeset and calculate header bloom filter for receipt. 
- info.receipts.push(match tx.tx_type() { - OpTxType::Legacy => OpReceipt::Legacy(receipt), - OpTxType::Eip2930 => OpReceipt::Eip2930(receipt), - OpTxType::Eip1559 => OpReceipt::Eip1559(receipt), - OpTxType::Eip7702 => OpReceipt::Eip7702(receipt), - OpTxType::Deposit => OpReceipt::Deposit(OpDepositReceipt { - inner: receipt, - deposit_nonce: None, - deposit_receipt_version: None, - }), - }); + info.receipts.push(self.build_receipt(info, result, None, &tx)); // update add to total fees let miner_fee = tx diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index 7ba359fc83e5..d74785b85d8a 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -17,5 +17,7 @@ pub use builder::OpPayloadBuilder; pub mod error; pub mod payload; pub use payload::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes}; +mod traits; +pub use traits::*; pub mod config; diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index d4b41932c4e4..da2f64c73fa8 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -1,5 +1,7 @@ //! 
Payload related types +use std::{fmt::Debug, sync::Arc}; + use alloy_eips::{ eip1559::BaseFeeParams, eip2718::Decodable2718, eip4895::Withdrawals, eip7685::Requests, }; @@ -14,28 +16,41 @@ use op_alloy_consensus::{encode_holocene_extra_data, EIP1559ParamError}; pub use op_alloy_rpc_types_engine::OpPayloadAttributes; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; use reth_chain_state::ExecutedBlockWithTrieUpdates; -use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; +use reth_optimism_primitives::OpPrimitives; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives::{transaction::WithEncoded, SealedBlock}; +use reth_primitives::{transaction::WithEncoded, Block, NodePrimitives, SealedBlock}; +use reth_primitives_traits::SignedTransaction; /// Optimism Payload Builder Attributes -#[derive(Debug, Clone, PartialEq, Eq, Default)] -pub struct OpPayloadBuilderAttributes { +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct OpPayloadBuilderAttributes { /// Inner ethereum payload builder attributes pub payload_attributes: EthPayloadBuilderAttributes, /// `NoTxPool` option for the generated payload pub no_tx_pool: bool, /// Decoded transactions and the original EIP-2718 encoded bytes as received in the payload /// attributes. - pub transactions: Vec>, + pub transactions: Vec>, /// The gas limit for the generated payload pub gas_limit: Option, /// EIP-1559 parameters for the generated payload pub eip_1559_params: Option, } -impl OpPayloadBuilderAttributes { +impl Default for OpPayloadBuilderAttributes { + fn default() -> Self { + Self { + payload_attributes: Default::default(), + no_tx_pool: Default::default(), + gas_limit: Default::default(), + eip_1559_params: Default::default(), + transactions: Default::default(), + } + } +} + +impl OpPayloadBuilderAttributes { /// Extracts the `eip1559` parameters for the payload. 
pub fn get_holocene_extra_data( &self, @@ -47,7 +62,9 @@ impl OpPayloadBuilderAttributes { } } -impl PayloadBuilderAttributes for OpPayloadBuilderAttributes { +impl PayloadBuilderAttributes + for OpPayloadBuilderAttributes +{ type RpcPayloadAttributes = OpPayloadAttributes; type Error = alloy_rlp::Error; @@ -127,25 +144,28 @@ impl PayloadBuilderAttributes for OpPayloadBuilderAttributes { /// Contains the built payload. #[derive(Debug, Clone)] -pub struct OpBuiltPayload { +pub struct OpBuiltPayload { /// Identifier of the payload pub(crate) id: PayloadId, + /// Sealed block + pub(crate) block: Arc>, /// Block execution data for the payload, if any. - pub(crate) block: ExecutedBlockWithTrieUpdates, + pub(crate) executed_block: Option>, /// The fees of the block pub(crate) fees: U256, } // === impl BuiltPayload === -impl OpBuiltPayload { +impl OpBuiltPayload { /// Initializes the payload with the given initial block. pub const fn new( id: PayloadId, + block: Arc>, fees: U256, - block: ExecutedBlockWithTrieUpdates, + executed_block: Option>, ) -> Self { - Self { id, block, fees } + Self { id, block, fees, executed_block } } /// Returns the identifier of the payload. 
@@ -154,9 +174,8 @@ impl OpBuiltPayload { } /// Returns the built block(sealed) - #[allow(clippy::missing_const_for_fn)] - pub fn block(&self) -> &SealedBlock { - self.block.sealed_block() + pub fn block(&self) -> &SealedBlock { + &self.block } /// Fees of the block @@ -165,10 +184,10 @@ impl OpBuiltPayload { } } -impl BuiltPayload for OpBuiltPayload { - type Primitives = OpPrimitives; +impl BuiltPayload for OpBuiltPayload { + type Primitives = N; - fn block(&self) -> &SealedBlock { + fn block(&self) -> &SealedBlock { self.block() } @@ -176,28 +195,8 @@ impl BuiltPayload for OpBuiltPayload { self.fees } - fn executed_block(&self) -> Option> { - Some(self.block.clone()) - } - - fn requests(&self) -> Option { - None - } -} - -impl BuiltPayload for &OpBuiltPayload { - type Primitives = OpPrimitives; - - fn block(&self) -> &SealedBlock { - (**self).block() - } - - fn fees(&self) -> U256 { - (**self).fees() - } - - fn executed_block(&self) -> Option> { - Some(self.block.clone()) + fn executed_block(&self) -> Option> { + self.executed_block.clone() } fn requests(&self) -> Option { @@ -206,42 +205,52 @@ impl BuiltPayload for &OpBuiltPayload { } // V1 engine_getPayloadV1 response -impl From for ExecutionPayloadV1 { - fn from(value: OpBuiltPayload) -> Self { +impl From> for ExecutionPayloadV1 +where + T: SignedTransaction, + N: NodePrimitives>, +{ + fn from(value: OpBuiltPayload) -> Self { Self::from_block_unchecked( value.block().hash(), - &value.block.into_sealed_block().into_block(), + &Arc::unwrap_or_clone(value.block).into_block(), ) } } // V2 engine_getPayloadV2 response -impl From for ExecutionPayloadEnvelopeV2 { - fn from(value: OpBuiltPayload) -> Self { +impl From> for ExecutionPayloadEnvelopeV2 +where + T: SignedTransaction, + N: NodePrimitives>, +{ + fn from(value: OpBuiltPayload) -> Self { let OpBuiltPayload { block, fees, .. 
} = value; - let block = block.into_sealed_block(); Self { block_value: fees, execution_payload: ExecutionPayloadFieldV2::from_block_unchecked( block.hash(), - &block.into_block(), + &Arc::unwrap_or_clone(block).into_block(), ), } } } -impl From for OpExecutionPayloadEnvelopeV3 { - fn from(value: OpBuiltPayload) -> Self { +impl From> for OpExecutionPayloadEnvelopeV3 +where + T: SignedTransaction, + N: NodePrimitives>, +{ + fn from(value: OpBuiltPayload) -> Self { let OpBuiltPayload { block, fees, .. } = value; - let parent_beacon_block_root = - block.sealed_block().parent_beacon_block_root.unwrap_or_default(); + let parent_beacon_block_root = block.parent_beacon_block_root.unwrap_or_default(); Self { execution_payload: ExecutionPayloadV3::from_block_unchecked( - block.sealed_block().hash(), - &block.into_sealed_block().into_block(), + block.hash(), + &Arc::unwrap_or_clone(block).into_block(), ), block_value: fees, // From the engine API spec: @@ -259,17 +268,21 @@ impl From for OpExecutionPayloadEnvelopeV3 { } } } -impl From for OpExecutionPayloadEnvelopeV4 { - fn from(value: OpBuiltPayload) -> Self { + +impl From> for OpExecutionPayloadEnvelopeV4 +where + T: SignedTransaction, + N: NodePrimitives>, +{ + fn from(value: OpBuiltPayload) -> Self { let OpBuiltPayload { block, fees, .. 
} = value; - let parent_beacon_block_root = - block.sealed_block().parent_beacon_block_root.unwrap_or_default(); + let parent_beacon_block_root = block.parent_beacon_block_root.unwrap_or_default(); Self { execution_payload: ExecutionPayloadV3::from_block_unchecked( - block.sealed_block().hash(), - &block.into_sealed_block().into_block(), + block.hash(), + &Arc::unwrap_or_clone(block).into_block(), ), block_value: fees, // From the engine API spec: @@ -348,6 +361,7 @@ mod tests { use crate::OpPayloadAttributes; use alloy_primitives::{address, b256, bytes, FixedBytes}; use alloy_rpc_types_engine::PayloadAttributes; + use reth_optimism_primitives::OpTransactionSigned; use reth_payload_primitives::EngineApiMessageVersion; use std::str::FromStr; @@ -384,17 +398,18 @@ mod tests { #[test] fn test_get_extra_data_post_holocene() { - let attributes = OpPayloadBuilderAttributes { - eip_1559_params: Some(B64::from_str("0x0000000800000008").unwrap()), - ..Default::default() - }; + let attributes: OpPayloadBuilderAttributes = + OpPayloadBuilderAttributes { + eip_1559_params: Some(B64::from_str("0x0000000800000008").unwrap()), + ..Default::default() + }; let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 8, 0, 0, 0, 8])); } #[test] fn test_get_extra_data_post_holocene_default() { - let attributes = + let attributes: OpPayloadBuilderAttributes = OpPayloadBuilderAttributes { eip_1559_params: Some(B64::ZERO), ..Default::default() }; let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 80, 0, 0, 0, 60])); diff --git a/crates/optimism/payload/src/traits.rs b/crates/optimism/payload/src/traits.rs new file mode 100644 index 000000000000..325e91218390 --- /dev/null +++ b/crates/optimism/payload/src/traits.rs @@ -0,0 +1,30 @@ +use alloy_consensus::{BlockBody, Header}; +use 
reth_optimism_primitives::{transaction::signed::OpTransaction, DepositReceipt}; +use reth_primitives::NodePrimitives; +use reth_primitives_traits::SignedTransaction; + +/// Helper trait to encapsulate common bounds on [`NodePrimitives`] for OP payload builder. +pub trait OpPayloadPrimitives: + NodePrimitives< + Receipt: DepositReceipt, + SignedTx = Self::_TX, + BlockHeader = Header, + BlockBody = BlockBody, +> +{ + /// Helper AT to bound [`NodePrimitives::Block`] type without causing bound cycle. + type _TX: SignedTransaction + OpTransaction; +} + +impl OpPayloadPrimitives for T +where + Tx: SignedTransaction + OpTransaction, + T: NodePrimitives< + SignedTx = Tx, + Receipt: DepositReceipt, + BlockHeader = Header, + BlockBody = BlockBody, + >, +{ + type _TX = Tx; +} diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 15d21dd6148e..77056420afc4 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -12,9 +12,11 @@ extern crate alloc; pub mod bedrock; -pub mod transaction; -use reth_primitives_traits::Block; +pub mod predeploys; +pub use predeploys::ADDRESS_L2_TO_L1_MESSAGE_PASSER; + +pub mod transaction; pub use transaction::{signed::OpTransactionSigned, tx_type::OpTxType}; mod receipt; @@ -24,7 +26,7 @@ pub use receipt::{DepositReceipt, OpReceipt}; pub type OpBlock = alloy_consensus::Block; /// Optimism-specific block body type. -pub type OpBlockBody = ::Body; +pub type OpBlockBody = ::Body; /// Primitive types for Optimism Node. #[derive(Debug, Default, Clone, PartialEq, Eq)] diff --git a/crates/optimism/primitives/src/predeploys.rs b/crates/optimism/primitives/src/predeploys.rs new file mode 100644 index 000000000000..c29f72a0daea --- /dev/null +++ b/crates/optimism/primitives/src/predeploys.rs @@ -0,0 +1,8 @@ +//! Addresses of OP pre-deploys. 
+// todo: move to op-alloy + +use alloy_primitives::{address, Address}; + +/// The L2 contract `L2ToL1MessagePasser`, stores commitments to withdrawal transactions. +pub const ADDRESS_L2_TO_L1_MESSAGE_PASSER: Address = + address!("4200000000000000000000000000000000000016"); diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs index 513dd6660f56..64790226b928 100644 --- a/crates/optimism/primitives/src/transaction/signed.rs +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -166,7 +166,6 @@ impl From for OpTransactionSigned { OpTxEnvelope::Eip1559(tx) => tx.into(), OpTxEnvelope::Eip7702(tx) => tx.into(), OpTxEnvelope::Deposit(tx) => tx.into(), - _ => unreachable!(), } } } diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index cd37a8d24886..7f12d2595184 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -59,6 +59,8 @@ where /// deposit transaction. #[derive(Debug, Clone)] pub struct OpReceiptFieldsBuilder { + /// Block number. + pub block_number: u64, /// Block timestamp. pub block_timestamp: u64, /// The L1 fee for transaction. @@ -87,8 +89,9 @@ pub struct OpReceiptFieldsBuilder { impl OpReceiptFieldsBuilder { /// Returns a new builder. - pub const fn new(block_timestamp: u64) -> Self { + pub const fn new(block_timestamp: u64, block_number: u64) -> Self { Self { + block_number, block_timestamp, l1_fee: None, l1_data_gas: None, @@ -110,18 +113,19 @@ impl OpReceiptFieldsBuilder { l1_block_info: &mut revm::L1BlockInfo, ) -> Result { let raw_tx = tx.encoded_2718(); + let block_number = self.block_number; let timestamp = self.block_timestamp; self.l1_fee = Some( l1_block_info - .l1_tx_data_fee(chain_spec, timestamp, &raw_tx, tx.is_deposit()) + .l1_tx_data_fee(chain_spec, timestamp, block_number, &raw_tx, tx.is_deposit()) .map_err(|_| OpEthApiError::L1BlockFeeError)? 
.saturating_to(), ); self.l1_data_gas = Some( l1_block_info - .l1_data_gas(chain_spec, timestamp, &raw_tx) + .l1_data_gas(chain_spec, timestamp, block_number, &raw_tx) .map_err(|_| OpEthApiError::L1BlockGasError)? .saturating_add(l1_block_info.l1_fee_overhead.unwrap_or_default()) .saturating_to(), @@ -154,6 +158,7 @@ impl OpReceiptFieldsBuilder { /// Builds the [`OpTransactionReceiptFields`] object. pub const fn build(self) -> OpTransactionReceiptFields { let Self { + block_number: _, // used to compute other fields block_timestamp: _, // used to compute other fields l1_fee, l1_data_gas: l1_gas_used, @@ -202,6 +207,7 @@ impl OpReceiptBuilder { l1_block_info: &mut revm::L1BlockInfo, ) -> Result { let timestamp = meta.timestamp; + let block_number = meta.block_number; let core_receipt = build_receipt(transaction, meta, receipt, all_receipts, None, |receipt_with_bloom| { match receipt { @@ -222,7 +228,7 @@ impl OpReceiptBuilder { } })?; - let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp) + let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp, block_number) .l1_block_info(chain_spec, transaction, l1_block_info)? 
.build(); @@ -304,7 +310,7 @@ mod test { // test assert!(OP_MAINNET.is_fjord_active_at_timestamp(BLOCK_124665056_TIMESTAMP)); - let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP) + let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) .l1_block_info(&OP_MAINNET, &tx_1, &mut l1_block_info) .expect("should parse revm l1 info") .build(); @@ -370,7 +376,7 @@ mod test { let tx = hex!("02f86c8221058034839a4ae283021528942f16386bb37709016023232523ff6d9daf444be380841249c58bc080a001b927eda2af9b00b52a57be0885e0303c39dd2831732e14051c2336470fd468a0681bf120baf562915841a48601c2b54a6742511e535cf8f71c95115af7ff63bd"); let tx_1 = OpTransactionSigned::decode_2718(&mut &tx[..]).unwrap(); - let receipt_meta = OpReceiptFieldsBuilder::new(1730216981) + let receipt_meta = OpReceiptFieldsBuilder::new(1730216981, 21713817) .l1_block_info(&BASE_MAINNET, &tx_1, &mut l1_block_info) .expect("should parse revm l1 info") .build(); diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 60a0295417e7..e2e6adf974ff 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -169,7 +169,6 @@ where std::mem::swap(tx, &mut deposit); return } - _ => return, }; *input = input.slice(..4); } diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs index 387e6597edc2..3bf9c30cc779 100644 --- a/crates/optimism/rpc/src/witness.rs +++ b/crates/optimism/rpc/src/witness.rs @@ -1,45 +1,48 @@ //! Support for optimism specific witness RPCs. 
-use alloy_consensus::Header; use alloy_primitives::B256; use alloy_rpc_types_debug::ExecutionWitness; use jsonrpsee_core::{async_trait, RpcResult}; use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_chainspec::ChainSpecProvider; -use reth_evm::ConfigureEvm; +use reth_evm::ConfigureEvmFor; +use reth_node_api::NodePrimitives; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_payload_builder::OpPayloadBuilder; -use reth_optimism_primitives::OpTransactionSigned; +use reth_optimism_payload_builder::{OpPayloadBuilder, OpPayloadPrimitives}; use reth_primitives::SealedHeader; -use reth_provider::{BlockReaderIdExt, ProviderError, ProviderResult, StateProviderFactory}; +use reth_provider::{ + BlockReaderIdExt, NodePrimitivesProvider, ProviderError, ProviderResult, StateProviderFactory, +}; pub use reth_rpc_api::DebugExecutionWitnessApiServer; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_tasks::TaskSpawner; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{fmt::Debug, sync::Arc}; use tokio::sync::{oneshot, Semaphore}; /// An extension to the `debug_` namespace of the RPC API. -pub struct OpDebugWitnessApi { - inner: Arc>, +pub struct OpDebugWitnessApi { + inner: Arc>, } -impl OpDebugWitnessApi { +impl + OpDebugWitnessApi +{ /// Creates a new instance of the `OpDebugWitnessApi`. pub fn new( provider: Provider, - evm_config: EvmConfig, task_spawner: Box, + builder: OpPayloadBuilder, ) -> Self { - let builder = OpPayloadBuilder::new(evm_config); let semaphore = Arc::new(Semaphore::new(3)); let inner = OpDebugWitnessApiInner { provider, builder, task_spawner, semaphore }; Self { inner: Arc::new(inner) } } } -impl OpDebugWitnessApi +impl OpDebugWitnessApi where - Provider: BlockReaderIdExt
, + Provider: NodePrimitivesProvider + BlockReaderIdExt
, { /// Fetches the parent header by hash. fn parent_header(&self, parent_block_hash: B256) -> ProviderResult { @@ -51,15 +54,21 @@ where } #[async_trait] -impl DebugExecutionWitnessApiServer - for OpDebugWitnessApi +impl DebugExecutionWitnessApiServer + for OpDebugWitnessApi where + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = ::SignedTx, + >, + > + 'static, Provider: BlockReaderIdExt
+ + NodePrimitivesProvider + StateProviderFactory + ChainSpecProvider + Clone + 'static, - EvmConfig: ConfigureEvm
+ 'static, + EvmConfig: ConfigureEvmFor + 'static, { async fn execute_payload( &self, @@ -73,8 +82,7 @@ where let (tx, rx) = oneshot::channel(); let this = self.clone(); self.inner.task_spawner.spawn_blocking(Box::pin(async move { - let res = - this.inner.builder.payload_witness(&this.inner.provider, parent_header, attributes); + let res = this.inner.builder.payload_witness(parent_header, attributes); let _ = tx.send(res); })); @@ -84,20 +92,26 @@ where } } -impl Clone for OpDebugWitnessApi { +impl Clone for OpDebugWitnessApi +where + Provider: NodePrimitivesProvider, +{ fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -impl Debug for OpDebugWitnessApi { +impl Debug for OpDebugWitnessApi +where + Provider: NodePrimitivesProvider, +{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("OpDebugWitnessApi").finish_non_exhaustive() } } -struct OpDebugWitnessApiInner { +struct OpDebugWitnessApiInner { provider: Provider, - builder: OpPayloadBuilder, + builder: OpPayloadBuilder, task_spawner: Box, semaphore: Arc, } diff --git a/crates/optimism/txpool/Cargo.toml b/crates/optimism/txpool/Cargo.toml new file mode 100644 index 000000000000..b4c115960d67 --- /dev/null +++ b/crates/optimism/txpool/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "reth-optimism-txpool" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "OP-Reth Transaction Pool" + +[lints] +workspace = true + +[dependencies] +# ethereum +alloy-consensus.workspace = true +alloy-eips.workspace = true +alloy-primitives.workspace = true +alloy-rpc-types-eth.workspace = true + +# reth +reth-chainspec.workspace = true +reth-primitives-traits.workspace = true +reth-storage-api.workspace = true +reth-transaction-pool.workspace = true + +# revm +revm.workspace = true + +# optimism +op-alloy-consensus.workspace = true +op-alloy-flz.workspace = 
true +reth-optimism-evm.workspace = true +reth-optimism-forks.workspace = true +reth-optimism-primitives = { workspace = true, features = ["reth-codec"] } + +# misc +c-kzg.workspace = true +derive_more.workspace = true +parking_lot.workspace = true + +[dev-dependencies] +reth-optimism-chainspec.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } + +[features] +optimism = [ + "reth-optimism-evm/optimism", + "reth-optimism-primitives/optimism", + "revm/optimism", +] +scroll = [] diff --git a/crates/optimism/txpool/src/lib.rs b/crates/optimism/txpool/src/lib.rs new file mode 100644 index 000000000000..3857f32c37b2 --- /dev/null +++ b/crates/optimism/txpool/src/lib.rs @@ -0,0 +1,27 @@ +//! OP-Reth Transaction pool. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(feature = "scroll", allow(unused_crate_dependencies))] +// The `optimism` feature must be enabled to use this crate. 
+#![cfg(all(feature = "optimism", not(feature = "scroll")))] + +mod validator; +pub use validator::{OpL1BlockInfo, OpTransactionValidator}; + +mod transaction; +pub use transaction::OpPooledTransaction; + +use reth_transaction_pool::{CoinbaseTipOrdering, Pool, TransactionValidationTaskExecutor}; + +/// Type alias for default optimism transaction pool +pub type OpTransactionPool = Pool< + TransactionValidationTaskExecutor>, + CoinbaseTipOrdering, + S, +>; diff --git a/crates/optimism/txpool/src/transaction.rs b/crates/optimism/txpool/src/transaction.rs new file mode 100644 index 000000000000..3cc83ed70720 --- /dev/null +++ b/crates/optimism/txpool/src/transaction.rs @@ -0,0 +1,280 @@ +use alloy_consensus::{ + transaction::Recovered, BlobTransactionSidecar, BlobTransactionValidationError, Typed2718, +}; +use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; +use alloy_primitives::{Address, Bytes, TxHash, TxKind, B256, U256}; +use alloy_rpc_types_eth::erc4337::TransactionConditional; +use c_kzg::KzgSettings; +use core::fmt::Debug; +use reth_optimism_primitives::OpTransactionSigned; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use reth_transaction_pool::{ + EthBlobTransactionSidecar, EthPoolTransaction, EthPooledTransaction, PoolTransaction, +}; +use std::sync::{Arc, OnceLock}; + +/// Pool transaction for OP. +/// +/// This type wraps the actual transaction and caches values that are frequently used by the pool. +/// For payload building this lazily tracks values that are required during payload building: +/// - Estimated compressed size of this transaction +#[derive(Debug, Clone, derive_more::Deref)] +pub struct OpPooledTransaction< + Cons = OpTransactionSigned, + Pooled = op_alloy_consensus::OpPooledTransaction, +> { + #[deref] + inner: EthPooledTransaction, + /// The estimated size of this transaction, lazily computed. + estimated_tx_compressed_size: OnceLock, + /// The pooled transaction type. 
+ _pd: core::marker::PhantomData, + + /// Optional conditional attached to this transaction. + conditional: Option>, +} + +impl OpPooledTransaction { + /// Create new instance of [Self]. + pub fn new(transaction: Recovered, encoded_length: usize) -> Self { + Self { + inner: EthPooledTransaction::new(transaction, encoded_length), + estimated_tx_compressed_size: Default::default(), + conditional: None, + _pd: core::marker::PhantomData, + } + } + + /// Returns the estimated compressed size of a transaction in bytes scaled by 1e6. + /// This value is computed based on the following formula: + /// `max(minTransactionSize, intercept + fastlzCoef*fastlzSize)` + pub fn estimated_compressed_size(&self) -> u64 { + *self.estimated_tx_compressed_size.get_or_init(|| { + op_alloy_flz::tx_estimated_size_fjord(&self.inner.transaction().encoded_2718()) + }) + } + + /// Conditional setter. + pub fn with_conditional(mut self, conditional: TransactionConditional) -> Self { + self.conditional = Some(Box::new(conditional)); + self + } + + /// Conditional getter. 
+ pub fn conditional(&self) -> Option<&TransactionConditional> { + self.conditional.as_deref() + } +} + +impl PoolTransaction for OpPooledTransaction +where + Cons: SignedTransaction + From, + Pooled: SignedTransaction + TryFrom, +{ + type TryFromConsensusError = >::Error; + type Consensus = Cons; + type Pooled = Pooled; + + fn clone_into_consensus(&self) -> Recovered { + self.inner.transaction().clone() + } + + fn into_consensus(self) -> Recovered { + self.inner.transaction + } + + fn from_pooled(tx: Recovered) -> Self { + let encoded_len = tx.encode_2718_len(); + let tx = tx.map_transaction(|tx| tx.into()); + Self::new(tx, encoded_len) + } + + fn hash(&self) -> &TxHash { + self.inner.transaction.tx_hash() + } + + fn sender(&self) -> Address { + self.inner.transaction.signer() + } + + fn sender_ref(&self) -> &Address { + self.inner.transaction.signer_ref() + } + + fn cost(&self) -> &U256 { + &self.inner.cost + } + + fn encoded_length(&self) -> usize { + self.inner.encoded_length + } +} + +impl Typed2718 for OpPooledTransaction { + fn ty(&self) -> u8 { + self.inner.ty() + } +} + +impl InMemorySize for OpPooledTransaction { + fn size(&self) -> usize { + self.inner.size() + } +} + +impl alloy_consensus::Transaction for OpPooledTransaction +where + Cons: alloy_consensus::Transaction, + Pooled: Debug + Send + Sync + 'static, +{ + fn chain_id(&self) -> Option { + self.inner.chain_id() + } + + fn nonce(&self) -> u64 { + self.inner.nonce() + } + + fn gas_limit(&self) -> u64 { + self.inner.gas_limit() + } + + fn gas_price(&self) -> Option { + self.inner.gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.inner.max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.inner.max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.inner.max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.inner.priority_fee_or_price() + } + + fn effective_gas_price(&self, base_fee: Option) -> u128 { + 
self.inner.effective_gas_price(base_fee) + } + + fn is_dynamic_fee(&self) -> bool { + self.inner.is_dynamic_fee() + } + + fn kind(&self) -> TxKind { + self.inner.kind() + } + + fn is_create(&self) -> bool { + self.inner.is_create() + } + + fn value(&self) -> U256 { + self.inner.value() + } + + fn input(&self) -> &Bytes { + self.inner.input() + } + + fn access_list(&self) -> Option<&AccessList> { + self.inner.access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.inner.blob_versioned_hashes() + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.inner.authorization_list() + } +} + +impl EthPoolTransaction for OpPooledTransaction +where + Cons: SignedTransaction + From, + Pooled: SignedTransaction + TryFrom, + >::Error: core::error::Error, +{ + fn take_blob(&mut self) -> EthBlobTransactionSidecar { + EthBlobTransactionSidecar::None + } + + fn try_into_pooled_eip4844( + self, + _sidecar: Arc, + ) -> Option> { + None + } + + fn try_from_eip4844( + _tx: Recovered, + _sidecar: BlobTransactionSidecar, + ) -> Option { + None + } + + fn validate_blob( + &self, + _sidecar: &BlobTransactionSidecar, + _settings: &KzgSettings, + ) -> Result<(), BlobTransactionValidationError> { + Err(BlobTransactionValidationError::NotBlobTransaction(self.ty())) + } +} + +#[cfg(test)] +mod tests { + use crate::{OpPooledTransaction, OpTransactionValidator}; + use alloy_consensus::transaction::Recovered; + use alloy_eips::eip2718::Encodable2718; + use alloy_primitives::{PrimitiveSignature as Signature, TxKind, U256}; + use op_alloy_consensus::{OpTypedTransaction, TxDeposit}; + use reth_optimism_chainspec::OP_MAINNET; + use reth_optimism_primitives::OpTransactionSigned; + use reth_provider::test_utils::MockEthProvider; + use reth_transaction_pool::{ + blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, TransactionOrigin, + TransactionValidationOutcome, + }; + #[test] + fn validate_optimism_transaction() { + let client = 
MockEthProvider::default().with_chain_spec(OP_MAINNET.clone()); + let validator = EthTransactionValidatorBuilder::new(client) + .no_shanghai() + .no_cancun() + .build(InMemoryBlobStore::default()); + let validator = OpTransactionValidator::new(validator); + + let origin = TransactionOrigin::External; + let signer = Default::default(); + let deposit_tx = OpTypedTransaction::Deposit(TxDeposit { + source_hash: Default::default(), + from: signer, + to: TxKind::Create, + mint: None, + value: U256::ZERO, + gas_limit: 0, + is_system_transaction: false, + input: Default::default(), + }); + let signature = Signature::test_signature(); + let signed_tx = OpTransactionSigned::new_unhashed(deposit_tx, signature); + let signed_recovered = Recovered::new_unchecked(signed_tx, signer); + let len = signed_recovered.encode_2718_len(); + let pooled_tx: OpPooledTransaction = OpPooledTransaction::new(signed_recovered, len); + let outcome = validator.validate_one(origin, pooled_tx); + + let err = match outcome { + TransactionValidationOutcome::Invalid(_, err) => err, + _ => panic!("Expected invalid transaction"), + }; + assert_eq!(err.to_string(), "transaction type not supported"); + } +} diff --git a/crates/optimism/txpool/src/validator.rs b/crates/optimism/txpool/src/validator.rs new file mode 100644 index 000000000000..cd41475dca79 --- /dev/null +++ b/crates/optimism/txpool/src/validator.rs @@ -0,0 +1,250 @@ +use alloy_consensus::{BlockHeader, Transaction}; +use alloy_eips::Encodable2718; +use parking_lot::RwLock; +use reth_chainspec::ChainSpecProvider; +use reth_optimism_evm::RethL1BlockInfo; +use reth_optimism_forks::OpHardforks; +use reth_primitives_traits::{ + transaction::error::InvalidTransactionError, Block, BlockBody, GotExpected, SealedBlock, +}; +use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; +use reth_transaction_pool::{ + EthPoolTransaction, EthTransactionValidator, TransactionOrigin, TransactionValidationOutcome, + TransactionValidator, +}; +use 
revm::L1BlockInfo; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, +}; + +/// Tracks additional infos for the current block. +#[derive(Debug, Default)] +pub struct OpL1BlockInfo { + /// The current L1 block info. + l1_block_info: RwLock, + /// Current block timestamp. + timestamp: AtomicU64, + /// Current block number. + number: AtomicU64, +} + +/// Validator for Optimism transactions. +#[derive(Debug, Clone)] +pub struct OpTransactionValidator { + /// The type that performs the actual validation. + inner: EthTransactionValidator, + /// Additional block info required for validation. + block_info: Arc, + /// If true, ensure that the transaction's sender has enough balance to cover the L1 gas fee + /// derived from the tracked L1 block info that is extracted from the first transaction in the + /// L2 block. + require_l1_data_gas_fee: bool, +} + +impl OpTransactionValidator { + /// Returns the configured chain spec + pub fn chain_spec(&self) -> Arc + where + Client: ChainSpecProvider, + { + self.inner.chain_spec() + } + + /// Returns the configured client + pub fn client(&self) -> &Client { + self.inner.client() + } + + /// Returns the current block timestamp. + fn block_timestamp(&self) -> u64 { + self.block_info.timestamp.load(Ordering::Relaxed) + } + + /// Returns the current block number. + fn block_number(&self) -> u64 { + self.block_info.number.load(Ordering::Relaxed) + } + + /// Whether to ensure that the transaction's sender has enough balance to also cover the L1 gas + /// fee. + pub fn require_l1_data_gas_fee(self, require_l1_data_gas_fee: bool) -> Self { + Self { require_l1_data_gas_fee, ..self } + } + + /// Returns whether this validator also requires the transaction's sender to have enough balance + /// to cover the L1 gas fee. 
+ pub const fn requires_l1_data_gas_fee(&self) -> bool { + self.require_l1_data_gas_fee + } +} + +impl OpTransactionValidator +where + Client: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt, + Tx: EthPoolTransaction, +{ + /// Create a new [`OpTransactionValidator`]. + pub fn new(inner: EthTransactionValidator) -> Self { + let this = Self::with_block_info(inner, OpL1BlockInfo::default()); + if let Ok(Some(block)) = + this.inner.client().block_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest) + { + // genesis block has no txs, so we can't extract L1 info, we set the block info to empty + // so that we will accept txs into the pool before the first block + if block.header().number() == 0 { + this.block_info.timestamp.store(block.header().timestamp(), Ordering::Relaxed); + this.block_info.number.store(block.header().number(), Ordering::Relaxed); + } else { + this.update_l1_block_info(block.header(), block.body().transactions().first()); + } + } + + this + } + + /// Create a new [`OpTransactionValidator`] with the given [`OpL1BlockInfo`]. + pub fn with_block_info( + inner: EthTransactionValidator, + block_info: OpL1BlockInfo, + ) -> Self { + Self { inner, block_info: Arc::new(block_info), require_l1_data_gas_fee: true } + } + + /// Update the L1 block info for the given header and system transaction, if any. + /// + /// Note: this supports optional system transaction, in case this is used in a dev setuo + pub fn update_l1_block_info(&self, header: &H, tx: Option<&T>) + where + H: BlockHeader, + T: Transaction, + { + self.block_info.timestamp.store(header.timestamp(), Ordering::Relaxed); + self.block_info.number.store(header.number(), Ordering::Relaxed); + + if let Some(Ok(cost_addition)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { + *self.block_info.l1_block_info.write() = cost_addition; + } + } + + /// Validates a single transaction. 
+ /// + /// See also [`TransactionValidator::validate_transaction`] + /// + /// This behaves the same as [`EthTransactionValidator::validate_one`], but in addition, ensures + /// that the account has enough balance to cover the L1 gas cost. + pub fn validate_one( + &self, + origin: TransactionOrigin, + transaction: Tx, + ) -> TransactionValidationOutcome { + if transaction.is_eip4844() { + return TransactionValidationOutcome::Invalid( + transaction, + InvalidTransactionError::TxTypeNotSupported.into(), + ) + } + + let outcome = self.inner.validate_one(origin, transaction); + + if !self.requires_l1_data_gas_fee() { + // no need to check L1 gas fee + return outcome + } + + // ensure that the account has enough balance to cover the L1 gas cost + if let TransactionValidationOutcome::Valid { + balance, + state_nonce, + transaction: valid_tx, + propagate, + } = outcome + { + let mut l1_block_info = self.block_info.l1_block_info.read().clone(); + + let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); + let tx = valid_tx.transaction().clone_into_consensus(); + tx.encode_2718(&mut encoded); + + let cost_addition = match l1_block_info.l1_tx_data_fee( + self.chain_spec(), + self.block_timestamp(), + self.block_number(), + &encoded, + false, + ) { + Ok(cost) => cost, + Err(err) => { + return TransactionValidationOutcome::Error(*valid_tx.hash(), Box::new(err)) + } + }; + let cost = valid_tx.transaction().cost().saturating_add(cost_addition); + + // Checks for max cost + if cost > balance { + return TransactionValidationOutcome::Invalid( + valid_tx.into_transaction(), + InvalidTransactionError::InsufficientFunds( + GotExpected { got: balance, expected: cost }.into(), + ) + .into(), + ) + } + + return TransactionValidationOutcome::Valid { + balance, + state_nonce, + transaction: valid_tx, + propagate, + } + } + + outcome + } + + /// Validates all given transactions. + /// + /// Returns all outcomes for the given transactions in the same order. 
+ /// + /// See also [`Self::validate_one`] + pub fn validate_all( + &self, + transactions: Vec<(TransactionOrigin, Tx)>, + ) -> Vec> { + transactions.into_iter().map(|(origin, tx)| self.validate_one(origin, tx)).collect() + } +} + +impl TransactionValidator for OpTransactionValidator +where + Client: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt, + Tx: EthPoolTransaction, +{ + type Transaction = Tx; + + async fn validate_transaction( + &self, + origin: TransactionOrigin, + transaction: Self::Transaction, + ) -> TransactionValidationOutcome { + self.validate_one(origin, transaction) + } + + async fn validate_transactions( + &self, + transactions: Vec<(TransactionOrigin, Self::Transaction)>, + ) -> Vec> { + self.validate_all(transactions) + } + + fn on_new_head_block(&self, new_tip_block: &SealedBlock) + where + B: Block, + { + self.inner.on_new_head_block(new_tip_block); + self.update_l1_block_info( + new_tip_block.header(), + new_tip_block.body().transactions().first(), + ); + } +} diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index d8e3b0faf91c..7c975ef7d4e2 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -16,7 +16,6 @@ workspace = true reth-chainspec.workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true -reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true reth-payload-builder-primitives.workspace = true diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 32e6ec30201b..84995d3e77e0 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -20,18 +20,17 @@ use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJo use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind}; use reth_primitives::{NodePrimitives, SealedHeader}; 
-use reth_primitives_traits::proofs; +use reth_primitives_traits::{proofs, HeaderTy}; use reth_provider::{BlockReaderIdExt, CanonStateNotification, StateProviderFactory}; -use reth_revm::cached::CachedReads; +use reth_revm::{cached::CachedReads, cancelled::CancelOnDrop}; use reth_tasks::TaskSpawner; -use reth_transaction_pool::TransactionPool; use revm::{Database, State}; use std::{ fmt, future::Future, ops::Deref, pin::Pin, - sync::{atomic::AtomicBool, Arc}, + sync::Arc, task::{Context, Poll}, time::{Duration, SystemTime, UNIX_EPOCH}, }; @@ -46,13 +45,14 @@ mod stack; pub use stack::PayloadBuilderStack; +/// Helper to access [`NodePrimitives::BlockHeader`] from [`PayloadBuilder::BuiltPayload`]. +pub type HeaderForPayload

= <

::Primitives as NodePrimitives>::BlockHeader; + /// The [`PayloadJobGenerator`] that creates [`BasicPayloadJob`]s. #[derive(Debug)] -pub struct BasicPayloadJobGenerator { +pub struct BasicPayloadJobGenerator { /// The client that can interact with the chain. client: Client, - /// The transaction pool to pull transactions from. - pool: Pool, /// The task executor to spawn payload building tasks on. executor: Tasks, /// The configuration for the job generator. @@ -69,19 +69,17 @@ pub struct BasicPayloadJobGenerator { // === impl BasicPayloadJobGenerator === -impl BasicPayloadJobGenerator { +impl BasicPayloadJobGenerator { /// Creates a new [`BasicPayloadJobGenerator`] with the given config and custom /// [`PayloadBuilder`] pub fn with_builder( client: Client, - pool: Pool, executor: Tasks, config: BasicPayloadJobGeneratorConfig, builder: Builder, ) -> Self { Self { client, - pool, executor, payload_task_guard: PayloadTaskGuard::new(config.max_payload_tasks), config, @@ -129,21 +127,20 @@ impl BasicPayloadJobGenerator PayloadJobGenerator - for BasicPayloadJobGenerator +impl PayloadJobGenerator + for BasicPayloadJobGenerator where Client: StateProviderFactory - + BlockReaderIdExt

+ + BlockReaderIdExt
> + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + Unpin + 'static, - Builder: PayloadBuilder + Unpin + 'static, - >::Attributes: Unpin + Clone, - >::BuiltPayload: Unpin + Clone, + Builder: PayloadBuilder + Unpin + 'static, + Builder::Attributes: Unpin + Clone, + Builder::BuiltPayload: Unpin + Clone, { - type Job = BasicPayloadJob; + type Job = BasicPayloadJob; fn new_payload_job( &self, @@ -172,8 +169,6 @@ where let mut job = BasicPayloadJob { config, - client: self.client.clone(), - pool: self.pool.clone(), executor: self.executor.clone(), deadline, // ticks immediately @@ -306,16 +301,12 @@ impl Default for BasicPayloadJobGeneratorConfig { /// built and this future will wait to be resolved: [`PayloadJob::resolve`] or terminated if the /// deadline is reached.. #[derive(Debug)] -pub struct BasicPayloadJob +pub struct BasicPayloadJob where - Builder: PayloadBuilder, + Builder: PayloadBuilder, { /// The configuration for how the payload will be created. - config: PayloadConfig, - /// The client that can interact with the chain. - client: Client, - /// The transaction pool. - pool: Pool, + config: PayloadConfig>, /// How to spawn building tasks executor: Tasks, /// The deadline when this job should resolve. @@ -341,22 +332,18 @@ where builder: Builder, } -impl BasicPayloadJob +impl BasicPayloadJob where - Client: StateProviderFactory + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - Builder: PayloadBuilder + Unpin + 'static, - >::Attributes: Unpin + Clone, - >::BuiltPayload: Unpin + Clone, + Builder: PayloadBuilder + Unpin + 'static, + Builder::Attributes: Unpin + Clone, + Builder::BuiltPayload: Unpin + Clone, { /// Spawns a new payload build task. 
fn spawn_build_job(&mut self) { trace!(target: "payload_builder", id = %self.config.payload_id(), "spawn new payload build task"); let (tx, rx) = oneshot::channel(); - let client = self.client.clone(); - let pool = self.pool.clone(); - let cancel = Cancelled::default(); + let cancel = CancelOnDrop::default(); let _cancel = cancel.clone(); let guard = self.payload_task_guard.clone(); let payload_config = self.config.clone(); @@ -367,14 +354,8 @@ where self.executor.spawn_blocking(Box::pin(async move { // acquire the permit for executing the task let _permit = guard.acquire().await; - let args = BuildArguments { - client, - pool, - cached_reads, - config: payload_config, - cancel, - best_payload, - }; + let args = + BuildArguments { cached_reads, config: payload_config, cancel, best_payload }; let result = builder.try_build(args); let _ = tx.send(result); })); @@ -383,14 +364,12 @@ where } } -impl Future for BasicPayloadJob +impl Future for BasicPayloadJob where - Client: StateProviderFactory + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - Builder: PayloadBuilder + Unpin + 'static, - >::Attributes: Unpin + Clone, - >::BuiltPayload: Unpin + Clone, + Builder: PayloadBuilder + Unpin + 'static, + Builder::Attributes: Unpin + Clone, + Builder::BuiltPayload: Unpin + Clone, { type Output = Result<(), PayloadBuilderError>; @@ -448,14 +427,12 @@ where } } -impl PayloadJob for BasicPayloadJob +impl PayloadJob for BasicPayloadJob where - Client: StateProviderFactory + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - Builder: PayloadBuilder + Unpin + 'static, - >::Attributes: Unpin + Clone, - >::BuiltPayload: Unpin + Clone, + Builder: PayloadBuilder + Unpin + 'static, + Builder::Attributes: Unpin + Clone, + Builder::BuiltPayload: Unpin + Clone, { type PayloadAttributes = Builder::Attributes; type ResolvePayloadFuture = ResolveBestPayload; @@ -472,7 +449,7 @@ 
where // started right away and the first full block should have been // built by the time CL is requesting the payload. self.metrics.inc_requested_empty_payload(); - self.builder.build_empty_payload(&self.client, self.config.clone()) + self.builder.build_empty_payload(self.config.clone()) } } @@ -497,11 +474,9 @@ where debug!(target: "payload_builder", id=%self.config.payload_id(), "no best payload yet to resolve, building empty payload"); let args = BuildArguments { - client: self.client.clone(), - pool: self.pool.clone(), cached_reads: self.cached_reads.take().unwrap_or_default(), config: self.config.clone(), - cancel: Cancelled::default(), + cancel: CancelOnDrop::default(), best_payload: None, }; @@ -516,11 +491,10 @@ where self.metrics.inc_requested_empty_payload(); // no payload built yet, so we need to return an empty payload let (tx, rx) = oneshot::channel(); - let client = self.client.clone(); let config = self.config.clone(); let builder = self.builder.clone(); self.executor.spawn_blocking(Box::pin(async move { - let res = builder.build_empty_payload(&client, config); + let res = builder.build_empty_payload(config); let _ = tx.send(res); })); @@ -657,7 +631,7 @@ where #[derive(Debug)] pub struct PendingPayload

{ /// The marker to cancel the job on drop - _cancel: Cancelled, + _cancel: CancelOnDrop, /// The channel to send the result to. payload: oneshot::Receiver, PayloadBuilderError>>, } @@ -665,7 +639,7 @@ pub struct PendingPayload

{ impl

PendingPayload

{ /// Constructs a `PendingPayload` future. pub const fn new( - cancel: Cancelled, + cancel: CancelOnDrop, payload: oneshot::Receiver, PayloadBuilderError>>, ) -> Self { Self { _cancel: cancel, payload } @@ -681,42 +655,21 @@ impl

Future for PendingPayload

{ } } -/// A marker that can be used to cancel a job. -/// -/// If dropped, it will set the `cancelled` flag to true. -#[derive(Default, Clone, Debug)] -pub struct Cancelled(Arc); - -// === impl Cancelled === - -impl Cancelled { - /// Returns true if the job was cancelled. - pub fn is_cancelled(&self) -> bool { - self.0.load(std::sync::atomic::Ordering::Relaxed) - } -} - -impl Drop for Cancelled { - fn drop(&mut self) { - self.0.store(true, std::sync::atomic::Ordering::Relaxed); - } -} - /// Static config for how to build a payload. #[derive(Clone, Debug)] -pub struct PayloadConfig { +pub struct PayloadConfig { /// The parent header. - pub parent_header: Arc, + pub parent_header: Arc>, /// Requested attributes for the payload. pub attributes: Attributes, } -impl PayloadConfig +impl PayloadConfig where Attributes: PayloadBuilderAttributes, { /// Create new payload config. - pub const fn new(parent_header: Arc, attributes: Attributes) -> Self { + pub const fn new(parent_header: Arc>, attributes: Attributes) -> Self { Self { parent_header, attributes } } @@ -827,61 +780,26 @@ impl BuildOutcomeKind { /// building process. It holds references to the Ethereum client, transaction pool, cached reads, /// payload configuration, cancellation status, and the best payload achieved so far. #[derive(Debug)] -pub struct BuildArguments { - /// How to interact with the chain. - pub client: Client, - /// The transaction pool. - /// - /// Or the type that provides the transactions to build the payload. - pub pool: Pool, +pub struct BuildArguments { /// Previously cached disk reads pub cached_reads: CachedReads, /// How to configure the payload. - pub config: PayloadConfig, + pub config: PayloadConfig>, /// A marker that can be used to cancel the job. - pub cancel: Cancelled, + pub cancel: CancelOnDrop, /// The best payload achieved so far. pub best_payload: Option, } -impl BuildArguments { +impl BuildArguments { /// Create new build arguments. 
pub const fn new( - client: Client, - pool: Pool, cached_reads: CachedReads, - config: PayloadConfig, - cancel: Cancelled, + config: PayloadConfig>, + cancel: CancelOnDrop, best_payload: Option, ) -> Self { - Self { client, pool, cached_reads, config, cancel, best_payload } - } - - /// Maps the transaction pool to a new type. - pub fn with_pool

(self, pool: P) -> BuildArguments { - BuildArguments { - client: self.client, - pool, - cached_reads: self.cached_reads, - config: self.config, - cancel: self.cancel, - best_payload: self.best_payload, - } - } - - /// Maps the transaction pool to a new type using a closure with the current pool type as input. - pub fn map_pool(self, f: F) -> BuildArguments - where - F: FnOnce(Pool) -> P, - { - BuildArguments { - client: self.client, - pool: f(self.pool), - cached_reads: self.cached_reads, - config: self.config, - cancel: self.cancel, - best_payload: self.best_payload, - } + Self { cached_reads, config, cancel, best_payload } } } @@ -893,7 +811,7 @@ impl BuildArguments: Send + Sync + Clone { +pub trait PayloadBuilder: Send + Sync + Clone { /// The payload attributes type to accept for building. type Attributes: PayloadBuilderAttributes; /// The type of the built payload. @@ -913,7 +831,7 @@ pub trait PayloadBuilder: Send + Sync + Clone { /// A `Result` indicating the build outcome or an error. fn try_build( &self, - args: BuildArguments, + args: BuildArguments, ) -> Result, PayloadBuilderError>; /// Invoked when the payload job is being resolved and there is no payload yet. @@ -921,7 +839,7 @@ pub trait PayloadBuilder: Send + Sync + Clone { /// This can happen if the CL requests a payload before the first payload has been built. fn on_missing_payload( &self, - _args: BuildArguments, + _args: BuildArguments, ) -> MissingPayloadBehaviour { MissingPayloadBehaviour::RaceEmptyPayload } @@ -929,8 +847,7 @@ pub trait PayloadBuilder: Send + Sync + Clone { /// Builds an empty payload without any transaction. 
fn build_empty_payload( &self, - client: &Client, - config: PayloadConfig, + config: PayloadConfig>, ) -> Result; } diff --git a/crates/payload/basic/src/stack.rs b/crates/payload/basic/src/stack.rs index 1dd57d155685..d7a581752500 100644 --- a/crates/payload/basic/src/stack.rs +++ b/crates/payload/basic/src/stack.rs @@ -1,6 +1,6 @@ use crate::{ - BuildArguments, BuildOutcome, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, - PayloadConfig, + BuildArguments, BuildOutcome, HeaderForPayload, PayloadBuilder, PayloadBuilderAttributes, + PayloadBuilderError, PayloadConfig, }; use alloy_eips::eip4895::Withdrawals; @@ -177,54 +177,45 @@ where } } -impl PayloadBuilder for PayloadBuilderStack +impl PayloadBuilder for PayloadBuilderStack where - L: PayloadBuilder + Unpin + 'static, - R: PayloadBuilder + Unpin + 'static, - Client: Clone, - Pool: Clone, + L: PayloadBuilder + Unpin + 'static, + R: PayloadBuilder + Unpin + 'static, L::Attributes: Unpin + Clone, R::Attributes: Unpin + Clone, L::BuiltPayload: Unpin + Clone, R::BuiltPayload: BuiltPayload::Primitives> + Unpin + Clone, - <>::Attributes as PayloadBuilderAttributes>::Error: 'static, - <>::Attributes as PayloadBuilderAttributes>::Error: 'static, { type Attributes = Either; type BuiltPayload = Either; fn try_build( &self, - args: BuildArguments, + args: BuildArguments, ) -> Result, PayloadBuilderError> { match args.config.attributes { Either::Left(ref left_attr) => { - let left_args: BuildArguments = - BuildArguments { - client: args.client.clone(), - pool: args.pool.clone(), - cached_reads: args.cached_reads.clone(), - config: PayloadConfig { - parent_header: args.config.parent_header.clone(), - attributes: left_attr.clone(), - }, - cancel: args.cancel.clone(), - best_payload: args.best_payload.clone().and_then(|payload| { - if let Either::Left(p) = payload { - Some(p) - } else { - None - } - }), - }; + let left_args: BuildArguments = BuildArguments { + cached_reads: args.cached_reads.clone(), + config: 
PayloadConfig { + parent_header: args.config.parent_header.clone(), + attributes: left_attr.clone(), + }, + cancel: args.cancel.clone(), + best_payload: args.best_payload.clone().and_then(|payload| { + if let Either::Left(p) = payload { + Some(p) + } else { + None + } + }), + }; self.left.try_build(left_args).map(|out| out.map_payload(Either::Left)) } Either::Right(ref right_attr) => { let right_args = BuildArguments { - client: args.client.clone(), - pool: args.pool.clone(), cached_reads: args.cached_reads.clone(), config: PayloadConfig { parent_header: args.config.parent_header.clone(), @@ -247,8 +238,7 @@ where fn build_empty_payload( &self, - client: &Client, - config: PayloadConfig, + config: PayloadConfig>, ) -> Result { match config.attributes { Either::Left(left_attr) => { @@ -256,14 +246,14 @@ where parent_header: config.parent_header.clone(), attributes: left_attr, }; - self.left.build_empty_payload(client, left_config).map(Either::Left) + self.left.build_empty_payload(left_config).map(Either::Left) } Either::Right(right_attr) => { let right_config = PayloadConfig { parent_header: config.parent_header.clone(), attributes: right_attr, }; - self.right.build_empty_payload(client, right_config).map(Either::Right) + self.right.build_empty_payload(right_config).map(Either::Right) } } } diff --git a/crates/payload/builder-primitives/Cargo.toml b/crates/payload/builder-primitives/Cargo.toml index 6d89ea89d03a..70ff2d58833c 100644 --- a/crates/payload/builder-primitives/Cargo.toml +++ b/crates/payload/builder-primitives/Cargo.toml @@ -15,11 +15,7 @@ workspace = true # reth reth-payload-primitives.workspace = true -# alloy -alloy-rpc-types-engine = { workspace = true, features = ["serde"] } - # async -async-trait.workspace = true pin-project.workspace = true tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true diff --git a/crates/payload/builder-primitives/src/lib.rs b/crates/payload/builder-primitives/src/lib.rs index 
af7ad736d44e..d181531ca328 100644 --- a/crates/payload/builder-primitives/src/lib.rs +++ b/crates/payload/builder-primitives/src/lib.rs @@ -11,8 +11,4 @@ mod events; pub use crate::events::{Events, PayloadEvents}; -/// Contains the payload builder trait to abstract over payload attributes. -mod traits; -pub use traits::{PayloadBuilder, PayloadStoreExt}; - pub use reth_payload_primitives::PayloadBuilderError; diff --git a/crates/payload/builder-primitives/src/traits.rs b/crates/payload/builder-primitives/src/traits.rs deleted file mode 100644 index b5e8910b6c26..000000000000 --- a/crates/payload/builder-primitives/src/traits.rs +++ /dev/null @@ -1,111 +0,0 @@ -use crate::{PayloadBuilderError, PayloadEvents}; -use alloy_rpc_types_engine::PayloadId; -use reth_payload_primitives::{PayloadKind, PayloadTypes}; -use std::fmt::Debug; -use tokio::sync::oneshot; - -/// A helper trait for internal usage to retrieve and resolve payloads. -#[async_trait::async_trait] -pub trait PayloadStoreExt: Debug + Send + Sync + Unpin { - /// Resolves the payload job and returns the best payload that has been built so far. - async fn resolve_kind( - &self, - id: PayloadId, - kind: PayloadKind, - ) -> Option>; - - /// Resolves the payload job as fast and possible and returns the best payload that has been - /// built so far. - async fn resolve(&self, id: PayloadId) -> Option> { - self.resolve_kind(id, PayloadKind::Earliest).await - } - - /// Returns the best payload for the given identifier. - async fn best_payload( - &self, - id: PayloadId, - ) -> Option>; - - /// Returns the payload attributes associated with the given identifier. 
- async fn payload_attributes( - &self, - id: PayloadId, - ) -> Option>; -} - -#[async_trait::async_trait] -impl PayloadStoreExt for P -where - P: PayloadBuilder, -{ - async fn resolve_kind( - &self, - id: PayloadId, - kind: PayloadKind, - ) -> Option> { - Some(PayloadBuilder::resolve_kind(self, id, kind).await?.map_err(Into::into)) - } - - async fn best_payload( - &self, - id: PayloadId, - ) -> Option> { - Some(PayloadBuilder::best_payload(self, id).await?.map_err(Into::into)) - } - - async fn payload_attributes( - &self, - id: PayloadId, - ) -> Option> { - Some(PayloadBuilder::payload_attributes(self, id).await?.map_err(Into::into)) - } -} - -/// A type that can request, subscribe to and resolve payloads. -#[async_trait::async_trait] -pub trait PayloadBuilder: Debug + Send + Sync + Unpin { - /// The Payload type for the builder. - type PayloadType: PayloadTypes; - /// The error type returned by the builder. - type Error: Into; - - /// Sends a message to the service to start building a new payload for the given payload. - /// - /// Returns a receiver that will receive the payload id. - fn send_new_payload( - &self, - attr: ::PayloadBuilderAttributes, - ) -> oneshot::Receiver>; - - /// Returns the best payload for the given identifier. - async fn best_payload( - &self, - id: PayloadId, - ) -> Option::BuiltPayload, Self::Error>>; - - /// Resolves the payload job and returns the best payload that has been built so far. - async fn resolve_kind( - &self, - id: PayloadId, - kind: PayloadKind, - ) -> Option::BuiltPayload, Self::Error>>; - - /// Resolves the payload job as fast and possible and returns the best payload that has been - /// built so far. - async fn resolve( - &self, - id: PayloadId, - ) -> Option::BuiltPayload, Self::Error>> { - self.resolve_kind(id, PayloadKind::Earliest).await - } - - /// Sends a message to the service to subscribe to payload events. - /// Returns a receiver that will receive them. 
- async fn subscribe(&self) -> Result, Self::Error>; - - /// Returns the payload attributes associated with the given identifier. - async fn payload_attributes( - &self, - id: PayloadId, - ) -> Option::PayloadBuilderAttributes, Self::Error>>; -} diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 33bed09f6a46..de04105df043 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -26,7 +26,6 @@ alloy-primitives = { workspace = true, optional = true } alloy-rpc-types = { workspace = true, features = ["engine"] } # async -async-trait.workspace = true tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true futures-util.workspace = true diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index de51a791c78c..fc37a3830e4c 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -11,9 +11,7 @@ use alloy_consensus::BlockHeader; use alloy_rpc_types::engine::PayloadId; use futures_util::{future::FutureExt, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; -use reth_payload_builder_primitives::{ - Events, PayloadBuilder, PayloadBuilderError, PayloadEvents, PayloadStoreExt, -}; +use reth_payload_builder_primitives::{Events, PayloadBuilderError, PayloadEvents}; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind, PayloadTypes}; use reth_primitives_traits::NodePrimitives; use std::{ @@ -38,7 +36,7 @@ type PayloadFuture

= Pin { - inner: Arc>, + inner: Arc>, } impl PayloadStore @@ -91,10 +89,7 @@ where T: PayloadTypes, { /// Create a new instance - pub fn new

(inner: P) -> Self - where - P: PayloadStoreExt + 'static, - { + pub fn new(inner: PayloadBuilderHandle) -> Self { Self { inner: Arc::new(inner) } } } @@ -117,36 +112,40 @@ pub struct PayloadBuilderHandle { to_service: mpsc::UnboundedSender>, } -// === impl PayloadBuilderHandle === - -#[async_trait::async_trait] -impl PayloadBuilder for PayloadBuilderHandle -where - T: PayloadTypes, -{ - type PayloadType = T; - type Error = PayloadBuilderError; +impl PayloadBuilderHandle { + /// Creates a new payload builder handle for the given channel. + /// + /// Note: this is only used internally by the [`PayloadBuilderService`] to manage the payload + /// building flow See [`PayloadBuilderService::poll`] for implementation details. + pub const fn new(to_service: mpsc::UnboundedSender>) -> Self { + Self { to_service } + } - fn send_new_payload( + /// Sends a message to the service to start building a new payload for the given payload. + /// + /// Returns a receiver that will receive the payload id. + pub fn send_new_payload( &self, - attr: ::PayloadBuilderAttributes, - ) -> Receiver> { + attr: T::PayloadBuilderAttributes, + ) -> Receiver> { let (tx, rx) = oneshot::channel(); let _ = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx)); rx } + /// Returns the best payload for the given identifier. /// Note: this does not resolve the job if it's still in progress. - async fn best_payload( + pub async fn best_payload( &self, id: PayloadId, - ) -> Option::BuiltPayload, Self::Error>> { + ) -> Option> { let (tx, rx) = oneshot::channel(); self.to_service.send(PayloadServiceCommand::BestPayload(id, tx)).ok()?; rx.await.ok()? } - async fn resolve_kind( + /// Resolves the payload job and returns the best payload that has been built so far. + pub async fn resolve_kind( &self, id: PayloadId, kind: PayloadKind, @@ -159,7 +158,9 @@ where } } - async fn subscribe(&self) -> Result, Self::Error> { + /// Sends a message to the service to subscribe to payload events. 
+ /// Returns a receiver that will receive them. + pub async fn subscribe(&self) -> Result, PayloadBuilderError> { let (tx, rx) = oneshot::channel(); let _ = self.to_service.send(PayloadServiceCommand::Subscribe(tx)); Ok(PayloadEvents { receiver: rx.await? }) @@ -168,7 +169,7 @@ where /// Returns the payload attributes associated with the given identifier. /// /// Note: this returns the attributes of the payload and does not resolve the job. - async fn payload_attributes( + pub async fn payload_attributes( &self, id: PayloadId, ) -> Option> { @@ -178,19 +179,6 @@ where } } -impl PayloadBuilderHandle -where - T: PayloadTypes, -{ - /// Creates a new payload builder handle for the given channel. - /// - /// Note: this is only used internally by the [`PayloadBuilderService`] to manage the payload - /// building flow See [`PayloadBuilderService::poll`] for implementation details. - pub const fn new(to_service: mpsc::UnboundedSender>) -> Self { - Self { to_service } - } -} - impl Clone for PayloadBuilderHandle where T: PayloadTypes, diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index b3f2f99c1200..48d8224f456f 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -26,6 +26,7 @@ op-alloy-rpc-types-engine = { workspace = true, optional = true } scroll-alloy-rpc-types-engine = { workspace = true, optional = true, features = ["serde"] } # misc +auto_impl.workspace = true serde.workspace = true thiserror.workspace = true tokio = { workspace = true, default-features = false, features = ["sync"] } diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 03977eb9ceab..03a44dabd8d4 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -58,7 +58,7 @@ pub fn validate_payload_timestamp( timestamp: u64, ) -> Result<(), EngineObjectValidationError> { let is_cancun = chain_spec.is_cancun_active_at_timestamp(timestamp); - if 
version == EngineApiMessageVersion::V2 && is_cancun { + if version.is_v2() && is_cancun { // From the Engine API spec: // // ### Update the methods of previous forks @@ -79,7 +79,7 @@ pub fn validate_payload_timestamp( return Err(EngineObjectValidationError::UnsupportedFork) } - if version == EngineApiMessageVersion::V3 && !is_cancun { + if version.is_v3() && !is_cancun { // From the Engine API spec: // // @@ -102,7 +102,7 @@ pub fn validate_payload_timestamp( } let is_prague = chain_spec.is_prague_active_at_timestamp(timestamp); - if version == EngineApiMessageVersion::V4 && !is_prague { + if version.is_v4() && !is_prague { // From the Engine API spec: // // @@ -347,6 +347,28 @@ pub enum EngineApiMessageVersion { V4 = 4, } +impl EngineApiMessageVersion { + /// Returns true if the version is V1. + pub const fn is_v1(&self) -> bool { + matches!(self, Self::V1) + } + + /// Returns true if the version is V2. + pub const fn is_v2(&self) -> bool { + matches!(self, Self::V2) + } + + /// Returns true if the version is V3. + pub const fn is_v3(&self) -> bool { + matches!(self, Self::V3) + } + + /// Returns true if the version is V4. + pub const fn is_v4(&self) -> bool { + matches!(self, Self::V4) + } +} + /// Determines how we should choose the payload to return. #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum PayloadKind { diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index cd213067f36f..4328884ca45e 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -11,6 +11,7 @@ use reth_primitives::{NodePrimitives, SealedBlock}; /// Represents a built payload type that contains a built `SealedBlock` and can be converted into /// engine API execution payloads. 
+#[auto_impl::auto_impl(&, Arc)] pub trait BuiltPayload: Send + Sync + fmt::Debug { /// The node's primitive types type Primitives: NodePrimitives; diff --git a/crates/payload/util/Cargo.toml b/crates/payload/util/Cargo.toml index 4eed45331656..f52484f92fec 100644 --- a/crates/payload/util/Cargo.toml +++ b/crates/payload/util/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-transaction-pool.workspace = true # alloy alloy-primitives.workspace = true diff --git a/crates/payload/util/src/lib.rs b/crates/payload/util/src/lib.rs index 7cf0f0a6e1e4..ffffc936fe19 100644 --- a/crates/payload/util/src/lib.rs +++ b/crates/payload/util/src/lib.rs @@ -11,5 +11,5 @@ mod traits; mod transaction; -pub use traits::{NoopPayloadTransactions, PayloadTransactions}; +pub use traits::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; pub use transaction::{PayloadTransactionsChain, PayloadTransactionsFixed}; diff --git a/crates/payload/util/src/traits.rs b/crates/payload/util/src/traits.rs index 43e38312f678..c88950a7bb1c 100644 --- a/crates/payload/util/src/traits.rs +++ b/crates/payload/util/src/traits.rs @@ -1,5 +1,7 @@ -use alloy_primitives::Address; -use reth_primitives::Recovered; +use std::sync::Arc; + +use alloy_primitives::{map::HashSet, Address}; +use reth_transaction_pool::{PoolTransaction, ValidPoolTransaction}; /// Iterator that returns transactions for the block building process in the order they should be /// included in the block. @@ -15,7 +17,7 @@ pub trait PayloadTransactions { &mut self, // In the future, `ctx` can include access to state for block building purposes. ctx: (), - ) -> Option>; + ) -> Option; /// Exclude descendants of the transaction with given sender and nonce from the iterator, /// because this transaction won't be included in the block. 
@@ -35,9 +37,149 @@ impl Default for NoopPayloadTransactions { impl PayloadTransactions for NoopPayloadTransactions { type Transaction = T; - fn next(&mut self, _ctx: ()) -> Option> { + fn next(&mut self, _ctx: ()) -> Option { None } fn mark_invalid(&mut self, _sender: Address, _nonce: u64) {} } + +/// Wrapper struct that allows to convert `BestTransactions` (used in tx pool) to +/// `PayloadTransactions` (used in block composition). +#[derive(Debug)] +pub struct BestPayloadTransactions +where + T: PoolTransaction, + I: Iterator>>, +{ + invalid: HashSet

, + best: I, +} + +impl BestPayloadTransactions +where + T: PoolTransaction, + I: Iterator>>, +{ + /// Create a new `BestPayloadTransactions` with the given iterator. + pub fn new(best: I) -> Self { + Self { invalid: Default::default(), best } + } +} + +impl PayloadTransactions for BestPayloadTransactions +where + T: PoolTransaction, + I: Iterator>>, +{ + type Transaction = T; + + fn next(&mut self, _ctx: ()) -> Option { + loop { + let tx = self.best.next()?; + if self.invalid.contains(&tx.sender()) { + continue + } + return Some(tx.transaction.clone()) + } + } + + fn mark_invalid(&mut self, sender: Address, _nonce: u64) { + self.invalid.insert(sender); + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use crate::{ + BestPayloadTransactions, PayloadTransactions, PayloadTransactionsChain, + PayloadTransactionsFixed, + }; + use alloy_primitives::{map::HashSet, Address}; + use reth_transaction_pool::{ + pool::{BestTransactionsWithPrioritizedSenders, PendingPool}, + test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}, + PoolTransaction, + }; + + #[test] + fn test_best_transactions_chained_iterators() { + let mut priority_pool = PendingPool::new(MockOrdering::default()); + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Block composition + // === + // (1) up to 100 gas: custom top-of-block transaction + // (2) up to 100 gas: transactions from the priority pool + // (3) up to 200 gas: only transactions from address A + // (4) up to 200 gas: only transactions from address B + // (5) until block gas limit: all transactions from the main pool + + // Notes: + // - If prioritized addresses overlap, a single transaction will be prioritized twice and + // therefore use the per-segment gas limit twice. + // - Priority pool and main pool must synchronize between each other to make sure there are + // no conflicts for the same nonce. 
For example, in this scenario, pools can't reject + // transactions with seemingly incorrect nonces, because previous transactions might be in + // the other pool. + + let address_top_of_block = Address::random(); + let address_in_priority_pool = Address::random(); + let address_a = Address::random(); + let address_b = Address::random(); + let address_regular = Address::random(); + + // Add transactions to the main pool + { + let prioritized_tx_a = + MockTransaction::eip1559().with_gas_price(5).with_sender(address_a); + // without our custom logic, B would be prioritized over A due to gas price: + let prioritized_tx_b = + MockTransaction::eip1559().with_gas_price(10).with_sender(address_b); + let regular_tx = + MockTransaction::eip1559().with_gas_price(15).with_sender(address_regular); + pool.add_transaction(Arc::new(f.validated(prioritized_tx_a)), 0); + pool.add_transaction(Arc::new(f.validated(prioritized_tx_b)), 0); + pool.add_transaction(Arc::new(f.validated(regular_tx)), 0); + } + + // Add transactions to the priority pool + { + let prioritized_tx = + MockTransaction::eip1559().with_gas_price(0).with_sender(address_in_priority_pool); + let valid_prioritized_tx = f.validated(prioritized_tx); + priority_pool.add_transaction(Arc::new(valid_prioritized_tx), 0); + } + + let mut block = PayloadTransactionsChain::new( + PayloadTransactionsFixed::single( + MockTransaction::eip1559().with_sender(address_top_of_block), + ), + Some(100), + PayloadTransactionsChain::new( + BestPayloadTransactions::new(priority_pool.best()), + Some(100), + BestPayloadTransactions::new(BestTransactionsWithPrioritizedSenders::new( + HashSet::from([address_a]), + 200, + BestTransactionsWithPrioritizedSenders::new( + HashSet::from([address_b]), + 200, + pool.best(), + ), + )), + None, + ), + None, + ); + + assert_eq!(block.next(()).unwrap().sender(), address_top_of_block); + assert_eq!(block.next(()).unwrap().sender(), address_in_priority_pool); + assert_eq!(block.next(()).unwrap().sender(), 
address_a); + assert_eq!(block.next(()).unwrap().sender(), address_b); + assert_eq!(block.next(()).unwrap().sender(), address_regular); + } +} diff --git a/crates/payload/util/src/transaction.rs b/crates/payload/util/src/transaction.rs index 0893e8d10325..818b06f0a1aa 100644 --- a/crates/payload/util/src/transaction.rs +++ b/crates/payload/util/src/transaction.rs @@ -1,7 +1,7 @@ use crate::PayloadTransactions; use alloy_consensus::Transaction; use alloy_primitives::Address; -use reth_primitives::Recovered; +use reth_transaction_pool::PoolTransaction; /// An implementation of [`crate::traits::PayloadTransactions`] that yields /// a pre-defined set of transactions. @@ -26,10 +26,10 @@ impl PayloadTransactionsFixed { } } -impl PayloadTransactions for PayloadTransactionsFixed> { +impl PayloadTransactions for PayloadTransactionsFixed { type Transaction = T; - fn next(&mut self, _ctx: ()) -> Option> { + fn next(&mut self, _ctx: ()) -> Option { (self.index < self.transactions.len()).then(|| { let tx = self.transactions[self.index].clone(); self.index += 1; @@ -91,20 +91,20 @@ impl PayloadTransactionsChain PayloadTransactions for PayloadTransactionsChain where - A: PayloadTransactions, + A: PayloadTransactions, B: PayloadTransactions, { type Transaction = A::Transaction; - fn next(&mut self, ctx: ()) -> Option> { + fn next(&mut self, ctx: ()) -> Option { while let Some(tx) = self.before.next(ctx) { if let Some(before_max_gas) = self.before_max_gas { - if self.before_gas + tx.tx().gas_limit() <= before_max_gas { - self.before_gas += tx.tx().gas_limit(); + if self.before_gas + tx.gas_limit() <= before_max_gas { + self.before_gas += tx.gas_limit(); return Some(tx); } - self.before.mark_invalid(tx.signer(), tx.tx().nonce()); - self.after.mark_invalid(tx.signer(), tx.tx().nonce()); + self.before.mark_invalid(tx.sender(), tx.nonce()); + self.after.mark_invalid(tx.sender(), tx.nonce()); } else { return Some(tx); } @@ -112,11 +112,11 @@ where while let Some(tx) = 
self.after.next(ctx) { if let Some(after_max_gas) = self.after_max_gas { - if self.after_gas + tx.tx().gas_limit() <= after_max_gas { - self.after_gas += tx.tx().gas_limit(); + if self.after_gas + tx.gas_limit() <= after_max_gas { + self.after_gas += tx.gas_limit(); return Some(tx); } - self.after.mark_invalid(tx.signer(), tx.tx().nonce()); + self.after.mark_invalid(tx.sender(), tx.nonce()); } else { return Some(tx); } diff --git a/crates/payload/validator/Cargo.toml b/crates/payload/validator/Cargo.toml index 5c34a9f456f9..11d0480da546 100644 --- a/crates/payload/validator/Cargo.toml +++ b/crates/payload/validator/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-chainspec.workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true +reth-engine-primitives.workspace = true # alloy alloy-rpc-types = { workspace = true, features = ["engine"] } diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index ba78f1cbc8d5..6920de7fb76e 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -8,10 +8,9 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_rpc_types::engine::{ - ExecutionPayload, ExecutionPayloadSidecar, MaybeCancunPayloadFields, PayloadError, -}; +use alloy_rpc_types::engine::{MaybeCancunPayloadFields, PayloadError}; use reth_chainspec::EthereumHardforks; +use reth_engine_primitives::ExecutionData; use reth_primitives::SealedBlock; use reth_primitives_traits::{Block, SignedTransaction}; use std::sync::Arc; @@ -114,9 +113,10 @@ impl ExecutionPayloadValidator { /// pub fn ensure_well_formed_payload( &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, + payload: ExecutionData, ) -> Result>, PayloadError> { + let ExecutionData { payload, sidecar } = payload; + let expected_hash = payload.block_hash(); // First parse the block diff --git a/crates/prune/types/Cargo.toml 
b/crates/prune/types/Cargo.toml index 7764fd8378c0..c6e6451585f0 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -12,16 +12,22 @@ description = "Commonly used types for prune usage in reth." workspace = true [dependencies] -reth-codecs.workspace = true +reth-codecs = { workspace = true, optional = true } alloy-primitives.workspace = true derive_more.workspace = true -modular-bitfield.workspace = true -serde.workspace = true thiserror.workspace = true + +modular-bitfield = { workspace = true, optional = true } +serde = { workspace = true, features = ["derive"], optional = true } arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] +reth-codecs.workspace = true + +alloy-primitives = { workspace = true, features = ["serde"] } +serde.workspace = true +modular-bitfield.workspace = true arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true proptest.workspace = true @@ -33,9 +39,18 @@ toml.workspace = true [features] test-utils = [ "dep:arbitrary", - "reth-codecs/test-utils", + "reth-codecs?/test-utils", ] arbitrary = [ "alloy-primitives/arbitrary", - "reth-codecs/arbitrary", + "reth-codecs?/arbitrary", +] +reth-codec = [ + "dep:reth-codecs", + "dep:modular-bitfield", +] +serde = [ + "dep:serde", + "alloy-primitives/serde", + "reth-codecs?/serde", ] diff --git a/crates/prune/types/src/checkpoint.rs b/crates/prune/types/src/checkpoint.rs index e0397c5afc86..7b61028062f9 100644 --- a/crates/prune/types/src/checkpoint.rs +++ b/crates/prune/types/src/checkpoint.rs @@ -1,12 +1,12 @@ use crate::PruneMode; use alloy_primitives::{BlockNumber, TxNumber}; -use reth_codecs::{add_arbitrary_tests, Compact}; -use serde::{Deserialize, Serialize}; /// Saves the pruning progress of a stage. 
-#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] #[cfg_attr(any(test, feature = "test-utils"), derive(Default, arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct PruneCheckpoint { /// Highest pruned block number. If it's [None], the pruning for block `0` is not finished yet. pub block_number: Option, diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index bbf2cfe5ffc1..54c62b42d9d6 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -23,7 +23,6 @@ pub use pruner::{ SegmentOutputCheckpoint, }; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; -use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; pub use target::{PruneModes, MINIMUM_PRUNING_DISTANCE}; @@ -31,7 +30,8 @@ use alloy_primitives::{Address, BlockNumber}; use std::ops::Deref; /// Configuration for pruning receipts not associated with logs emitted by the specified contracts. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct ReceiptsLogPruneConfig(pub BTreeMap); impl ReceiptsLogPruneConfig { diff --git a/crates/prune/types/src/mode.rs b/crates/prune/types/src/mode.rs index de9b9e6dc081..42d34b30cc72 100644 --- a/crates/prune/types/src/mode.rs +++ b/crates/prune/types/src/mode.rs @@ -1,13 +1,13 @@ use crate::{segment::PrunePurpose, PruneSegment, PruneSegmentError}; use alloy_primitives::BlockNumber; -use reth_codecs::{add_arbitrary_tests, Compact}; -use serde::{Deserialize, Serialize}; /// Prune mode. 
-#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[serde(rename_all = "lowercase")] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), serde(rename_all = "lowercase"))] pub enum PruneMode { /// Prune all blocks. Full, @@ -48,8 +48,8 @@ impl PruneMode { } Self::Before(n) if *n == tip + 1 && purpose.is_static_file() => Some((tip, *self)), Self::Before(n) if *n > tip => None, // Nothing to prune yet - Self::Before(n) if tip - n >= segment.min_blocks(purpose) => { - Some(((*n).saturating_sub(1), *self)) + Self::Before(n) => { + (tip - n >= segment.min_blocks(purpose)).then(|| ((*n).saturating_sub(1), *self)) } _ => return Err(PruneSegmentError::Configuration(segment)), }; @@ -113,7 +113,8 @@ mod tests { PruneMode::Before(tip - MINIMUM_PRUNING_DISTANCE - 1), Ok(Some(tip - MINIMUM_PRUNING_DISTANCE - 2)), ), - (PruneMode::Before(tip - 1), Err(PruneSegmentError::Configuration(segment))), + // Nothing to prune + (PruneMode::Before(tip - 1), Ok(None)), ]; for (index, (mode, expected_result)) in tests.into_iter().enumerate() { diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index e0b73aab7a4d..443acf1ed797 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -1,26 +1,13 @@ use crate::MINIMUM_PRUNING_DISTANCE; use derive_more::Display; -use reth_codecs::{add_arbitrary_tests, Compact}; -use serde::{Deserialize, Serialize}; use thiserror::Error; /// Segment of the data that can be pruned. 
-#[derive( - Debug, - Display, - Clone, - Copy, - Eq, - PartialEq, - Ord, - PartialOrd, - Hash, - Serialize, - Deserialize, - Compact, -)] +#[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] #[cfg_attr(test, derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub enum PruneSegment { /// Prune segment responsible for the `TransactionSenders` table. SenderRecovery, diff --git a/crates/prune/types/src/target.rs b/crates/prune/types/src/target.rs index ca355b3944a3..59ebd803b900 100644 --- a/crates/prune/types/src/target.rs +++ b/crates/prune/types/src/target.rs @@ -1,5 +1,4 @@ use crate::{PruneMode, ReceiptsLogPruneConfig}; -use serde::{Deserialize, Deserializer, Serialize}; /// Minimum distance from the tip necessary for the node to work correctly: /// 1. Minimum 2 epochs (32 blocks per epoch) required to handle any reorg according to the @@ -9,32 +8,42 @@ use serde::{Deserialize, Deserializer, Serialize}; pub const MINIMUM_PRUNING_DISTANCE: u64 = 32 * 2 + 10_000; /// Pruning configuration for every segment of the data that can be pruned. -#[derive(Debug, Clone, Default, Deserialize, Eq, PartialEq, Serialize)] -#[serde(default)] +#[derive(Debug, Clone, Default, Eq, PartialEq)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), serde(default))] pub struct PruneModes { /// Sender Recovery pruning configuration. - #[serde(skip_serializing_if = "Option::is_none")] + #[cfg_attr(any(test, feature = "serde"), serde(skip_serializing_if = "Option::is_none"))] pub sender_recovery: Option, /// Transaction Lookup pruning configuration. 
- #[serde(skip_serializing_if = "Option::is_none")] + #[cfg_attr(any(test, feature = "serde"), serde(skip_serializing_if = "Option::is_none"))] pub transaction_lookup: Option, /// Receipts pruning configuration. This setting overrides `receipts_log_filter` /// and offers improved performance. - #[serde( - skip_serializing_if = "Option::is_none", - deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::" + #[cfg_attr( + any(test, feature = "serde"), + serde( + skip_serializing_if = "Option::is_none", + deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::" + ) )] pub receipts: Option, /// Account History pruning configuration. - #[serde( - skip_serializing_if = "Option::is_none", - deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::" + #[cfg_attr( + any(test, feature = "serde"), + serde( + skip_serializing_if = "Option::is_none", + deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::" + ) )] pub account_history: Option, /// Storage History pruning configuration. - #[serde( - skip_serializing_if = "Option::is_none", - deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::" + #[cfg_attr( + any(test, feature = "serde"), + serde( + skip_serializing_if = "Option::is_none", + deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::" + ) )] pub storage_history: Option, /// Receipts pruning configuration by retaining only those receipts that contain logs emitted @@ -82,9 +91,15 @@ impl PruneModes { /// 2. For [`PruneMode::Distance(distance`)], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is /// needed because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, /// meaning we have one block in the database. 
-fn deserialize_opt_prune_mode_with_min_blocks<'de, const MIN_BLOCKS: u64, D: Deserializer<'de>>( +#[cfg(any(test, feature = "serde"))] +fn deserialize_opt_prune_mode_with_min_blocks< + 'de, + const MIN_BLOCKS: u64, + D: serde::Deserializer<'de>, +>( deserializer: D, ) -> Result, D::Error> { + use serde::Deserialize; let prune_mode = Option::::deserialize(deserializer)?; match prune_mode { diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index f4236150b1a7..7fa02a8dd5b1 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -13,7 +13,8 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-primitives-traits.workspace = true +reth-ethereum-primitives.workspace = true reth-storage-errors.workspace = true reth-storage-api.workspace = true reth-trie = { workspace = true, optional = true } @@ -34,19 +35,21 @@ alloy-consensus.workspace = true [features] default = ["std"] std = [ - "reth-primitives/std", + "reth-primitives-traits/std", "alloy-primitives/std", "revm/std", "alloy-eips/std", "alloy-consensus/std", "reth-ethereum-forks/std", + "reth-ethereum-primitives/std", ] witness = ["dep:reth-trie"] test-utils = [ "dep:reth-trie", - "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", "reth-trie?/test-utils", "revm/test-utils", + "reth-ethereum-primitives/test-utils", ] serde = [ "revm/serde", @@ -55,5 +58,6 @@ serde = [ "alloy-consensus/serde", "reth-trie?/serde", "reth-ethereum-forks/serde", + "reth-primitives-traits/serde", ] scroll = ["reth-trie?/scroll"] diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index daf355e169a2..51ed91d22dac 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -10,7 +10,7 @@ use alloy_primitives::BlockNumber; /// - pruning receipts according to the pruning configuration. /// - batch range if known #[derive(Debug)] -pub struct BlockBatchRecord { +pub struct BlockBatchRecord { /// The collection of receipts. 
/// Outer vector stores receipts for each block sequentially. /// The inner vector stores receipts ordered by transaction number. diff --git a/crates/revm/src/cancelled.rs b/crates/revm/src/cancelled.rs new file mode 100644 index 000000000000..b692d2db7bb7 --- /dev/null +++ b/crates/revm/src/cancelled.rs @@ -0,0 +1,111 @@ +use alloc::sync::Arc; +use core::sync::atomic::AtomicBool; + +/// A marker that can be used to cancel execution. +/// +/// If dropped, it will set the `cancelled` flag to true. +/// +/// This is most useful when a payload job needs to be cancelled. +#[derive(Default, Clone, Debug)] +pub struct CancelOnDrop(Arc); + +// === impl CancelOnDrop === + +impl CancelOnDrop { + /// Returns true if the job was cancelled. + pub fn is_cancelled(&self) -> bool { + self.0.load(core::sync::atomic::Ordering::Relaxed) + } +} + +impl Drop for CancelOnDrop { + fn drop(&mut self) { + self.0.store(true, core::sync::atomic::Ordering::Relaxed); + } +} + +/// A marker that can be used to cancel execution. +/// +/// If dropped, it will NOT set the `cancelled` flag to true. +/// If `cancel` is called, the `cancelled` flag will be set to true. +/// +/// This is useful in prewarming, when an external signal is received to cancel many prewarming +/// tasks. +#[derive(Default, Clone, Debug)] +pub struct ManualCancel(Arc); + +// === impl ManualCancel === + +impl ManualCancel { + /// Returns true if the job was cancelled. + pub fn is_cancelled(&self) -> bool { + self.0.load(core::sync::atomic::Ordering::Relaxed) + } + + /// Drops the [`ManualCancel`], setting the cancelled flag to true. 
+ pub fn cancel(self) { + self.0.store(true, core::sync::atomic::Ordering::Relaxed); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_default_cancelled() { + let c = CancelOnDrop::default(); + assert!(!c.is_cancelled()); + } + + #[test] + fn test_default_cancel_task() { + let c = ManualCancel::default(); + assert!(!c.is_cancelled()); + } + + #[test] + fn test_set_cancel_task() { + let c = ManualCancel::default(); + assert!(!c.is_cancelled()); + let c2 = c.clone(); + let c3 = c.clone(); + c.cancel(); + assert!(c3.is_cancelled()); + assert!(c2.is_cancelled()); + } + + #[test] + fn test_cancel_task_multiple_threads() { + let c = ManualCancel::default(); + let cloned_cancel = c.clone(); + + // we want to make sure that: + // * we can spawn tasks that do things + // * those tasks can run to completion and the flag remains unset unless we call cancel + let mut handles = vec![]; + for _ in 0..10 { + let c = c.clone(); + let handle = std::thread::spawn(move || { + for _ in 0..1000 { + if c.is_cancelled() { + return; + } + } + }); + handles.push(handle); + } + + // wait for all the threads to finish + for handle in handles { + handle.join().unwrap(); + } + + // check that the flag is still unset + assert!(!c.is_cancelled()); + + // cancel and check that the flag is set + c.cancel(); + assert!(cloned_cancel.is_cancelled()); + } +} diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 3c161b348589..e039ca9d6299 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -1,7 +1,7 @@ use crate::primitives::alloy_primitives::{BlockNumber, StorageKey, StorageValue}; use alloy_primitives::{Address, B256, U256}; use core::ops::{Deref, DerefMut}; -use reth_primitives::Account; +use reth_primitives_traits::Account; use reth_storage_api::{AccountReader, BlockHashReader, StateProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use revm::{ @@ -27,7 +27,7 @@ pub trait EvmStateProvider: Send + 
Sync { fn bytecode_by_hash( &self, code_hash: &B256, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Get storage of the given account. fn storage( @@ -50,7 +50,7 @@ impl EvmStateProvider for T { fn bytecode_by_hash( &self, code_hash: &B256, - ) -> ProviderResult> { + ) -> ProviderResult> { ::bytecode_by_hash(self, code_hash) } diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 5f18a0fe6166..a2cbc5ce4602 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -17,6 +17,9 @@ pub mod batch; /// Database adapters for payload building. pub mod cached; +/// A marker that can be used to cancel execution. +pub mod cancelled; + /// Contains glue code for integrating reth database into revm's [Database]. pub mod database; diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 6d2dbf2ca01d..b40fbd0d7f48 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -4,7 +4,7 @@ use alloy_primitives::{ map::{B256HashMap, HashMap}, Address, BlockNumber, Bytes, StorageKey, B256, U256, }; -use reth_primitives::{Account, Bytecode}; +use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{ AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index abcdf98b5448..4eb9a6e653ce 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -32,6 +32,7 @@ alloy-rpc-types-admin.workspace = true alloy-serde.workspace = true alloy-rpc-types-beacon.workspace = true alloy-rpc-types-engine.workspace = true +alloy-genesis.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 281e0798fcb7..4b9dc82467c7 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,4 +1,5 @@ use 
alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_genesis::ChainConfig; use alloy_primitives::{Address, Bytes, B256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Bundle, StateContext}; @@ -186,6 +187,10 @@ pub trait DebugApi { #[method(name = "chaindbCompact")] async fn debug_chaindb_compact(&self) -> RpcResult<()>; + /// Returns the current chain config. + #[method(name = "chainConfig")] + async fn debug_chain_config(&self) -> RpcResult; + /// Returns leveldb properties of the key-value database. #[method(name = "chaindbProperty")] async fn debug_chaindb_property(&self, property: String) -> RpcResult<()>; diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 22dbc822ddfb..d75d44cfa924 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -16,8 +16,19 @@ use alloy_rpc_types_eth::{ EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; use alloy_serde::JsonStorageKey; -use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use jsonrpsee::{core::RpcResult, proc_macros::rpc, RpcModule}; use reth_engine_primitives::EngineTypes; + +/// Helper trait for the engine api server. +/// +/// This type-erases the concrete [`jsonrpsee`] server implementation and only returns the +/// [`RpcModule`] that contains all the endpoints of the server. +pub trait IntoEngineApiRpcModule { + /// Consumes the type and returns all the methods and subscriptions defined in the trait and + /// returns them as a single [`RpcModule`] + fn into_rpc_module(self) -> RpcModule<()>; +} + // NOTE: We can't use associated types in the `EngineApi` trait because of jsonrpsee, so we use a // generic here. It would be nice if the rpc macro would understand which types need to have serde. // By default, if the trait has a generic, the rpc macro will add e.g. 
`Engine: DeserializeOwned` to diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 196a4f4ec1ad..87c6605dfa92 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -39,7 +39,7 @@ pub mod servers { pub use crate::{ admin::AdminApiServer, debug::{DebugApiServer, DebugExecutionWitnessApiServer}, - engine::{EngineApiServer, EngineEthApiServer}, + engine::{EngineApiServer, EngineEthApiServer, IntoEngineApiRpcModule}, mev::{MevFullApiServer, MevSimApiServer}, miner::MinerApiServer, net::NetApiServer, diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 4ed30493765d..16caf4d791cd 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -29,7 +29,6 @@ reth-rpc-server-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-evm.workspace = true -reth-engine-primitives.workspace = true # rpc/net jsonrpsee = { workspace = true, features = ["server"] } @@ -66,6 +65,7 @@ reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-rpc-types-compat.workspace = true reth-primitives.workspace = true +reth-engine-primitives.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index d3cbfb2fc71f..7fb527017ba2 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -6,7 +6,6 @@ use jsonrpsee::{ server::{AlreadyStoppedError, RpcModule}, Methods, }; -use reth_engine_primitives::EngineTypes; use reth_rpc_api::servers::*; use reth_rpc_eth_types::EthSubscriptionIdProvider; use reth_rpc_layer::{ @@ -189,18 +188,10 @@ pub struct AuthRpcModule { pub(crate) inner: RpcModule<()>, } -// === impl AuthRpcModule === - impl AuthRpcModule { /// Create a new `AuthRpcModule` with the given `engine_api`. 
- pub fn new(engine: EngineApi) -> Self - where - EngineT: EngineTypes, - EngineApi: EngineApiServer, - { - let mut module = RpcModule::new(()); - module.merge(engine.into_rpc()).expect("No conflicting methods"); - Self { inner: module } + pub fn new(engine: impl IntoEngineApiRpcModule) -> Self { + Self { inner: engine.into_rpc_module() } } /// Get a reference to the inner `RpcModule`. diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 0b45b2431340..50e43d8aa45a 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -17,7 +17,7 @@ //! //! ``` //! use reth_consensus::{ConsensusError, FullConsensus}; -//! use reth_engine_primitives::PayloadValidator; +//! use reth_engine_primitives::{ExecutionData, PayloadValidator}; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_evm_ethereum::EthEvmConfig; //! use reth_network_api::{NetworkInfo, Peers}; @@ -31,14 +31,13 @@ //! use reth_transaction_pool::{PoolTransaction, TransactionPool}; //! use std::sync::Arc; //! -//! pub async fn launch( +//! pub async fn launch( //! provider: Provider, //! pool: Pool, //! network: Network, //! evm_config: EthEvmConfig, //! block_executor: BlockExecutor, //! consensus: Consensus, -//! validator: Validator, //! ) where //! Provider: FullRpcProvider< //! Transaction = TransactionSigned, @@ -58,7 +57,6 @@ //! Network: NetworkInfo + Peers + Clone + 'static, //! BlockExecutor: BlockExecutorProvider, //! Consensus: FullConsensus + Clone + 'static, -//! Validator: PayloadValidator, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -76,7 +74,7 @@ //! block_executor, //! consensus, //! ) -//! .build(transports, Box::new(EthApi::with_spawner), Arc::new(validator)); +//! .build(transports, Box::new(EthApi::with_spawner)); //! let handle = RpcServerConfig::default() //! .with_http(ServerBuilder::default()) //! 
.start(&transport_modules) @@ -89,14 +87,14 @@ //! //! ``` //! use reth_consensus::{ConsensusError, FullConsensus}; -//! use reth_engine_primitives::{EngineTypes, PayloadValidator}; +//! use reth_engine_primitives::{ExecutionData, PayloadValidator}; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_evm_ethereum::EthEvmConfig; //! use reth_network_api::{NetworkInfo, Peers}; //! use reth_primitives::{Header, PooledTransaction, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; -//! use reth_rpc_api::EngineApiServer; +//! use reth_rpc_api::{EngineApiServer, IntoEngineApiRpcModule}; //! use reth_rpc_builder::{ //! auth::AuthServerConfig, RethRpcModule, RpcModuleBuilder, RpcServerConfig, //! TransportRpcModuleConfig, @@ -107,24 +105,14 @@ //! use std::sync::Arc; //! use tokio::try_join; //! -//! pub async fn launch< -//! Provider, -//! Pool, -//! Network, -//! EngineApi, -//! EngineT, -//! BlockExecutor, -//! Consensus, -//! Validator, -//! >( +//! pub async fn launch( //! provider: Provider, //! pool: Pool, //! network: Network, -//! engine_api: EngineApi, +//! engine_api: impl IntoEngineApiRpcModule, //! evm_config: EthEvmConfig, //! block_executor: BlockExecutor, //! consensus: Consensus, -//! validator: Validator, //! ) where //! Provider: FullRpcProvider< //! Transaction = TransactionSigned, @@ -142,11 +130,8 @@ //! > + Unpin //! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, -//! EngineApi: EngineApiServer, -//! EngineT: EngineTypes, //! BlockExecutor: BlockExecutorProvider, //! Consensus: FullConsensus + Clone + 'static, -//! Validator: PayloadValidator, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -166,12 +151,8 @@ //! ); //! //! // configure the server modules -//! let (modules, auth_module, _registry) = builder.build_with_auth_server( -//! 
transports, -//! engine_api, -//! Box::new(EthApi::with_spawner), -//! Arc::new(validator), -//! ); +//! let (modules, auth_module, _registry) = +//! builder.build_with_auth_server(transports, engine_api, Box::new(EthApi::with_spawner)); //! //! // start the servers //! let auth_config = AuthServerConfig::builder(JwtSecret::random()).build(); @@ -213,7 +194,6 @@ use jsonrpsee::{ }; use reth_chainspec::EthereumHardforks; use reth_consensus::{ConsensusError, FullConsensus}; -use reth_engine_primitives::{EngineTypes, PayloadValidator}; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_primitives::NodePrimitives; @@ -223,7 +203,7 @@ use reth_provider::{ }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, - TraceApi, TxPoolApi, ValidationApi, ValidationApiConfig, Web3Api, + TraceApi, TxPoolApi, ValidationApiConfig, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ @@ -284,7 +264,6 @@ pub async fn launch, block_executor: BlockExecutor, consensus: Arc>, - payload_validator: Arc>, ) -> Result where Provider: FullRpcProvider< @@ -323,7 +302,7 @@ where block_executor, consensus, ) - .build(module_config, eth, payload_validator), + .build(module_config, eth), ) .await } @@ -609,20 +588,17 @@ where /// also configures the auth (engine api) server, which exposes a subset of the `eth_` /// namespace. 
#[allow(clippy::type_complexity)] - pub fn build_with_auth_server( + pub fn build_with_auth_server( self, module_config: TransportRpcModuleConfig, - engine: EngineApi, + engine: impl IntoEngineApiRpcModule, eth: DynEthApiBuilder, - payload_validator: Arc>, ) -> ( TransportRpcModules, AuthRpcModule, RpcRegistryInner, ) where - EngineT: EngineTypes, - EngineApi: EngineApiServer, EthApi: FullEthApiServer< Provider: BlockReader< Block = ::Block, @@ -646,7 +622,6 @@ where evm_config, eth, block_executor, - payload_validator, ); let modules = registry.create_transport_rpc_modules(module_config); @@ -665,7 +640,7 @@ where /// /// ```no_run /// use reth_consensus::noop::NoopConsensus; - /// use reth_engine_primitives::PayloadValidator; + /// use reth_engine_primitives::{ExecutionData, PayloadValidator}; /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; @@ -677,10 +652,9 @@ where /// use reth_transaction_pool::noop::NoopTransactionPool; /// use std::sync::Arc; /// - /// fn init(evm: Evm, validator: Validator) + /// fn init(evm: Evm) /// where /// Evm: ConfigureEvm
+ 'static, - /// Validator: PayloadValidator + 'static, /// { /// let mut registry = RpcModuleBuilder::default() /// .with_provider(NoopProvider::default()) @@ -690,7 +664,7 @@ where /// .with_evm_config(evm) /// .with_block_executor(EthExecutorProvider::mainnet()) /// .with_consensus(NoopConsensus::default()) - /// .into_registry(Default::default(), Box::new(EthApi::with_spawner), Arc::new(validator)); + /// .into_registry(Default::default(), Box::new(EthApi::with_spawner)); /// /// let eth_api = registry.eth_api(); /// } @@ -699,7 +673,6 @@ where self, config: RpcModuleConfig, eth: DynEthApiBuilder, - payload_validator: Arc>, ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, @@ -716,7 +689,6 @@ where evm_config, eth, block_executor, - payload_validator, ) } @@ -726,7 +698,6 @@ where self, module_config: TransportRpcModuleConfig, eth: DynEthApiBuilder, - payload_validator: Arc>, ) -> TransportRpcModules<()> where EthApi: FullEthApiServer< @@ -756,7 +727,6 @@ where evm_config, eth, block_executor, - payload_validator, ); modules.config = module_config; @@ -854,6 +824,7 @@ impl RpcModuleConfigBuilder { /// A Helper type the holds instances of the configured modules. #[derive(Debug, Clone)] +#[expect(dead_code)] // Consensus generic, might be useful in the future pub struct RpcRegistryInner< Provider: BlockReader, Pool, @@ -869,9 +840,6 @@ pub struct RpcRegistryInner< executor: Tasks, block_executor: BlockExecutor, consensus: Consensus, - payload_validator: Arc>, - /// Holds the configuration for the RPC modules - config: RpcModuleConfig, /// Holds a all `eth_` namespace handlers eth: EthHandlers, /// to put trace calls behind semaphore @@ -910,7 +878,6 @@ where evm_config: EvmConfig, eth_api_builder: DynEthApiBuilder, block_executor: BlockExecutor, - payload_validator: Arc>, ) -> Self where EvmConfig: ConfigureEvm
, @@ -934,11 +901,9 @@ where eth, executor, consensus, - config, modules: Default::default(), blocking_pool_guard, block_executor, - payload_validator, } } } @@ -1212,23 +1177,6 @@ where pub fn reth_api(&self) -> RethApi { RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) } - - /// Instantiates `ValidationApi` - pub fn validation_api(&self) -> ValidationApi - where - Consensus: - FullConsensus + Clone + 'static, - Provider: BlockReader::Block>, - { - ValidationApi::new( - self.provider.clone(), - Arc::new(self.consensus.clone()), - self.block_executor.clone(), - self.config.flashbots.clone(), - Box::new(self.executor.clone()), - self.payload_validator.clone(), - ) - } } impl @@ -1256,14 +1204,8 @@ where /// * `api_` namespace /// /// Note: This does _not_ register the `engine_` in this registry. - pub fn create_auth_module(&self, engine_api: EngineApi) -> AuthRpcModule - where - EngineT: EngineTypes, - EngineApi: EngineApiServer, - { - let mut module = RpcModule::new(()); - - module.merge(engine_api.into_rpc()).expect("No conflicting methods"); + pub fn create_auth_module(&self, engine_api: impl IntoEngineApiRpcModule) -> AuthRpcModule { + let mut module = engine_api.into_rpc_module(); // also merge a subset of `eth_` handlers let eth_handlers = self.eth_handlers(); @@ -1391,16 +1333,10 @@ where .into_rpc() .into() } - RethRpcModule::Flashbots => ValidationApi::new( - eth_api.provider().clone(), - Arc::new(self.consensus.clone()), - self.block_executor.clone(), - self.config.flashbots.clone(), - Box::new(self.executor.clone()), - self.payload_validator.clone(), - ) - .into_rpc() - .into(), + // only relevant for Ethereum and configured in `EthereumAddOns` + // implementation + // TODO: can we get rid of this here? 
+ RethRpcModule::Flashbots => Default::default(), RethRpcModule::Miner => MinerApi::default().into_rpc().into(), }) .clone() diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index 0e0bb80c08b9..96d818ed4f94 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -5,8 +5,6 @@ use jsonrpsee::{ types::Request, MethodResponse, }; -use reth_chainspec::MAINNET; -use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_rpc::EthApi; use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; use reth_rpc_eth_api::EthApiClient; @@ -65,7 +63,6 @@ async fn test_rpc_middleware() { let modules = builder.build( TransportRpcModuleConfig::set_http(RpcModuleSelection::All), Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let mylayer = MyMiddlewareLayer::default(); diff --git a/crates/rpc/rpc-builder/tests/it/startup.rs b/crates/rpc/rpc-builder/tests/it/startup.rs index ac53b014956a..9f6961fbba0d 100644 --- a/crates/rpc/rpc-builder/tests/it/startup.rs +++ b/crates/rpc/rpc-builder/tests/it/startup.rs @@ -1,9 +1,7 @@ //! 
Startup tests -use std::{io, sync::Arc}; +use std::io; -use reth_chainspec::MAINNET; -use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_rpc::EthApi; use reth_rpc_builder::{ error::{RpcError, ServerKind, WsHttpSamePortError}, @@ -32,7 +30,6 @@ async fn test_http_addr_in_use() { let server = builder.build( TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]), Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let result = RpcServerConfig::http(Default::default()).with_http_address(addr).start(&server).await; @@ -48,7 +45,6 @@ async fn test_ws_addr_in_use() { let server = builder.build( TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]), Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let result = RpcServerConfig::ws(Default::default()).with_ws_address(addr).start(&server).await; let err = result.unwrap_err(); @@ -70,7 +66,6 @@ async fn test_launch_same_port_different_modules() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) @@ -93,7 +88,6 @@ async fn test_launch_same_port_same_cors() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) @@ -114,7 +108,6 @@ async fn test_launch_same_port_different_cors() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) diff --git 
a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 21251628e8e5..67b845970e94 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,7 +1,4 @@ -use std::{ - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - sync::Arc, -}; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_chainspec::MAINNET; @@ -63,11 +60,8 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { /// Launches a new server with http only with the given modules pub async fn launch_http(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = builder.build( - TransportRpcModuleConfig::set_http(modules), - Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(MAINNET.clone())), - ); + let server = + builder.build(TransportRpcModuleConfig::set_http(modules), Box::new(EthApi::with_spawner)); RpcServerConfig::http(Default::default()) .with_http_address(test_address()) .start(&server) @@ -78,11 +72,8 @@ pub async fn launch_http(modules: impl Into) -> RpcServerHan /// Launches a new server with ws only with the given modules pub async fn launch_ws(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = builder.build( - TransportRpcModuleConfig::set_ws(modules), - Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(MAINNET.clone())), - ); + let server = + builder.build(TransportRpcModuleConfig::set_ws(modules), Box::new(EthApi::with_spawner)); RpcServerConfig::ws(Default::default()) .with_ws_address(test_address()) .start(&server) @@ -97,7 +88,6 @@ pub async fn launch_http_ws(modules: impl Into) -> RpcServer let server = builder.build( TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); RpcServerConfig::ws(Default::default()) 
.with_ws_address(test_address()) @@ -116,7 +106,6 @@ pub async fn launch_http_ws_same_port(modules: impl Into) -> let server = builder.build( TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); RpcServerConfig::ws(Default::default()) diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index b358480e702f..d21f8dc5c9d6 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -20,9 +20,9 @@ reth-payload-builder.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true -reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true reth-transaction-pool.workspace = true +reth-primitives-traits.workspace = true # ethereum alloy-eips.workspace = true @@ -48,10 +48,9 @@ parking_lot.workspace = true [dev-dependencies] reth-ethereum-engine-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } -reth-primitives.workspace = true +reth-ethereum-primitives.workspace = true reth-primitives-traits.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } -reth-tokio-util.workspace = true reth-testing-utils.workspace = true alloy-rlp.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index a092bdbdc705..e990479a06a9 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -4,27 +4,30 @@ use crate::{ use alloy_eips::{ eip1898::BlockHashOrNumber, eip4844::BlobAndProofV1, + eip4895::Withdrawals, eip7685::{Requests, RequestsOrHash}, }; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, 
ExecutionPayloadBodiesV1, - ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV3, - ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, PraguePayloadFields, - TransitionConfiguration, + ExecutionPayloadBodyV1, ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, + ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + PraguePayloadFields, TransitionConfiguration, }; use async_trait::async_trait; -use jsonrpsee_core::RpcResult; +use jsonrpsee_core::{server::RpcModule, RpcResult}; use parking_lot::Mutex; use reth_chainspec::{EthereumHardfork, EthereumHardforks}; -use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes, EngineValidator}; +use reth_engine_primitives::{ + BeaconConsensusEngineHandle, EngineTypes, EngineValidator, ExecutionData, +}; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_rpc_api::EngineApiServer; -use reth_rpc_types_compat::engine::payload::convert_to_payload_body_v1; +use reth_primitives_traits::{Block, BlockBody}; +use reth_rpc_api::{EngineApiServer, IntoEngineApiRpcModule}; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; @@ -76,7 +79,7 @@ impl EngineApi where Provider: HeaderProvider + BlockReader + StateProviderFactory + 'static, - EngineT: EngineTypes, + EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, ChainSpec: EthereumHardforks + Send + Sync + 'static, @@ -148,7 +151,7 @@ where Ok(self .inner .beacon_consensus - .new_payload(payload, ExecutionPayloadSidecar::none()) + .new_payload(ExecutionData { payload, sidecar: ExecutionPayloadSidecar::none() }) .await .inspect(|_| self.inner.on_new_payload_response())?) 
} @@ -183,7 +186,7 @@ where Ok(self .inner .beacon_consensus - .new_payload(payload, ExecutionPayloadSidecar::none()) + .new_payload(ExecutionData { payload, sidecar: ExecutionPayloadSidecar::none() }) .await .inspect(|_| self.inner.on_new_payload_response())?) } @@ -222,13 +225,13 @@ where Ok(self .inner .beacon_consensus - .new_payload( + .new_payload(ExecutionData { payload, - ExecutionPayloadSidecar::v3(CancunPayloadFields { + sidecar: ExecutionPayloadSidecar::v3(CancunPayloadFields { versioned_hashes, parent_beacon_block_root, }), - ) + }) .await .inspect(|_| self.inner.on_new_payload_response())?) } @@ -272,13 +275,13 @@ where Ok(self .inner .beacon_consensus - .new_payload( + .new_payload(ExecutionData { payload, - ExecutionPayloadSidecar::v4( + sidecar: ExecutionPayloadSidecar::v4( CancunPayloadFields { versioned_hashes, parent_beacon_block_root }, PraguePayloadFields { requests: RequestsOrHash::Requests(execution_requests) }, ), - ) + }) .await .inspect(|_| self.inner.on_new_payload_response())?) } @@ -551,7 +554,11 @@ where start: BlockNumber, count: u64, ) -> EngineApiResult { - self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v1).await + self.get_payload_bodies_by_range_with(start, count, |block| ExecutionPayloadBodyV1 { + transactions: block.body().encoded_2718_transactions(), + withdrawals: block.body().withdrawals().cloned().map(Withdrawals::into_inner), + }) + .await } /// Called to retrieve execution payload bodies by hashes. 
@@ -597,7 +604,11 @@ where &self, hashes: Vec, ) -> EngineApiResult { - self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v1).await + self.get_payload_bodies_by_hash_with(hashes, |block| ExecutionPayloadBodyV1 { + transactions: block.body().encoded_2718_transactions(), + withdrawals: block.body().withdrawals().cloned().map(Withdrawals::into_inner), + }) + .await } /// Called to verify network configuration parameters and ensure that Consensus and Execution @@ -737,7 +748,7 @@ impl EngineApiServer for EngineApi where Provider: HeaderProvider + BlockReader + StateProviderFactory + 'static, - EngineT: EngineTypes, + EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, ChainSpec: EthereumHardforks + Send + Sync + 'static, @@ -1009,6 +1020,17 @@ where } } +impl IntoEngineApiRpcModule + for EngineApi +where + EngineT: EngineTypes, + Self: EngineApiServer, +{ + fn into_rpc_module(self) -> RpcModule<()> { + self.into_rpc().remove_context() + } +} + impl std::fmt::Debug for EngineApi where @@ -1027,8 +1049,8 @@ mod tests { use reth_chainspec::{ChainSpec, EthereumHardfork, MAINNET}; use reth_engine_primitives::BeaconEngineMessage; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; + use reth_ethereum_primitives::Block; use reth_payload_builder::test_utils::spawn_test_payload_service; - use reth_primitives::{Block, TransactionSigned}; use reth_provider::test_utils::MockEthProvider; use reth_tasks::TokioTaskExecutor; use reth_testing_utils::generators::random_block; @@ -1096,11 +1118,9 @@ mod tests { let (mut handle, api) = setup_engine_api(); tokio::spawn(async move { - api.new_payload_v1(ExecutionPayloadV1::from_block_slow( - &Block::::default(), - )) - .await - .unwrap(); + api.new_payload_v1(ExecutionPayloadV1::from_block_slow(&Block::default())) + .await + .unwrap(); }); assert_matches!(handle.from_api.recv().await, Some(BeaconEngineMessage::NewPayload { .. 
})); } diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 9325ce267784..6a7cf7ce3c84 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -2,7 +2,7 @@ use std::time::Duration; use crate::EngineApiError; use alloy_rpc_types_engine::{ForkchoiceUpdated, PayloadStatus, PayloadStatusEnum}; -use metrics::{Counter, Histogram}; +use metrics::{Counter, Gauge, Histogram}; use reth_metrics::Metrics; /// All beacon consensus engine metrics @@ -100,6 +100,8 @@ pub(crate) struct NewPayloadStatusResponseMetrics { pub(crate) new_payload_total_gas: Histogram, /// The gas per second of valid new payload messages received. pub(crate) new_payload_gas_per_second: Histogram, + /// Latency for the last `engine_newPayloadV*` call + pub(crate) new_payload_last: Gauge, } impl NewPayloadStatusResponseMetrics { @@ -110,6 +112,7 @@ impl NewPayloadStatusResponseMetrics { gas_used: u64, time: Duration, ) { + self.new_payload_last.set(time); match result { Ok(status) => match status.status { PayloadStatusEnum::Valid => { diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 2158320b5814..c7eae89ff555 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -8,13 +8,13 @@ use alloy_rpc_types_engine::{ PayloadError, }; use assert_matches::assert_matches; -use reth_primitives::{Block, SealedBlock, TransactionSigned}; -use reth_primitives_traits::proofs; +use reth_ethereum_primitives::{Block, TransactionSigned}; +use reth_primitives_traits::{proofs, SealedBlock}; use reth_testing_utils::generators::{ self, random_block, random_block_range, BlockParams, BlockRangeParams, Rng, }; -fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { +fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { let unsealed = src.into_block(); let mut transformed: Block = 
f(unsealed); // Recalculate roots diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index b26f9e25c913..6b868a457d3b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -139,6 +139,16 @@ pub trait EstimateCall: Call { &mut db, )) } + Err(err) if err.is_gas_too_low() => { + // This failed because the configured gas cost of the tx was lower than what was + // actually consumed by the tx. This can happen if the + // request provided fee values manually and the resulting gas cost exceeds the + // sender's allowance, so we return the appropriate error here + return Err(RpcInvalidTransactionError::GasRequiredExceedsAllowance { + gas_limit: tx_env.gas_limit(), + } + .into_eth_err()) + } // Propagate other results (successful or other errors). ethres => ethres?, }; diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index f73dbb5a1b81..b4aa4a99117e 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -1,19 +1,24 @@ //! Trait for specifying `eth` network dependent API types. -use std::{ - error::Error, - fmt::{self}, -}; - +use crate::{AsEthApiError, FromEthApiError, RpcNodeCore}; use alloy_network::Network; use alloy_rpc_types_eth::Block; use reth_provider::{ProviderTx, ReceiptProvider, TransactionsProvider}; use reth_rpc_types_compat::TransactionCompat; use reth_transaction_pool::{PoolTransaction, TransactionPool}; - -use crate::{AsEthApiError, FromEthApiError, RpcNodeCore}; +use std::{ + error::Error, + fmt::{self}, +}; /// Network specific `eth` API types. +/// +/// This trait defines the network specific rpc types and helpers required for the `eth_` and +/// adjacent endpoints. `NetworkTypes` is [`Network`] as defined by the alloy crate, see also +/// [`alloy_network::Ethereum`]. +/// +/// This type is stateful so that it can provide additional context if necessary, e.g.
populating +/// receipts with additional data. pub trait EthApiTypes: Send + Sync + Clone { /// Extension of [`FromEthApiError`], with network specific errors. type Error: Into> diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index d798002f33cd..9a62a7a1fc5e 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -8,7 +8,7 @@ use core::time::Duration; use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, U256}; use alloy_rpc_types_eth::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; -use alloy_sol_types::decode_revert_reason; +use alloy_sol_types::{ContractError, RevertReason}; use reth_errors::RethError; use reth_primitives_traits::transaction::signed::RecoveryError; use reth_rpc_server_types::result::{ @@ -338,6 +338,16 @@ pub enum RpcInvalidTransactionError { /// Current balance of transaction sender. balance: U256, }, + /// This is similar to [`Self::InsufficientFunds`] but with a different error message and + /// exists for compatibility reasons. + /// + /// This error is used in `eth_estimateCall` when the highest available gas limit, capped with + /// the allowance of the caller is too low: [`Self::GasTooLow`]. + #[error("gas required exceeds allowance ({gas_limit})")] + GasRequiredExceedsAllowance { + /// The gas limit the transaction was executed with. + gas_limit: u64, + }, /// Thrown when calculating gas usage #[error("gas uint64 overflow")] GasUintOverflow, @@ -448,9 +458,10 @@ impl RpcInvalidTransactionError { /// Returns the rpc error code for this error. pub const fn error_code(&self) -> i32 { match self { - Self::InvalidChainId | Self::GasTooLow | Self::GasTooHigh => { - EthRpcErrorCode::InvalidInput.code() - } + Self::InvalidChainId | + Self::GasTooLow | + Self::GasTooHigh | + Self::GasRequiredExceedsAllowance { .. 
} => EthRpcErrorCode::InvalidInput.code(), Self::Revert(_) => EthRpcErrorCode::ExecutionError.code(), _ => EthRpcErrorCode::TransactionRejected.code(), } @@ -615,8 +626,14 @@ impl RevertError { impl std::fmt::Display for RevertError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("execution reverted")?; - if let Some(reason) = self.output.as_ref().and_then(|bytes| decode_revert_reason(bytes)) { - write!(f, ": {reason}")?; + if let Some(reason) = self.output.as_ref().and_then(|out| RevertReason::decode(out)) { + let error = reason.to_string(); + let mut error = error.as_str(); + if matches!(reason, RevertReason::ContractError(ContractError::Revert(_))) { + // we strip redundant `revert: ` prefix from the revert reason + error = error.trim_start_matches("revert: "); + } + write!(f, ": {error}")?; } Ok(()) } @@ -768,9 +785,9 @@ pub fn ensure_success(result: ExecutionResult) -> EthResult { #[cfg(test)] mod tests { - use revm_primitives::b256; - use super::*; + use alloy_sol_types::{Revert, SolError}; + use revm_primitives::b256; #[test] fn timed_out_error() { @@ -805,4 +822,12 @@ mod tests { EthApiError::HeaderNotFound(BlockId::finalized()).into(); assert_eq!(err.message(), "block not found: finalized"); } + + #[test] + fn revert_err_display() { + let revert = Revert::from("test_revert_reason"); + let err = RevertError::new(revert.abi_encode().into()); + let msg = err.to_string(); + assert_eq!(msg, "execution reverted: test_revert_reason"); + } } diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index f2d5ce2e2a30..5a8456f3c79b 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -17,10 +17,8 @@ reth-primitives.workspace = true reth-primitives-traits.workspace = true # ethereum -alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth = { workspace = true, default-features = false, features = ["serde"] } 
-alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true # io diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 35c5e6c07f2d..76abbb19ed74 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -6,7 +6,7 @@ use alloy_primitives::U256; use alloy_rpc_types_eth::{ Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; -use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, RecoveredBlock}; +use reth_primitives::RecoveredBlock; use reth_primitives_traits::{Block as BlockTrait, BlockBody, SealedHeader, SignedTransaction}; /// Converts the given primitive block into a [`Block`] response with the given @@ -64,28 +64,23 @@ where B: BlockTrait, { let block_number = block.header().number(); - let base_fee_per_gas = block.header().base_fee_per_gas(); - - // NOTE: we can safely remove the body here because not needed to finalize the `Block` in - // `from_block_with_transactions`, however we need to compute the length before + let base_fee = block.header().base_fee_per_gas(); let block_length = block.rlp_length(); - let transactions = block.body().transactions().to_vec(); - let transactions_with_senders = transactions.into_iter().zip(block.senders_iter().copied()); let block_hash = Some(block.hash()); - let transactions = transactions_with_senders + + let transactions = block + .transactions_recovered() .enumerate() - .map(|(idx, (tx, sender))| { - let tx_hash = *tx.tx_hash(); - let signed_tx_ec_recovered = tx.with_signer(sender); + .map(|(idx, tx)| { let tx_info = TransactionInfo { - hash: Some(tx_hash), + hash: Some(*tx.tx_hash()), block_hash, block_number: Some(block_number), - base_fee: base_fee_per_gas, + base_fee, index: Some(idx as u64), }; - tx_resp_builder.fill(signed_tx_ec_recovered, tx_info) + tx_resp_builder.fill(tx.cloned(), tx_info) }) .collect::, T::Error>>()?; diff --git 
a/crates/rpc/rpc-types-compat/src/engine/mod.rs b/crates/rpc/rpc-types-compat/src/engine/mod.rs deleted file mode 100644 index 9d5ebb9a5f1c..000000000000 --- a/crates/rpc/rpc-types-compat/src/engine/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! Standalone functions for engine specific rpc type conversions -pub mod payload; diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs deleted file mode 100644 index 3690d682c89d..000000000000 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ /dev/null @@ -1,253 +0,0 @@ -//! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in -//! Ethereum's Engine - -use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; -use alloy_rpc_types_engine::payload::ExecutionPayloadBodyV1; -use reth_primitives_traits::BlockBody as _; - -/// Converts a [`reth_primitives_traits::Block`] to [`ExecutionPayloadBodyV1`] -pub fn convert_to_payload_body_v1( - value: impl reth_primitives_traits::Block, -) -> ExecutionPayloadBodyV1 { - let transactions = value.body().transactions_iter().map(|tx| tx.encoded_2718().into()); - ExecutionPayloadBodyV1 { - transactions: transactions.collect(), - withdrawals: value.body().withdrawals().cloned().map(Withdrawals::into_inner), - } -} - -#[cfg(test)] -mod tests { - use alloy_primitives::{b256, hex, Bytes, U256}; - use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, - ExecutionPayloadV2, ExecutionPayloadV3, - }; - use reth_primitives::{Block, TransactionSigned}; - - #[test] - fn roundtrip_payload_to_block() { - let first_transaction_raw = 
Bytes::from_static(&hex!("02f9017a8501a1f0ff438211cc85012a05f2008512a05f2000830249f094d5409474fd5a725eab2ac9a8b26ca6fb51af37ef80b901040cc7326300000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000001bdd2ed4b616c800000000000000000000000000001e9ee781dd4b97bdef92e5d1785f73a1f931daa20000000000000000000000007a40026a3b9a41754a95eec8c92c6b99886f440c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000009ae80eb647dd09968488fa1d7e412bf8558a0b7a0000000000000000000000000f9815537d361cb02befd9918c95c97d4d8a4a2bc001a0ba8f1928bb0efc3fcd01524a2039a9a2588fa567cd9a7cc18217e05c615e9d69a0544bfd11425ac7748e76b3795b57a5563e2b0eff47b5428744c62ff19ccfc305")[..]); - let second_transaction_raw = Bytes::from_static(&hex!("03f901388501a1f0ff430c843b9aca00843b9aca0082520894e7249813d8ccf6fa95a2203f46a64166073d58878080c005f8c6a00195f6dff17753fc89b60eac6477026a805116962c9e412de8015c0484e661c1a001aae314061d4f5bbf158f15d9417a238f9589783f58762cd39d05966b3ba2fba0013f5be9b12e7da06f0dd11a7bdc4e0db8ef33832acc23b183bd0a2c1408a757a0019d9ac55ea1a615d92965e04d960cb3be7bff121a381424f1f22865bd582e09a001def04412e76df26fefe7b0ed5e10580918ae4f355b074c0cfe5d0259157869a0011c11a415db57e43db07aef0de9280b591d65ca0cce36c7002507f8191e5d4a80a0c89b59970b119187d97ad70539f1624bbede92648e2dc007890f9658a88756c5a06fb2e3d4ce2c438c0856c2de34948b7032b1aadc4642a9666228ea8cdc7786b7")[..]); - - let new_payload = ExecutionPayloadV3 { - payload_inner: ExecutionPayloadV2 { - payload_inner: ExecutionPayloadV1 { - base_fee_per_gas: U256::from(7u64), - block_number: 0xa946u64, - block_hash: hex!("a5ddd3f286f429458a39cafc13ffe89295a7efa8eb363cf89a1a4887dbcf272b").into(), - logs_bloom: 
hex!("00200004000000000000000080000000000200000000000000000000000000000000200000000000000000000000000000000000800000000200000000000000000000000000000000000008000000200000000000000000000001000000000000000000000000000000800000000000000000000100000000000030000000000000000040000000000000000000000000000000000800080080404000000000000008000000000008200000000000200000000000000000000000000000000000000002000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000100000000000000000000").into(), - extra_data: hex!("d883010d03846765746888676f312e32312e31856c696e7578").into(), - gas_limit: 0x1c9c380, - gas_used: 0x1f4a9, - timestamp: 0x651f35b8, - fee_recipient: hex!("f97e180c050e5ab072211ad2c213eb5aee4df134").into(), - parent_hash: hex!("d829192799c73ef28a7332313b3c03af1f2d5da2c36f8ecfafe7a83a3bfb8d1e").into(), - prev_randao: hex!("753888cc4adfbeb9e24e01c84233f9d204f4a9e1273f0e29b43c4c148b2b8b7e").into(), - receipts_root: hex!("4cbc48e87389399a0ea0b382b1c46962c4b8e398014bf0cc610f9c672bee3155").into(), - state_root: hex!("017d7fa2b5adb480f5e05b2c95cb4186e12062eed893fc8822798eed134329d1").into(), - transactions: vec![first_transaction_raw, second_transaction_raw], - }, - withdrawals: vec![], - }, - blob_gas_used: 0xc0000, - excess_blob_gas: 0x580000, - }; - - let mut block: Block = new_payload.clone().try_into_block().unwrap(); - - // this newPayload came with a parent beacon block root, we need to manually insert it - // before hashing - let parent_beacon_block_root = - b256!("531cd53b8e68deef0ea65edfa3cda927a846c307b0907657af34bc3f313b5871"); - block.header.parent_beacon_block_root = Some(parent_beacon_block_root); - - let converted_payload = ExecutionPayloadV3::from_block_unchecked(block.hash_slow(), &block); - - // ensure the payloads are the same - assert_eq!(new_payload, converted_payload); - } - - #[test] - fn payload_to_block_rejects_network_encoded_tx() { - let first_transaction_raw = 
Bytes::from_static(&hex!("b9017e02f9017a8501a1f0ff438211cc85012a05f2008512a05f2000830249f094d5409474fd5a725eab2ac9a8b26ca6fb51af37ef80b901040cc7326300000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000001bdd2ed4b616c800000000000000000000000000001e9ee781dd4b97bdef92e5d1785f73a1f931daa20000000000000000000000007a40026a3b9a41754a95eec8c92c6b99886f440c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000009ae80eb647dd09968488fa1d7e412bf8558a0b7a0000000000000000000000000f9815537d361cb02befd9918c95c97d4d8a4a2bc001a0ba8f1928bb0efc3fcd01524a2039a9a2588fa567cd9a7cc18217e05c615e9d69a0544bfd11425ac7748e76b3795b57a5563e2b0eff47b5428744c62ff19ccfc305")[..]); - let second_transaction_raw = Bytes::from_static(&hex!("b9013c03f901388501a1f0ff430c843b9aca00843b9aca0082520894e7249813d8ccf6fa95a2203f46a64166073d58878080c005f8c6a00195f6dff17753fc89b60eac6477026a805116962c9e412de8015c0484e661c1a001aae314061d4f5bbf158f15d9417a238f9589783f58762cd39d05966b3ba2fba0013f5be9b12e7da06f0dd11a7bdc4e0db8ef33832acc23b183bd0a2c1408a757a0019d9ac55ea1a615d92965e04d960cb3be7bff121a381424f1f22865bd582e09a001def04412e76df26fefe7b0ed5e10580918ae4f355b074c0cfe5d0259157869a0011c11a415db57e43db07aef0de9280b591d65ca0cce36c7002507f8191e5d4a80a0c89b59970b119187d97ad70539f1624bbede92648e2dc007890f9658a88756c5a06fb2e3d4ce2c438c0856c2de34948b7032b1aadc4642a9666228ea8cdc7786b7")[..]); - - let new_payload = ExecutionPayloadV3 { - payload_inner: ExecutionPayloadV2 { - payload_inner: ExecutionPayloadV1 { - base_fee_per_gas: U256::from(7u64), - block_number: 0xa946u64, - block_hash: hex!("a5ddd3f286f429458a39cafc13ffe89295a7efa8eb363cf89a1a4887dbcf272b").into(), - logs_bloom: 
hex!("00200004000000000000000080000000000200000000000000000000000000000000200000000000000000000000000000000000800000000200000000000000000000000000000000000008000000200000000000000000000001000000000000000000000000000000800000000000000000000100000000000030000000000000000040000000000000000000000000000000000800080080404000000000000008000000000008200000000000200000000000000000000000000000000000000002000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000100000000000000000000").into(), - extra_data: hex!("d883010d03846765746888676f312e32312e31856c696e7578").into(), - gas_limit: 0x1c9c380, - gas_used: 0x1f4a9, - timestamp: 0x651f35b8, - fee_recipient: hex!("f97e180c050e5ab072211ad2c213eb5aee4df134").into(), - parent_hash: hex!("d829192799c73ef28a7332313b3c03af1f2d5da2c36f8ecfafe7a83a3bfb8d1e").into(), - prev_randao: hex!("753888cc4adfbeb9e24e01c84233f9d204f4a9e1273f0e29b43c4c148b2b8b7e").into(), - receipts_root: hex!("4cbc48e87389399a0ea0b382b1c46962c4b8e398014bf0cc610f9c672bee3155").into(), - state_root: hex!("017d7fa2b5adb480f5e05b2c95cb4186e12062eed893fc8822798eed134329d1").into(), - transactions: vec![first_transaction_raw, second_transaction_raw], - }, - withdrawals: vec![], - }, - blob_gas_used: 0xc0000, - excess_blob_gas: 0x580000, - }; - - let _block = new_payload - .try_into_block::() - .expect_err("execution payload conversion requires typed txs without a rlp header"); - } - - #[test] - fn devnet_invalid_block_hash_repro() { - let deser_block = r#" - { - "parentHash": "0xae8315ee86002e6269a17dd1e9516a6cf13223e9d4544d0c32daff826fb31acc", - "feeRecipient": "0xf97e180c050e5ab072211ad2c213eb5aee4df134", - "stateRoot": "0x03787f1579efbaa4a8234e72465eb4e29ef7e62f61242d6454661932e1a282a1", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "prevRandao": "0x918e86b497dc15de7d606457c36ca583e24d9b0a110a814de46e33d5bb824a66", - "blockNumber": "0x6a784", - "gasLimit": "0x1c9c380", - "gasUsed": "0x0", - "timestamp": "0x65bc1d60", - "extraData": "0x9a726574682f76302e312e302d616c7068612e31362f6c696e7578", - "baseFeePerGas": "0x8", - "blobGasUsed": "0x0", - "excessBlobGas": "0x0", - "blockHash": "0x340c157eca9fd206b87c17f0ecbe8d411219de7188a0a240b635c88a96fe91c5", - "transactions": [], - "withdrawals": [ - { - "index": "0x5ab202", - "validatorIndex": "0xb1b", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x19b3d" - }, - { - "index": "0x5ab203", - "validatorIndex": "0xb1c", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x15892" - }, - { - "index": "0x5ab204", - "validatorIndex": "0xb1d", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x19b3d" - }, - { - "index": "0x5ab205", - "validatorIndex": "0xb1e", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x19b3d" - }, - { - "index": "0x5ab206", - "validatorIndex": "0xb1f", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x19b3d" - }, - { - "index": "0x5ab207", - "validatorIndex": "0xb20", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x19b3d" - }, - { - "index": "0x5ab208", - "validatorIndex": "0xb21", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x15892" - }, - { - "index": 
"0x5ab209", - "validatorIndex": "0xb22", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x19b3d" - }, - { - "index": "0x5ab20a", - "validatorIndex": "0xb23", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x19b3d" - }, - { - "index": "0x5ab20b", - "validatorIndex": "0xb24", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x17db2" - }, - { - "index": "0x5ab20c", - "validatorIndex": "0xb25", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x19b3d" - }, - { - "index": "0x5ab20d", - "validatorIndex": "0xb26", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x19b3d" - }, - { - "index": "0x5ab20e", - "validatorIndex": "0xa91", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x15892" - }, - { - "index": "0x5ab20f", - "validatorIndex": "0xa92", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x1c05d" - }, - { - "index": "0x5ab210", - "validatorIndex": "0xa93", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x15892" - }, - { - "index": "0x5ab211", - "validatorIndex": "0xa94", - "address": "0x388ea662ef2c223ec0b047d41bf3c0f362142ad5", - "amount": "0x19b3d" - } - ] - } - "#; - - // deserialize payload - let payload: ExecutionPayload = - serde_json::from_str::(deser_block).unwrap().into(); - - // NOTE: the actual block hash here is incorrect, it is a result of a bug, this was the - // fix: - // - let block_hash_with_blob_fee_fields = - b256!("a7cdd5f9e54147b53a15833a8c45dffccbaed534d7fdc23458f45102a4bf71b0"); - - let versioned_hashes = vec![]; - let parent_beacon_block_root = - b256!("1162de8a0f4d20d86b9ad6e0a2575ab60f00a433dc70d9318c8abc9041fddf54"); - - // set up cancun payload fields - let cancun_fields = CancunPayloadFields { parent_beacon_block_root, versioned_hashes }; - - // convert into block - let block = payload - .try_into_block_with_sidecar::(&ExecutionPayloadSidecar::v3( - 
cancun_fields, - )) - .unwrap(); - - // Ensure the actual hash is calculated if we set the fields to what they should be - assert_eq!(block_hash_with_blob_fee_fields, block.header.hash_slow()); - } -} diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-types-compat/src/lib.rs index 206d502f87d3..40e2a20c4a90 100644 --- a/crates/rpc/rpc-types-compat/src/lib.rs +++ b/crates/rpc/rpc-types-compat/src/lib.rs @@ -11,6 +11,5 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod block; -pub mod engine; pub mod transaction; pub use transaction::TransactionCompat; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 5192f0892a14..3886902fb019 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -36,6 +36,7 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-types.workspace = true reth-consensus.workspace = true +reth-node-api.workspace = true # ethereum alloy-consensus.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 1770ea386e1a..b87c8a05c3e3 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,5 +1,6 @@ use alloy_consensus::BlockHeader; use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; +use alloy_genesis::ChainConfig; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use alloy_rpc_types_debug::ExecutionWitness; @@ -14,7 +15,7 @@ use alloy_rpc_types_trace::geth::{ }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::{ env::EvmEnv, execute::{BlockExecutorProvider, Executor}, @@ -1036,6 +1037,10 @@ where Ok(()) } + async fn debug_chain_config(&self) -> RpcResult { + Ok(self.provider().chain_spec().genesis().config.clone()) + } + async fn debug_chaindb_property(&self, _property: String) -> RpcResult<()> { Ok(()) } diff 
--git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index bc4bdc9bbe18..bbc583ae77a2 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -163,7 +163,7 @@ where while let Some(tx) = transactions.next() { let signer = tx.signer(); let tx = { - let mut tx: ::Transaction = tx.into(); + let mut tx = ::Transaction::from_pooled(tx); if let EthBlobTransactionSidecar::Present(sidecar) = tx.take_blob() { tx.validate_blob(&sidecar, EnvKzgSettings::Default.get()).map_err( diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 63978871e4cb..3adcf9e5bd3c 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -9,6 +9,7 @@ use alloy_eips::BlockNumberOrTag; use alloy_network::Ethereum; use alloy_primitives::{Bytes, U256}; use derive_more::Deref; +use reth_node_api::{FullNodeComponents, FullNodeTypes}; use reth_primitives::NodePrimitives; use reth_provider::{ BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ProviderBlock, @@ -31,6 +32,14 @@ use tokio::sync::{broadcast, Mutex}; const DEFAULT_BROADCAST_CAPACITY: usize = 2000; +/// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. +pub type EthApiFor = EthApi< + ::Provider, + ::Pool, + ::Network, + ::Evm, +>; + /// `Eth` API implementation. /// /// This type provides the functionality for handling `eth_` related requests. @@ -40,6 +49,11 @@ const DEFAULT_BROADCAST_CAPACITY: usize = 2000; /// separately in submodules. The rpc handler implementation can then delegate to the main impls. /// This way [`EthApi`] is not limited to [`jsonrpsee`] and can be used standalone or in other /// network handlers (for example ipc). +/// +/// ## Trait requirements +/// +/// While this type requires various unrestricted generic components, trait bounds are enforced when +/// additional traits are implemented for this type. 
#[derive(Deref)] pub struct EthApi { /// All nested fields bundled together. @@ -62,7 +76,31 @@ impl EthApi { /// All nested fields bundled together inner: Arc>, @@ -69,6 +71,25 @@ where /// See also [`EthFilterConfig`]. /// /// This also spawns a task that periodically clears stale filters. + /// + /// # Create a new instance with [`EthApi`](crate::EthApi) + /// + /// ```no_run + /// use reth_evm_ethereum::EthEvmConfig; + /// use reth_network_api::noop::NoopNetwork; + /// use reth_provider::noop::NoopProvider; + /// use reth_rpc::{EthApi, EthFilter}; + /// use reth_tasks::TokioTaskExecutor; + /// use reth_transaction_pool::noop::NoopTransactionPool; + /// let eth_api = EthApi::builder( + /// NoopProvider::default(), + /// NoopTransactionPool::default(), + /// NoopNetwork::default(), + /// EthEvmConfig::mainnet(), + /// ) + /// .build(); + /// let filter = EthFilter::new(eth_api, Default::default(), TokioTaskExecutor::default().boxed()); + /// ``` pub fn new(eth_api: Eth, config: EthFilterConfig, task_spawner: Box) -> Self { let EthFilterConfig { max_blocks_per_filter, max_logs_per_response, stale_filter_ttl } = config; diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index dbeeab40b3d5..b92e1972c6dc 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -14,7 +14,7 @@ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; -use reth_engine_primitives::PayloadValidator; +use reth_engine_primitives::{ExecutionData, PayloadValidator}; use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_metrics::{metrics, metrics::Gauge, Metrics}; @@ -51,7 +51,10 @@ where config: ValidationApiConfig, task_spawner: Box, payload_validator: Arc< - dyn PayloadValidator::Block>, + dyn PayloadValidator< + Block 
= ::Block, + ExecutionData = ExecutionData, + >, >, ) -> Self { let ValidationApiConfig { disallow, validation_window } = config; @@ -348,13 +351,13 @@ where ) -> Result<(), ValidationApiError> { let block = self .payload_validator - .ensure_well_formed_payload( - ExecutionPayload::V3(request.request.execution_payload), - ExecutionPayloadSidecar::v3(CancunPayloadFields { + .ensure_well_formed_payload(ExecutionData { + payload: ExecutionPayload::V3(request.request.execution_payload), + sidecar: ExecutionPayloadSidecar::v3(CancunPayloadFields { parent_beacon_block_root: request.parent_beacon_block_root, versioned_hashes: self.validate_blobs_bundle(request.request.blobs_bundle)?, }), - )? + })? .try_recover() .map_err(|_| ValidationApiError::InvalidTransactionSignature)?; @@ -373,9 +376,9 @@ where ) -> Result<(), ValidationApiError> { let block = self .payload_validator - .ensure_well_formed_payload( - ExecutionPayload::V3(request.request.execution_payload), - ExecutionPayloadSidecar::v4( + .ensure_well_formed_payload(ExecutionData { + payload: ExecutionPayload::V3(request.request.execution_payload), + sidecar: ExecutionPayloadSidecar::v4( CancunPayloadFields { parent_beacon_block_root: request.parent_beacon_block_root, versioned_hashes: self @@ -387,7 +390,7 @@ where ), }, ), - )? + })? .try_recover() .map_err(|_| ValidationApiError::InvalidTransactionSignature)?; @@ -468,7 +471,12 @@ pub struct ValidationApiInner { /// Consensus implementation. consensus: Arc>, /// Execution payload validator. - payload_validator: Arc::Block>>, + payload_validator: Arc< + dyn PayloadValidator< + Block = ::Block, + ExecutionData = ExecutionData, + >, + >, /// Block executor factory. 
executor_provider: E, /// Set of disallowed addresses diff --git a/crates/scroll/alloy/provider/Cargo.toml b/crates/scroll/alloy/provider/Cargo.toml index ffd8c2d561fb..6c225416ac41 100644 --- a/crates/scroll/alloy/provider/Cargo.toml +++ b/crates/scroll/alloy/provider/Cargo.toml @@ -43,7 +43,7 @@ reth-rpc-builder.workspace = true reth-rpc-engine-api.workspace = true reth-scroll-engine-primitives.workspace = true reth-scroll-node.workspace = true -reth-scroll-payload.workspace = true +reth-scroll-payload = { workspace = true, features = ["test-utils"] } reth-scroll-chainspec.workspace = true reth-tasks.workspace = true reth-tracing.workspace = true @@ -62,6 +62,7 @@ std = [ "reth-engine-primitives/std", "reth-primitives/std", "reth-primitives-traits/std", + "reth-scroll-payload/std", ] scroll = [ "reth-scroll-node/scroll", @@ -70,4 +71,5 @@ scroll = [ optimism = [ "reth-scroll-engine-primitives/optimism", "reth-scroll-node/optimism", + "reth-scroll-payload/optimism", ] diff --git a/crates/scroll/engine-primitives/src/payload/built.rs b/crates/scroll/engine-primitives/src/payload/built.rs index 53b4d02940df..4ca6b08a4d39 100644 --- a/crates/scroll/engine-primitives/src/payload/built.rs +++ b/crates/scroll/engine-primitives/src/payload/built.rs @@ -72,26 +72,6 @@ impl BuiltPayload for ScrollBuiltPayload { } } -impl BuiltPayload for &ScrollBuiltPayload { - type Primitives = ScrollPrimitives; - - fn block(&self) -> &SealedBlock { - (**self).block() - } - - fn fees(&self) -> U256 { - (**self).fees() - } - - fn executed_block(&self) -> Option> { - Some(self.block.clone()) - } - - fn requests(&self) -> Option { - None - } -} - // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: ScrollBuiltPayload) -> Self { diff --git a/crates/scroll/engine-primitives/src/payload/mod.rs b/crates/scroll/engine-primitives/src/payload/mod.rs index 6d0a2ddf9848..f9b09378b882 100644 --- a/crates/scroll/engine-primitives/src/payload/mod.rs +++ 
b/crates/scroll/engine-primitives/src/payload/mod.rs @@ -14,10 +14,10 @@ use alloy_eips::eip2718::Decodable2718; use alloy_rlp::BufMut; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, - ExecutionPayloadEnvelopeV4, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV2, - ExecutionPayloadV3, PayloadError, + ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, + PayloadError, }; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineTypes, ExecutionData}; use reth_payload_primitives::{BuiltPayload, PayloadTypes}; use reth_primitives::{Block, BlockBody, Header}; use reth_primitives_traits::{NodePrimitives, SealedBlock}; @@ -52,13 +52,16 @@ where type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; + type ExecutionData = ExecutionData; fn block_to_payload( block: SealedBlock< <::Primitives as NodePrimitives>::Block, >, - ) -> (ExecutionPayload, ExecutionPayloadSidecar) { - ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()) + ) -> ExecutionData { + let (payload, sidecar) = + ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()); + ExecutionData { payload, sidecar } } } @@ -79,18 +82,17 @@ impl PayloadTypes for ScrollPayloadTypes { /// [`PayloadError::ExtraData`] due to the Scroll blocks containing extra data for the Clique /// consensus. 
pub fn try_into_block( - value: ExecutionPayload, - sidecar: &ExecutionPayloadSidecar, + value: ExecutionData, chainspec: Arc, ) -> Result, PayloadError> { - let mut block = match value { + let mut block = match value.payload { ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload, chainspec)?, ExecutionPayload::V2(payload) => try_payload_v2_to_block(payload, chainspec)?, ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload, chainspec)?, }; - block.header.parent_beacon_block_root = sidecar.parent_beacon_block_root(); - block.header.requests_hash = sidecar.requests_hash(); + block.header.parent_beacon_block_root = value.sidecar.parent_beacon_block_root(); + block.header.requests_hash = value.sidecar.requests_hash(); Ok(block) } @@ -225,9 +227,10 @@ mod tests { block_hash: B256::random(), transactions: vec![], }); + let execution_data = ExecutionData::new(execution_payload, Default::default()); let _: Block = - try_into_block(execution_payload, &Default::default(), SCROLL_MAINNET.clone())?; + try_into_block(execution_data, SCROLL_MAINNET.clone())?; Ok(()) } @@ -260,9 +263,10 @@ mod tests { }, withdrawals: vec![], }); + let execution_data = ExecutionData::new(execution_payload, Default::default()); let _: Block = - try_into_block(execution_payload, &Default::default(), SCROLL_MAINNET.clone())?; + try_into_block(execution_data, SCROLL_MAINNET.clone())?; Ok(()) } @@ -299,9 +303,10 @@ mod tests { blob_gas_used: 0, excess_blob_gas: 0, }); + let execution_data = ExecutionData::new(execution_payload, Default::default()); let _: Block = - try_into_block(execution_payload, &Default::default(), SCROLL_MAINNET.clone())?; + try_into_block(execution_data, SCROLL_MAINNET.clone())?; Ok(()) } diff --git a/crates/scroll/node/Cargo.toml b/crates/scroll/node/Cargo.toml index e5aac58e7e08..51b7cbc6cb6a 100644 --- a/crates/scroll/node/Cargo.toml +++ b/crates/scroll/node/Cargo.toml @@ -23,7 +23,6 @@ reth-network.workspace = true reth-node-api.workspace = true 
reth-node-types.workspace = true reth-node-builder.workspace = true -reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-provider.workspace = true @@ -64,6 +63,7 @@ optimism = [ "reth-scroll-evm/optimism", "reth-scroll-rpc/optimism", "reth-scroll-engine-primitives/optimism", + "reth-scroll-payload/optimism", "revm/optimism", ] scroll = [ @@ -77,6 +77,7 @@ scroll = [ "reth-primitives-traits/scroll", "reth-engine-local/scroll-alloy-traits", "reth-scroll-engine-primitives/scroll", + "reth-scroll-payload/scroll", ] reth-codec = [ "reth-primitives/reth-codec", diff --git a/crates/scroll/node/src/builder/engine.rs b/crates/scroll/node/src/builder/engine.rs index 60cefcbca71b..c2a6ac6c4621 100644 --- a/crates/scroll/node/src/builder/engine.rs +++ b/crates/scroll/node/src/builder/engine.rs @@ -1,9 +1,9 @@ use alloy_primitives::U256; -use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; +use alloy_rpc_types_engine::PayloadError; use reth_node_api::PayloadValidator; use reth_node_builder::{ rpc::EngineValidatorBuilder, AddOnsContext, EngineApiMessageVersion, - EngineObjectValidationError, EngineTypes, EngineValidator, FullNodeComponents, + EngineObjectValidationError, EngineTypes, EngineValidator, ExecutionData, FullNodeComponents, PayloadOrAttributes, }; use reth_node_types::NodeTypesWithEngine; @@ -56,7 +56,7 @@ impl ScrollEngineValidator { impl EngineValidator for ScrollEngineValidator where - Types: EngineTypes, + Types: EngineTypes, { fn validate_version_specific_fields( &self, @@ -77,16 +77,16 @@ where impl PayloadValidator for ScrollEngineValidator { type Block = ScrollBlock; + type ExecutionData = ExecutionData; fn ensure_well_formed_payload( &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, + payload: ExecutionData, ) -> Result, PayloadError> { - let expected_hash = payload.block_hash(); + let expected_hash = 
payload.payload.block_hash(); // First parse the block - let mut block = try_into_block(payload, &sidecar, self.chainspec.clone())?; + let mut block = try_into_block(payload, self.chainspec.clone())?; // Seal the block with the in-turn difficulty and return if hashes match block.header.difficulty = CLIQUE_IN_TURN_DIFFICULTY; diff --git a/crates/scroll/node/src/builder/payload.rs b/crates/scroll/node/src/builder/payload.rs index 7c133e976b16..216003c2e993 100644 --- a/crates/scroll/node/src/builder/payload.rs +++ b/crates/scroll/node/src/builder/payload.rs @@ -1,45 +1,38 @@ -use reth_node_builder::{ - components::PayloadServiceBuilder, BuilderContext, FullNodeTypes, PayloadTypes, -}; -use reth_node_types::NodeTypesWithEngine; -use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_provider::CanonStateSubscriptions; -use reth_scroll_engine_primitives::{ScrollBuiltPayload, ScrollPayloadBuilderAttributes}; -use reth_scroll_payload::NoopPayloadJobGenerator; +use reth_node_builder::{components::PayloadServiceBuilder, BuilderContext, FullNodeTypes}; +use reth_node_types::{NodeTypesWithEngine, TxTy}; +use reth_scroll_chainspec::ScrollChainSpec; +use reth_scroll_engine_primitives::ScrollEngineTypes; +use reth_scroll_payload::ScrollPayloadTransactions; use reth_scroll_primitives::ScrollPrimitives; -use reth_transaction_pool::TransactionPool; -use scroll_alloy_rpc_types_engine::ScrollPayloadAttributes; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; /// Payload builder for Scroll. -#[derive(Debug, Default, Clone, Copy)] -pub struct ScrollPayloadBuilder; +#[derive(Debug, Clone, Default, Copy)] +pub struct ScrollPayloadBuilder { + /// Returns the current best transactions from the mempool. 
+ pub best_transactions: Txs, +} -impl PayloadServiceBuilder for ScrollPayloadBuilder +impl PayloadServiceBuilder for ScrollPayloadBuilder where Node: FullNodeTypes, - Node::Types: NodeTypesWithEngine, - ::Engine: PayloadTypes< - BuiltPayload = ScrollBuiltPayload, - PayloadAttributes = ScrollPayloadAttributes, - PayloadBuilderAttributes = ScrollPayloadBuilderAttributes, + Node::Types: NodeTypesWithEngine< + Primitives = ScrollPrimitives, + Engine = ScrollEngineTypes, + ChainSpec = ScrollChainSpec, >, - Pool: TransactionPool, + Pool: TransactionPool>> + + Unpin + + 'static, + Txs: ScrollPayloadTransactions, { - async fn spawn_payload_service( - self, - ctx: &BuilderContext, - _pool: Pool, - ) -> eyre::Result< - PayloadBuilderHandle<<::Types as NodeTypesWithEngine>::Engine>, - > { - let payload_generator = - NoopPayloadJobGenerator::::default( - ); - let (payload_service, payload_builder) = - PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream()); + type PayloadBuilder = reth_scroll_payload::ScrollEmptyPayloadBuilder; - ctx.task_executor().spawn_critical("payload builder service", Box::pin(payload_service)); - - eyre::Ok(payload_builder) + async fn build_payload_builder( + &self, + _ctx: &BuilderContext, + _pool: Pool, + ) -> eyre::Result { + Ok(reth_scroll_payload::ScrollEmptyPayloadBuilder::default()) } } diff --git a/crates/scroll/node/src/node.rs b/crates/scroll/node/src/node.rs index dec84a47e016..bc8597b71bde 100644 --- a/crates/scroll/node/src/node.rs +++ b/crates/scroll/node/src/node.rs @@ -8,15 +8,12 @@ use crate::{ use reth_node_builder::{ components::ComponentsBuilder, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - Node, NodeAdapter, NodeComponentsBuilder, PayloadTypes, + Node, NodeAdapter, NodeComponentsBuilder, }; use reth_scroll_chainspec::ScrollChainSpec; -use reth_scroll_engine_primitives::{ - ScrollBuiltPayload, ScrollEngineTypes, ScrollPayloadBuilderAttributes, -}; +use 
reth_scroll_engine_primitives::ScrollEngineTypes; use reth_scroll_primitives::ScrollPrimitives; use reth_trie_db::MerklePatriciaTrie; -use scroll_alloy_rpc_types_engine::ScrollPayloadAttributes; /// The Scroll node implementation. #[derive(Clone, Debug, Default)] @@ -34,18 +31,17 @@ impl ScrollNode { > where Node: FullNodeTypes< - Types: NodeTypes, - >, - ::Engine: PayloadTypes< - BuiltPayload = ScrollBuiltPayload, - PayloadAttributes = ScrollPayloadAttributes, - PayloadBuilderAttributes = ScrollPayloadBuilderAttributes, + Types: NodeTypesWithEngine< + ChainSpec = ScrollChainSpec, + Primitives = ScrollPrimitives, + Engine = ScrollEngineTypes, + >, >, { ComponentsBuilder::default() .node_types::() .pool(ScrollPoolBuilder) - .payload(ScrollPayloadBuilder) + .payload(ScrollPayloadBuilder::default()) .network(ScrollNetworkBuilder) .executor(ScrollExecutorBuilder) .consensus(ScrollConsensusBuilder) diff --git a/crates/scroll/node/src/pool.rs b/crates/scroll/node/src/pool.rs index cc4ddd62364e..657b5a25d4de 100644 --- a/crates/scroll/node/src/pool.rs +++ b/crates/scroll/node/src/pool.rs @@ -4,8 +4,9 @@ use alloy_eips::{ eip2718::Encodable2718, eip2930::AccessList, eip4844::{BlobAndProofV1, BlobTransactionSidecar, BlobTransactionValidationError}, + eip7702::SignedAuthorization, }; -use alloy_primitives::{Address, TxHash, TxKind, B256, U256}; +use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; use reth_eth_wire_types::HandleMempoolData; use reth_primitives::{kzg::KzgSettings, Recovered}; use reth_primitives_traits::{ @@ -376,13 +377,14 @@ impl PoolTransaction for ScrollPooledTransaction { self.0.transaction().clone() } - fn try_consensus_into_pooled( - tx: Recovered, - ) -> Result, Self::TryFromConsensusError> { - let (tx, signer) = tx.into_parts(); - let pooled = - tx.try_into().map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing)?; - Ok(Recovered::new_unchecked(pooled, signer)) + fn into_consensus(self) -> Recovered { + 
self.0.transaction + } + + fn from_pooled(tx: Recovered) -> Self { + let encoded_len = tx.encode_2718_len(); + let tx = tx.map_transaction(|tx| tx.into()); + Self::new(tx, encoded_len) } /// Returns hash of the transaction. @@ -400,11 +402,6 @@ impl PoolTransaction for ScrollPooledTransaction { self.0.transaction.signer_ref() } - /// Returns the nonce for this transaction. - fn nonce(&self) -> u64 { - self.0.transaction.nonce() - } - /// Returns the cost that this transaction is allowed to consume: /// /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. @@ -415,82 +412,91 @@ impl PoolTransaction for ScrollPooledTransaction { &self.0.cost } - /// Amount of gas that should be used in executing this transaction. This is paid up-front. + /// Returns the length of the rlp encoded object + fn encoded_length(&self) -> usize { + self.0.encoded_length + } +} + +impl reth_primitives_traits::InMemorySize for ScrollPooledTransaction { + fn size(&self) -> usize { + self.0.size() + } +} + +impl Typed2718 for ScrollPooledTransaction { + fn ty(&self) -> u8 { + self.0.ty() + } +} + +impl Transaction for ScrollPooledTransaction { + fn chain_id(&self) -> Option { + self.0.chain_id() + } + + fn nonce(&self) -> u64 { + self.0.nonce() + } + fn gas_limit(&self) -> u64 { - self.0.transaction.gas_limit() + self.0.gas_limit() } - /// Returns the EIP-1559 Max base fee the caller is willing to pay. - /// - /// For legacy transactions this is `gas_price`. - /// - /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). - fn max_fee_per_gas(&self) -> u128 { - self.0.transaction.transaction.max_fee_per_gas() + fn gas_price(&self) -> Option { + self.0.gas_price() } - fn access_list(&self) -> Option<&AccessList> { - self.0.transaction.access_list() + fn max_fee_per_gas(&self) -> u128 { + self.0.max_fee_per_gas() } - /// Returns the EIP-1559 Priority fee the caller is paying to the block author. 
- /// - /// This will return `None` for non-EIP1559 transactions fn max_priority_fee_per_gas(&self) -> Option { - self.0.transaction.transaction.max_priority_fee_per_gas() + self.0.max_priority_fee_per_gas() } fn max_fee_per_blob_gas(&self) -> Option { - self.0.transaction.max_fee_per_blob_gas() + self.0.max_fee_per_blob_gas() } - /// Returns the effective tip for this transaction. - /// - /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. - /// For legacy transactions: `gas_price - base_fee`. - fn effective_tip_per_gas(&self, base_fee: u64) -> Option { - self.0.transaction.effective_tip_per_gas(base_fee) + fn priority_fee_or_price(&self) -> u128 { + self.0.priority_fee_or_price() } - /// Returns the max priority fee per gas if the transaction is an EIP-1559 transaction, and - /// otherwise returns the gas price. - fn priority_fee_or_price(&self) -> u128 { - self.0.transaction.priority_fee_or_price() + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.0.effective_gas_price(base_fee) + } + + fn is_dynamic_fee(&self) -> bool { + self.0.is_dynamic_fee() } - /// Returns the transaction's [`TxKind`], which is the address of the recipient or - /// [`TxKind::Create`] if the transaction is a contract creation. fn kind(&self) -> TxKind { - self.0.transaction.kind() + self.0.kind() } - /// Returns true if the transaction is a contract creation. fn is_create(&self) -> bool { - self.0.transaction.is_create() + self.0.is_create() } - fn input(&self) -> &[u8] { - self.0.transaction.input() + fn value(&self) -> U256 { + self.0.value() } - /// Returns a measurement of the heap usage of this type and all its internals. 
- fn size(&self) -> usize { - self.0.transaction.transaction.input().len() + fn input(&self) -> &Bytes { + self.0.input() } - /// Returns the transaction type - fn tx_type(&self) -> u8 { - self.0.transaction.ty() + fn access_list(&self) -> Option<&AccessList> { + self.0.access_list() } - /// Returns the length of the rlp encoded object - fn encoded_length(&self) -> usize { - self.0.encoded_length + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.0.blob_versioned_hashes() } - /// Returns `chain_id` - fn chain_id(&self) -> Option { - self.0.transaction.chain_id() + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.0.authorization_list() } } @@ -499,10 +505,6 @@ impl EthPoolTransaction for ScrollPooledTransaction { EthBlobTransactionSidecar::None } - fn blob_count(&self) -> usize { - 0 - } - fn try_into_pooled_eip4844( self, _sidecar: Arc, @@ -522,11 +524,7 @@ impl EthPoolTransaction for ScrollPooledTransaction { _blob: &BlobTransactionSidecar, _settings: &KzgSettings, ) -> Result<(), BlobTransactionValidationError> { - Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())) - } - - fn authorization_count(&self) -> usize { - 0 + Err(BlobTransactionValidationError::NotBlobTransaction(self.ty())) } } diff --git a/crates/scroll/payload/Cargo.toml b/crates/scroll/payload/Cargo.toml index 4f62017d25b1..5b0769c4554c 100644 --- a/crates/scroll/payload/Cargo.toml +++ b/crates/scroll/payload/Cargo.toml @@ -13,8 +13,33 @@ workspace = true [dependencies] # reth -reth-payload-builder.workspace = true +reth-basic-payload-builder.workspace = true +reth-payload-builder = { workspace = true, optional = true } reth-payload-primitives.workspace = true +reth-primitives-traits.workspace = true +reth-transaction-pool.workspace = true +reth-payload-util.workspace = true -# misc -futures-util.workspace = true +# scroll +reth-scroll-engine-primitives.workspace = true +futures-util = { workspace = true, optional = true } + +[features] +std = [ 
+ "futures-util/std", + "reth-payload-primitives/std", + "reth-primitives-traits/std", + "reth-scroll-engine-primitives/std", +] +scroll = [ + "reth-primitives-traits/scroll", + "reth-scroll-engine-primitives/scroll", +] +optimism = ["reth-scroll-engine-primitives/optimism"] +test-utils = [ + "dep:futures-util", + "dep:reth-payload-builder", + "reth-payload-builder/test-utils", + "reth-primitives-traits/test-utils", + "reth-transaction-pool/test-utils", +] diff --git a/crates/scroll/payload/src/builder.rs b/crates/scroll/payload/src/builder.rs index 8f1e7248a271..57ec6ea45be6 100644 --- a/crates/scroll/payload/src/builder.rs +++ b/crates/scroll/payload/src/builder.rs @@ -1,70 +1,54 @@ -use reth_payload_builder::{KeepPayloadJobAlive, PayloadJob, PayloadJobGenerator}; -use reth_payload_primitives::{ - BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, -}; -use std::{ - fmt::Debug, - future::Future, - pin::Pin, - task::{Context, Poll}, +use core::fmt::Debug; +use reth_basic_payload_builder::{ + BuildArguments, BuildOutcome, HeaderForPayload, PayloadBuilder, PayloadConfig, }; +use reth_payload_primitives::PayloadBuilderError; +use reth_payload_util::{BestPayloadTransactions, PayloadTransactions}; +use reth_scroll_engine_primitives::{ScrollBuiltPayload, ScrollPayloadBuilderAttributes}; +use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; -/// A [`PayloadJobGenerator`] that doesn't produce any useful payload. -#[derive(Debug, Default)] +/// A type that implements [`PayloadBuilder`] by building empty payloads. 
+#[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct NoopPayloadJobGenerator { - _types: std::marker::PhantomData<(PA, BP)>, -} - -impl PayloadJobGenerator for NoopPayloadJobGenerator -where - PA: PayloadBuilderAttributes + Default + Debug + Send + Sync, - BP: BuiltPayload + Default + Clone + Debug + Send + Sync + 'static, -{ - type Job = NoopPayloadJob; - - fn new_payload_job(&self, _attr: PA) -> Result { - Ok(NoopPayloadJob::::default()) +pub struct ScrollEmptyPayloadBuilder; + +impl PayloadBuilder for ScrollEmptyPayloadBuilder { + type Attributes = ScrollPayloadBuilderAttributes; + type BuiltPayload = ScrollBuiltPayload; + + fn try_build( + &self, + _args: BuildArguments, + ) -> Result, PayloadBuilderError> { + // we can't currently actually build a payload, so we mark the outcome as cancelled. + Ok(BuildOutcome::Cancelled) } -} - -/// A [`PayloadJobGenerator`] that doesn't produce any payload. -#[derive(Debug, Default)] -pub struct NoopPayloadJob { - _types: std::marker::PhantomData<(PA, BP)>, -} -impl Future for NoopPayloadJob { - type Output = Result<(), PayloadBuilderError>; - - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - Poll::Pending + fn build_empty_payload( + &self, + _config: PayloadConfig>, + ) -> Result { + Ok(ScrollBuiltPayload::default()) } } -impl PayloadJob for NoopPayloadJob -where - PA: PayloadBuilderAttributes + Default + Debug, - BP: BuiltPayload + Default + Clone + Debug + 'static, -{ - type PayloadAttributes = PA; - type ResolvePayloadFuture = - futures_util::future::Ready>; - type BuiltPayload = BP; - - fn best_payload(&self) -> Result { - Ok(Self::BuiltPayload::default()) - } - - fn payload_attributes(&self) -> Result { - Ok(Self::PayloadAttributes::default()) - } +/// A type that returns the [`PayloadTransactions`] that should be included in the pool. 
+pub trait ScrollPayloadTransactions: Clone + Send + Sync + Unpin + 'static { + /// Returns an iterator that yields the transaction in the order they should get included in the + /// new payload. + fn best_transactions>( + &self, + pool: Pool, + attr: BestTransactionsAttributes, + ) -> impl PayloadTransactions; +} - fn resolve_kind( - &mut self, - _kind: PayloadKind, - ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { - let fut = futures_util::future::ready(self.best_payload()); - (fut, KeepPayloadJobAlive::No) +impl ScrollPayloadTransactions for () { + fn best_transactions>( + &self, + pool: Pool, + attr: BestTransactionsAttributes, + ) -> impl PayloadTransactions { + BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)) } } diff --git a/crates/scroll/payload/src/lib.rs b/crates/scroll/payload/src/lib.rs index a70f9fa30c04..86efbaa41ace 100644 --- a/crates/scroll/payload/src/lib.rs +++ b/crates/scroll/payload/src/lib.rs @@ -1,4 +1,14 @@ //! Engine Payload related types. +#![cfg_attr(feature = "optimism", allow(unused_crate_dependencies))] +// The `scroll` feature must be enabled to use this crate. 
+#![cfg(all(feature = "scroll", not(feature = "optimism")))] +#![cfg_attr(not(feature = "std"), no_std)] + mod builder; -pub use builder::{NoopPayloadJob, NoopPayloadJobGenerator}; +pub use builder::{ScrollEmptyPayloadBuilder, ScrollPayloadTransactions}; + +#[cfg(feature = "test-utils")] +mod test_utils; +#[cfg(feature = "test-utils")] +pub use test_utils::{NoopPayloadJob, NoopPayloadJobGenerator}; diff --git a/crates/scroll/payload/src/test_utils.rs b/crates/scroll/payload/src/test_utils.rs new file mode 100644 index 000000000000..a9d494f40fda --- /dev/null +++ b/crates/scroll/payload/src/test_utils.rs @@ -0,0 +1,70 @@ +use core::{ + fmt::Debug, + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use reth_payload_builder::{KeepPayloadJobAlive, PayloadJob, PayloadJobGenerator}; +use reth_payload_primitives::{ + BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, +}; + +/// A [`PayloadJobGenerator`] that doesn't produce any useful payload. +#[derive(Debug, Default)] +#[non_exhaustive] +pub struct NoopPayloadJobGenerator { + _types: core::marker::PhantomData<(PA, BP)>, +} + +impl PayloadJobGenerator for NoopPayloadJobGenerator +where + PA: PayloadBuilderAttributes + Default + Debug + Send + Sync, + BP: BuiltPayload + Default + Clone + Debug + Send + Sync + 'static, +{ + type Job = NoopPayloadJob; + + fn new_payload_job(&self, _attr: PA) -> Result { + Ok(NoopPayloadJob::::default()) + } +} + +/// A [`PayloadJobGenerator`] that doesn't produce any payload. 
+#[derive(Debug, Default)] +pub struct NoopPayloadJob { + _types: core::marker::PhantomData<(PA, BP)>, +} + +impl Future for NoopPayloadJob { + type Output = Result<(), PayloadBuilderError>; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + Poll::Pending + } +} + +impl PayloadJob for NoopPayloadJob +where + PA: PayloadBuilderAttributes + Default + Debug, + BP: BuiltPayload + Default + Clone + Debug + 'static, +{ + type PayloadAttributes = PA; + type ResolvePayloadFuture = + futures_util::future::Ready>; + type BuiltPayload = BP; + + fn best_payload(&self) -> Result { + Ok(Self::BuiltPayload::default()) + } + + fn payload_attributes(&self) -> Result { + Ok(Self::PayloadAttributes::default()) + } + + fn resolve_kind( + &mut self, + _kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + let fut = futures_util::future::ready(self.best_payload()); + (fut, KeepPayloadJobAlive::No) + } +} diff --git a/crates/scroll/primitives/src/transaction/signed.rs b/crates/scroll/primitives/src/transaction/signed.rs index cbfb57fe73c7..f5045d214e7e 100644 --- a/crates/scroll/primitives/src/transaction/signed.rs +++ b/crates/scroll/primitives/src/transaction/signed.rs @@ -28,7 +28,7 @@ use once_cell::sync::OnceCell as OnceLock; use proptest as _; use reth_primitives_traits::{ crypto::secp256k1::{recover_signer, recover_signer_unchecked}, - transaction::{error::TransactionConversionError, signed::RecoveryError}, + transaction::{error::TryFromRecoveredTransactionError, signed::RecoveryError}, InMemorySize, SignedTransaction, }; use scroll_alloy_consensus::{ScrollPooledTransaction, ScrollTypedTransaction, TxL1Message}; @@ -527,8 +527,8 @@ impl> From> for ScrollTransactionSigne } } -impl TryFrom for scroll_alloy_consensus::ScrollPooledTransaction { - type Error = TransactionConversionError; +impl TryFrom for ScrollPooledTransaction { + type Error = TryFromRecoveredTransactionError; fn try_from(value: ScrollTransactionSigned) -> Result { let hash 
= *value.tx_hash(); @@ -545,7 +545,7 @@ impl TryFrom for scroll_alloy_consensus::ScrollPooledTr Ok(Self::Eip1559(Signed::new_unchecked(tx, signature, hash))) } ScrollTypedTransaction::L1Message(_) => { - Err(TransactionConversionError::UnsupportedForP2P) + Err(TryFromRecoveredTransactionError::UnsupportedTransactionType(0xfe)) } } } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 2cb98d44f93d..06ce1182db17 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -131,9 +131,7 @@ impl Pipeline { /// pipeline and its result as a future. #[track_caller] pub fn run_as_fut(mut self, target: Option) -> PipelineFut { - // TODO: fix this in a follow up PR. ideally, consensus engine would be responsible for - // updating metrics. - let _ = self.register_metrics(); // ignore error + let _ = self.register_metrics(); Box::pin(async move { // NOTE: the tip should only be None if we are in continuous sync mode. if let Some(target) = target { diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 8cf74bdcaeaf..ddd811a291a1 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -181,7 +181,7 @@ where where Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, { - // If thre's any receipts pruning configured, receipts are written directly to database and + // If there's any receipts pruning configured, receipts are written directly to database and // inconsistencies are expected. 
if provider.prune_modes_ref().has_receipts_pruning() { return Ok(()) @@ -651,7 +651,6 @@ mod tests { use reth_db_api::{models::AccountBeforeTx, transaction::DbTxMut}; use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; - use reth_execution_errors::BlockValidationError; use reth_primitives::{Account, Bytecode, SealedBlock, StorageEntry}; use reth_provider::{ test_utils::create_test_provider_factory, AccountReader, DatabaseProviderFactory, @@ -714,14 +713,7 @@ mod tests { let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::::decode(&mut block_rlp).unwrap(); - provider - .insert_historical_block( - genesis - .try_recover() - 
.map_err(|_| BlockValidationError::SenderRecoveryError) - .unwrap(), - ) - .unwrap(); + provider.insert_historical_block(genesis.try_recover().unwrap()).unwrap(); provider.insert_historical_block(block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index 8e880dfc72bd..c88f53dcdaa1 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -16,7 +16,7 @@ reth-codecs = { workspace = true, optional = true } reth-trie-common.workspace = true alloy-primitives.workspace = true -serde.workspace = true +serde = { workspace = true, optional = true } arbitrary = { workspace = true, features = ["derive"], optional = true } bytes = { workspace = true, optional = true } @@ -34,6 +34,13 @@ rand.workspace = true bytes.workspace = true [features] +default = ["std"] +std = [ + "alloy-primitives/std", + "bytes?/std", + "reth-trie-common/std", + "serde?/std", +] reth-codec = [ "dep:reth-codecs", "dep:bytes", @@ -41,12 +48,22 @@ reth-codec = [ "reth-trie-common/reth-codec", ] test-utils = [ - "dep:arbitrary", + "arbitrary", "reth-codecs?/test-utils", "reth-trie-common/test-utils", ] arbitrary = [ + "std", + "dep:arbitrary", "alloy-primitives/arbitrary", "reth-codecs?/arbitrary", "reth-trie-common/arbitrary", ] +serde = [ + "dep:serde", + "alloy-primitives/serde", + "bytes?/serde", + "rand/serde", + "reth-codecs?/serde", + "reth-trie-common/serde", +] diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index 160c901e1cb3..51dc84ad9b91 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -1,9 +1,8 @@ use super::StageId; -use alloc::vec::Vec; +use alloc::{format, string::String, vec::Vec}; use alloy_primitives::{Address, BlockNumber, B256}; use core::ops::RangeInclusive; use reth_trie_common::{hash_builder::HashBuilderState, StoredSubNode}; -use serde::{Deserialize, 
Serialize}; /// Saves the progress of Merkle stage. #[derive(Default, Debug, Clone, PartialEq, Eq)] @@ -75,10 +74,11 @@ impl reth_codecs::Compact for MerkleCheckpoint { } /// Saves the progress of AccountHashing stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct AccountHashingCheckpoint { /// The next account to start hashing from. pub address: Option
, @@ -89,10 +89,11 @@ pub struct AccountHashingCheckpoint { } /// Saves the progress of StorageHashing stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct StorageHashingCheckpoint { /// The next account to start hashing from. pub address: Option
, @@ -105,10 +106,11 @@ pub struct StorageHashingCheckpoint { } /// Saves the progress of Execution stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct ExecutionCheckpoint { /// Block range which this checkpoint is valid for. pub block_range: CheckpointBlockRange, @@ -117,10 +119,11 @@ pub struct ExecutionCheckpoint { } /// Saves the progress of Headers stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct HeadersCheckpoint { /// Block range which this checkpoint is valid for. pub block_range: CheckpointBlockRange, @@ -129,10 +132,11 @@ pub struct HeadersCheckpoint { } /// Saves the progress of Index History stages. 
-#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct IndexHistoryCheckpoint { /// Block range which this checkpoint is valid for. pub block_range: CheckpointBlockRange, @@ -141,10 +145,11 @@ pub struct IndexHistoryCheckpoint { } /// Saves the progress of abstract stage iterating over or downloading entities. -#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct EntitiesCheckpoint { /// Number of entities already processed. pub processed: u64, @@ -165,16 +170,22 @@ impl EntitiesCheckpoint { let percentage = 100.0 * self.processed as f64 / self.total as f64; // Truncate to 2 decimal places, rounding down so that 99.999% becomes 99.99% and not 100%. + #[cfg(not(feature = "std"))] + { + Some(format!("{:.2}%", (percentage * 100.0) / 100.0)) + } + #[cfg(feature = "std")] Some(format!("{:.2}%", (percentage * 100.0).floor() / 100.0)) } } /// Saves the block range. Usually, it's used to check the validity of some stage checkpoint across /// multiple executions. 
-#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct CheckpointBlockRange { /// The first block of the range, inclusive. pub from: BlockNumber, @@ -195,10 +206,11 @@ impl From<&RangeInclusive> for CheckpointBlockRange { } /// Saves the progress of a stage. -#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct StageCheckpoint { /// The maximum block processed by the stage. pub block_number: BlockNumber, @@ -263,10 +275,11 @@ impl StageCheckpoint { // TODO(alexey): add a merkle checkpoint. Currently it's hard because [`MerkleCheckpoint`] // is not a Copy type. /// Stage-specific checkpoint metrics. -#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum StageUnitCheckpoint { /// Saves the progress of AccountHashing stage. 
Account(AccountHashingCheckpoint), diff --git a/crates/stages/types/src/id.rs b/crates/stages/types/src/id.rs index 547575f5951e..ec7da22578bb 100644 --- a/crates/stages/types/src/id.rs +++ b/crates/stages/types/src/id.rs @@ -97,8 +97,8 @@ impl StageId { } } -impl std::fmt::Display for StageId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl core::fmt::Display for StageId { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "{}", self.as_str()) } } diff --git a/crates/stages/types/src/lib.rs b/crates/stages/types/src/lib.rs index 4e01bf7dbf4a..13d59de34332 100644 --- a/crates/stages/types/src/lib.rs +++ b/crates/stages/types/src/lib.rs @@ -7,6 +7,7 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; @@ -68,7 +69,7 @@ impl From for PipelineTarget { } impl core::fmt::Display for PipelineTarget { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::Sync(block) => { write!(f, "Sync({block})") diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 2f6fa8971762..b5b2403d5d1e 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -17,8 +17,8 @@ reth-codecs.workspace = true reth-db-models.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } -reth-prune-types.workspace = true -reth-stages-types = { workspace = true, features = ["reth-codec"] } +reth-stages-types = { workspace = true, features = ["serde", "reth-codec"] } +reth-prune-types = { workspace = true, features = ["reth-codec"] } reth-storage-errors.workspace = true reth-trie-common.workspace = true diff --git a/crates/storage/db/Cargo.toml 
b/crates/storage/db/Cargo.toml index 311e1952c77d..766e862f3317 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -19,7 +19,7 @@ reth-primitives-traits = { workspace = true, features = ["reth-codec"] } reth-fs-util.workspace = true reth-storage-errors.workspace = true reth-nippy-jar.workspace = true -reth-prune-types.workspace = true +reth-prune-types = { workspace = true, features = ["reth-codec", "serde"] } reth-stages-types.workspace = true reth-trie-common = { workspace = true, features = ["serde"] } reth-tracing.workspace = true diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index 15ddee2f0fef..c44f88790d5f 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -86,7 +86,7 @@ impl StorageLockInner { fn new(file_path: PathBuf) -> Result { // Create the directory if it doesn't exist if let Some(parent) = file_path.parent() { - reth_fs_util::create_dir_all(parent)?; + reth_fs_util::create_dir_all(parent).map_err(StorageLockError::other)?; } // Write this process unique identifier (pid & start_time) to file @@ -148,7 +148,8 @@ impl ProcessUID { /// Writes `pid` and `start_time` to a file. fn write(&self, path: &Path) -> Result<(), StorageLockError> { - Ok(reth_fs_util::write(path, format!("{}\n{}", self.pid, self.start_time))?) 
+ reth_fs_util::write(path, format!("{}\n{}", self.pid, self.start_time)) + .map_err(StorageLockError::other) } } diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index ece3340c676c..24e282605eaf 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -12,7 +12,6 @@ workspace = true [dependencies] # reth -reth-fs-util.workspace = true reth-primitives-traits.workspace = true reth-prune-types.workspace = true reth-static-file-types.workspace = true diff --git a/crates/storage/errors/src/any.rs b/crates/storage/errors/src/any.rs new file mode 100644 index 000000000000..d5f1a34988dd --- /dev/null +++ b/crates/storage/errors/src/any.rs @@ -0,0 +1,41 @@ +use alloc::sync::Arc; +use core::{error::Error, fmt}; + +/// A thread-safe cloneable wrapper for any error type. +#[derive(Clone)] +pub struct AnyError { + inner: Arc, +} + +impl AnyError { + /// Creates a new `AnyError` wrapping the given error value. + pub fn new(error: E) -> Self + where + E: Error + Send + Sync + 'static, + { + Self { inner: Arc::new(error) } + } + + /// Returns a reference to the underlying error value. 
+ pub fn as_error(&self) -> &(dyn Error + Send + Sync + 'static) { + self.inner.as_ref() + } +} + +impl fmt::Debug for AnyError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.inner, f) + } +} + +impl fmt::Display for AnyError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.inner, f) + } +} + +impl Error for AnyError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + self.inner.source() + } +} diff --git a/crates/storage/errors/src/lib.rs b/crates/storage/errors/src/lib.rs index 6abb0cd9b425..179978c1cc36 100644 --- a/crates/storage/errors/src/lib.rs +++ b/crates/storage/errors/src/lib.rs @@ -22,3 +22,6 @@ pub mod provider; /// Writer error pub mod writer; + +/// Any error +pub mod any; diff --git a/crates/storage/errors/src/lockfile.rs b/crates/storage/errors/src/lockfile.rs index ec276cdce5e4..3a5cb362b6ac 100644 --- a/crates/storage/errors/src/lockfile.rs +++ b/crates/storage/errors/src/lockfile.rs @@ -1,5 +1,4 @@ use alloc::string::{String, ToString}; -use reth_fs_util::FsPathError; /// Storage lock error. #[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] @@ -12,9 +11,9 @@ pub enum StorageLockError { Other(String), } -/// TODO: turn into variant once `ProviderError` -impl From for StorageLockError { - fn from(error: FsPathError) -> Self { - Self::Other(error.to_string()) +impl StorageLockError { + /// Converts any error into the `Other` variant of `StorageLockError`. 
+ pub fn other(err: E) -> Self { + Self::Other(err.to_string()) } } diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 48b893759d15..f0b317b0ac2e 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,4 +1,6 @@ -use crate::{db::DatabaseError, lockfile::StorageLockError, writer::UnifiedStorageWriterError}; +use crate::{ + any::AnyError, db::DatabaseError, lockfile::StorageLockError, writer::UnifiedStorageWriterError, +}; use alloc::{boxed::Box, string::String}; use alloy_eips::{BlockHashOrNumber, HashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; @@ -143,6 +145,42 @@ pub enum ProviderError { /// Received invalid output from configured storage implementation. #[error("received invalid output from storage")] InvalidStorageOutput, + /// Any other error type wrapped into a clonable [`AnyError`]. + #[error(transparent)] + Other(#[from] AnyError), +} + +impl ProviderError { + /// Creates a new [`ProviderError::Other`] variant by wrapping the given error into an + /// [`AnyError`] + pub fn other(error: E) -> Self + where + E: core::error::Error + Send + Sync + 'static, + { + Self::Other(AnyError::new(error)) + } + + /// Returns the arbitrary error if it is [`ProviderError::Other`] + pub fn as_other(&self) -> Option<&(dyn core::error::Error + Send + Sync + 'static)> { + match self { + Self::Other(err) => Some(err.as_error()), + _ => None, + } + } + + /// Returns a reference to the [`ProviderError::Other`] value if this type is a + /// [`ProviderError::Other`] and the [`AnyError`] wraps an error of that type. Returns None + /// otherwise. + pub fn downcast_other_ref(&self) -> Option<&T> { + let other = self.as_other()?; + other.downcast_ref() + } + + /// Returns true if the this type is a [`ProviderError::Other`] of that error + /// type. Returns false otherwise. 
+ pub fn is_other(&self) -> bool { + self.as_other().map(|err| err.is::()).unwrap_or(false) + } } impl From for ProviderError { @@ -197,3 +235,19 @@ impl From for ProviderError { Self::ConsistentView(Box::new(error)) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[derive(thiserror::Error, Debug)] + #[error("E")] + struct E; + + #[test] + fn other_err() { + let err = ProviderError::other(E); + assert!(err.is_other::()); + assert!(err.downcast_other_ref::().is_some()); + } +} diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index c65c1522763d..c62f3d6177e7 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -581,11 +581,13 @@ impl TransactionPtr { self.timed_out.store(true, std::sync::atomic::Ordering::SeqCst); } + /// Acquires the inner transaction lock to guarantee exclusive access to the transaction + /// pointer. fn lock(&self) -> MutexGuard<'_, ()> { if let Some(lock) = self.lock.try_lock() { lock } else { - tracing::debug!( + tracing::trace!( target: "libmdbx", txn = %self.txn as usize, backtrace = %std::backtrace::Backtrace::capture(), diff --git a/crates/storage/libmdbx-rs/src/txn_manager.rs b/crates/storage/libmdbx-rs/src/txn_manager.rs index 4fdaddc82081..f2b16a1b98af 100644 --- a/crates/storage/libmdbx-rs/src/txn_manager.rs +++ b/crates/storage/libmdbx-rs/src/txn_manager.rs @@ -5,10 +5,7 @@ use crate::{ }; use std::{ ptr, - sync::{ - mpsc::{sync_channel, Receiver, SyncSender}, - Arc, - }, + sync::mpsc::{sync_channel, Receiver, SyncSender}, }; #[derive(Copy, Clone, Debug)] @@ -31,7 +28,7 @@ pub(crate) enum TxnManagerMessage { pub(crate) struct TxnManager { sender: SyncSender, #[cfg(feature = "read-tx-timeouts")] - read_transactions: Option>, + read_transactions: Option>, } impl TxnManager { diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 70c85f774feb..3e66ddeadc16 100644 --- 
a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -99,6 +99,8 @@ serde = [ "reth-execution-types/serde", "reth-trie-db/serde", "reth-trie/serde", + "reth-stages-types/serde", + "reth-prune-types/serde", ] test-utils = [ "reth-db/test-utils", diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 4d47cc23d4e6..1bd0fc03ccc1 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -797,22 +797,21 @@ impl BlockReader for ConsistentProvider { source: BlockSource, ) -> ProviderResult> { if matches!(source, BlockSource::Canonical | BlockSource::Any) { - return self.get_in_memory_or_storage_by_block( + if let Some(block) = self.get_in_memory_or_storage_by_block( hash.into(), |db_provider| db_provider.find_block_by_hash(hash, BlockSource::Canonical), |block_state| Ok(Some(block_state.block_ref().recovered_block().clone_block())), - ) + )? 
{ + return Ok(Some(block)) + } } if matches!(source, BlockSource::Pending | BlockSource::Any) { - if let Some(block) = self + return Ok(self .canonical_in_memory_state .pending_block() .filter(|b| b.hash() == hash) - .map(|b| b.into_block()) - { - return Ok(Some(block)) - } + .map(|b| b.into_block())) } Ok(None) diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index b3b9f420b09d..47e54dfcaf15 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -15,7 +15,7 @@ use alloy_primitives::{ Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; use parking_lot::Mutex; -use reth_chainspec::{ChainInfo, ChainSpec}; +use reth_chainspec::{ChainInfo, EthChainSpec}; use reth_db::mock::{DatabaseMock, TxMock}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_execution_types::ExecutionOutcome; @@ -43,8 +43,8 @@ use std::{ }; /// A mock implementation for Provider interfaces. 
-#[derive(Debug, Clone)] -pub struct MockEthProvider { +#[derive(Debug)] +pub struct MockEthProvider { /// Local block store pub blocks: Arc>>>, /// Local header store @@ -57,6 +57,18 @@ pub struct MockEthProvider { pub state_roots: Arc>>, } +impl Clone for MockEthProvider { + fn clone(&self) -> Self { + Self { + blocks: self.blocks.clone(), + headers: self.headers.clone(), + accounts: self.accounts.clone(), + chain_spec: self.chain_spec.clone(), + state_roots: self.state_roots.clone(), + } + } +} + impl MockEthProvider { /// Create a new, empty instance pub fn new() -> Self { @@ -68,7 +80,9 @@ impl MockEthProvider { state_roots: Default::default(), } } +} +impl MockEthProvider { /// Add block to local block store pub fn add_block(&self, hash: B256, block: Block) { self.add_header(hash, block.header.clone()); @@ -111,6 +125,17 @@ impl MockEthProvider { pub fn add_state_root(&self, state_root: B256) { self.state_roots.lock().push(state_root); } + + /// Set chain spec. + pub fn with_chain_spec(self, chain_spec: C) -> MockEthProvider { + MockEthProvider { + blocks: self.blocks, + headers: self.headers, + accounts: self.accounts, + chain_spec: Arc::new(chain_spec), + state_roots: self.state_roots, + } + } } impl Default for MockEthProvider { @@ -162,16 +187,20 @@ pub struct MockNode; impl NodeTypes for MockNode { type Primitives = EthPrimitives; - type ChainSpec = ChainSpec; + type ChainSpec = reth_chainspec::ChainSpec; type StateCommitment = MerklePatriciaTrie; type Storage = EthStorage; } -impl StateCommitmentProvider for MockEthProvider { +impl StateCommitmentProvider + for MockEthProvider +{ type StateCommitment = ::StateCommitment; } -impl DatabaseProviderFactory for MockEthProvider { +impl DatabaseProviderFactory + for MockEthProvider +{ type DB = DatabaseMock; type Provider = DatabaseProvider; type ProviderRW = DatabaseProvider; @@ -185,7 +214,7 @@ impl DatabaseProviderFactory for MockEthProvider { } } -impl HeaderProvider for MockEthProvider { +impl 
HeaderProvider for MockEthProvider { type Header = Header; fn header(&self, block_hash: &BlockHash) -> ProviderResult> { @@ -244,7 +273,9 @@ impl HeaderProvider for MockEthProvider { } } -impl ChainSpecProvider for MockEthProvider { +impl ChainSpecProvider + for MockEthProvider +{ type ChainSpec = ChainSpec; fn chain_spec(&self) -> Arc { @@ -252,7 +283,9 @@ impl ChainSpecProvider for MockEthProvider { } } -impl TransactionsProvider for MockEthProvider { +impl TransactionsProvider + for MockEthProvider +{ type Transaction = T; fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { @@ -391,7 +424,7 @@ impl TransactionsProvider for MockEthProvider { } } -impl ReceiptProvider for MockEthProvider { +impl ReceiptProvider for MockEthProvider { type Receipt = Receipt; fn receipt(&self, _id: TxNumber) -> ProviderResult> { @@ -414,9 +447,12 @@ impl ReceiptProvider for MockEthProvider { } } -impl ReceiptProviderIdExt for MockEthProvider {} +impl ReceiptProviderIdExt + for MockEthProvider +{ +} -impl BlockHashReader for MockEthProvider { +impl BlockHashReader for MockEthProvider { fn block_hash(&self, number: u64) -> ProviderResult> { let lock = self.blocks.lock(); @@ -440,7 +476,7 @@ impl BlockHashReader for MockEthProvider { } } -impl BlockNumReader for MockEthProvider { +impl BlockNumReader for MockEthProvider { fn chain_info(&self) -> ProviderResult { let best_block_number = self.best_block_number()?; let lock = self.headers.lock(); @@ -471,7 +507,7 @@ impl BlockNumReader for MockEthProvider { } } -impl BlockIdReader for MockEthProvider { +impl BlockIdReader for MockEthProvider { fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } @@ -485,7 +521,7 @@ impl BlockIdReader for MockEthProvider { } } -impl BlockReader for MockEthProvider { +impl BlockReader for MockEthProvider { type Block = Block; fn find_block_by_hash( @@ -559,7 +595,9 @@ impl BlockReader for MockEthProvider { } } -impl BlockReaderIdExt for MockEthProvider { +impl BlockReaderIdExt + for 
MockEthProvider +{ fn block_by_id(&self, id: BlockId) -> ProviderResult>> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), @@ -586,13 +624,15 @@ impl BlockReaderIdExt for MockEthProvider { } } -impl AccountReader for MockEthProvider { +impl AccountReader for MockEthProvider { fn basic_account(&self, address: &Address) -> ProviderResult> { Ok(self.accounts.lock().get(address).cloned().map(|a| a.account)) } } -impl StageCheckpointReader for MockEthProvider { +impl StageCheckpointReader + for MockEthProvider +{ fn get_stage_checkpoint(&self, _id: StageId) -> ProviderResult> { Ok(None) } @@ -606,7 +646,7 @@ impl StageCheckpointReader for MockEthProvider { } } -impl StateRootProvider for MockEthProvider { +impl StateRootProvider for MockEthProvider { fn state_root(&self, _state: HashedPostState) -> ProviderResult { Ok(self.state_roots.lock().pop().unwrap_or_default()) } @@ -632,7 +672,9 @@ impl StateRootProvider for MockEthProvider { } } -impl StorageRootProvider for MockEthProvider { +impl StorageRootProvider + for MockEthProvider +{ fn storage_root( &self, _address: Address, @@ -660,7 +702,7 @@ impl StorageRootProvider for MockEthProvider { } } -impl StateProofProvider for MockEthProvider { +impl StateProofProvider for MockEthProvider { fn proof( &self, _input: TrieInput, @@ -687,13 +729,17 @@ impl StateProofProvider for MockEthProvider { } } -impl HashedPostStateProvider for MockEthProvider { +impl HashedPostStateProvider + for MockEthProvider +{ fn hashed_post_state(&self, _state: &revm::db::BundleState) -> HashedPostState { HashedPostState::default() } } -impl StateProvider for MockEthProvider { +impl StateProvider + for MockEthProvider +{ fn storage( &self, account: Address, @@ -716,7 +762,9 @@ impl StateProvider for MockEthProvider { } } -impl StateProviderFactory for MockEthProvider { +impl StateProviderFactory + for MockEthProvider +{ fn latest(&self) -> ProviderResult { Ok(Box::new(self.clone())) } @@ -768,7 +816,9 @@ impl 
StateProviderFactory for MockEthProvider { } } -impl WithdrawalsProvider for MockEthProvider { +impl WithdrawalsProvider + for MockEthProvider +{ fn withdrawals_by_block( &self, _id: BlockHashOrNumber, @@ -778,13 +828,15 @@ impl WithdrawalsProvider for MockEthProvider { } } -impl OmmersProvider for MockEthProvider { +impl OmmersProvider for MockEthProvider { fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { Ok(None) } } -impl BlockBodyIndicesProvider for MockEthProvider { +impl BlockBodyIndicesProvider + for MockEthProvider +{ fn block_body_indices(&self, _num: u64) -> ProviderResult> { Ok(None) } @@ -796,7 +848,7 @@ impl BlockBodyIndicesProvider for MockEthProvider { } } -impl ChangeSetReader for MockEthProvider { +impl ChangeSetReader for MockEthProvider { fn account_block_changeset( &self, _block_number: BlockNumber, @@ -805,7 +857,7 @@ impl ChangeSetReader for MockEthProvider { } } -impl StateReader for MockEthProvider { +impl StateReader for MockEthProvider { type Receipt = Receipt; fn get_state(&self, _block: BlockNumber) -> ProviderResult> { diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 7ebff976d135..4d8fb640a8e3 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -34,3 +34,17 @@ alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true auto_impl.workspace = true + +[features] +default = ["std"] +std = [ + "reth-chainspec/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-rpc-types-engine/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "revm/std", + "reth-stages-types/std", +] diff --git a/crates/storage/storage-api/src/account.rs b/crates/storage/storage-api/src/account.rs index abcb289a29f5..b01deb35969f 100644 --- a/crates/storage/storage-api/src/account.rs +++ b/crates/storage/storage-api/src/account.rs @@ -1,12 +1,13 @@ +use alloc::{ + collections::{BTreeMap, BTreeSet}, + 
vec::Vec, +}; use alloy_primitives::{Address, BlockNumber}; use auto_impl::auto_impl; +use core::ops::{RangeBounds, RangeInclusive}; use reth_db_models::AccountBeforeTx; use reth_primitives_traits::Account; use reth_storage_errors::provider::ProviderResult; -use std::{ - collections::{BTreeMap, BTreeSet}, - ops::{RangeBounds, RangeInclusive}, -}; /// Account reader #[auto_impl(&, Arc, Box)] diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 23c0f8460aa0..5f0e35700637 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -2,11 +2,12 @@ use crate::{ BlockBodyIndicesProvider, BlockNumReader, HeaderProvider, OmmersProvider, ReceiptProvider, ReceiptProviderIdExt, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloc::{sync::Arc, vec::Vec}; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, B256}; +use core::ops::RangeInclusive; use reth_primitives::{RecoveredBlock, SealedBlock, SealedHeader}; use reth_storage_errors::provider::ProviderResult; -use std::ops::RangeInclusive; /// A helper enum that represents the origin of the requested block. 
/// @@ -153,7 +154,7 @@ pub trait BlockReader: ) -> ProviderResult>>; } -impl BlockReader for std::sync::Arc { +impl BlockReader for Arc { type Block = T::Block; fn find_block_by_hash( diff --git a/crates/storage/storage-api/src/block_hash.rs b/crates/storage/storage-api/src/block_hash.rs index aa8624b83441..a617d31ebdf6 100644 --- a/crates/storage/storage-api/src/block_hash.rs +++ b/crates/storage/storage-api/src/block_hash.rs @@ -1,3 +1,4 @@ +use alloc::vec::Vec; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/storage-api/src/block_indices.rs b/crates/storage/storage-api/src/block_indices.rs index 3c6860fb2717..5a4f1e22bb0c 100644 --- a/crates/storage/storage-api/src/block_indices.rs +++ b/crates/storage/storage-api/src/block_indices.rs @@ -1,7 +1,8 @@ +use alloc::vec::Vec; use alloy_primitives::BlockNumber; +use core::ops::RangeInclusive; use reth_db_models::StoredBlockBodyIndices; use reth_storage_errors::provider::ProviderResult; -use std::ops::RangeInclusive; /// Client trait for fetching block body indices related data. 
#[auto_impl::auto_impl(&, Arc)] diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index 4a77c4a064c3..dba4428d685b 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -1,6 +1,8 @@ use crate::{DBProvider, OmmersProvider, StorageLocation}; +use alloc::vec::Vec; use alloy_consensus::Header; use alloy_primitives::BlockNumber; +use core::marker::PhantomData; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, @@ -83,7 +85,7 @@ impl ChainStorageReader(std::marker::PhantomData<(T, H)>); +pub struct EthStorage(PhantomData<(T, H)>); impl Default for EthStorage { fn default() -> Self { diff --git a/crates/storage/storage-api/src/chain_info.rs b/crates/storage/storage-api/src/chain_info.rs index a15bf58a0138..77b5e5cebc78 100644 --- a/crates/storage/storage-api/src/chain_info.rs +++ b/crates/storage/storage-api/src/chain_info.rs @@ -1,6 +1,5 @@ use alloy_rpc_types_engine::ForkchoiceState; use reth_primitives_traits::SealedHeader; -use std::time::Instant; /// A type that can track updates related to fork choice updates. pub trait CanonChainTracker: Send + Sync { @@ -12,14 +11,16 @@ pub trait CanonChainTracker: Send + Sync { /// Returns the last time a fork choice update was received from the CL /// ([`CanonChainTracker::on_forkchoice_update_received`]) - fn last_received_update_timestamp(&self) -> Option; + #[cfg(feature = "std")] + fn last_received_update_timestamp(&self) -> Option; /// Notify the tracker about a transition configuration exchange. 
fn on_transition_configuration_exchanged(&self); /// Returns the last time a transition configuration was exchanged with the CL /// ([`CanonChainTracker::on_transition_configuration_exchanged`]) - fn last_exchanged_transition_configuration_timestamp(&self) -> Option; + #[cfg(feature = "std")] + fn last_exchanged_transition_configuration_timestamp(&self) -> Option; /// Sets the canonical head of the chain. fn set_canonical_head(&self, header: SealedHeader); diff --git a/crates/storage/storage-api/src/database_provider.rs b/crates/storage/storage-api/src/database_provider.rs index 20aebce88feb..a4c734677f73 100644 --- a/crates/storage/storage-api/src/database_provider.rs +++ b/crates/storage/storage-api/src/database_provider.rs @@ -1,3 +1,5 @@ +use alloc::vec::Vec; +use core::ops::{Bound, RangeBounds}; use reth_db_api::{ common::KeyValue, cursor::DbCursorRO, @@ -8,7 +10,6 @@ use reth_db_api::{ }; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderResult; -use std::ops::{Bound, RangeBounds}; /// Database provider. 
pub trait DBProvider: Send + Sync + Sized + 'static { diff --git a/crates/storage/storage-api/src/hashing.rs b/crates/storage/storage-api/src/hashing.rs index 7cd30a82510c..c6753e5540bc 100644 --- a/crates/storage/storage-api/src/hashing.rs +++ b/crates/storage/storage-api/src/hashing.rs @@ -1,12 +1,10 @@ +use alloc::collections::{BTreeMap, BTreeSet}; use alloy_primitives::{map::HashMap, Address, BlockNumber, B256}; use auto_impl::auto_impl; +use core::ops::{RangeBounds, RangeInclusive}; use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; use reth_primitives::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderResult; -use std::{ - collections::{BTreeMap, BTreeSet}, - ops::{RangeBounds, RangeInclusive}, -}; /// Hashing Writer #[auto_impl(&, Arc, Box)] diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index b331a9e3a9b7..a4c9b215f82d 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,8 +1,9 @@ +use alloc::vec::Vec; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, U256}; +use core::ops::RangeBounds; use reth_primitives_traits::{BlockHeader, SealedHeader}; use reth_storage_errors::provider::ProviderResult; -use std::ops::RangeBounds; /// A helper type alias to access [`HeaderProvider::Header`]. pub type ProviderHeader

=

::Header; diff --git a/crates/storage/storage-api/src/history.rs b/crates/storage/storage-api/src/history.rs index 77c6324d88d6..ec7b1a5e3a23 100644 --- a/crates/storage/storage-api/src/history.rs +++ b/crates/storage/storage-api/src/history.rs @@ -1,9 +1,9 @@ use alloy_primitives::{Address, BlockNumber, B256}; use auto_impl::auto_impl; +use core::ops::{RangeBounds, RangeInclusive}; use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; use reth_primitives_traits::StorageEntry; use reth_storage_errors::provider::ProviderResult; -use std::ops::{RangeBounds, RangeInclusive}; /// History Writer #[auto_impl(&, Arc, Box)] diff --git a/crates/storage/storage-api/src/legacy.rs b/crates/storage/storage-api/src/legacy.rs index e53a5d8bfa2b..bb6a21e4e158 100644 --- a/crates/storage/storage-api/src/legacy.rs +++ b/crates/storage/storage-api/src/legacy.rs @@ -2,6 +2,7 @@ //! //! This module is scheduled for removal in the future. +use alloc::boxed::Box; use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; use auto_impl::auto_impl; diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 150d745e3bfe..70714dcc826a 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -7,6 +7,9 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; // Re-export used error types. 
pub use reth_storage_errors as errors; diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 20d975852f4d..a16b49e73a64 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -8,12 +8,17 @@ use crate::{ StateProviderBox, StateProviderFactory, StateRootProvider, StorageRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{eip4895::Withdrawals, BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ map::{B256HashMap, HashMap}, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; +use core::{ + marker::PhantomData, + ops::{RangeBounds, RangeInclusive}, +}; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET}; use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_primitives::{EthPrimitives, RecoveredBlock, SealedBlock}; @@ -25,11 +30,6 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, TrieInput, }; -use std::{ - marker::PhantomData, - ops::{RangeBounds, RangeInclusive}, - sync::Arc, -}; /// Supports various api interfaces for testing purposes. 
#[derive(Debug)] @@ -83,7 +83,7 @@ impl BlockHashReader for NoopProvider ProviderResult> { - Ok(vec![]) + Ok(Vec::new()) } } @@ -196,21 +196,21 @@ impl BlockReader for NoopProvider { } fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { - Ok(vec![]) + Ok(Vec::new()) } fn block_with_senders_range( &self, _range: RangeInclusive, ) -> ProviderResult>> { - Ok(vec![]) + Ok(Vec::new()) } fn sealed_block_with_senders_range( &self, _range: RangeInclusive, ) -> ProviderResult>> { - Ok(vec![]) + Ok(Vec::new()) } } @@ -302,7 +302,7 @@ impl ReceiptProvider for NoopProvider { &self, _range: impl RangeBounds, ) -> ProviderResult> { - Ok(vec![]) + Ok(Vec::new()) } } @@ -331,7 +331,7 @@ impl HeaderProvider for NoopProvider { &self, _range: impl RangeBounds, ) -> ProviderResult> { - Ok(vec![]) + Ok(Vec::new()) } fn sealed_header( @@ -346,7 +346,7 @@ impl HeaderProvider for NoopProvider { _range: impl RangeBounds, _predicate: impl FnMut(&SealedHeader) -> bool, ) -> ProviderResult>> { - Ok(vec![]) + Ok(Vec::new()) } } @@ -572,6 +572,6 @@ impl BlockBodyIndicesProvider for NoopProvider, ) -> ProviderResult> { - Ok(vec![]) + Ok(Vec::new()) } } diff --git a/crates/storage/storage-api/src/ommers.rs b/crates/storage/storage-api/src/ommers.rs index 52c54104ecf5..c3f68b4f96e0 100644 --- a/crates/storage/storage-api/src/ommers.rs +++ b/crates/storage/storage-api/src/ommers.rs @@ -1,4 +1,5 @@ use crate::HeaderProvider; +use alloc::{sync::Arc, vec::Vec}; use alloy_eips::BlockHashOrNumber; use reth_storage_errors::provider::ProviderResult; @@ -10,7 +11,7 @@ pub trait OmmersProvider: HeaderProvider + Send + Sync { fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; } -impl OmmersProvider for std::sync::Arc { +impl OmmersProvider for Arc { fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { T::ommers(self, id) } diff --git a/crates/storage/storage-api/src/prune_checkpoint.rs b/crates/storage/storage-api/src/prune_checkpoint.rs index ba83ea51030a..6b3abebd6c53 
100644 --- a/crates/storage/storage-api/src/prune_checkpoint.rs +++ b/crates/storage/storage-api/src/prune_checkpoint.rs @@ -1,3 +1,4 @@ +use alloc::vec::Vec; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs index fdb703238564..969e0627c9be 100644 --- a/crates/storage/storage-api/src/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -1,9 +1,10 @@ use crate::BlockIdReader; +use alloc::vec::Vec; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{TxHash, TxNumber}; +use core::ops::RangeBounds; use reth_primitives_traits::Receipt; use reth_storage_errors::provider::ProviderResult; -use std::ops::RangeBounds; /// A helper type alias to access [`ReceiptProvider::Receipt`]. pub type ProviderReceipt

=

::Receipt; diff --git a/crates/storage/storage-api/src/stage_checkpoint.rs b/crates/storage/storage-api/src/stage_checkpoint.rs index 90ad9eadbab0..37324e60820e 100644 --- a/crates/storage/storage-api/src/stage_checkpoint.rs +++ b/crates/storage/storage-api/src/stage_checkpoint.rs @@ -1,3 +1,4 @@ +use alloc::{string::String, vec::Vec}; use alloy_primitives::BlockNumber; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 23ba7ebb22e7..21f75dc23777 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -2,6 +2,7 @@ use super::{ AccountReader, BlockHashReader, BlockIdReader, StateProofProvider, StateRootProvider, StorageRootProvider, }; +use alloc::boxed::Box; use alloy_consensus::constants::KECCAK_EMPTY; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue, B256, U256}; diff --git a/crates/storage/storage-api/src/storage.rs b/crates/storage/storage-api/src/storage.rs index fbdde137454f..6a9be1997c65 100644 --- a/crates/storage/storage-api/src/storage.rs +++ b/crates/storage/storage-api/src/storage.rs @@ -1,11 +1,12 @@ +use alloc::{ + collections::{BTreeMap, BTreeSet}, + vec::Vec, +}; use alloy_primitives::{Address, BlockNumber, B256}; +use core::ops::RangeInclusive; use reth_db_api::models::BlockNumberAddress; use reth_primitives_traits::StorageEntry; use reth_storage_errors::provider::ProviderResult; -use std::{ - collections::{BTreeMap, BTreeSet}, - ops::RangeInclusive, -}; /// Storage reader #[auto_impl::auto_impl(&, Arc, Box)] diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index e156119b8b24..8d9f20bf23b9 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,10 +1,11 @@ 
use crate::{BlockNumReader, BlockReader}; +use alloc::vec::Vec; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; +use core::ops::{Range, RangeBounds, RangeInclusive}; use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::ops::{Range, RangeBounds, RangeInclusive}; /// Enum to control transaction hash inclusion. /// diff --git a/crates/tokio-util/src/event_sender.rs b/crates/tokio-util/src/event_sender.rs index 16208ee19c0e..b0e6d0a55a0f 100644 --- a/crates/tokio-util/src/event_sender.rs +++ b/crates/tokio-util/src/event_sender.rs @@ -4,8 +4,8 @@ use tracing::trace; const DEFAULT_SIZE_BROADCAST_CHANNEL: usize = 2000; -/// A bounded broadcast channel for a task. -#[derive(Debug, Clone)] +/// A bounded multi-producer, multi-consumer broadcast channel. +#[derive(Debug)] pub struct EventSender { /// The sender part of the broadcast channel sender: Sender, @@ -20,6 +20,12 @@ where } } +impl Clone for EventSender { + fn clone(&self) -> Self { + Self { sender: self.sender.clone() } + } +} + impl EventSender { /// Creates a new `EventSender`. 
pub fn new(events_channel_size: usize) -> Self { diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 238a0e1d15ee..08435c9009c5 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -18,7 +18,6 @@ reth-chainspec.workspace = true reth-eth-wire-types.workspace = true reth-primitives = { workspace = true, features = ["c-kzg", "secp256k1"] } reth-primitives-traits.workspace = true -reth-payload-util.workspace = true reth-execution-types.workspace = true reth-fs-util.workspace = true reth-storage-api.workspace = true diff --git a/crates/transaction-pool/benches/reorder.rs b/crates/transaction-pool/benches/reorder.rs index 534f0e201d39..234004f96f2d 100644 --- a/crates/transaction-pool/benches/reorder.rs +++ b/crates/transaction-pool/benches/reorder.rs @@ -113,7 +113,7 @@ fn generate_test_data( mod implementations { use super::*; - use reth_transaction_pool::PoolTransaction; + use alloy_consensus::Transaction; use std::collections::BinaryHeap; /// This implementation appends the transactions and uses [`Vec::sort_by`] function for sorting. diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 0aa53807fae5..e78a4d2a3168 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -1,5 +1,7 @@ //! Transaction pool errors +use std::any::Any; + use alloy_eips::eip4844::BlobTransactionValidationError; use alloy_primitives::{Address, TxHash, U256}; use reth_primitives::InvalidTransactionError; @@ -17,6 +19,9 @@ pub trait PoolTransactionError: core::error::Error + Send + Sync { /// /// See [`PoolError::is_bad_transaction`]. fn is_bad_transaction(&self) -> bool; + + /// Returns a reference to `self` as a `&dyn Any`, enabling downcasting. 
+ fn as_any(&self) -> &dyn Any; } // Needed for `#[error(transparent)]` @@ -156,9 +161,9 @@ pub enum Eip4844PoolTransactionError { #[error("too many blobs in transaction: have {have}, permitted {permitted}")] TooManyEip4844Blobs { /// Number of blobs the transaction has - have: usize, + have: u64, /// Number of maximum blobs the transaction can have - permitted: usize, + permitted: u64, }, /// Thrown if validating the blob sidecar for the transaction failed. #[error(transparent)] @@ -322,4 +327,52 @@ impl InvalidPoolTransactionError { matches!(self, Self::Consensus(InvalidTransactionError::NonceNotConsistent { .. })) || matches!(self, Self::Eip4844(Eip4844PoolTransactionError::Eip4844NonceGap)) } + + /// Returns the arbitrary error if it is [`InvalidPoolTransactionError::Other`] + pub fn as_other(&self) -> Option<&dyn PoolTransactionError> { + match self { + Self::Other(err) => Some(&**err), + _ => None, + } + } + + /// Returns a reference to the [`InvalidPoolTransactionError::Other`] value if this type is a + /// [`InvalidPoolTransactionError::Other`] of that type. Returns None otherwise. + pub fn downcast_other_ref(&self) -> Option<&T> { + let other = self.as_other()?; + other.as_any().downcast_ref() + } + + /// Returns true if the this type is a [`InvalidPoolTransactionError::Other`] of that error + /// type. Returns false otherwise. 
+ pub fn is_other(&self) -> bool { + self.as_other().map(|err| err.as_any().is::()).unwrap_or(false) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[derive(thiserror::Error, Debug)] + #[error("err")] + struct E; + + impl PoolTransactionError for E { + fn is_bad_transaction(&self) -> bool { + false + } + + fn as_any(&self) -> &dyn Any { + self + } + } + + #[test] + fn other_downcast() { + let err = InvalidPoolTransactionError::Other(Box::new(E)); + assert!(err.is_other::()); + + assert!(err.downcast_other_ref::().is_some()); + } } diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 1cbe863fb014..04e5e2411f19 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -82,12 +82,14 @@ //! use reth_chainspec::MAINNET; //! use reth_storage_api::StateProviderFactory; //! use reth_tasks::TokioTaskExecutor; +//! use reth_chainspec::ChainSpecProvider; //! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool, TransactionPool}; //! use reth_transaction_pool::blobstore::InMemoryBlobStore; -//! async fn t(client: C) where C: StateProviderFactory + Clone + 'static{ +//! use reth_chainspec::EthereumHardforks; +//! async fn t(client: C) where C: ChainSpecProvider + StateProviderFactory + Clone + 'static{ //! let blob_store = InMemoryBlobStore::default(); //! let pool = Pool::eth_pool( -//! TransactionValidationTaskExecutor::eth(client, MAINNET.clone(), blob_store.clone(), TokioTaskExecutor::default()), +//! TransactionValidationTaskExecutor::eth(client, blob_store.clone(), TokioTaskExecutor::default()), //! blob_store, //! Default::default(), //! ); @@ -126,7 +128,7 @@ //! let manager = TaskManager::new(rt.handle().clone()); //! let executor = manager.executor(); //! let pool = Pool::eth_pool( -//! TransactionValidationTaskExecutor::eth(client.clone(), MAINNET.clone(), blob_store.clone(), executor.clone()), +//! 
TransactionValidationTaskExecutor::eth(client.clone(), blob_store.clone(), executor.clone()), //! blob_store, //! Default::default(), //! ); @@ -174,6 +176,7 @@ use crate::{identifier::TransactionId, pool::PoolInner}; use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::{Address, TxHash, B256, U256}; use aquamarine as _; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::Recovered; @@ -277,11 +280,17 @@ where pub fn is_exceeded(&self) -> bool { self.pool.is_exceeded() } + + /// Returns the configured blob store. + pub fn blob_store(&self) -> &S { + self.pool.blob_store() + } } impl EthTransactionPool where - Client: StateProviderFactory + Clone + 'static, + Client: + ChainSpecProvider + StateProviderFactory + Clone + 'static, S: BlobStore, { /// Returns a new [`Pool`] that uses the default [`TransactionValidationTaskExecutor`] when @@ -293,15 +302,16 @@ where /// use reth_chainspec::MAINNET; /// use reth_storage_api::StateProviderFactory; /// use reth_tasks::TokioTaskExecutor; + /// use reth_chainspec::ChainSpecProvider; /// use reth_transaction_pool::{ /// blobstore::InMemoryBlobStore, Pool, TransactionValidationTaskExecutor, /// }; - /// # fn t(client: C) where C: StateProviderFactory + Clone + 'static { + /// use reth_chainspec::EthereumHardforks; + /// # fn t(client: C) where C: ChainSpecProvider + StateProviderFactory + Clone + 'static { /// let blob_store = InMemoryBlobStore::default(); /// let pool = Pool::eth_pool( /// TransactionValidationTaskExecutor::eth( /// client, - /// MAINNET.clone(), /// blob_store.clone(), /// TokioTaskExecutor::default(), /// ), diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 84d1545abe89..57025517a88b 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -30,7 +30,10 @@ use 
std::{ path::{Path, PathBuf}, sync::Arc, }; -use tokio::sync::oneshot; +use tokio::{ + sync::oneshot, + time::{self, Duration}, +}; use tracing::{debug, error, info, trace, warn}; /// Additional settings for maintaining the transaction pool @@ -45,11 +48,19 @@ pub struct MaintainPoolConfig { /// /// Default: 100 pub max_reload_accounts: usize, + + /// Maximum amount of time non-executable, non local transactions are queued. + /// Default: 3 hours + pub max_tx_lifetime: Duration, } impl Default for MaintainPoolConfig { fn default() -> Self { - Self { max_update_depth: 64, max_reload_accounts: 100 } + Self { + max_update_depth: 64, + max_reload_accounts: 100, + max_tx_lifetime: Duration::from_secs(3 * 60 * 60), + } } } @@ -115,13 +126,10 @@ pub async fn maintain_transaction_pool( last_seen_block_hash: latest.hash(), last_seen_block_number: latest.number(), pending_basefee: latest - .next_block_base_fee( - chain_spec.base_fee_params_at_timestamp(latest.timestamp().saturating_add(12)), - ) + .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(latest.timestamp())) .unwrap_or_default(), - pending_blob_fee: latest.maybe_next_block_blob_fee( - chain_spec.blob_params_at_timestamp(latest.timestamp().saturating_add(12)), - ), + pending_blob_fee: latest + .maybe_next_block_blob_fee(chain_spec.blob_params_at_timestamp(latest.timestamp())), }; pool.set_block_info(info); } @@ -142,6 +150,12 @@ pub async fn maintain_transaction_pool( // the future that reloads accounts from state let mut reload_accounts_fut = Fuse::terminated(); + // eviction interval for stale non local txs + let mut stale_eviction_interval = time::interval(config.max_tx_lifetime); + + // toggle for the first notification + let mut first_event = true; + // The update loop that waits for new blocks and reorgs and performs pool updated // Listen for new chain events and derive the update action for the pool loop { @@ -216,7 +230,7 @@ pub async fn maintain_transaction_pool( let mut reloaded = None; // 
select of account reloads and new canonical state updates which should arrive at the rate - // of the block time (12s) + // of the block time tokio::select! { res = &mut reload_accounts_fut => { reloaded = Some(res); @@ -227,9 +241,27 @@ pub async fn maintain_transaction_pool( break; } event = ev; + // on receiving the first event on start up, mark the pool as drifted to explicitly + // trigger revalidation and clear out outdated txs. + if first_event { + maintained_state = MaintainedPoolState::Drifted; + first_event = false + } + } + _ = stale_eviction_interval.tick() => { + let stale_txs: Vec<_> = pool + .queued_transactions() + .into_iter() + .filter(|tx| { + // filter stale external txs + tx.origin.is_external() && tx.timestamp.elapsed() > config.max_tx_lifetime + }) + .map(|tx| *tx.hash()) + .collect(); + debug!(target: "txpool", count=%stale_txs.len(), "removing stale transactions"); + pool.remove_transactions(stale_txs); } } - // handle the result of the account reload match reloaded { Some(Ok(Ok(LoadedAccounts { accounts, failed_to_load }))) => { @@ -276,12 +308,11 @@ pub async fn maintain_transaction_pool( let pending_block_base_fee = new_tip .header() .next_block_base_fee( - chain_spec - .base_fee_params_at_timestamp(new_tip.timestamp().saturating_add(12)), + chain_spec.base_fee_params_at_timestamp(new_tip.timestamp()), ) .unwrap_or_default(); let pending_block_blob_fee = new_tip.header().maybe_next_block_blob_fee( - chain_spec.blob_params_at_timestamp(new_tip.timestamp().saturating_add(12)), + chain_spec.blob_params_at_timestamp(new_tip.timestamp()), ); // we know all changed account in the new chain @@ -382,12 +413,10 @@ pub async fn maintain_transaction_pool( // fees for the next block: `tip+1` let pending_block_base_fee = tip .header() - .next_block_base_fee( - chain_spec.base_fee_params_at_timestamp(tip.timestamp().saturating_add(12)), - ) + .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(tip.timestamp())) .unwrap_or_default(); let 
pending_block_blob_fee = tip.header().maybe_next_block_blob_fee( - chain_spec.blob_params_at_timestamp(tip.timestamp().saturating_add(12)), + chain_spec.blob_params_at_timestamp(tip.timestamp()), ); let first_block = blocks.first(); @@ -677,7 +706,6 @@ mod tests { }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; - use reth_chainspec::MAINNET; use reth_fs_util as fs; use reth_primitives::{PooledTransaction, TransactionSigned}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; @@ -701,13 +729,12 @@ mod tests { let tx_bytes = hex!("02f87201830655c2808505ef61f08482565f94388c818ca8b9251b393131c08a736a67ccb192978801049e39c4b5b1f580c001a01764ace353514e8abdfb92446de356b260e3c1225b73fc4c8876a6258d12a129a04f02294aa61ca7676061cd99f29275491218b4754b46a0248e5e42bc5091f507"); let tx = PooledTransaction::decode_2718(&mut &tx_bytes[..]).unwrap(); let provider = MockEthProvider::default(); - let transaction: EthPooledTransaction = tx.try_into_recovered().unwrap().into(); + let transaction = EthPooledTransaction::from_pooled(tx.try_into_recovered().unwrap()); let tx_to_cmp = transaction.clone(); let sender = hex!("1f9090aaE28b8a3dCeaDf281B0F12828e676c326").into(); provider.add_account(sender, ExtendedAccount::new(42, U256::MAX)); let blob_store = InMemoryBlobStore::default(); - let validator = EthTransactionValidatorBuilder::new(MAINNET.clone()) - .build(provider, blob_store.clone()); + let validator = EthTransactionValidatorBuilder::new(provider).build(blob_store.clone()); let txpool = Pool::new( validator.clone(), diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 7d58e3984416..681fa479de04 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -4,10 +4,11 @@ use crate::{ pool::pending::PendingTransaction, PoolTransaction, TransactionOrdering, ValidPoolTransaction, }; +use alloy_consensus::Transaction; +use alloy_eips::Typed2718; use 
alloy_primitives::Address; use core::fmt; -use reth_payload_util::PayloadTransactions; -use reth_primitives::{InvalidTransactionError, Recovered}; +use reth_primitives::InvalidTransactionError; use std::{ collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, sync::Arc, @@ -221,51 +222,6 @@ impl Iterator for BestTransactions { } } -/// Wrapper struct that allows to convert `BestTransactions` (used in tx pool) to -/// `PayloadTransactions` (used in block composition). -#[derive(Debug)] -pub struct BestPayloadTransactions -where - T: PoolTransaction, - I: Iterator>>, -{ - invalid: HashSet

, - best: I, -} - -impl BestPayloadTransactions -where - T: PoolTransaction, - I: Iterator>>, -{ - /// Create a new `BestPayloadTransactions` with the given iterator. - pub fn new(best: I) -> Self { - Self { invalid: Default::default(), best } - } -} - -impl PayloadTransactions for BestPayloadTransactions -where - T: PoolTransaction, - I: Iterator>>, -{ - type Transaction = T::Consensus; - - fn next(&mut self, _ctx: ()) -> Option> { - loop { - let tx = self.best.next()?; - if self.invalid.contains(&tx.sender()) { - continue - } - return Some(tx.to_consensus()) - } - } - - fn mark_invalid(&mut self, sender: Address, _nonce: u64) { - self.invalid.insert(sender); - } -} - /// A [`BestTransactions`](crate::traits::BestTransactions) implementation that filters the /// transactions of iter with predicate. /// @@ -423,7 +379,6 @@ mod tests { BestTransactions, Priority, }; use alloy_primitives::U256; - use reth_payload_util::{PayloadTransactionsChain, PayloadTransactionsFixed}; #[test] fn test_best_iter() { @@ -844,85 +799,6 @@ mod tests { // TODO: Test that gas limits for prioritized transactions are respected } - #[test] - fn test_best_transactions_chained_iterators() { - let mut priority_pool = PendingPool::new(MockOrdering::default()); - let mut pool = PendingPool::new(MockOrdering::default()); - let mut f = MockTransactionFactory::default(); - - // Block composition - // === - // (1) up to 100 gas: custom top-of-block transaction - // (2) up to 100 gas: transactions from the priority pool - // (3) up to 200 gas: only transactions from address A - // (4) up to 200 gas: only transactions from address B - // (5) until block gas limit: all transactions from the main pool - - // Notes: - // - If prioritized addresses overlap, a single transaction will be prioritized twice and - // therefore use the per-segment gas limit twice. - // - Priority pool and main pool must synchronize between each other to make sure there are - // no conflicts for the same nonce. 
For example, in this scenario, pools can't reject - // transactions with seemingly incorrect nonces, because previous transactions might be in - // the other pool. - - let address_top_of_block = Address::random(); - let address_in_priority_pool = Address::random(); - let address_a = Address::random(); - let address_b = Address::random(); - let address_regular = Address::random(); - - // Add transactions to the main pool - { - let prioritized_tx_a = - MockTransaction::eip1559().with_gas_price(5).with_sender(address_a); - // without our custom logic, B would be prioritized over A due to gas price: - let prioritized_tx_b = - MockTransaction::eip1559().with_gas_price(10).with_sender(address_b); - let regular_tx = - MockTransaction::eip1559().with_gas_price(15).with_sender(address_regular); - pool.add_transaction(Arc::new(f.validated(prioritized_tx_a)), 0); - pool.add_transaction(Arc::new(f.validated(prioritized_tx_b)), 0); - pool.add_transaction(Arc::new(f.validated(regular_tx)), 0); - } - - // Add transactions to the priority pool - { - let prioritized_tx = - MockTransaction::eip1559().with_gas_price(0).with_sender(address_in_priority_pool); - let valid_prioritized_tx = f.validated(prioritized_tx); - priority_pool.add_transaction(Arc::new(valid_prioritized_tx), 0); - } - - let mut block = PayloadTransactionsChain::new( - PayloadTransactionsFixed::single( - MockTransaction::eip1559().with_sender(address_top_of_block).into(), - ), - Some(100), - PayloadTransactionsChain::new( - BestPayloadTransactions::new(priority_pool.best()), - Some(100), - BestPayloadTransactions::new(BestTransactionsWithPrioritizedSenders::new( - HashSet::from([address_a]), - 200, - BestTransactionsWithPrioritizedSenders::new( - HashSet::from([address_b]), - 200, - pool.best(), - ), - )), - None, - ), - None, - ); - - assert_eq!(block.next(()).unwrap().signer(), address_top_of_block); - assert_eq!(block.next(()).unwrap().signer(), address_in_priority_pool); - 
assert_eq!(block.next(()).unwrap().signer(), address_a); - assert_eq!(block.next(()).unwrap().signer(), address_b); - assert_eq!(block.next(()).unwrap().signer(), address_regular); - } - #[test] fn test_best_with_fees_iter_no_blob_fee_required() { // Tests transactions without blob fees where base fees are checked. diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index e6c0cb245c3f..b20247fdf797 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -29,7 +29,7 @@ pub(crate) struct BlobTransactions { pending_fees: PendingFees, /// Keeps track of the size of this pool. /// - /// See also [`PoolTransaction::size`]. + /// See also [`reth_primitives_traits::InMemorySize::size`]. size_of: SizeTracker, } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 8eb221842b88..5bd88bedae8a 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -87,7 +87,7 @@ use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; -use alloy_eips::eip4844::BlobTransactionSidecar; +use alloy_eips::{eip4844::BlobTransactionSidecar, Typed2718}; use reth_primitives::Recovered; use rustc_hash::FxHashMap; use std::{collections::HashSet, fmt, sync::Arc, time::Instant}; @@ -101,9 +101,7 @@ use crate::{ traits::{GetPooledTransactionLimit, NewBlobSidecar, TransactionListenerKind}, validate::ValidTransaction, }; -pub use best::{ - BestPayloadTransactions, BestTransactionFilter, BestTransactionsWithPrioritizedSenders, -}; +pub use best::{BestTransactionFilter, BestTransactionsWithPrioritizedSenders}; pub use blob::{blob_tx_priority, fee_delta}; pub use events::{FullTransactionEvent, TransactionEvent}; pub use listener::{AllTransactionsEvents, TransactionEvents}; diff --git a/crates/transaction-pool/src/pool/parked.rs 
b/crates/transaction-pool/src/pool/parked.rs index 29216af47d02..bc02497a4d88 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -43,7 +43,7 @@ pub struct ParkedPool { sender_transaction_count: FxHashMap, /// Keeps track of the size of this pool. /// - /// See also [`PoolTransaction::size`]. + /// See also [`reth_primitives_traits::InMemorySize::size`]. size_of: SizeTracker, } @@ -520,6 +520,7 @@ impl Ord for QueuedOrd { mod tests { use super::*; use crate::test_utils::{MockTransaction, MockTransactionFactory, MockTransactionSet}; + use alloy_consensus::Transaction; use alloy_primitives::address; use reth_primitives::TxType; use std::collections::HashSet; diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 52e2c2d2b950..025984eeef2f 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -43,7 +43,7 @@ pub struct PendingPool { independent_transactions: FxHashMap>, /// Keeps track of the size of this pool. /// - /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size). + /// See also [`reth_primitives_traits::InMemorySize::size`]. size_of: SizeTracker, /// Used to broadcast new transactions that have been added to the `PendingPool` to existing /// `static_files` of this pool. @@ -98,7 +98,7 @@ impl PendingPool { /// provides a way to mark transactions that the consumer of this iterator considers invalid. In /// which case the transaction's subgraph is also automatically marked invalid, See (1.). /// Invalid transactions are skipped. 
- pub(crate) fn best(&self) -> BestTransactions { + pub fn best(&self) -> BestTransactions { BestTransactions { all: self.by_id.clone(), independent: self.independent_transactions.values().cloned().collect(), @@ -614,6 +614,7 @@ mod tests { test_utils::{MockOrdering, MockTransaction, MockTransactionFactory, MockTransactionSet}, PoolTransaction, }; + use alloy_consensus::Transaction; use alloy_primitives::address; use reth_primitives::TxType; use std::collections::HashSet; diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index a0a1f6a369fe..cfe09a13975f 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -25,6 +25,7 @@ use alloy_consensus::constants::{ use alloy_eips::{ eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, eip4844::BLOB_TX_MIN_BLOB_GASPRICE, + Typed2718, }; use alloy_primitives::{Address, TxHash, B256}; use rustc_hash::FxHashMap; @@ -571,7 +572,7 @@ impl TxPool { let mut eip7702_count = 0; for tx in self.all_transactions.transactions_iter() { - match tx.transaction.tx_type() { + match tx.transaction.ty() { LEGACY_TX_TYPE_ID => legacy_count += 1, EIP2930_TX_TYPE_ID => eip2930_count += 1, EIP1559_TX_TYPE_ID => eip1559_count += 1, @@ -1978,6 +1979,7 @@ mod tests { traits::TransactionOrigin, SubPoolLimit, }; + use alloy_consensus::Transaction; use alloy_primitives::address; use reth_primitives::TxType; diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 4944dd110c16..80275c7e47aa 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -1,4 +1,4 @@ -use crate::EthPooledTransaction; +use crate::{EthPooledTransaction, PoolTransaction}; use alloy_consensus::{SignableTransaction, TxEip1559, TxEip4844, TxLegacy}; use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2718::Encodable2718, eip2930::AccessList}; use 
alloy_primitives::{Address, Bytes, TxKind, B256, U256}; @@ -101,7 +101,8 @@ impl TransactionGenerator { /// Generates and returns a pooled EIP-1559 transaction with a random signer. pub fn gen_eip1559_pooled(&mut self) -> EthPooledTransaction { - self.gen_eip1559().try_into_recovered().unwrap().try_into().unwrap() + EthPooledTransaction::try_from_consensus(self.gen_eip1559().try_into_recovered().unwrap()) + .unwrap() } /// Generates and returns a pooled EIP-4844 transaction with a random signer. diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 3c393ed508fb..874a5de7aff3 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -29,7 +29,10 @@ use rand::{ prelude::Distribution, }; use reth_primitives::{ - transaction::{SignedTransactionIntoRecoveredExt, TryFromRecoveredTransactionError}, + transaction::{ + SignedTransactionIntoRecoveredExt, TransactionConversionError, + TryFromRecoveredTransactionError, + }, PooledTransaction, Recovered, Transaction, TransactionSigned, TxType, }; use reth_primitives_traits::{InMemorySize, SignedTransaction}; @@ -218,6 +221,8 @@ pub enum MockTransaction { input: Bytes, /// The sidecar information for the transaction. sidecar: BlobTransactionSidecar, + /// The blob versioned hashes for the transaction. + blob_versioned_hashes: Vec, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. 
@@ -359,6 +364,7 @@ impl MockTransaction { input: Bytes::new(), access_list: Default::default(), sidecar: Default::default(), + blob_versioned_hashes: Default::default(), size: Default::default(), cost: U256::ZERO, } @@ -367,7 +373,10 @@ impl MockTransaction { /// Returns a new EIP4844 transaction with a provided sidecar pub fn eip4844_with_sidecar(sidecar: BlobTransactionSidecar) -> Self { let mut transaction = Self::eip4844(); - if let Self::Eip4844 { sidecar: ref mut existing_sidecar, .. } = &mut transaction { + if let Self::Eip4844 { sidecar: existing_sidecar, blob_versioned_hashes, .. } = + &mut transaction + { + *blob_versioned_hashes = sidecar.versioned_hashes().collect(); *existing_sidecar = sidecar; } transaction @@ -661,18 +670,12 @@ impl MockTransaction { } impl PoolTransaction for MockTransaction { - type TryFromConsensusError = TryFromRecoveredTransactionError; + type TryFromConsensusError = TransactionConversionError; type Consensus = TransactionSigned; type Pooled = PooledTransaction; - fn try_from_consensus( - tx: Recovered, - ) -> Result { - tx.try_into() - } - fn into_consensus(self) -> Recovered { self.into() } @@ -681,15 +684,6 @@ impl PoolTransaction for MockTransaction { pooled.into() } - fn try_consensus_into_pooled( - tx: Recovered, - ) -> Result, Self::TryFromConsensusError> { - let (tx, signer) = tx.into_parts(); - Self::Pooled::try_from(tx) - .map(|tx| tx.with_signer(signer)) - .map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) - } - fn hash(&self) -> &TxHash { self.get_hash() } @@ -702,10 +696,6 @@ impl PoolTransaction for MockTransaction { self.get_sender() } - fn nonce(&self) -> u64 { - *self.get_nonce() - } - // Having `get_cost` from `make_setters_getters` would be cleaner but we didn't // want to also generate the error-prone cost setters. 
For now cost should be // correct at construction and auto-updated per field update via `update_cost`, @@ -720,10 +710,56 @@ impl PoolTransaction for MockTransaction { } } + /// Returns the encoded length of the transaction. + fn encoded_length(&self) -> usize { + self.size() + } +} + +impl InMemorySize for MockTransaction { + fn size(&self) -> usize { + *self.get_size() + } +} + +impl Typed2718 for MockTransaction { + fn ty(&self) -> u8 { + match self { + Self::Legacy { .. } => TxType::Legacy.into(), + Self::Eip1559 { .. } => TxType::Eip1559.into(), + Self::Eip4844 { .. } => TxType::Eip4844.into(), + Self::Eip2930 { .. } => TxType::Eip2930.into(), + Self::Eip7702 { .. } => TxType::Eip7702.into(), + } + } +} + +impl alloy_consensus::Transaction for MockTransaction { + fn chain_id(&self) -> Option { + match self { + Self::Legacy { chain_id, .. } => *chain_id, + Self::Eip1559 { chain_id, .. } | + Self::Eip4844 { chain_id, .. } | + Self::Eip2930 { chain_id, .. } | + Self::Eip7702 { chain_id, .. } => Some(*chain_id), + } + } + + fn nonce(&self) -> u64 { + *self.get_nonce() + } + fn gas_limit(&self) -> u64 { *self.get_gas_limit() } + fn gas_price(&self) -> Option { + match self { + Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => Some(*gas_price), + _ => None, + } + } + fn max_fee_per_gas(&self) -> u128 { match self { Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => *gas_price, @@ -733,16 +769,6 @@ impl PoolTransaction for MockTransaction { } } - fn access_list(&self) -> Option<&AccessList> { - match self { - Self::Legacy { .. } => None, - Self::Eip1559 { access_list: accesslist, .. } | - Self::Eip4844 { access_list: accesslist, .. } | - Self::Eip2930 { access_list: accesslist, .. } | - Self::Eip7702 { access_list: accesslist, .. } => Some(accesslist), - } - } - fn max_priority_fee_per_gas(&self) -> Option { match self { Self::Legacy { .. } | Self::Eip2930 { .. 
} => None, @@ -759,33 +785,6 @@ impl PoolTransaction for MockTransaction { } } - /// Calculates the effective tip per gas given a base fee. - fn effective_tip_per_gas(&self, base_fee: u64) -> Option { - // Convert base_fee to u128 for precision in calculations - let base_fee = base_fee as u128; - - // Retrieve the maximum fee per gas - let max_fee_per_gas = self.max_fee_per_gas(); - - // If the maximum fee per gas is less than the base fee, return None - if max_fee_per_gas < base_fee { - return None - } - - // Calculate the fee by subtracting the base fee from the maximum fee per gas - let fee = max_fee_per_gas - base_fee; - - // If the maximum priority fee per gas is available, return the minimum of fee and priority - // fee - if let Some(priority_fee) = self.max_priority_fee_per_gas() { - return Some(fee.min(priority_fee)) - } - - // Otherwise, return the calculated fee - Some(fee) - } - - /// Returns the priority fee or gas price based on the transaction type. fn priority_fee_or_price(&self) -> u128 { match self { Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => *gas_price, @@ -795,7 +794,28 @@ impl PoolTransaction for MockTransaction { } } - /// Returns the transaction kind associated with the transaction. + fn effective_gas_price(&self, base_fee: Option) -> u128 { + base_fee.map_or(self.max_fee_per_gas(), |base_fee| { + // if the tip is greater than the max priority fee per gas, set it to the max + // priority fee per gas + base fee + let tip = self.max_fee_per_gas().saturating_sub(base_fee as u128); + if let Some(max_tip) = self.max_priority_fee_per_gas() { + if tip > max_tip { + max_tip + base_fee as u128 + } else { + // otherwise return the max fee per gas + self.max_fee_per_gas() + } + } else { + self.max_fee_per_gas() + } + }) + } + + fn is_dynamic_fee(&self) -> bool { + !matches!(self, Self::Legacy { .. } | Self::Eip2930 { .. }) + } + fn kind(&self) -> TxKind { match self { Self::Legacy { to, .. } | Self::Eip1559 { to, .. 
} | Self::Eip2930 { to, .. } => *to, @@ -803,7 +823,6 @@ impl PoolTransaction for MockTransaction { } } - /// Returns true if the transaction is a contract creation. fn is_create(&self) -> bool { match self { Self::Legacy { to, .. } | Self::Eip1559 { to, .. } | Self::Eip2930 { to, .. } => { @@ -813,40 +832,41 @@ impl PoolTransaction for MockTransaction { } } - /// Returns the input data associated with the transaction. - fn input(&self) -> &[u8] { - self.get_input() + fn value(&self) -> U256 { + match self { + Self::Legacy { value, .. } | + Self::Eip1559 { value, .. } | + Self::Eip2930 { value, .. } | + Self::Eip4844 { value, .. } | + Self::Eip7702 { value, .. } => *value, + } } - /// Returns the size of the transaction. - fn size(&self) -> usize { - *self.get_size() + fn input(&self) -> &Bytes { + self.get_input() } - /// Returns the transaction type as a byte identifier. - fn tx_type(&self) -> u8 { + fn access_list(&self) -> Option<&AccessList> { match self { - Self::Legacy { .. } => TxType::Legacy.into(), - Self::Eip1559 { .. } => TxType::Eip1559.into(), - Self::Eip4844 { .. } => TxType::Eip4844.into(), - Self::Eip2930 { .. } => TxType::Eip2930.into(), - Self::Eip7702 { .. } => TxType::Eip7702.into(), + Self::Legacy { .. } => None, + Self::Eip1559 { access_list: accesslist, .. } | + Self::Eip4844 { access_list: accesslist, .. } | + Self::Eip2930 { access_list: accesslist, .. } | + Self::Eip7702 { access_list: accesslist, .. } => Some(accesslist), } } - /// Returns the encoded length of the transaction. - fn encoded_length(&self) -> usize { - self.size() + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + match self { + Self::Eip4844 { blob_versioned_hashes, .. } => Some(blob_versioned_hashes), + _ => None, + } } - /// Returns the chain ID associated with the transaction. - fn chain_id(&self) -> Option { + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { match self { - Self::Legacy { chain_id, .. } => *chain_id, - Self::Eip1559 { chain_id, .. 
} | - Self::Eip4844 { chain_id, .. } | - Self::Eip2930 { chain_id, .. } | - Self::Eip7702 { chain_id, .. } => Some(*chain_id), + Self::Eip7702 { authorization_list, .. } => Some(authorization_list), + _ => None, } } } @@ -859,13 +879,6 @@ impl EthPoolTransaction for MockTransaction { } } - fn blob_count(&self) -> usize { - match self { - Self::Eip4844 { sidecar, .. } => sidecar.blobs.len(), - _ => 0, - } - } - fn try_into_pooled_eip4844( self, sidecar: Arc, @@ -897,10 +910,6 @@ impl EthPoolTransaction for MockTransaction { _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())), } } - - fn authorization_count(&self) -> usize { - 0 - } } impl TryFrom> for MockTransaction { @@ -1009,6 +1018,7 @@ impl TryFrom> for MockTransaction { input, access_list, sidecar: BlobTransactionSidecar::default(), + blob_versioned_hashes: Default::default(), size, cost: U256::from(gas_limit) * U256::from(max_fee_per_gas) + value, }), @@ -1202,7 +1212,7 @@ impl MockTransactionFactory { /// Generates a transaction ID for the given [`MockTransaction`]. pub fn tx_id(&mut self, tx: &MockTransaction) -> TransactionId { let sender = self.ids.sender_id_or_create(tx.sender()); - TransactionId::new(sender, tx.nonce()) + TransactionId::new(sender, *tx.get_nonce()) } /// Validates a [`MockTransaction`] and returns a [`MockValidTx`]. 
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index b360cdb7a82c..c3503ca67df9 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -5,14 +5,12 @@ use crate::{ validate::ValidPoolTransaction, AllTransactionsEvents, }; -use alloy_consensus::{ - constants::{EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, - BlockHeader, Signed, Transaction as _, Typed2718, -}; +use alloy_consensus::{BlockHeader, Signed, Typed2718}; use alloy_eips::{ eip2718::Encodable2718, eip2930::AccessList, eip4844::{BlobAndProofV1, BlobTransactionSidecar, BlobTransactionValidationError}, + eip7702::SignedAuthorization, }; use alloy_primitives::{Address, TxHash, TxKind, B256, U256}; use futures_util::{ready, Stream}; @@ -20,10 +18,10 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ kzg::KzgSettings, - transaction::{SignedTransactionIntoRecoveredExt, TryFromRecoveredTransactionError}, - PooledTransaction, Recovered, SealedBlock, Transaction, TransactionSigned, + transaction::{SignedTransactionIntoRecoveredExt, TransactionConversionError}, + PooledTransaction, Recovered, SealedBlock, TransactionSigned, }; -use reth_primitives_traits::{Block, SignedTransaction}; +use reth_primitives_traits::{Block, InMemorySize, SignedTransaction}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ @@ -444,7 +442,7 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns all pending transactions that where submitted as [TransactionOrigin::Local] fn get_local_pending_transactions(&self) -> Vec>> { - self.get_transactions_by_origin(TransactionOrigin::Local) + self.get_pending_transactions_by_origin(TransactionOrigin::Local) } /// Returns all pending transactions that where submitted as [TransactionOrigin::Private] @@ -572,17 +570,20 @@ pub struct AllPoolTransactions { impl AllPoolTransactions { /// Returns an iterator over all 
pending [`Recovered`] transactions. pub fn pending_recovered(&self) -> impl Iterator> + '_ { - self.pending.iter().map(|tx| tx.transaction.clone().into()) + self.pending.iter().map(|tx| tx.transaction.clone().into_consensus()) } /// Returns an iterator over all queued [`Recovered`] transactions. pub fn queued_recovered(&self) -> impl Iterator> + '_ { - self.queued.iter().map(|tx| tx.transaction.clone().into()) + self.queued.iter().map(|tx| tx.transaction.clone().into_consensus()) } /// Returns an iterator over all transactions, both pending and queued. pub fn all(&self) -> impl Iterator> + '_ { - self.pending.iter().chain(self.queued.iter()).map(|tx| tx.transaction.clone().into()) + self.pending + .iter() + .chain(self.queued.iter()) + .map(|tx| tx.transaction.clone().into_consensus()) } } @@ -963,14 +964,12 @@ impl BestTransactionsAttributes { /// This distinction is necessary for the EIP-4844 blob transactions, which require an additional /// sidecar when they are gossiped around the network. It is expected that the `Consensus` format is /// a subset of the `Pooled` format. +/// +/// The assumption is that fallible conversion from `Consensus` to `Pooled` will encapsulate +/// handling of all valid `Consensus` transactions that can't be pooled (e.g Deposit transactions or +/// blob-less EIP-4844 transactions). pub trait PoolTransaction: - fmt::Debug - + Send - + Sync - + Clone - + TryFrom, Error = Self::TryFromConsensusError> - + Into> - + From> + alloy_consensus::Transaction + InMemorySize + fmt::Debug + Send + Sync + Clone { /// Associated error type for the `try_from_consensus` method. type TryFromConsensusError: fmt::Display; @@ -979,13 +978,17 @@ pub trait PoolTransaction: type Consensus: SignedTransaction + From; /// Associated type representing the recovered pooled variant of the transaction. 
- type Pooled: SignedTransaction; + type Pooled: TryFrom + SignedTransaction; /// Define a method to convert from the `Consensus` type to `Self` + /// + /// Note: this _must_ fail on any transactions that cannot be pooled (e.g OP Deposit + /// transactions). fn try_from_consensus( tx: Recovered, ) -> Result { - tx.try_into() + let (tx, signer) = tx.into_parts(); + Ok(Self::from_pooled(Recovered::new_unchecked(tx.try_into()?, signer))) } /// Clone the transaction into a consensus variant. @@ -996,25 +999,18 @@ pub trait PoolTransaction: } /// Define a method to convert from the `Self` type to `Consensus` - fn into_consensus(self) -> Recovered { - self.into() - } + fn into_consensus(self) -> Recovered; /// Define a method to convert from the `Pooled` type to `Self` - fn from_pooled(pooled: Recovered) -> Self { - pooled.into() - } + fn from_pooled(pooled: Recovered) -> Self; /// Tries to convert the `Consensus` type into the `Pooled` type. fn try_into_pooled(self) -> Result, Self::TryFromConsensusError> { - Self::try_consensus_into_pooled(self.into_consensus()) + let consensus = self.into_consensus(); + let (tx, signer) = consensus.into_parts(); + Ok(Recovered::new_unchecked(tx.try_into()?, signer)) } - /// Tries to convert the `Consensus` type into the `Pooled` type. - fn try_consensus_into_pooled( - tx: Recovered, - ) -> Result, Self::TryFromConsensusError>; - /// Converts the `Pooled` type into the `Consensus` type. fn pooled_into_consensus(tx: Self::Pooled) -> Self::Consensus { tx.into() @@ -1029,9 +1025,6 @@ pub trait PoolTransaction: /// Reference to the Sender of the transaction. fn sender_ref(&self) -> &Address; - /// Returns the nonce for this transaction. - fn nonce(&self) -> u64; - /// Returns the cost that this transaction is allowed to consume: /// /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. @@ -1040,87 +1033,11 @@ pub trait PoolTransaction: /// max_blob_fee_per_gas * blob_gas_used`. 
fn cost(&self) -> &U256; - /// Amount of gas that should be used in executing this transaction. This is paid up-front. - fn gas_limit(&self) -> u64; - - /// Returns the EIP-1559 the maximum fee per gas the caller is willing to pay. - /// - /// For legacy transactions this is `gas_price`. - /// - /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). - fn max_fee_per_gas(&self) -> u128; - - /// Returns the `access_list` for the particular transaction type. - /// For Legacy transactions, returns default. - fn access_list(&self) -> Option<&AccessList>; - - /// Returns the EIP-1559 Priority fee the caller is paying to the block author. - /// - /// This will return `None` for non-EIP1559 transactions - fn max_priority_fee_per_gas(&self) -> Option; - - /// Returns the EIP-4844 max fee per data gas - /// - /// This will return `None` for non-EIP4844 transactions - fn max_fee_per_blob_gas(&self) -> Option; - - /// Returns the effective tip for this transaction. - /// - /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. - /// For legacy transactions: `gas_price - base_fee`. - fn effective_tip_per_gas(&self, base_fee: u64) -> Option; - - /// Returns the max priority fee per gas if the transaction is an EIP-1559 transaction, and - /// otherwise returns the gas price. - fn priority_fee_or_price(&self) -> u128; - - /// Returns the transaction's [`TxKind`], which is the address of the recipient or - /// [`TxKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> TxKind; - - /// Returns true if the transaction is a contract creation. - /// We don't provide a default implementation via `kind` as it copies the 21-byte - /// [`TxKind`] for this simple check. A proper implementation shouldn't allocate. - fn is_create(&self) -> bool; - - /// Returns the recipient of the transaction if it is not a [`TxKind::Create`] - /// transaction. - fn to(&self) -> Option
{ - self.kind().to().copied() - } - - /// Returns the input data of this transaction. - fn input(&self) -> &[u8]; - - /// Returns a measurement of the heap usage of this type and all its internals. - fn size(&self) -> usize; - - /// Returns the transaction type - fn tx_type(&self) -> u8; - - /// Returns true if the transaction is an EIP-1559 transaction. - fn is_eip1559(&self) -> bool { - self.tx_type() == EIP1559_TX_TYPE_ID - } - - /// Returns true if the transaction is an EIP-4844 transaction. - fn is_eip4844(&self) -> bool { - self.tx_type() == EIP4844_TX_TYPE_ID - } - - /// Returns true if the transaction is an EIP-7702 transaction. - fn is_eip7702(&self) -> bool { - self.tx_type() == EIP7702_TX_TYPE_ID - } - /// Returns the length of the rlp encoded transaction object /// /// Note: Implementations should cache this value. fn encoded_length(&self) -> usize; - /// Returns `chain_id` - fn chain_id(&self) -> Option; - /// Ensures that the transaction's code size does not exceed the provided `max_init_code_size`. /// /// This is specifically relevant for contract creation transactions ([`TxKind::Create`]), @@ -1149,14 +1066,11 @@ pub trait EthPoolTransaction: PoolTransaction { /// Extracts the blob sidecar from the transaction. fn take_blob(&mut self) -> EthBlobTransactionSidecar; - /// Returns the number of blobs this transaction has. - fn blob_count(&self) -> usize; - /// A specialization for the EIP-4844 transaction type. /// Tries to reattach the blob sidecar to the transaction. /// /// This returns an option, but callers should ensure that the transaction is an EIP-4844 - /// transaction: [`PoolTransaction::is_eip4844`]. + /// transaction: [`Typed2718::is_eip4844`]. fn try_into_pooled_eip4844( self, sidecar: Arc, @@ -1176,9 +1090,6 @@ pub trait EthPoolTransaction: PoolTransaction { blob: &BlobTransactionSidecar, settings: &KzgSettings, ) -> Result<(), BlobTransactionValidationError>; - - /// Returns the number of authorizations this transaction has. 
- fn authorization_count(&self) -> usize; } /// The default [`PoolTransaction`] for the [Pool](crate::Pool) for Ethereum. @@ -1239,9 +1150,22 @@ impl EthPooledTransaction { } } -/// Conversion from the network transaction type to the pool transaction type. -impl From> for EthPooledTransaction { - fn from(tx: Recovered) -> Self { +impl PoolTransaction for EthPooledTransaction { + type TryFromConsensusError = TransactionConversionError; + + type Consensus = TransactionSigned; + + type Pooled = PooledTransaction; + + fn clone_into_consensus(&self) -> Recovered { + self.transaction().clone() + } + + fn into_consensus(self) -> Recovered { + self.transaction + } + + fn from_pooled(tx: Recovered) -> Self { let encoded_length = tx.encode_2718_len(); let (tx, signer) = tx.into_parts(); match tx { @@ -1263,27 +1187,6 @@ impl From> for EthPooledTransaction { } } } -} - -impl PoolTransaction for EthPooledTransaction { - type TryFromConsensusError = TryFromRecoveredTransactionError; - - type Consensus = TransactionSigned; - - type Pooled = PooledTransaction; - - fn clone_into_consensus(&self) -> Recovered { - self.transaction().clone() - } - - fn try_consensus_into_pooled( - tx: Recovered, - ) -> Result, Self::TryFromConsensusError> { - let (tx, signer) = tx.into_parts(); - let pooled = - tx.try_into().map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing)?; - Ok(Recovered::new_unchecked(pooled, signer)) - } /// Returns hash of the transaction. fn hash(&self) -> &TxHash { @@ -1300,11 +1203,6 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.signer_ref() } - /// Returns the nonce for this transaction. - fn nonce(&self) -> u64 { - self.transaction.nonce() - } - /// Returns the cost that this transaction is allowed to consume: /// /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. 
@@ -1315,82 +1213,91 @@ impl PoolTransaction for EthPooledTransaction { &self.cost } - /// Amount of gas that should be used in executing this transaction. This is paid up-front. + /// Returns the length of the rlp encoded object + fn encoded_length(&self) -> usize { + self.encoded_length + } +} + +impl Typed2718 for EthPooledTransaction { + fn ty(&self) -> u8 { + self.transaction.ty() + } +} + +impl InMemorySize for EthPooledTransaction { + fn size(&self) -> usize { + self.transaction.size() + } +} + +impl alloy_consensus::Transaction for EthPooledTransaction { + fn chain_id(&self) -> Option { + self.transaction.chain_id() + } + + fn nonce(&self) -> u64 { + self.transaction.nonce() + } + fn gas_limit(&self) -> u64 { self.transaction.gas_limit() } - /// Returns the EIP-1559 Max base fee the caller is willing to pay. - /// - /// For legacy transactions this is `gas_price`. - /// - /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). - fn max_fee_per_gas(&self) -> u128 { - self.transaction.transaction().max_fee_per_gas() + fn gas_price(&self) -> Option { + self.transaction.gas_price() } - fn access_list(&self) -> Option<&AccessList> { - self.transaction.access_list() + fn max_fee_per_gas(&self) -> u128 { + self.transaction.max_fee_per_gas() } - /// Returns the EIP-1559 Priority fee the caller is paying to the block author. - /// - /// This will return `None` for non-EIP1559 transactions fn max_priority_fee_per_gas(&self) -> Option { - self.transaction.transaction().max_priority_fee_per_gas() + self.transaction.max_priority_fee_per_gas() } fn max_fee_per_blob_gas(&self) -> Option { self.transaction.max_fee_per_blob_gas() } - /// Returns the effective tip for this transaction. - /// - /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. - /// For legacy transactions: `gas_price - base_fee`. 
- fn effective_tip_per_gas(&self, base_fee: u64) -> Option { - self.transaction.effective_tip_per_gas(base_fee) - } - - /// Returns the max priority fee per gas if the transaction is an EIP-1559 transaction, and - /// otherwise returns the gas price. fn priority_fee_or_price(&self) -> u128 { self.transaction.priority_fee_or_price() } - /// Returns the transaction's [`TxKind`], which is the address of the recipient or - /// [`TxKind::Create`] if the transaction is a contract creation. + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.transaction.effective_gas_price(base_fee) + } + + fn is_dynamic_fee(&self) -> bool { + self.transaction.is_dynamic_fee() + } + fn kind(&self) -> TxKind { self.transaction.kind() } - /// Returns true if the transaction is a contract creation. fn is_create(&self) -> bool { self.transaction.is_create() } - fn input(&self) -> &[u8] { - self.transaction.input() + fn value(&self) -> U256 { + self.transaction.value() } - /// Returns a measurement of the heap usage of this type and all its internals. 
- fn size(&self) -> usize { - self.transaction.transaction().input().len() + fn input(&self) -> &revm_primitives::Bytes { + self.transaction.input() } - /// Returns the transaction type - fn tx_type(&self) -> u8 { - self.transaction.ty() + fn access_list(&self) -> Option<&AccessList> { + self.transaction.access_list() } - /// Returns the length of the rlp encoded object - fn encoded_length(&self) -> usize { - self.encoded_length + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.transaction.blob_versioned_hashes() } - /// Returns `chain_id` - fn chain_id(&self) -> Option { - self.transaction.chain_id() + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.transaction.authorization_list() } } @@ -1403,13 +1310,6 @@ impl EthPoolTransaction for EthPooledTransaction { } } - fn blob_count(&self) -> usize { - match self.transaction.transaction() { - Transaction::Eip4844(tx) => tx.blob_versioned_hashes.len(), - _ => 0, - } - } - fn try_into_pooled_eip4844( self, sidecar: Arc, @@ -1438,50 +1338,10 @@ impl EthPoolTransaction for EthPooledTransaction { settings: &KzgSettings, ) -> Result<(), BlobTransactionValidationError> { match self.transaction.transaction() { - Transaction::Eip4844(tx) => tx.validate_blob(sidecar, settings), - _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())), + reth_primitives::Transaction::Eip4844(tx) => tx.validate_blob(sidecar, settings), + _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.ty())), } } - - fn authorization_count(&self) -> usize { - match self.transaction.transaction() { - Transaction::Eip7702(tx) => tx.authorization_list.len(), - _ => 0, - } - } -} - -impl TryFrom> for EthPooledTransaction { - type Error = TryFromRecoveredTransactionError; - - fn try_from(tx: Recovered) -> Result { - // ensure we can handle the transaction type and its format - match tx.ty() { - 0..=EIP1559_TX_TYPE_ID | EIP7702_TX_TYPE_ID => { - // supported - } - EIP4844_TX_TYPE_ID => { - // 
doesn't have a blob sidecar - return Err(TryFromRecoveredTransactionError::BlobSidecarMissing); - } - unsupported => { - // unsupported transaction type - return Err(TryFromRecoveredTransactionError::UnsupportedTransactionType( - unsupported, - )) - } - }; - - let encoded_length = tx.encode_2718_len(); - let transaction = Self::new(tx, encoded_length); - Ok(transaction) - } -} - -impl From for Recovered { - fn from(tx: EthPooledTransaction) -> Self { - tx.transaction - } } /// Represents the blob sidecar of the [`EthPooledTransaction`]. @@ -1640,7 +1500,7 @@ mod tests { use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; use alloy_eips::eip4844::DATA_GAS_PER_BLOB; use alloy_primitives::PrimitiveSignature as Signature; - use reth_primitives::TransactionSigned; + use reth_primitives::{Transaction, TransactionSigned}; #[test] fn test_pool_size_invariants() { diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 0e80a096d962..3d1f8e9f5254 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -19,10 +19,9 @@ use alloy_consensus::{ BlockHeader, }; use alloy_eips::{ - eip1559::ETHEREUM_BLOCK_GAS_LIMIT, - eip4844::{env_settings::EnvKzgSettings, MAX_BLOBS_PER_BLOCK}, + eip1559::ETHEREUM_BLOCK_GAS_LIMIT, eip4844::env_settings::EnvKzgSettings, eip7840::BlobParams, }; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_primitives::{InvalidTransactionError, SealedBlock}; use reth_primitives_traits::{Block, GotExpected}; use reth_storage_api::{StateProvider, StateProviderFactory}; @@ -45,9 +44,11 @@ pub struct EthTransactionValidator { impl EthTransactionValidator { /// Returns the configured chain spec - #[allow(clippy::missing_const_for_fn)] - pub fn chain_spec(&self) -> &Arc { - &self.inner.chain_spec + pub fn chain_spec(&self) -> Arc + where + Client: 
ChainSpecProvider, + { + self.client().chain_spec() } /// Returns the configured client @@ -59,7 +60,7 @@ impl EthTransactionValidator { impl EthTransactionValidator where - Client: StateProviderFactory, + Client: ChainSpecProvider + StateProviderFactory, Tx: EthPoolTransaction, { /// Validates a single transaction. @@ -88,7 +89,7 @@ where impl TransactionValidator for EthTransactionValidator where - Client: StateProviderFactory, + Client: ChainSpecProvider + StateProviderFactory, Tx: EthPoolTransaction, { type Transaction = Tx; @@ -132,8 +133,6 @@ where /// And adheres to the configured [`LocalTransactionConfig`]. #[derive(Debug)] pub(crate) struct EthTransactionValidatorInner { - /// Spec of the chain - chain_spec: Arc, /// This type fetches account info from the db client: Client, /// Blobstore used for fetching re-injected blob transactions. @@ -164,19 +163,24 @@ pub(crate) struct EthTransactionValidatorInner { // === impl EthTransactionValidatorInner === -impl EthTransactionValidatorInner { +impl EthTransactionValidatorInner { /// Returns the configured chain id #[allow(clippy::missing_const_for_fn)] pub(crate) fn chain_id(&self) -> u64 { - self.chain_spec.chain().id() + self.client.chain_spec().chain().id() } } impl EthTransactionValidatorInner where - Client: StateProviderFactory, + Client: ChainSpecProvider + StateProviderFactory, Tx: EthPoolTransaction, { + /// Returns the configured chain spec + fn chain_spec(&self) -> Arc { + self.client.chain_spec() + } + /// Validates a single transaction using an optional cached state provider. /// If no provider is passed, a new one will be created. This allows reusing /// the same provider across multiple txs. 
@@ -187,7 +191,7 @@ where maybe_state: &mut Option>, ) -> TransactionValidationOutcome { // Checks for tx_type - match transaction.tx_type() { + match transaction.ty() { LEGACY_TX_TYPE_ID => { // Accept legacy transactions } @@ -304,7 +308,7 @@ where ) } - if transaction.authorization_count() == 0 { + if transaction.authorization_list().is_none_or(|l| l.is_empty()) { return TransactionValidationOutcome::Invalid( transaction, Eip7702PoolTransactionError::MissingEip7702AuthorizationList.into(), @@ -326,7 +330,8 @@ where ) } - let blob_count = transaction.blob_count(); + let blob_count = + transaction.blob_versioned_hashes().map(|b| b.len() as u64).unwrap_or(0); if blob_count == 0 { // no blobs return TransactionValidationOutcome::Invalid( @@ -337,14 +342,14 @@ where ) } - if blob_count > MAX_BLOBS_PER_BLOCK { - // too many blobs + let max_blob_count = self.fork_tracker.max_blob_count(); + if blob_count > max_blob_count { return TransactionValidationOutcome::Invalid( transaction, InvalidPoolTransactionError::Eip4844( Eip4844PoolTransactionError::TooManyEip4844Blobs { have: blob_count, - permitted: MAX_BLOBS_PER_BLOCK, + permitted: max_blob_count, }, ), ) @@ -512,18 +517,26 @@ where fn on_new_head_block(&self, new_tip_block: &T) { // update all forks - if self.chain_spec.is_cancun_active_at_timestamp(new_tip_block.timestamp()) { + if self.chain_spec().is_cancun_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.cancun.store(true, std::sync::atomic::Ordering::Relaxed); } - if self.chain_spec.is_shanghai_active_at_timestamp(new_tip_block.timestamp()) { + if self.chain_spec().is_shanghai_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.shanghai.store(true, std::sync::atomic::Ordering::Relaxed); } - if self.chain_spec.is_prague_active_at_timestamp(new_tip_block.timestamp()) { + if self.chain_spec().is_prague_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.prague.store(true, std::sync::atomic::Ordering::Relaxed); } + if 
let Some(blob_params) = + self.chain_spec().blob_params_at_timestamp(new_tip_block.timestamp()) + { + self.fork_tracker + .max_blob_count + .store(blob_params.max_blob_count, std::sync::atomic::Ordering::Relaxed); + } + self.block_gas_limit.store(new_tip_block.gas_limit(), std::sync::atomic::Ordering::Relaxed); } @@ -534,14 +547,16 @@ where /// A builder for [`TransactionValidationTaskExecutor`] #[derive(Debug)] -pub struct EthTransactionValidatorBuilder { - chain_spec: Arc, +pub struct EthTransactionValidatorBuilder { + client: Client, /// Fork indicator whether we are in the Shanghai stage. shanghai: bool, /// Fork indicator whether we are in the Cancun hardfork. cancun: bool, /// Fork indicator whether we are in the Cancun hardfork. prague: bool, + /// Max blob count at the block's timestamp. + max_blob_count: u64, /// Whether using EIP-2718 type transactions is allowed eip2718: bool, /// Whether using EIP-1559 type transactions is allowed @@ -567,8 +582,8 @@ pub struct EthTransactionValidatorBuilder { max_tx_input_bytes: usize, } -impl EthTransactionValidatorBuilder { - /// Creates a new builder for the given [`ChainSpec`] +impl EthTransactionValidatorBuilder { + /// Creates a new builder for the given client /// /// By default this assumes the network is on the `Cancun` hardfork and the following /// transactions are allowed: @@ -576,10 +591,10 @@ impl EthTransactionValidatorBuilder { /// - EIP-2718 /// - EIP-1559 /// - EIP-4844 - pub fn new(chain_spec: Arc) -> Self { + pub fn new(client: Client) -> Self { Self { block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT.into(), - chain_spec, + client, minimum_priority_fee: None, additional_tasks: 1, kzg_settings: EnvKzgSettings::Default, @@ -600,6 +615,9 @@ impl EthTransactionValidatorBuilder { // prague not yet activated prague: false, + + // max blob count is cancun by default + max_blob_count: BlobParams::cancun().max_blob_count, } } @@ -699,10 +717,19 @@ impl EthTransactionValidatorBuilder { /// Configures validation 
rules based on the head block's timestamp. /// /// For example, whether the Shanghai and Cancun hardfork is activated at launch. - pub fn with_head_timestamp(mut self, timestamp: u64) -> Self { - self.cancun = self.chain_spec.is_cancun_active_at_timestamp(timestamp); - self.shanghai = self.chain_spec.is_shanghai_active_at_timestamp(timestamp); - self.prague = self.chain_spec.is_prague_active_at_timestamp(timestamp); + pub fn with_head_timestamp(mut self, timestamp: u64) -> Self + where + Client: ChainSpecProvider, + { + self.cancun = self.client.chain_spec().is_cancun_active_at_timestamp(timestamp); + self.shanghai = self.client.chain_spec().is_shanghai_active_at_timestamp(timestamp); + self.prague = self.client.chain_spec().is_prague_active_at_timestamp(timestamp); + self.max_blob_count = self + .client + .chain_spec() + .blob_params_at_timestamp(timestamp) + .unwrap_or_else(BlobParams::cancun) + .max_blob_count; self } @@ -721,16 +748,12 @@ impl EthTransactionValidatorBuilder { } /// Builds a the [`EthTransactionValidator`] without spawning validator tasks. - pub fn build( - self, - client: Client, - blob_store: S, - ) -> EthTransactionValidator + pub fn build(self, blob_store: S) -> EthTransactionValidator where S: BlobStore, { let Self { - chain_spec, + client, shanghai, cancun, prague, @@ -746,14 +769,20 @@ impl EthTransactionValidatorBuilder { .. } = self; + let max_blob_count = if prague { + BlobParams::prague().max_blob_count + } else { + BlobParams::cancun().max_blob_count + }; + let fork_tracker = ForkTracker { shanghai: AtomicBool::new(shanghai), cancun: AtomicBool::new(cancun), prague: AtomicBool::new(prague), + max_blob_count: AtomicU64::new(max_blob_count), }; let inner = EthTransactionValidatorInner { - chain_spec, client, eip2718, eip1559, @@ -778,9 +807,8 @@ impl EthTransactionValidatorBuilder { /// The validator will spawn `additional_tasks` additional tasks for validation. /// /// By default this will spawn 1 additional task. 
- pub fn build_with_tasks( + pub fn build_with_tasks( self, - client: Client, tasks: T, blob_store: S, ) -> TransactionValidationTaskExecutor> @@ -789,7 +817,7 @@ impl EthTransactionValidatorBuilder { S: BlobStore, { let additional_tasks = self.additional_tasks; - let validator = self.build(client, blob_store); + let validator = self.build(blob_store); let (tx, task) = ValidationTask::new(); @@ -825,6 +853,8 @@ pub struct ForkTracker { pub cancun: AtomicBool, /// Tracks if prague is activated at the block's timestamp. pub prague: AtomicBool, + /// Tracks max blob count at the block's timestamp. + pub max_blob_count: AtomicU64, } impl ForkTracker { @@ -842,6 +872,11 @@ impl ForkTracker { pub fn is_prague_activated(&self) -> bool { self.prague.load(std::sync::atomic::Ordering::Relaxed) } + + /// Returns the max blob count. + pub fn max_blob_count(&self) -> u64 { + self.max_blob_count.load(std::sync::atomic::Ordering::Relaxed) + } } /// Ensures that gas limit of the transaction exceeds the intrinsic gas of the transaction. 
@@ -865,7 +900,7 @@ pub fn ensure_intrinsic_gas( transaction.input(), transaction.is_create(), transaction.access_list().map(|list| list.0.as_slice()).unwrap_or(&[]), - transaction.authorization_count() as u64, + transaction.authorization_list().map(|l| l.len()).unwrap_or(0) as u64, ); let gas_limit = transaction.gas_limit(); @@ -883,9 +918,9 @@ mod tests { blobstore::InMemoryBlobStore, error::PoolErrorKind, traits::PoolTransaction, CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionPool, }; + use alloy_consensus::Transaction; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; - use reth_chainspec::MAINNET; use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, PooledTransaction}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; @@ -895,15 +930,19 @@ mod tests { let data = hex::decode(raw).unwrap(); let tx = PooledTransaction::decode_2718(&mut data.as_ref()).unwrap(); - tx.try_into_recovered().unwrap().into() + EthPooledTransaction::from_pooled(tx.try_into_recovered().unwrap()) } // #[tokio::test] async fn validate_transaction() { let transaction = get_transaction(); - let mut fork_tracker = - ForkTracker { shanghai: false.into(), cancun: false.into(), prague: false.into() }; + let mut fork_tracker = ForkTracker { + shanghai: false.into(), + cancun: false.into(), + prague: false.into(), + max_blob_count: 0.into(), + }; let res = ensure_intrinsic_gas(&transaction, &fork_tracker); assert!(res.is_ok()); @@ -918,8 +957,7 @@ mod tests { ExtendedAccount::new(transaction.nonce(), U256::MAX), ); let blob_store = InMemoryBlobStore::default(); - let validator = EthTransactionValidatorBuilder::new(MAINNET.clone()) - .build(provider, blob_store.clone()); + let validator = EthTransactionValidatorBuilder::new(provider).build(blob_store.clone()); let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); @@ -946,9 +984,9 @@ mod tests { ); let blob_store = InMemoryBlobStore::default(); - 
let validator = EthTransactionValidatorBuilder::new(MAINNET.clone()) + let validator = EthTransactionValidatorBuilder::new(provider) .set_block_gas_limit(1_000_000) // tx gas limit is 1_015_288 - .build(provider, blob_store.clone()); + .build(blob_store.clone()); let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 0ba716e0bffe..6cc04f7b83e7 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -282,7 +282,7 @@ impl ValidPoolTransaction { /// Returns the type identifier of the transaction pub fn tx_type(&self) -> u8 { - self.transaction.tx_type() + self.transaction.ty() } /// Returns the address of the sender diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index 22cc84bd9df1..b630ea7eff21 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -7,7 +7,6 @@ use crate::{ TransactionValidator, }; use futures_util::{lock::Mutex, StreamExt}; -use reth_chainspec::ChainSpec; use reth_primitives::SealedBlock; use reth_primitives_traits::Block; use reth_tasks::TaskSpawner; @@ -93,8 +92,8 @@ pub struct TransactionValidationTaskExecutor { impl TransactionValidationTaskExecutor<()> { /// Convenience method to create a [`EthTransactionValidatorBuilder`] - pub fn eth_builder(chain_spec: Arc) -> EthTransactionValidatorBuilder { - EthTransactionValidatorBuilder::new(chain_spec) + pub fn eth_builder(client: Client) -> EthTransactionValidatorBuilder { + EthTransactionValidatorBuilder::new(client) } } @@ -112,23 +111,18 @@ impl TransactionValidationTaskExecutor { } impl TransactionValidationTaskExecutor> { - /// Creates a new instance for the given [`ChainSpec`] + /// Creates a new instance for the given client /// /// This will spawn a single validation tasks that performs 
the actual validation. /// See [`TransactionValidationTaskExecutor::eth_with_additional_tasks`] - pub fn eth( - client: Client, - chain_spec: Arc, - blob_store: S, - tasks: T, - ) -> Self + pub fn eth(client: Client, blob_store: S, tasks: T) -> Self where T: TaskSpawner, { - Self::eth_with_additional_tasks(client, chain_spec, blob_store, tasks, 0) + Self::eth_with_additional_tasks(client, blob_store, tasks, 0) } - /// Creates a new instance for the given [`ChainSpec`] + /// Creates a new instance for the given client /// /// By default this will enable support for: /// - shanghai @@ -139,7 +133,6 @@ impl TransactionValidationTaskExecutor( client: Client, - chain_spec: Arc, blob_store: S, tasks: T, num_additional_tasks: usize, @@ -147,9 +140,9 @@ impl TransactionValidationTaskExecutor(client, tasks, blob_store) + .build_with_tasks::(tasks, blob_store) } } diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index 3b74b8cb2300..eb677a6dee4e 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -1,5 +1,6 @@ //! Transaction pool eviction tests. 
+use alloy_consensus::Transaction; use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::{Address, B256}; use rand::distributions::Uniform; @@ -8,8 +9,7 @@ use reth_transaction_pool::{ test_utils::{ MockFeeRange, MockTransactionDistribution, MockTransactionRatio, TestPool, TestPoolBuilder, }, - BlockInfo, PoolConfig, PoolTransaction, SubPoolLimit, TransactionOrigin, TransactionPool, - TransactionPoolExt, + BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionPool, TransactionPoolExt, }; #[tokio::test(flavor = "multi_thread")] diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 4e00373250e8..ca882dd3230e 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -28,6 +28,9 @@ derive_more.workspace = true itertools = { workspace = true, features = ["use_alloc"] } nybbles = { workspace = true, features = ["rlp"] } +# reth +revm.workspace = true + # `serde` feature serde = { workspace = true, optional = true } @@ -38,6 +41,9 @@ hash-db = { version = "=0.15.2", optional = true } plain_hasher = { version = "0.2", optional = true } arbitrary = { workspace = true, features = ["derive"], optional = true } +# misc +rayon = { workspace = true, optional = true } + [dev-dependencies] reth-primitives-traits = { workspace = true, features = ["serde"] } reth-codecs.workspace = true @@ -74,6 +80,7 @@ std = [ "serde?/std", "serde_with?/std", "serde_json/std", + "revm/std", ] eip1186 = [ "alloy-rpc-types-eth/serde", @@ -89,6 +96,7 @@ serde = [ "alloy-rpc-types-eth?/serde", "reth-primitives-traits/serde", "reth-codecs?/serde", + "revm/serde", ] reth-codec = [ "dep:reth-codecs", @@ -106,6 +114,7 @@ test-utils = [ "arbitrary", "reth-primitives-traits/test-utils", "reth-codecs/test-utils", + "revm/test-utils", ] arbitrary = [ "std", @@ -119,7 +128,10 @@ arbitrary = [ "nybbles/arbitrary", "reth-codecs/arbitrary", "alloy-rpc-types-eth?/arbitrary", + "revm/arbitrary", ] +rayon = 
["dep:rayon"] +scroll = ["revm/scroll"] [[bench]] name = "prefix_set" diff --git a/crates/trie/trie/src/state.rs b/crates/trie/common/src/hashedstate.rs similarity index 90% rename from crates/trie/trie/src/state.rs rename to crates/trie/common/src/hashedstate.rs index bd082d4660d9..be41b197bdc3 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/common/src/hashedstate.rs @@ -1,18 +1,22 @@ use crate::{ prefix_set::{PrefixSetMut, TriePrefixSetsMut}, - Nibbles, + KeyHasher, Nibbles, }; +use alloc::{borrow::Cow, vec::Vec}; use alloy_primitives::{ keccak256, map::{hash_map, B256HashMap, B256HashSet, HashMap, HashSet}, Address, B256, U256, }; use itertools::Itertools; -use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_primitives_traits::Account; -use reth_trie_common::KeyHasher; use revm::db::{AccountStatus, BundleAccount}; -use std::borrow::Cow; + +#[cfg(feature = "rayon")] +pub use rayon::*; + +#[cfg(feature = "rayon")] +use rayon::prelude::{IntoParallelIterator, ParallelIterator}; /// Representation of in-memory hashed state. #[derive(PartialEq, Eq, Clone, Default, Debug)] @@ -28,6 +32,7 @@ impl HashedPostState { /// Hashes all changed accounts and storage entries that are currently stored in the bundle /// state. #[inline] + #[cfg(feature = "rayon")] pub fn from_bundle_state<'a, KH: KeyHasher>( state: impl IntoParallelIterator, ) -> Self { @@ -55,6 +60,37 @@ impl HashedPostState { Self { accounts, storages } } + /// Initialize [`HashedPostState`] from bundle state. + /// Hashes all changed accounts and storage entries that are currently stored in the bundle + /// state. 
+ #[cfg(not(feature = "rayon"))] + pub fn from_bundle_state<'a, KH: KeyHasher>( + state: impl IntoIterator, + ) -> Self { + let hashed = state + .into_iter() + .map(|(address, account)| { + let hashed_address = KH::hash_key(address); + let hashed_account = account.info.as_ref().map(Into::into); + let hashed_storage = HashedStorage::from_plain_storage( + account.status, + account.storage.iter().map(|(slot, value)| (slot, &value.present_value)), + ); + (hashed_address, (hashed_account, hashed_storage)) + }) + .collect::, HashedStorage))>>(); + + let mut accounts = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); + let mut storages = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); + for (address, (account, storage)) in hashed { + accounts.insert(address, account); + if !storage.is_empty() { + storages.insert(address, storage); + } + } + Self { accounts, storages } + } + /// Construct [`HashedPostState`] from a single [`HashedStorage`]. pub fn from_hashed_storage(hashed_address: B256, storage: HashedStorage) -> Self { Self { @@ -260,9 +296,9 @@ impl HashedStorage { #[derive(PartialEq, Eq, Clone, Default, Debug)] pub struct HashedPostStateSorted { /// Updated state of accounts. - pub(crate) accounts: HashedAccountsSorted, + pub accounts: HashedAccountsSorted, /// Map of hashed addresses to hashed storage. - pub(crate) storages: B256HashMap, + pub storages: B256HashMap, } impl HashedPostStateSorted { @@ -289,9 +325,9 @@ impl HashedPostStateSorted { #[derive(Clone, Eq, PartialEq, Default, Debug)] pub struct HashedAccountsSorted { /// Sorted collection of hashed addresses and their account info. - pub(crate) accounts: Vec<(B256, Account)>, + pub accounts: Vec<(B256, Account)>, /// Set of destroyed account keys. 
- pub(crate) destroyed_accounts: B256HashSet, + pub destroyed_accounts: B256HashSet, } impl HashedAccountsSorted { @@ -309,11 +345,11 @@ impl HashedAccountsSorted { #[derive(Clone, Eq, PartialEq, Debug)] pub struct HashedStorageSorted { /// Sorted hashed storage slots with non-zero value. - pub(crate) non_zero_valued_slots: Vec<(B256, U256)>, + pub non_zero_valued_slots: Vec<(B256, U256)>, /// Slots that have been zero valued. - pub(crate) zero_valued_slots: B256HashSet, + pub zero_valued_slots: B256HashSet, /// Flag indicating whether the storage was wiped or not. - pub(crate) wiped: bool, + pub wiped: bool, } impl HashedStorageSorted { @@ -335,8 +371,8 @@ impl HashedStorageSorted { #[cfg(test)] mod tests { use super::*; + use crate::KeccakKeyHasher; use alloy_primitives::Bytes; - use reth_trie_common::KeccakKeyHasher; use revm::{ db::{states::StorageSlot, StorageWithOriginalValues}, primitives::{AccountInfo, Bytecode}, diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index 093c2969bd5e..81434d052846 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -11,6 +11,10 @@ extern crate alloc; +/// In-memory hashed state. +mod hashedstate; +pub use hashedstate::*; + /// The implementation of hash builder. pub mod hash_builder; diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 2dadd22f2e73..580cdf05bb76 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -78,12 +78,8 @@ impl ParallelProof { impl ParallelProof where - Factory: DatabaseProviderFactory - + StateCommitmentProvider - + Clone - + Send - + Sync - + 'static, + Factory: + DatabaseProviderFactory + StateCommitmentProvider + Clone + 'static, { /// Generate a state multiproof according to specified targets. 
pub fn multiproof( diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 3c717f85b208..7c5c62dade2f 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -18,7 +18,7 @@ reth-primitives-traits.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie-sparse.workspace = true -reth-trie-common.workspace = true +reth-trie-common = { workspace = true, features = ["rayon"] } revm.workspace = true @@ -33,7 +33,6 @@ alloy-trie.workspace = true tracing.workspace = true # misc -rayon.workspace = true auto_impl.workspace = true itertools.workspace = true @@ -69,6 +68,7 @@ serde = [ "revm/serde", "reth-trie-common/serde", "reth-primitives-traits/serde", + "reth-stages-types/serde", ] test-utils = [ "triehash", diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index cf16c26f0ab1..18e58fdba40c 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -1,11 +1,9 @@ use super::{HashedCursor, HashedCursorFactory, HashedStorageCursor}; -use crate::{ - forward_cursor::ForwardInMemoryCursor, HashedAccountsSorted, HashedPostStateSorted, - HashedStorageSorted, -}; +use crate::forward_cursor::ForwardInMemoryCursor; use alloy_primitives::{map::B256HashSet, B256, U256}; use reth_primitives_traits::Account; use reth_storage_errors::db::DatabaseError; +use reth_trie_common::{HashedAccountsSorted, HashedPostStateSorted, HashedStorageSorted}; /// The hashed cursor factory for the post state. #[derive(Clone, Debug)] diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index 1e7eeb9b52b8..ce426843a058 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -4,6 +4,7 @@ //! //! ## Feature Flags //! +//! - `rayon`: uses rayon for parallel [`HashedPostState`] creation. //! 
- `test-utils`: Export utilities for testing #![doc( @@ -28,10 +29,6 @@ pub mod walker; /// The iterators for traversing existing intermediate hashes and updated trie leaves. pub mod node_iter; -/// In-memory hashed state. -mod state; -pub use state::*; - /// Input for trie computation. mod input; pub use input::TrieInput; diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index ce3a8f7ac393..ba19ffbca17f 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -3,8 +3,9 @@ use crate::{ prefix_set::TriePrefixSetsMut, proof::{Proof, ProofBlindedProviderFactory}, trie_cursor::TrieCursorFactory, - HashedPostState, }; +use reth_trie_common::HashedPostState; + use alloy_primitives::{ keccak256, map::{B256HashMap, B256HashSet, Entry, HashMap}, diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 0934ea04a0f2..48b16ced1ccf 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -123,7 +123,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -175,7 +175,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -193,7 +193,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -245,7 +245,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -263,7 +263,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -315,7 +315,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -333,7 +333,7 @@ { "datasource": { "type": 
"prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -385,7 +385,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -403,7 +403,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -455,7 +455,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -473,7 +473,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -525,7 +525,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -543,7 +543,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -604,7 +604,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -622,7 +622,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The checkpoints mark the last block a stage can recover from in the case of a crash or shutdown of the node", "fieldConfig": { @@ -680,7 +680,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -731,7 +731,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -782,7 +782,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_db_table_size{instance=~\"$instance\"})", @@ -793,7 +793,7 @@ { "datasource": { "type": 
"prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_db_freelist{instance=~\"$instance\"} * reth_db_page_size{instance=~\"$instance\"})", @@ -806,7 +806,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_static_files_segment_size{instance=~\"$instance\"})", @@ -819,7 +819,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_db_table_size{instance=~\"$instance\"}) + sum(reth_db_freelist{instance=~\"$instance\"} * reth_db_page_size{instance=~\"$instance\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\"})", @@ -837,7 +837,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -917,7 +917,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_sync_entities_processed{instance=~\"$instance\"} / reth_sync_entities_total{instance=~\"$instance\"}", @@ -932,7 +932,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -1013,7 +1013,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_sync_checkpoint{instance=~\"$instance\"}", @@ -1028,7 +1028,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Tracks the number of critical tasks currently ran by the executor.", "fieldConfig": { @@ -1111,7 +1111,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_executor_spawn_critical_tasks_total{instance=\"$instance\"}- 
reth_executor_spawn_finished_critical_tasks_total{instance=\"$instance\"}", @@ -1128,7 +1128,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Tracks the number of regular tasks currently ran by the executor.", "fieldConfig": { @@ -1224,7 +1224,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1242,7 +1242,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_executor_spawn_regular_tasks_total{instance=\"$instance\"}- reth_executor_spawn_finished_regular_tasks_total{instance=\"$instance\"}", @@ -1273,7 +1273,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The average commit time for database transactions. Generally, this should not be a limiting factor in syncing.", "fieldConfig": { @@ -1353,7 +1353,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -1371,7 +1371,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -1443,7 +1443,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -1461,7 +1461,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The average time a database transaction was open.", "fieldConfig": { @@ -1541,7 +1541,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -1559,7 +1559,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The maximum 
time the database transaction was open.", "fieldConfig": { @@ -1638,7 +1638,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -1656,7 +1656,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -1769,7 +1769,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -1787,7 +1787,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -1815,7 +1815,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -1928,7 +1928,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1946,7 +1946,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1978,7 +1978,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The size of tables in the database", "fieldConfig": { @@ -2035,7 +2035,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_db_table_size{instance=~\"$instance\"}", @@ -2051,7 +2051,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The maximum time the database transaction operation which inserts a large value took.", "fieldConfig": { @@ -2134,7 +2134,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -2152,7 +2152,7 @@ { 
"datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The type of the pages in the database:\n\n- **Leaf** pages contain KV pairs.\n- **Branch** pages contain information about keys in the leaf pages\n- **Overflow** pages store large values and should generally be avoided if possible", "fieldConfig": { @@ -2206,7 +2206,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "sum by (type) ( reth_db_table_pages{instance=~\"$instance\"} )", @@ -2221,7 +2221,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The size of the database over time", "fieldConfig": { @@ -2305,7 +2305,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by (job) ( reth_db_table_size{instance=~\"$instance\"} )", @@ -2320,7 +2320,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of pages on the MDBX freelist", "fieldConfig": { @@ -2402,7 +2402,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_db_freelist{instance=~\"$instance\"}) by (job)", @@ -2417,7 +2417,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -2561,7 +2561,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -2592,7 +2592,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The size of segments in the static files", "fieldConfig": { @@ -2649,7 +2649,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, 
"editorMode": "code", "expr": "reth_static_files_segment_size{instance=~\"$instance\"}", @@ -2665,7 +2665,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -2797,7 +2797,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -2815,7 +2815,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -2947,7 +2947,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -2965,7 +2965,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The size of the static files over time", "fieldConfig": { @@ -3047,7 +3047,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by (job) ( reth_static_files_segment_size{instance=~\"$instance\"} )", @@ -3062,7 +3062,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The maximum time the static files operation which commits a writer took.", "fieldConfig": { @@ -3144,7 +3144,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "max(max_over_time(reth_static_files_jar_provider_write_duration_seconds{instance=~\"$instance\", operation=\"commit-writer\", quantile=\"1\"}[$__interval]) > 0) by (segment)", @@ -3173,7 +3173,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The throughput of the node's executor. 
The metric is the amount of gas processed in a block, divided by the time it took to process the block.\n\nNote: For mainnet, the block range 2,383,397-2,620,384 will be slow because of the 2016 DoS attack.", "fieldConfig": { @@ -3251,7 +3251,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sync_execution_gas_per_second{instance=~\"$instance\"}", @@ -3262,7 +3262,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3278,7 +3278,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3294,7 +3294,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3310,7 +3310,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3326,7 +3326,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3342,7 +3342,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3362,7 +3362,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -3443,7 +3443,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3459,7 +3459,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3630,7 +3630,7 @@ { "datasource": { "type": "prometheus", 
- "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -3735,7 +3735,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_headers_total_downloaded{instance=~\"$instance\"}", @@ -3746,7 +3746,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_headers_total_flushed{instance=~\"$instance\"}", @@ -3758,7 +3758,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_total_downloaded{instance=~\"$instance\"}[$__rate_interval])", @@ -3771,7 +3771,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_total_flushed{instance=~\"$instance\"}[$__rate_interval])", @@ -3787,7 +3787,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Internal errors in the header downloader. 
These are expected to happen from time to time.", "fieldConfig": { @@ -3869,7 +3869,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_timeout_errors{instance=~\"$instance\"}[$__rate_interval])", @@ -3880,7 +3880,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_unexpected_errors{instance=~\"$instance\"}[$__rate_interval])", @@ -3892,7 +3892,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_validation_errors{instance=~\"$instance\"}[$__rate_interval])", @@ -3908,7 +3908,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of connected peers and in-progress requests for headers.", "fieldConfig": { @@ -3989,7 +3989,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_headers_in_flight_requests{instance=~\"$instance\"}", @@ -4000,7 +4000,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_network_connected_peers{instance=~\"$instance\"}", @@ -4030,7 +4030,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The internal state of the headers downloader: the number of downloaded headers, and the number of headers sent to the header stage.", "fieldConfig": { @@ -4145,7 +4145,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_total_downloaded{instance=~\"$instance\"}", @@ -4156,7 +4156,7 @@ { "datasource": { 
"type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_total_flushed{instance=~\"$instance\"}", @@ -4168,7 +4168,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_total_flushed{instance=~\"$instance\"}[$__rate_interval])", @@ -4180,7 +4180,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_total_downloaded{instance=~\"$instance\"}[$__rate_interval])", @@ -4192,7 +4192,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_buffered_responses{instance=~\"$instance\"}", @@ -4204,7 +4204,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_buffered_blocks{instance=~\"$instance\"}", @@ -4216,7 +4216,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_queued_blocks{instance=~\"$instance\"}", @@ -4232,7 +4232,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Internal errors in the bodies downloader. 
These are expected to happen from time to time.", "fieldConfig": { @@ -4311,7 +4311,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_timeout_errors{instance=~\"$instance\"}[$__rate_interval])", @@ -4322,7 +4322,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_unexpected_errors{instance=~\"$instance\"}[$__rate_interval])", @@ -4334,7 +4334,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_validation_errors{instance=~\"$instance\"}[$__rate_interval])", @@ -4350,7 +4350,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of connected peers and in-progress requests for bodies.", "fieldConfig": { @@ -4431,7 +4431,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_in_flight_requests{instance=~\"$instance\"}", @@ -4442,7 +4442,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_network_connected_peers{instance=~\"$instance\"}", @@ -4458,7 +4458,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of blocks and size in bytes of those blocks", "fieldConfig": { @@ -4557,7 +4557,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_buffered_blocks_size_bytes{instance=~\"$instance\"}", @@ -4569,7 +4569,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": 
"${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_buffered_blocks{instance=~\"$instance\"}", @@ -4585,7 +4585,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of blocks in a request and size in bytes of those block responses", "fieldConfig": { @@ -4684,7 +4684,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_response_response_size_bytes{instance=~\"$instance\"}", @@ -4696,7 +4696,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_response_response_length{instance=~\"$instance\"}", @@ -4708,7 +4708,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_response_response_size_bytes / reth_downloaders_bodies_response_response_length{instance=~\"$instance\"}", @@ -4739,7 +4739,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The block number of the tip of the canonical chain from the blockchain tree.", "fieldConfig": { @@ -4820,7 +4820,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_blockchain_tree_canonical_chain_height{instance=~\"$instance\"}", @@ -4836,7 +4836,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of blocks in the tree's block buffer", "fieldConfig": { @@ -4917,7 +4917,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_blockchain_tree_block_buffer_blocks{instance=~\"$instance\"}", @@ -4933,7 +4933,7 @@ { 
"datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of sidechains in the blockchain tree", "fieldConfig": { @@ -5014,7 +5014,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_blockchain_tree_sidechains{instance=~\"$instance\"}", @@ -5030,7 +5030,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -5111,7 +5111,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_consensus_engine_beacon_make_canonical_committed_latency_sum{instance=~\"$instance\"}[$__rate_interval]) / rate(reth_consensus_engine_beacon_make_canonical_committed_latency_count{instance=~\"$instance\"}[$__rate_interval])", @@ -5127,7 +5127,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -5208,7 +5208,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_blockchain_tree_latest_reorg_depth{instance=~\"$instance\"}", @@ -5238,7 +5238,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -5319,7 +5319,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_consensus_engine_beacon_active_block_downloads{instance=~\"$instance\"}", @@ -5334,7 +5334,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Engine API messages received by the CL, either engine_newPayload or engine_forkchoiceUpdated", "fieldConfig": { @@ -5415,7 +5415,7 @@ { "datasource": { "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_consensus_engine_beacon_forkchoice_updated_messages{instance=~\"$instance\"}", @@ -5426,7 +5426,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_consensus_engine_beacon_new_payload_messages{instance=~\"$instance\"}", @@ -5442,7 +5442,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_newPayload to Forkchoice Update", "fieldConfig": { @@ -5524,7 +5524,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_engine_rpc_new_payload_forkchoice_updated_time_diff{instance=~\"$instance\"}", @@ -5539,7 +5539,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_newPayload RPC API", "fieldConfig": { @@ -5621,7 +5621,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5636,7 +5636,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5652,7 +5652,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5668,7 +5668,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5684,7 +5684,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5700,7 +5700,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": 
"${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5716,7 +5716,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5732,7 +5732,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5748,7 +5748,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5764,7 +5764,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5780,7 +5780,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5796,7 +5796,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5812,7 +5812,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5828,7 +5828,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5844,7 +5844,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5864,7 +5864,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total pipeline runs triggered by the sync controller", "fieldConfig": { @@ -5945,7 +5945,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": 
"reth_consensus_engine_beacon_pipeline_runs{instance=~\"$instance\"}", @@ -5960,7 +5960,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histograms for the engine_getPayloadBodiesByHashV1 and engine_getPayloadBodiesByRangeV1 RPC APIs", "fieldConfig": { @@ -6042,7 +6042,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6058,7 +6058,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6074,7 +6074,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6090,7 +6090,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6106,7 +6106,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6122,7 +6122,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6138,7 +6138,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6154,7 +6154,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6170,7 +6170,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6186,7 +6186,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, 
"editorMode": "builder", @@ -6206,7 +6206,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_forkchoiceUpdated RPC API", "fieldConfig": { @@ -6288,7 +6288,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6303,7 +6303,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6319,7 +6319,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6335,7 +6335,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6351,7 +6351,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6367,7 +6367,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6383,7 +6383,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6399,7 +6399,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6415,7 +6415,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6431,7 +6431,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6447,7 +6447,7 @@ { "datasource": { "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6463,7 +6463,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6479,7 +6479,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6495,7 +6495,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6511,7 +6511,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -6531,7 +6531,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Counts the number of failed response deliveries due to client request termination.", "fieldConfig": { @@ -6613,7 +6613,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "expr": "consensus_engine_beacon_failed_new_payload_response_deliveries{instance=~\"$instance\"}", "legendFormat": "Failed NewPayload Deliveries", @@ -6622,7 +6622,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "expr": "consensus_engine_beacon_failed_forkchoice_updated_response_deliveries{instance=~\"$instance\"}", "legendFormat": "Failed ForkchoiceUpdated Deliveries", @@ -6648,7 +6648,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -6729,7 +6729,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_trie_duration_seconds{instance=\"$instance\",quantile=~\"(0.5|0.9|0.99)\"}", @@ -6745,7 +6745,7 @@ { "datasource": { "type": "prometheus", - 
"uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -6825,7 +6825,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_trie_branches_added{instance=\"$instance\",quantile=~\"(0.5|0.9|0.99)\"}", @@ -6837,7 +6837,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_trie_leaves_added{instance=\"$instance\",quantile=~\"(0.5|0.9|0.99)\"}", @@ -6868,7 +6868,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Number of active jobs", "fieldConfig": { @@ -6949,7 +6949,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_payloads_active_jobs{instance=~\"$instance\"}", @@ -6964,7 +6964,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of initiated jobs", "fieldConfig": { @@ -7045,7 +7045,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_payloads_initiated_jobs{instance=~\"$instance\"}", @@ -7060,7 +7060,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of failed jobs", "fieldConfig": { @@ -7141,7 +7141,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_payloads_failed_jobs{instance=~\"$instance\"}", @@ -7169,7 +7169,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -7263,7 +7263,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": 
"reth_jemalloc_active{instance=~\"$instance\"}", @@ -7275,7 +7275,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_allocated{instance=~\"$instance\"}", @@ -7288,7 +7288,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_mapped{instance=~\"$instance\"}", @@ -7301,7 +7301,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_metadata{instance=~\"$instance\"}", @@ -7314,7 +7314,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_resident{instance=~\"$instance\"}", @@ -7327,7 +7327,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_retained{instance=~\"$instance\"}", @@ -7344,7 +7344,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -7426,7 +7426,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_process_resident_memory_bytes{instance=~\"$instance\"}", @@ -7442,7 +7442,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "100% = 1 core", "fieldConfig": { @@ -7524,7 +7524,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "avg(rate(reth_process_cpu_seconds_total{instance=~\"$instance\"}[1m]))", @@ -7540,7 +7540,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -7622,7 +7622,7 @@ { 
"datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_process_open_fds{instance=~\"$instance\"}", @@ -7651,7 +7651,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -7733,7 +7733,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_pruner_duration_seconds_sum{instance=~\"$instance\"}[$__rate_interval]) / rate(reth_pruner_duration_seconds_count{instance=~\"$instance\"}[$__rate_interval])", @@ -7749,7 +7749,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -7831,7 +7831,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_pruner_segments_duration_seconds_sum{instance=~\"$instance\"}[$__rate_interval]) / rate(reth_pruner_segments_duration_seconds_count{instance=~\"$instance\"}[$__rate_interval])", @@ -7847,7 +7847,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -7928,7 +7928,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_pruner_segments_highest_pruned_block{instance=~\"$instance\"}", @@ -7957,7 +7957,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -8065,7 +8065,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -8085,7 +8085,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -8157,7 +8157,7 @@ { 
"datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -8175,7 +8175,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -8257,7 +8257,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "max(max_over_time(reth_rpc_server_calls_time_seconds{instance=~\"$instance\"}[$__rate_interval])) by (method) > 0", @@ -8273,7 +8273,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -8345,7 +8345,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -8363,7 +8363,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -8480,7 +8480,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -8496,7 +8496,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -8513,7 +8513,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -8530,7 +8530,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -8547,7 +8547,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -8564,7 +8564,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, 
"editorMode": "builder", @@ -8581,7 +8581,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -8598,7 +8598,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -8619,7 +8619,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -8701,7 +8701,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(rate(reth_rpc_server_calls_successful_total{instance =~ \"$instance\"}[$__rate_interval])) by (method) > 0", @@ -8730,7 +8730,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The total number of canonical state notifications sent to ExExes.", "fieldConfig": { @@ -8811,7 +8811,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_exex_notifications_sent_total{instance=~\"$instance\"}", @@ -8827,7 +8827,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The total number of events ExExes have sent to the manager.", "fieldConfig": { @@ -8908,7 +8908,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_exex_events_sent_total{instance=~\"$instance\"}", @@ -8924,7 +8924,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Current and Maximum capacity of the internal state notifications buffer.", "fieldConfig": { @@ -9005,7 +9005,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", 
"expr": "reth_exex_manager_current_capacity{instance=~\"$instance\"}", @@ -9017,7 +9017,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "max_over_time(reth_exex_manager_max_capacity{instance=~\"$instance\"}[1h])", @@ -9033,7 +9033,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Current size of the internal state notifications buffer.", "fieldConfig": { @@ -9114,7 +9114,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_exex_manager_buffer_size{instance=~\"$instance\"}", @@ -9130,7 +9130,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of ExExes installed in the node", "fieldConfig": { @@ -9184,7 +9184,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_exex_manager_num_exexs{instance=~\"$instance\"}", @@ -9213,7 +9213,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9294,7 +9294,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_lowest_committed_block_height{instance=~\"$instance\"}", @@ -9307,7 +9307,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_highest_committed_block_height{instance=~\"$instance\"}", @@ -9324,7 +9324,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9405,7 +9405,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, 
"editorMode": "code", "expr": "reth_exex_wal_committed_blocks_count{instance=~\"$instance\"}", @@ -9418,7 +9418,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_notifications_count{instance=~\"$instance\"}", @@ -9435,7 +9435,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9517,7 +9517,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_size_bytes{instance=~\"$instance\"}", @@ -9547,7 +9547,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9655,7 +9655,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -9675,7 +9675,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9783,7 +9783,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -9803,7 +9803,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9911,7 +9911,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -9931,7 +9931,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -10039,7 +10039,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -10066,7 +10066,7 @@ "current": {}, 
"datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "definition": "query_result(reth_info)", "hide": 0, @@ -10083,6 +10083,21 @@ "skipUrlSync": false, "sort": 0, "type": "query" + }, + { + "current": {}, + "hide": 0, + "includeAll": false, + "label": "Datasource", + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" } ] }, diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index f3d9548c4405..ca89b82d46d1 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -34,30 +34,26 @@ use reth::{ node::{NodeTypes, NodeTypesWithEngine}, rpc::{EngineValidatorBuilder, RpcAddOns}, BuilderContext, FullNodeTypes, Node, NodeAdapter, NodeBuilder, NodeComponentsBuilder, - PayloadBuilderConfig, }, network::NetworkHandle, payload::ExecutionPayloadValidator, primitives::{Block, EthPrimitives, SealedBlock, TransactionSigned}, - providers::{CanonStateSubscriptions, EthStorage, StateProviderFactory}, + providers::{EthStorage, StateProviderFactory}, rpc::{ eth::EthApi, - types::engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}, + types::engine::{ExecutionPayload, PayloadError}, }, tasks::TaskManager, transaction_pool::{PoolTransaction, TransactionPool}, version::default_extra_data_bytes, }; -use reth_basic_payload_builder::{ - BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig, BuildArguments, BuildOutcome, - PayloadBuilder, PayloadConfig, -}; +use reth_basic_payload_builder::{BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig}; use reth_chainspec::{Chain, ChainSpec, ChainSpecProvider}; use reth_engine_local::payload::UnsupportedLocalAttributes; use reth_ethereum_payload_builder::EthereumBuilderConfig; use reth_node_api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, 
PayloadOrAttributes}, - validate_version_specific_fields, AddOnsContext, EngineTypes, EngineValidator, + validate_version_specific_fields, AddOnsContext, EngineTypes, EngineValidator, ExecutionData, FullNodeComponents, PayloadAttributes, PayloadBuilderAttributes, PayloadValidator, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; @@ -68,10 +64,7 @@ use reth_node_ethereum::{ }, EthEvmConfig, }; -use reth_payload_builder::{ - EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderError, PayloadBuilderHandle, - PayloadBuilderService, -}; +use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderError}; use reth_tracing::{RethTracer, Tracer}; use reth_trie_db::MerklePatriciaTrie; use serde::{Deserialize, Serialize}; @@ -174,13 +167,16 @@ impl EngineTypes for CustomEngineTypes { type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; + type ExecutionData = ExecutionData; fn block_to_payload( block: SealedBlock< <::Primitives as reth_node_api::NodePrimitives>::Block, >, - ) -> (ExecutionPayload, ExecutionPayloadSidecar) { - ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()) + ) -> ExecutionData { + let (payload, sidecar) = + ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()); + ExecutionData { payload, sidecar } } } @@ -205,19 +201,19 @@ impl CustomEngineValidator { impl PayloadValidator for CustomEngineValidator { type Block = Block; + type ExecutionData = ExecutionData; fn ensure_well_formed_payload( &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, + payload: ExecutionData, ) -> Result, PayloadError> { - self.inner.ensure_well_formed_payload(payload, sidecar) + self.inner.ensure_well_formed_payload(payload) } } impl EngineValidator for CustomEngineValidator where - T: EngineTypes, + T: EngineTypes, { fn 
validate_version_specific_fields( &self, @@ -364,43 +360,35 @@ where + Unpin + 'static, { - async fn spawn_payload_service( - self, + type PayloadBuilder = CustomPayloadBuilder; + + async fn build_payload_builder( + &self, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result::Engine>> { - let payload_builder = CustomPayloadBuilder::default(); - let conf = ctx.payload_builder_config(); - - let payload_job_config = BasicPayloadJobGeneratorConfig::default() - .interval(conf.interval()) - .deadline(conf.deadline()) - .max_payload_tasks(conf.max_payload_tasks()); - - let payload_generator = BasicPayloadJobGenerator::with_builder( - ctx.provider().clone(), - pool, - ctx.task_executor().clone(), - payload_job_config, - payload_builder, - ); - let (payload_service, payload_builder) = - PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream()); - - ctx.task_executor().spawn_critical("payload builder service", Box::pin(payload_service)); - + ) -> eyre::Result { + let payload_builder = CustomPayloadBuilder { + inner: reth_ethereum_payload_builder::EthereumPayloadBuilder::new( + ctx.provider().clone(), + pool, + EthEvmConfig::new(ctx.provider().chain_spec().clone()), + EthereumBuilderConfig::new(default_extra_data_bytes()), + ), + }; Ok(payload_builder) } } /// The type responsible for building custom payloads -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] #[non_exhaustive] -pub struct CustomPayloadBuilder; +pub struct CustomPayloadBuilder { + inner: reth_ethereum_payload_builder::EthereumPayloadBuilder, +} -impl PayloadBuilder for CustomPayloadBuilder +impl PayloadBuilder for CustomPayloadBuilder where - Client: StateProviderFactory + ChainSpecProvider, + Client: StateProviderFactory + ChainSpecProvider + Clone, Pool: TransactionPool>, { type Attributes = CustomPayloadBuilderAttributes; @@ -408,22 +396,14 @@ where fn try_build( &self, - args: BuildArguments, + args: BuildArguments, ) -> Result, PayloadBuilderError> { - let 
BuildArguments { client, pool, cached_reads, config, cancel, best_payload } = args; + let BuildArguments { cached_reads, config, cancel, best_payload } = args; let PayloadConfig { parent_header, attributes } = config; - let chain_spec = client.chain_spec(); - // This reuses the default EthereumPayloadBuilder to build the payload // but any custom logic can be implemented here - reth_ethereum_payload_builder::EthereumPayloadBuilder::new( - EthEvmConfig::new(chain_spec.clone()), - EthereumBuilderConfig::new(default_extra_data_bytes()), - ) - .try_build(BuildArguments { - client, - pool, + self.inner.try_build(BuildArguments { cached_reads, config: PayloadConfig { parent_header, attributes: attributes.0 }, cancel, @@ -433,19 +413,10 @@ where fn build_empty_payload( &self, - client: &Client, config: PayloadConfig, ) -> Result { let PayloadConfig { parent_header, attributes } = config; - let chain_spec = client.chain_spec(); - >::build_empty_payload( - &reth_ethereum_payload_builder::EthereumPayloadBuilder::new( - EthEvmConfig::new(chain_spec.clone()), - EthereumBuilderConfig::new(default_extra_data_bytes()) - ), - client, - PayloadConfig { parent_header, attributes: attributes.0} - ) + self.inner.build_empty_payload(PayloadConfig { parent_header, attributes: attributes.0 }) } } diff --git a/examples/custom-evm/Cargo.toml b/examples/custom-evm/Cargo.toml index c9ce0c4674e0..b236647f1ea6 100644 --- a/examples/custom-evm/Cargo.toml +++ b/examples/custom-evm/Cargo.toml @@ -12,6 +12,7 @@ reth-evm-ethereum.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-primitives.workspace = true +reth-ethereum-payload-builder.workspace = true reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true reth-evm.workspace = true diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index f84708210ca2..06c92960b503 100644 --- a/examples/custom-evm/src/main.rs +++ 
b/examples/custom-evm/src/main.rs @@ -204,12 +204,15 @@ where PayloadBuilderAttributes = EthPayloadBuilderAttributes, >, { - async fn spawn_payload_service( - self, + type PayloadBuilder = + reth_ethereum_payload_builder::EthereumPayloadBuilder; + + async fn build_payload_builder( + &self, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> { - self.inner.spawn(MyEvmConfig::new(ctx.chain_spec()), ctx, pool) + ) -> eyre::Result { + self.inner.build(MyEvmConfig::new(ctx.chain_spec()), ctx, pool) } } #[tokio::main] diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index 7924aabd8692..0408c75cc1a7 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -55,15 +55,11 @@ where async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let data_dir = ctx.config().datadir(); let blob_store = InMemoryBlobStore::default(); - let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec()) + let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) 
.with_additional_tasks(ctx.config().txpool.additional_validation_tasks) - .build_with_tasks( - ctx.provider().clone(), - ctx.task_executor().clone(), - blob_store.clone(), - ); + .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); let transaction_pool = reth_transaction_pool::Pool::eth_pool(validator, blob_store, self.pool_config); diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index dfb973a96fc4..f1a5deb48a8e 100644 --- a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -4,9 +4,10 @@ use reth::{ api::Block, providers::{BlockReaderIdExt, BlockSource, StateProviderFactory}, tasks::TaskSpawner, - transaction_pool::TransactionPool, }; -use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig}; +use reth_basic_payload_builder::{ + BasicPayloadJobGeneratorConfig, HeaderForPayload, PayloadBuilder, PayloadConfig, +}; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator}; use reth_primitives::SealedHeader; @@ -14,11 +15,9 @@ use std::sync::Arc; /// The generator type that creates new jobs that builds empty blocks. #[derive(Debug)] -pub struct EmptyBlockPayloadJobGenerator { +pub struct EmptyBlockPayloadJobGenerator { /// The client that can interact with the chain. client: Client, - /// txpool - pool: Pool, /// How to spawn building tasks executor: Tasks, /// The configuration for the job generator. 
@@ -31,41 +30,39 @@ pub struct EmptyBlockPayloadJobGenerator { // === impl EmptyBlockPayloadJobGenerator === -impl EmptyBlockPayloadJobGenerator { +impl EmptyBlockPayloadJobGenerator { /// Creates a new [EmptyBlockPayloadJobGenerator] with the given config and custom /// [PayloadBuilder] pub fn with_builder( client: Client, - pool: Pool, executor: Tasks, config: BasicPayloadJobGeneratorConfig, builder: Builder, ) -> Self { - Self { client, pool, executor, _config: config, builder } + Self { client, executor, _config: config, builder } } } -impl PayloadJobGenerator - for EmptyBlockPayloadJobGenerator +impl PayloadJobGenerator + for EmptyBlockPayloadJobGenerator where Client: StateProviderFactory - + BlockReaderIdExt + + BlockReaderIdExt
> + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + Unpin + 'static, - Builder: PayloadBuilder + Unpin + 'static, - >::Attributes: Unpin + Clone, - >::BuiltPayload: Unpin + Clone, + Builder: PayloadBuilder + Unpin + 'static, + Builder::Attributes: Unpin + Clone, + Builder::BuiltPayload: Unpin + Clone, { - type Job = EmptyBlockPayloadJob; + type Job = EmptyBlockPayloadJob; /// This is invoked when the node receives payload attributes from the beacon node via /// `engine_forkchoiceUpdatedV1` fn new_payload_job( &self, - attributes: >::Attributes, + attributes: Builder::Attributes, ) -> Result { let parent_block = if attributes.parent().is_zero() { // use latest block if parent is zero: genesis block @@ -87,8 +84,6 @@ where let config = PayloadConfig::new(Arc::new(header), attributes); Ok(EmptyBlockPayloadJob { - client: self.client.clone(), - _pool: self.pool.clone(), _executor: self.executor.clone(), builder: self.builder.clone(), config, diff --git a/examples/custom-payload-builder/src/job.rs b/examples/custom-payload-builder/src/job.rs index 014198259596..3ba139c066f2 100644 --- a/examples/custom-payload-builder/src/job.rs +++ b/examples/custom-payload-builder/src/job.rs @@ -1,8 +1,6 @@ use futures_util::Future; -use reth::{ - providers::StateProviderFactory, tasks::TaskSpawner, transaction_pool::TransactionPool, -}; -use reth_basic_payload_builder::{PayloadBuilder, PayloadConfig}; +use reth::tasks::TaskSpawner; +use reth_basic_payload_builder::{HeaderForPayload, PayloadBuilder, PayloadConfig}; use reth_node_api::PayloadKind; use reth_payload_builder::{KeepPayloadJobAlive, PayloadBuilderError, PayloadJob}; @@ -12,16 +10,12 @@ use std::{ }; /// A [PayloadJob] that builds empty blocks. -pub struct EmptyBlockPayloadJob +pub struct EmptyBlockPayloadJob where - Builder: PayloadBuilder, + Builder: PayloadBuilder, { /// The configuration for how the payload will be created. 
- pub(crate) config: PayloadConfig, - /// The client that can interact with the chain. - pub(crate) client: Client, - /// The transaction pool. - pub(crate) _pool: Pool, + pub(crate) config: PayloadConfig>, /// How to spawn building tasks pub(crate) _executor: Tasks, /// The type responsible for building payloads. @@ -30,14 +24,12 @@ where pub(crate) builder: Builder, } -impl PayloadJob for EmptyBlockPayloadJob +impl PayloadJob for EmptyBlockPayloadJob where - Client: StateProviderFactory + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - Builder: PayloadBuilder + Unpin + 'static, - >::Attributes: Unpin + Clone, - >::BuiltPayload: Unpin + Clone, + Builder: PayloadBuilder + Unpin + 'static, + Builder::Attributes: Unpin + Clone, + Builder::BuiltPayload: Unpin + Clone, { type PayloadAttributes = Builder::Attributes; type ResolvePayloadFuture = @@ -45,7 +37,7 @@ where type BuiltPayload = Builder::BuiltPayload; fn best_payload(&self) -> Result { - let payload = self.builder.build_empty_payload(&self.client, self.config.clone())?; + let payload = self.builder.build_empty_payload(self.config.clone())?; Ok(payload) } @@ -62,15 +54,13 @@ where } } -/// A [PayloadJob] is a a future that's being polled by the `PayloadBuilderService` -impl Future for EmptyBlockPayloadJob +/// A [PayloadJob] is a future that's being polled by the `PayloadBuilderService` +impl Future for EmptyBlockPayloadJob where - Client: StateProviderFactory + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - Builder: PayloadBuilder + Unpin + 'static, - >::Attributes: Unpin + Clone, - >::BuiltPayload: Unpin + Clone, + Builder: PayloadBuilder + Unpin + 'static, + Builder::Attributes: Unpin + Clone, + Builder::BuiltPayload: Unpin + Clone, { type Output = Result<(), PayloadBuilderError>; diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index 
bdbfe1510286..1f9a7a51511f 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -11,20 +11,19 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] -use generator::EmptyBlockPayloadJobGenerator; +use crate::generator::EmptyBlockPayloadJobGenerator; use reth::{ builder::{components::PayloadServiceBuilder, node::FullNodeTypes, BuilderContext}, cli::{config::PayloadBuilderConfig, Cli}, - payload::PayloadBuilderHandle, providers::CanonStateSubscriptions, transaction_pool::{PoolTransaction, TransactionPool}, }; use reth_basic_payload_builder::BasicPayloadJobGeneratorConfig; use reth_chainspec::ChainSpec; -use reth_ethereum_payload_builder::EthereumBuilderConfig; +use reth_ethereum_payload_builder::{EthereumBuilderConfig, EthereumPayloadBuilder}; use reth_node_api::NodeTypesWithEngine; use reth_node_ethereum::{node::EthereumAddOns, EthEngineTypes, EthEvmConfig, EthereumNode}; -use reth_payload_builder::PayloadBuilderService; +use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{EthPrimitives, TransactionSigned}; pub mod generator; @@ -47,14 +46,31 @@ where + Unpin + 'static, { - async fn spawn_payload_service( - self, + type PayloadBuilder = EthereumPayloadBuilder; + + async fn build_payload_builder( + &self, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result::Engine>> { + ) -> eyre::Result { tracing::info!("Spawning a custom payload builder"); let conf = ctx.payload_builder_config(); + Ok(reth_ethereum_payload_builder::EthereumPayloadBuilder::new( + ctx.provider().clone(), + pool, + EthEvmConfig::new(ctx.chain_spec()), + EthereumBuilderConfig::new(conf.extra_data_bytes()), + )) + } + + fn spawn_payload_builder_service( + self, + ctx: &BuilderContext, + payload_builder: Self::PayloadBuilder, + ) -> eyre::Result::Engine>> { + let conf = ctx.payload_builder_config(); + let payload_job_config = BasicPayloadJobGeneratorConfig::default() .interval(conf.interval()) 
.deadline(conf.deadline()) @@ -62,13 +78,9 @@ where let payload_generator = EmptyBlockPayloadJobGenerator::with_builder( ctx.provider().clone(), - pool, ctx.task_executor().clone(), payload_job_config, - reth_ethereum_payload_builder::EthereumPayloadBuilder::new( - EthEvmConfig::new(ctx.chain_spec()), - EthereumBuilderConfig::new(conf.extra_data_bytes()), - ), + payload_builder, ); let (payload_service, payload_builder) = diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs index 043d37c4f6ae..4657fe1f1d6b 100644 --- a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs +++ b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs @@ -45,7 +45,7 @@ impl CustomRlpxProtoMessage { message: CustomRlpxProtoMessageKind::PingMessage(msg.into()), } } - /// Creates a ping message + /// Creates a pong message pub fn pong_message(msg: impl Into) -> Self { Self { message_type: CustomRlpxProtoMessageId::PongMessage, diff --git a/examples/network-proxy/src/main.rs b/examples/network-proxy/src/main.rs index edd6fe625504..1699d08d8fe2 100644 --- a/examples/network-proxy/src/main.rs +++ b/examples/network-proxy/src/main.rs @@ -13,8 +13,11 @@ use futures::StreamExt; use reth_chainspec::DEV; use reth_network::{ - config::rng_secret_key, eth_requests::IncomingEthRequest, p2p::HeadersClient, - transactions::NetworkTransactionEvent, types::NewPooledTransactionHashes68, + config::rng_secret_key, + eth_requests::IncomingEthRequest, + p2p::HeadersClient, + transactions::NetworkTransactionEvent, + types::{BlockHashOrNumber, NewPooledTransactionHashes68}, BlockDownloaderProvider, FetchClient, NetworkConfig, NetworkEventListenerProvider, NetworkHandle, NetworkInfo, NetworkManager, Peers, }; @@ -62,7 +65,7 @@ async fn main() -> eyre::Result<()> { }); loop { - // receive incoming eth requests and transaction messages + // receive incoming eth requests and transaction 
messages from the second peer tokio::select! { eth_request = requests_rx.recv() => { let Some(eth_request) = eth_request else {break}; @@ -93,7 +96,8 @@ async fn main() -> eyre::Result<()> { Ok(()) } -/// Launches another network/peer, connects to the first peer and sends a header request. +/// Launches another network/peer, connects to the first peer and sends requests/messages to the +/// first peer. async fn run_peer(handle: NetworkHandle) -> eyre::Result<()> { // create another peer let config = NetworkConfig::builder(rng_secret_key()) @@ -106,12 +110,13 @@ async fn run_peer(handle: NetworkHandle) -> eyre::Result<()> { tokio::task::spawn(network); // add the other peer as trusted + // this will establish a connection to the first peer peer.add_trusted_peer(*handle.peer_id(), handle.local_addr()); // obtain the client that can emit requests let client: FetchClient = peer.fetch_client().await?; - let header = client.get_header(0.into()).await.unwrap(); + let header = client.get_header(BlockHashOrNumber::Number(0)).await.unwrap(); println!("Got header: {:?}", header); // send a (bogus) hashes message diff --git a/examples/network-txpool/Cargo.toml b/examples/network-txpool/Cargo.toml index 7d4817263b75..41e2ecaefc9f 100644 --- a/examples/network-txpool/Cargo.toml +++ b/examples/network-txpool/Cargo.toml @@ -6,6 +6,7 @@ edition.workspace = true license.workspace = true [dependencies] +alloy-consensus.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } eyre.workspace = true tokio.workspace = true diff --git a/examples/network-txpool/src/main.rs b/examples/network-txpool/src/main.rs index 716e6cc57c9f..ff066336fd9e 100644 --- a/examples/network-txpool/src/main.rs +++ b/examples/network-txpool/src/main.rs @@ -7,6 +7,7 @@ //! cargo run --release -p network-txpool -- node //! 
``` +use alloy_consensus::Transaction; use reth_network::{config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::{ diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 1bf98c8c0af6..67c40035f20e 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -34,9 +34,7 @@ use reth::rpc::builder::{ // Configuring the network parts, ideally also wouldn't need to think about this. use myrpc_ext::{MyRpcExt, MyRpcExtApiServer}; use reth::tasks::TokioTaskExecutor; -use reth_node_ethereum::{ - node::EthereumEngineValidator, EthEvmConfig, EthExecutorProvider, EthereumNode, -}; +use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; use reth_provider::ChainSpecProvider; // Custom rpc extension @@ -75,11 +73,7 @@ async fn main() -> eyre::Result<()> { // Pick which namespaces to expose. let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]); - let mut server = rpc_builder.build( - config, - Box::new(EthApi::with_spawner), - Arc::new(EthereumEngineValidator::new(spec)), - ); + let mut server = rpc_builder.build(config, Box::new(EthApi::with_spawner)); // Add a custom rpc namespace let custom_rpc = MyRpcExt { provider }; diff --git a/fork.yaml b/fork.yaml index 7a18b8e6ad1e..5d07d1ceed70 100644 --- a/fork.yaml +++ b/fork.yaml @@ -4,7 +4,7 @@ footer: | base: name: reth url: https://github.com/paradigmxyz/reth - hash: b06682e9af5abf0cee86d707314f4bd8972ffb85 + hash: 46d63e8054b63f4fe09978a3850d3f1c02871b49 fork: name: scroll-reth url: https://github.com/scroll-tech/reth