From 7aa693dda03f5800a71f4aecc8ac5fb56943c045 Mon Sep 17 00:00:00 2001 From: HAOYUatHZ Date: Fri, 24 May 2024 19:01:59 +0800 Subject: [PATCH 1/3] Revert "cmd/devp2p, eth: drop eth/66 (#28239)" This reverts commit bc6d184872889224480cf9df58b0539b210ffa9e. --- cmd/devp2p/internal/ethtest/chain_test.go | 6 +- cmd/devp2p/internal/ethtest/helpers.go | 13 +- cmd/devp2p/internal/ethtest/suite.go | 38 ++-- cmd/devp2p/internal/ethtest/types.go | 26 +-- eth/downloader/downloader_test.go | 136 ++++++------ eth/downloader/fetchers.go | 8 +- eth/downloader/fetchers_concurrent_bodies.go | 2 +- eth/downloader/fetchers_concurrent_headers.go | 2 +- .../fetchers_concurrent_receipts.go | 2 +- eth/downloader/skeleton.go | 2 +- eth/downloader/skeleton_test.go | 6 +- eth/fetcher/block_fetcher.go | 4 +- eth/fetcher/block_fetcher_test.go | 4 +- eth/handler.go | 2 +- eth/handler_eth.go | 4 +- eth/handler_eth_test.go | 15 +- eth/protocols/eth/handler.go | 66 ++++-- eth/protocols/eth/handler_test.go | 208 ++++++++++++++---- eth/protocols/eth/handlers.go | 138 ++++++++---- eth/protocols/eth/handshake_test.go | 3 +- eth/protocols/eth/peer.go | 98 ++++++--- eth/protocols/eth/protocol.go | 197 +++++++++-------- eth/protocols/eth/protocol_test.go | 102 +++++---- eth/sync_test.go | 2 +- 24 files changed, 683 insertions(+), 401 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go index 02f00e0a2bf4..5f3d498eb593 100644 --- a/cmd/devp2p/internal/ethtest/chain_test.go +++ b/cmd/devp2p/internal/ethtest/chain_test.go @@ -145,7 +145,7 @@ func TestChain_GetHeaders(t *testing.T) { }{ { req: GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Number: uint64(2)}, Amount: uint64(5), Skip: 1, @@ -162,7 +162,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)}, Amount: uint64(3), Skip: 0, @@ -177,7 +177,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Hash: chain.Head().Hash()}, Amount: uint64(1), Skip: 0, diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go index ca082dce15e4..e385a0b0c6e4 100644 --- a/cmd/devp2p/internal/ethtest/helpers.go +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -62,6 +62,7 @@ func (s *Suite) dial() (*Conn, error) { } // set default p2p capabilities conn.caps = []p2p.Cap{ + {Name: "eth", Version: 66}, {Name: "eth", Version: 67}, {Name: "eth", Version: 68}, } @@ -236,8 +237,8 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message { return errorf("could not get headers for inbound header request: %v", err) } resp := &BlockHeaders{ - RequestId: msg.ReqID(), - BlockHeadersRequest: eth.BlockHeadersRequest(headers), + RequestId: msg.ReqID(), + BlockHeadersPacket: eth.BlockHeadersPacket(headers), } if err := c.Write(resp); err != nil { return errorf("could not write to connection: %v", err) @@ -266,7 +267,7 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint if !ok { return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg)) } - headers := []*types.Header(resp.BlockHeadersRequest) + headers := 
[]*types.Header(resp.BlockHeadersPacket) return headers, nil } @@ -378,7 +379,7 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error { conn.SetReadDeadline(time.Now().Add(20 * time.Second)) // create request req := &GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Hash: block.Hash()}, Amount: 1, }, @@ -603,8 +604,8 @@ func (s *Suite) hashAnnounce() error { pretty.Sdump(blockHeaderReq)) } err = sendConn.Write(&BlockHeaders{ - RequestId: blockHeaderReq.ReqID(), - BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()}, + RequestId: blockHeaderReq.ReqID(), + BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()}, }) if err != nil { return fmt.Errorf("failed to write to connection: %v", err) diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 98bdd966849b..77d834e8960b 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -112,7 +112,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { } // write request req := &GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()}, Amount: 2, Skip: 1, @@ -150,7 +150,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { // create two requests req1 := &GetBlockHeaders{ RequestId: uint64(111), - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, @@ -161,7 +161,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { } req2 := &GetBlockHeaders{ RequestId: uint64(222), - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, @@ -201,10 +201,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { if err != nil { t.Fatalf("failed to get expected headers for request 2: %v", err) } - if !headersMatch(expected1, headers1.BlockHeadersRequest) { + if !headersMatch(expected1, headers1.BlockHeadersPacket) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) } - if !headersMatch(expected2, headers2.BlockHeadersRequest) { + if !headersMatch(expected2, headers2.BlockHeadersPacket) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) } } @@ -224,7 +224,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { reqID := uint64(1234) request1 := &GetBlockHeaders{ RequestId: reqID, - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Number: 1, }, @@ -233,7 +233,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { } request2 := &GetBlockHeaders{ RequestId: reqID, - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Number: 33, }, @@ -270,10 +270,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { if err != nil { t.Fatalf("failed to get expected block headers: %v", err) } - if !headersMatch(expected1, headers1.BlockHeadersRequest) { + if !headersMatch(expected1, headers1.BlockHeadersPacket) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) } - if !headersMatch(expected2, headers2.BlockHeadersRequest) { + if !headersMatch(expected2, headers2.BlockHeadersPacket) { t.Fatalf("header 
mismatch: \nexpected %v \ngot %v", expected2, headers2) } } @@ -290,7 +290,7 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) { t.Fatalf("peering failed: %v", err) } req := &GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Number: 0}, Amount: 2, }, @@ -322,7 +322,7 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { // create block bodies request req := &GetBlockBodies{ RequestId: uint64(55), - GetBlockBodiesRequest: eth.GetBlockBodiesRequest{ + GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ s.chain.blocks[54].Hash(), s.chain.blocks[75].Hash(), }, @@ -336,11 +336,11 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { if !ok { t.Fatalf("unexpected: %s", pretty.Sdump(msg)) } - bodies := resp.BlockBodiesResponse + bodies := resp.BlockBodiesPacket t.Logf("received %d block bodies", len(bodies)) - if len(bodies) != len(req.GetBlockBodiesRequest) { + if len(bodies) != len(req.GetBlockBodiesPacket) { t.Fatalf("wrong bodies in response: expected %d bodies, "+ - "got %d", len(req.GetBlockBodiesRequest), len(bodies)) + "got %d", len(req.GetBlockBodiesPacket), len(bodies)) } } @@ -481,8 +481,8 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) { hashes = append(hashes, hash) } getTxReq := &GetPooledTransactions{ - RequestId: 1234, - GetPooledTransactionsRequest: hashes, + RequestId: 1234, + GetPooledTransactionsPacket: hashes, } if err = conn.Write(getTxReq); err != nil { t.Fatalf("could not write to conn: %v", err) @@ -490,7 +490,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) { // check that all received transactions match those that were sent to node switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { case *PooledTransactions: - for _, gotTx := range msg.PooledTransactionsResponse { + for _, gotTx := range msg.PooledTransactionsPacket { if _, exists := hashMap[gotTx.Hash()]; !exists { t.Fatalf("unexpected tx received: %v", gotTx.Hash()) } @@ -547,8 +547,8 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) { msg := conn.readAndServe(s.chain, timeout) switch msg := msg.(type) { case *GetPooledTransactions: - if len(msg.GetPooledTransactionsRequest) != len(hashes) { - t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest)) + if len(msg.GetPooledTransactionsPacket) != len(hashes) { + t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket)) } return diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index 339f4e713755..8fa21af05b3b 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 } func (msg Transactions) ReqID() uint64 { return 18 } // GetBlockHeaders represents a block header query. 
-type GetBlockHeaders eth.GetBlockHeadersPacket +type GetBlockHeaders eth.GetBlockHeadersPacket66 func (msg GetBlockHeaders) Code() int { return 19 } func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId } -type BlockHeaders eth.BlockHeadersPacket +type BlockHeaders eth.BlockHeadersPacket66 func (msg BlockHeaders) Code() int { return 20 } func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId } // GetBlockBodies represents a GetBlockBodies request -type GetBlockBodies eth.GetBlockBodiesPacket +type GetBlockBodies eth.GetBlockBodiesPacket66 func (msg GetBlockBodies) Code() int { return 21 } func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId } // BlockBodies is the network packet for block content distribution. -type BlockBodies eth.BlockBodiesPacket +type BlockBodies eth.BlockBodiesPacket66 func (msg BlockBodies) Code() int { return 22 } func (msg BlockBodies) ReqID() uint64 { return msg.RequestId } @@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 } func (msg NewBlock) ReqID() uint64 { return 0 } // NewPooledTransactionHashes66 is the network packet for the tx hash propagation message. -type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67 +type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66 func (msg NewPooledTransactionHashes66) Code() int { return 24 } func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 } @@ -139,12 +139,12 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68 func (msg NewPooledTransactionHashes) Code() int { return 24 } func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 } -type GetPooledTransactions eth.GetPooledTransactionsPacket +type GetPooledTransactions eth.GetPooledTransactionsPacket66 func (msg GetPooledTransactions) Code() int { return 25 } func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId } -type PooledTransactions eth.PooledTransactionsPacket +type PooledTransactions eth.PooledTransactionsPacket66 func (msg PooledTransactions) Code() int { return 26 } func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId } @@ -180,25 +180,25 @@ func (c *Conn) Read() Message { case (Status{}).Code(): msg = new(Status) case (GetBlockHeaders{}).Code(): - ethMsg := new(eth.GetBlockHeadersPacket) + ethMsg := new(eth.GetBlockHeadersPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetBlockHeaders)(ethMsg) case (BlockHeaders{}).Code(): - ethMsg := new(eth.BlockHeadersPacket) + ethMsg := new(eth.BlockHeadersPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*BlockHeaders)(ethMsg) case (GetBlockBodies{}).Code(): - ethMsg := new(eth.GetBlockBodiesPacket) + ethMsg := new(eth.GetBlockBodiesPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetBlockBodies)(ethMsg) case (BlockBodies{}).Code(): - ethMsg := new(eth.BlockBodiesPacket) + ethMsg := new(eth.BlockBodiesPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } @@ -217,13 +217,13 @@ func (c *Conn) Read() Message { } msg = new(NewPooledTransactionHashes66) case (GetPooledTransactions{}.Code()): - ethMsg := new(eth.GetPooledTransactionsPacket) + ethMsg := new(eth.GetPooledTransactionsPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return 
errorf("could not rlp decode message: %v", err) } return (*GetPooledTransactions)(ethMsg) case (PooledTransactions{}.Code()): - ethMsg := new(eth.PooledTransactionsPacket) + ethMsg := new(eth.PooledTransactionsPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 273114748a78..bc0a289991f9 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -177,7 +177,7 @@ func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header { // function can be used to retrieve batches of headers from the particular peer. func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Hash: origin, }, @@ -205,7 +205,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersRequest)(&headers), + Res: (*eth.BlockHeadersPacket)(&headers), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -221,7 +221,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i // function can be used to retrieve batches of headers from the particular peer. func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Number: origin, }, @@ -249,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersRequest)(&headers), + Res: (*eth.BlockHeadersPacket)(&headers), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -286,7 +286,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et } res := ð.Response{ Req: req, - Res: (*eth.BlockBodiesResponse)(&bodies), + Res: (*eth.BlockBodiesPacket)(&bodies), Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -317,7 +317,7 @@ func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan * } res := ð.Response{ Req: req, - Res: (*eth.ReceiptsResponse)(&receipts), + Res: (*eth.ReceiptsPacket)(&receipts), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -437,9 +437,9 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { } } -func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) } -func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) } -func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) } +func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) } +func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, 
SnapSync) } +func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) } func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) } func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) } func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) } @@ -461,8 +461,8 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a large batch of blocks are being downloaded, it is throttled // until the cached blocks are retrieved. -func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) } -func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) } +func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) } +func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) } func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) } func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) } @@ -543,9 +543,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Tests that simple synchronization against a forked chain works correctly. In // this test common ancestor lookup should *not* be short circuited, and a full // binary search should be executed. -func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) } -func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) } -func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) } +func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) } +func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) } +func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) } func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) } func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) } func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) } @@ -573,9 +573,9 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronising against a much shorter but much heavier fork works // currently and is not dropped. -func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) } -func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) } -func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) } +func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } +func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) } +func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) } func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) } func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) } @@ -605,9 +605,9 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head, ensuring that malicious peers cannot waste resources by feeding // long dead chains. 
-func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) } -func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) } -func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) } +func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) } +func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) } +func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) } func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) } func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) } func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) } @@ -636,14 +636,14 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head for short but heavy forks too. These are a bit special because they // take different ancestor lookup paths. -func TestBoundedHeavyForkedSync68Full(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, FullSync) +func TestBoundedHeavyForkedSync66Full(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH66, FullSync) } -func TestBoundedHeavyForkedSync68Snap(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync) +func TestBoundedHeavyForkedSync66Snap(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync) } -func TestBoundedHeavyForkedSync68Light(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, LightSync) +func TestBoundedHeavyForkedSync66Light(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH66, LightSync) } func TestBoundedHeavyForkedSync67Full(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH67, FullSync) @@ -678,9 +678,9 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { } // Tests that a canceled download wipes all previously accumulated state. -func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } -func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } -func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) } +func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) } +func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) } +func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) } func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) } func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) } func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) } @@ -708,9 +708,9 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { } // Tests that synchronisation from multiple peers works as intended (multi thread sanity test). 
-func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) } -func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) } -func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) } +func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) } +func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) } +func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) } func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) } func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) } func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) } @@ -735,9 +735,9 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. -func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } -func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) } -func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) } +func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) } +func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) } +func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) } func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) } func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) } func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) } @@ -750,7 +750,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { chain := testChainBase.shorten(blockCacheMaxItems - 15) // Create peers of every type - tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) + tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:]) tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:]) // Synchronise with the requested peer and make sure all blocks were retrieved @@ -760,7 +760,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { assertOwnChain(t, tester, len(chain.blocks)) // Check that no peers have been dropped off - for _, version := range []int{68, 67} { + for _, version := range []int{66, 67} { peer := fmt.Sprintf("peer %d", version) if _, ok := tester.peers[peer]; !ok { t.Errorf("%s dropped", peer) @@ -770,9 +770,9 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. 
-func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) } -func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) } -func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) } +func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) } +func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) } +func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) } func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) } func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) } func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) } @@ -821,9 +821,9 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { // Tests that headers are enqueued continuously, preventing malicious nodes from // stalling the downloader by feeding gapped header chains. -func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) } -func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) } -func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) } +func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) } +func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) } +func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) } func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) } func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) } func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) } @@ -850,9 +850,9 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { // Tests that if requested headers are shifted (i.e. first is missing), the queue // detects the invalid numbering. -func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) } -func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) } -func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) } +func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) } +func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) } +func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) } func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) } func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) } func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) } @@ -880,14 +880,14 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { // Tests that a peer advertising a high TD doesn't get to stall the downloader // afterwards by not sending any useful hashes. 
-func TestHighTDStarvationAttack68Full(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, FullSync) +func TestHighTDStarvationAttack66Full(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH66, FullSync) } -func TestHighTDStarvationAttack68Snap(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, SnapSync) +func TestHighTDStarvationAttack66Snap(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH66, SnapSync) } -func TestHighTDStarvationAttack68Light(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, LightSync) +func TestHighTDStarvationAttack66Light(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH66, LightSync) } func TestHighTDStarvationAttack67Full(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH67, FullSync) @@ -911,7 +911,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { } // Tests that misbehaving peers are disconnected, whilst behaving ones are not. -func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) } +func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) } func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) } func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { @@ -960,9 +960,9 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { // Tests that synchronisation progress (origin block number, current block number // and highest block number) is tracked and updated correctly. -func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } -func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } -func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } +func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) } +func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) } +func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) } func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) } func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) } func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) } @@ -1040,9 +1040,9 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync // Tests that synchronisation progress (origin block number and highest block // number) is tracked and updated correctly in case of a fork (or manual head // revertal). 
-func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) } -func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) } -func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) } +func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) } +func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) } +func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) } func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) } func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) } func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) } @@ -1114,9 +1114,9 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Tests that if synchronisation is aborted due to some failure, then the progress // origin is not updated in the next sync cycle, as it should be considered the // continuation of the previous sync and not a new instance. -func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) } -func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) } +func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) } +func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) } +func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) } func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) } func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) } func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) } @@ -1183,9 +1183,9 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Tests that if an attacker fakes a chain height, after the attack is detected, // the progress height is successfully reduced at the next sync invocation. -func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) } -func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) } +func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) } +func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) } +func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) } func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) } func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) } func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) } @@ -1330,10 +1330,8 @@ func TestRemoteHeaderRequestSpan(t *testing.T) { // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. 
-func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) } -func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) } -func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) } -func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) } +func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) } +func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) } func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go index 4fa4e0b73710..a7022240cd52 100644 --- a/eth/downloader/fetchers.go +++ b/eth/downloader/fetchers.go @@ -58,14 +58,14 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo case res := <-resCh: // Headers successfully retrieved, update the metrics headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) // Don't reject the packet even if it turns out to be bad, downloader will // disconnect the peer on its own terms. Simply delivery the headers to // be processed by the caller res.Done <- nil - return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil + return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil } } @@ -103,13 +103,13 @@ func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amou case res := <-resCh: // Headers successfully retrieved, update the metrics headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) // Don't reject the packet even if it turns out to be bad, downloader will // disconnect the peer on its own terms. Simply delivery the headers to // be processed by the caller res.Done <- nil - return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil + return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil } } diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go index 86b97c790aff..2015379a00ae 100644 --- a/eth/downloader/fetchers_concurrent_bodies.go +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -89,7 +89,7 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the body data and delivering it to the downloader's queue. 
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack() + txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesPacket).Unpack() hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes} accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2]) diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go index 9eab36772fec..aa4234bca716 100644 --- a/eth/downloader/fetchers_concurrent_headers.go +++ b/eth/downloader/fetchers_concurrent_headers.go @@ -81,7 +81,7 @@ func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh cha // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the header data and delivering it to the downloader's queue. func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - headers := *packet.Res.(*eth.BlockHeadersRequest) + headers := *packet.Res.(*eth.BlockHeadersPacket) hashes := packet.Meta.([]common.Hash) accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh) diff --git a/eth/downloader/fetchers_concurrent_receipts.go b/eth/downloader/fetchers_concurrent_receipts.go index ca4164ef9b96..1692fede382f 100644 --- a/eth/downloader/fetchers_concurrent_receipts.go +++ b/eth/downloader/fetchers_concurrent_receipts.go @@ -88,7 +88,7 @@ func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh ch // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the receipt data and delivering it to the downloader's queue. 
func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - receipts := *packet.Res.(*eth.ReceiptsResponse) + receipts := *packet.Res.(*eth.ReceiptsPacket) hashes := packet.Meta.([]common.Hash) // {receipt hashes} accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes) diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index 6857e6b551d1..fb05cca6dbdd 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -794,7 +794,7 @@ func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) { case res := <-resCh: // Headers successfully retrieved, update the metrics - headers := *res.Res.(*eth.BlockHeadersRequest) + headers := *res.Res.(*eth.BlockHeadersPacket) headerReqTimer.Update(time.Since(start)) s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers)) diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index 7c603f61e5d5..6a616881f50c 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -173,7 +173,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersRequest)(&headers), + Res: (*eth.BlockHeadersPacket)(&headers), Meta: hashes, Time: 1, Done: make(chan error), @@ -811,7 +811,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { // Create a peer set to feed headers through peerset := newPeerSet() for _, peer := range tt.peers { - peerset.Register(newPeerConnection(peer.id, eth.ETH67, peer, log.New("id", peer.id))) + peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id))) } // Create a peer dropper to track malicious peers dropped := make(map[string]int) @@ -913,7 +913,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { skeleton.Sync(tt.newHead, nil, true) } if tt.newPeer != nil { - if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH67, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { + if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { t.Errorf("test %d: failed to register new peer: %v", i, err) } } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 854ac860f574..2fc6d4fa6e7e 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -483,7 +483,7 @@ func (f *BlockFetcher) loop() { select { case res := <-resCh: res.Done <- nil - f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersRequest), time.Now().Add(res.Time)) + f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time)) case <-timeout.C: // The peer didn't respond in time. The request @@ -541,7 +541,7 @@ func (f *BlockFetcher) loop() { case res := <-resCh: res.Done <- nil // Ignoring withdrawals here, since the block fetcher is not used post-merge. 
- txs, uncles, _ := res.Res.(*eth.BlockBodiesResponse).Unpack() + txs, uncles, _ := res.Res.(*eth.BlockBodiesPacket).Unpack() f.FilterBodies(peer, txs, uncles, time.Now()) case <-timeout.C: diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index 9bb0ac6bdc04..ef47929a5481 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -213,7 +213,7 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersRequest)(&headers), + Res: (*eth.BlockHeadersPacket)(&headers), Time: drift, Done: make(chan error, 1), // Ignore the returned status } @@ -255,7 +255,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ } res := ð.Response{ Req: req, - Res: (*eth.BlockBodiesResponse)(&bodies), + Res: (*eth.BlockBodiesPacket)(&bodies), Time: drift, Done: make(chan error, 1), // Ignore the returned status } diff --git a/eth/handler.go b/eth/handler.go index 839a45ac33c0..973ec1808b01 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -434,7 +434,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { select { case res := <-resCh: - headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest)) + headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) if len(headers) == 0 { // Required blocks are allowed to be missing if the remote // node is not yet synced diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 890827837acc..111981e43128 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -67,7 +67,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { case *eth.NewBlockPacket: return h.handleBlockBroadcast(peer, packet.Block, packet.TD) - case *eth.NewPooledTransactionHashesPacket67: + case *eth.NewPooledTransactionHashesPacket66: return h.txFetcher.Notify(peer.ID(), nil, nil, *packet) case *eth.NewPooledTransactionHashesPacket68: @@ -81,7 +81,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { } return h.txFetcher.Enqueue(peer.ID(), *packet, false) - case *eth.PooledTransactionsResponse: + case *eth.PooledTransactionsPacket: return h.txFetcher.Enqueue(peer.ID(), *packet, true) default: diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index fe0da63aef1d..e2a66e606a84 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -58,7 +58,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { h.blockBroadcasts.Send(packet.Block) return nil - case *eth.NewPooledTransactionHashesPacket67: + case *eth.NewPooledTransactionHashesPacket66: h.txAnnounces.Send(([]common.Hash)(*packet)) return nil @@ -70,7 +70,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { h.txBroadcasts.Send(([]*types.Transaction)(*packet)) return nil - case *eth.PooledTransactionsResponse: + case *eth.PooledTransactionsPacket: h.txBroadcasts.Send(([]*types.Transaction)(*packet)) return nil @@ -81,6 +81,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { // Tests that peers are correctly accepted (or rejected) based on the advertised // fork IDs in the protocol handshake. 
+func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) } func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) } func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) } @@ -236,6 +237,7 @@ func testForkIDSplit(t *testing.T, protocol uint) { } // Tests that received transactions are added to the local pool. +func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) } func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) } func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) } @@ -294,6 +296,7 @@ func testRecvTransactions(t *testing.T, protocol uint) { } // This test checks that pending transactions are sent. +func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) } func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) } func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) } @@ -353,7 +356,7 @@ func testSendTransactions(t *testing.T, protocol uint) { seen := make(map[common.Hash]struct{}) for len(seen) < len(insert) { switch protocol { - case 67, 68: + case 66, 67, 68: select { case hashes := <-anns: for _, hash := range hashes { @@ -379,6 +382,7 @@ func testSendTransactions(t *testing.T, protocol uint) { // Tests that transactions get propagated to all attached peers, either via direct // broadcasts or via announcements/retrievals. +func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) } func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) } func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) } @@ -486,8 +490,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { defer sourcePipe.Close() defer sinkPipe.Close() - sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) - sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) + sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) + sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) defer sourcePeer.Close() defer sinkPeer.Close() @@ -539,6 +543,7 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { // Tests that a propagated malformed block (uncles or transactions don't match // with the hashes in the header) gets discarded and not broadcast forward. +func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) } func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) } func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) } diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 2a2fd6c23416..07255251f815 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -23,6 +23,7 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core" + "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/metrics" "github.com/scroll-tech/go-ethereum/p2p" @@ -44,6 +45,10 @@ const ( // nowadays, the practical limit will always be softResponseLimit. maxBodiesServe = 1024 + // maxNodeDataServe is the maximum number of state trie nodes to serve. 
This + // number is there to limit the number of disk lookups. + maxNodeDataServe = 1024 + // maxReceiptsServe is the maximum number of block receipts to serve. This // number is mostly there to limit the number of disk lookups. With block // containing 200+ transactions nowadays, the practical limit will always @@ -99,6 +104,10 @@ func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2 } version := version // Closure + // Path scheme does not support GetNodeData, don't advertise eth66 on it + if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme { + continue + } protocols = append(protocols, p2p.Protocol{ Name: ProtocolName, Version: version, @@ -166,19 +175,36 @@ type Decoder interface { Time() time.Time } +var eth66 = map[uint64]msgHandler{ + NewBlockHashesMsg: handleNewBlockhashes, + NewBlockMsg: handleNewBlock, + TransactionsMsg: handleTransactions, + NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, + GetBlockHeadersMsg: handleGetBlockHeaders66, + BlockHeadersMsg: handleBlockHeaders66, + GetBlockBodiesMsg: handleGetBlockBodies66, + BlockBodiesMsg: handleBlockBodies66, + GetNodeDataMsg: handleGetNodeData66, + NodeDataMsg: handleNodeData66, + GetReceiptsMsg: handleGetReceipts66, + ReceiptsMsg: handleReceipts66, + GetPooledTransactionsMsg: handleGetPooledTransactions66, + PooledTransactionsMsg: handlePooledTransactions66, +} + var eth67 = map[uint64]msgHandler{ NewBlockHashesMsg: handleNewBlockhashes, NewBlockMsg: handleNewBlock, TransactionsMsg: handleTransactions, - NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67, - GetBlockHeadersMsg: handleGetBlockHeaders, - BlockHeadersMsg: handleBlockHeaders, - GetBlockBodiesMsg: handleGetBlockBodies, - BlockBodiesMsg: handleBlockBodies, - GetReceiptsMsg: handleGetReceipts, - ReceiptsMsg: handleReceipts, - GetPooledTransactionsMsg: handleGetPooledTransactions, - PooledTransactionsMsg: handlePooledTransactions, + NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, + GetBlockHeadersMsg: handleGetBlockHeaders66, + BlockHeadersMsg: handleBlockHeaders66, + GetBlockBodiesMsg: handleGetBlockBodies66, + BlockBodiesMsg: handleBlockBodies66, + GetReceiptsMsg: handleGetReceipts66, + ReceiptsMsg: handleReceipts66, + GetPooledTransactionsMsg: handleGetPooledTransactions66, + PooledTransactionsMsg: handlePooledTransactions66, } var eth68 = map[uint64]msgHandler{ @@ -186,14 +212,14 @@ var eth68 = map[uint64]msgHandler{ NewBlockMsg: handleNewBlock, TransactionsMsg: handleTransactions, NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68, - GetBlockHeadersMsg: handleGetBlockHeaders, - BlockHeadersMsg: handleBlockHeaders, - GetBlockBodiesMsg: handleGetBlockBodies, - BlockBodiesMsg: handleBlockBodies, - GetReceiptsMsg: handleGetReceipts, - ReceiptsMsg: handleReceipts, - GetPooledTransactionsMsg: handleGetPooledTransactions, - PooledTransactionsMsg: handlePooledTransactions, + GetBlockHeadersMsg: handleGetBlockHeaders66, + BlockHeadersMsg: handleBlockHeaders66, + GetBlockBodiesMsg: handleGetBlockBodies66, + BlockBodiesMsg: handleBlockBodies66, + GetReceiptsMsg: handleGetReceipts66, + ReceiptsMsg: handleReceipts66, + GetPooledTransactionsMsg: handleGetPooledTransactions66, + PooledTransactionsMsg: handlePooledTransactions66, } // handleMessage is invoked whenever an inbound message is received from a remote @@ -209,10 +235,14 @@ func handleMessage(backend Backend, peer *Peer) error { } defer msg.Discard() - var handlers = eth67 + var handlers = eth66 + if 
peer.Version() == ETH67 { + handlers = eth67 + } if peer.Version() >= ETH68 { handlers = eth68 } + // Track the amount of time it takes to serve the request and run the handler if metrics.Enabled { h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index c592905820a4..9dcb7073915f 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -28,6 +28,7 @@ import ( "github.com/scroll-tech/go-ethereum/consensus/ethash" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/core/state" "github.com/scroll-tech/go-ethereum/core/txpool" "github.com/scroll-tech/go-ethereum/core/txpool/legacypool" "github.com/scroll-tech/go-ethereum/core/types" @@ -150,6 +151,7 @@ func (b *testBackend) Handle(*Peer, Packet) error { } // Tests that block headers can be retrieved from a remote chain based on user queries. +func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) } func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) } func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) } @@ -176,29 +178,29 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { // Create a batch of tests for various scenarios limit := uint64(maxHeadersServe) tests := []struct { - query *GetBlockHeadersRequest // The query to execute for header retrieval - expect []common.Hash // The hashes of the block whose headers are expected + query *GetBlockHeadersPacket // The query to execute for header retrieval + expect []common.Hash // The hashes of the block whose headers are expected }{ // A single random block should be retrievable by hash { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, // A single random block should be retrievable by number { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, // Multiple headers should be retrievable in both directions { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 1).Hash(), backend.chain.GetBlockByNumber(limit/2 + 2).Hash(), }, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 - 1).Hash(), @@ -207,14 +209,14 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Multiple headers with skip lists should be retrievable { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 4).Hash(), 
backend.chain.GetBlockByNumber(limit/2 + 8).Hash(), }, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 - 4).Hash(), @@ -223,31 +225,31 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // The chain endpoints should be retrievable { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, { // If the peer requests a bit into the future, we deliver what we have - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, // Ensure protocol limits are honored { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit), }, // Check that requesting more than available is handled gracefully { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(), }, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(0).Hash(), @@ -255,13 +257,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check that requesting more than available is handled gracefully, even if mid skip { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(), }, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(1).Hash(), @@ -269,7 +271,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check a 
corner case where requesting more can iterate past the endpoints { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(2).Hash(), backend.chain.GetBlockByNumber(1).Hash(), @@ -278,24 +280,24 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check a corner case where skipping overflow loops back into the chain start { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, []common.Hash{ backend.chain.GetBlockByNumber(3).Hash(), }, }, // Check a corner case where skipping overflow loops back to the same header { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, []common.Hash{ backend.chain.GetBlockByNumber(1).Hash(), }, }, // Check that non existing headers aren't returned { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, []common.Hash{}, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, []common.Hash{}, }, } @@ -307,13 +309,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { headers = append(headers, backend.chain.GetBlockByHash(hash).Header()) } // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ - RequestId: 123, - GetBlockHeadersRequest: tt.query, + p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ + RequestId: 123, + GetBlockHeadersPacket: tt.query, }) - if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{ - RequestId: 123, - BlockHeadersRequest: headers, + if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{ + RequestId: 123, + BlockHeadersPacket: headers, }); err != nil { t.Errorf("test %d: headers mismatch: %v", i, err) } @@ -322,11 +324,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ - RequestId: 456, - GetBlockHeadersRequest: tt.query, + p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ + RequestId: 456, + GetBlockHeadersPacket: tt.query, }) - expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers} + expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers} if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil { t.Errorf("test %d by hash: headers mismatch: %v", i, err) } @@ -336,6 +338,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { } // Tests that block contents can be retrieved from a remote chain based on their hashes. 
+func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) } func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) } func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) } @@ -417,20 +420,139 @@ func testGetBlockBodies(t *testing.T, protocol uint) { } // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{ - RequestId: 123, - GetBlockBodiesRequest: hashes, + p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{ + RequestId: 123, + GetBlockBodiesPacket: hashes, }) - if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{ - RequestId: 123, - BlockBodiesResponse: bodies, + if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{ + RequestId: 123, + BlockBodiesPacket: bodies, }); err != nil { t.Fatalf("test %d: bodies mismatch: %v", i, err) } } } +// Tests that the state trie nodes can be retrieved based on hashes. +func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66, false) } +func TestGetNodeData67(t *testing.T) { testGetNodeData(t, ETH67, true) } +func TestGetNodeData68(t *testing.T) { testGetNodeData(t, ETH68, true) } + +func testGetNodeData(t *testing.T, protocol uint, drop bool) { + t.Parallel() + + // Define three accounts to simulate transactions with + acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) + acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) + + signer := types.HomesteadSigner{} + // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test) + generator := func(i int, block *core.BlockGen) { + switch i { + case 0: + // In block 1, the test bank sends account #1 some ether. + tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) + block.AddTx(tx) + case 1: + // In block 2, the test bank sends some more ether to account #1. + // acc1Addr passes it on to account #2. + tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) + tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key) + block.AddTx(tx1) + block.AddTx(tx2) + case 2: + // Block 3 is empty but was mined by account #2. + block.SetCoinbase(acc2Addr) + block.SetExtra([]byte("yeehaw")) + case 3: + // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). + b2 := block.PrevBlock(1).Header() + b2.Extra = []byte("foo") + block.AddUncle(b2) + b3 := block.PrevBlock(2).Header() + b3.Extra = []byte("foo") + block.AddUncle(b3) + } + } + // Assemble the test environment + backend := newTestBackendWithGenerator(4, false, generator) + defer backend.close() + + peer, _ := newTestPeer("peer", protocol, backend) + defer peer.close() + + // Collect all state tree hashes. + var hashes []common.Hash + it := backend.db.NewIterator(nil, nil) + for it.Next() { + if key := it.Key(); len(key) == common.HashLength { + hashes = append(hashes, common.BytesToHash(key)) + } + } + it.Release() + + // Request all hashes. 
+ p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{ + RequestId: 123, + GetNodeDataPacket: hashes, + }) + msg, err := peer.app.ReadMsg() + if !drop { + if err != nil { + t.Fatalf("failed to read node data response: %v", err) + } + } else { + if err != nil { + return + } + t.Fatalf("succeeded to read node data response on non-supporting protocol: %v", msg) + } + if msg.Code != NodeDataMsg { + t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg) + } + var res NodeDataPacket66 + if err := msg.Decode(&res); err != nil { + t.Fatalf("failed to decode response node data: %v", err) + } + + // Verify that all hashes correspond to the requested data. + data := res.NodeDataPacket + for i, want := range hashes { + if hash := crypto.Keccak256Hash(data[i]); hash != want { + t.Errorf("data hash mismatch: have %x, want %x", hash, want) + } + } + + // Reconstruct state tree from the received data. + reconstructDB := rawdb.NewMemoryDatabase() + for i := 0; i < len(data); i++ { + rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i]) + } + + // Sanity check whether all state matches. + accounts := []common.Address{testAddr, acc1Addr, acc2Addr} + for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ { + root := backend.chain.GetBlockByNumber(i).Root() + reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil) + for j, acc := range accounts { + state, _ := backend.chain.StateAt(root) + bw := state.GetBalance(acc) + bh := reconstructed.GetBalance(acc) + + if (bw == nil) != (bh == nil) { + t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) + } + if bw != nil && bh != nil && bw.Cmp(bh) != 0 { + t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) + } + } + } +} + // Tests that the transaction receipts can be retrieved based on hashes. 
+func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) } func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) } func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) } @@ -491,13 +613,13 @@ func testGetBlockReceipts(t *testing.T, protocol uint) { receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash())) } // Send the hash request and verify the response - p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{ - RequestId: 123, - GetReceiptsRequest: hashes, + p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{ + RequestId: 123, + GetReceiptsPacket: hashes, }) - if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{ - RequestId: 123, - ReceiptsResponse: receipts, + if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{ + RequestId: 123, + ReceiptsPacket: receipts, }); err != nil { t.Errorf("receipts mismatch: %v", err) } diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index cb72f2172861..8ca721e3dfe4 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -22,25 +22,27 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core" + "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/rlp" "github.com/scroll-tech/go-ethereum/trie" ) -func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { +// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders +func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { // Decode the complex header query - var query GetBlockHeadersPacket + var query GetBlockHeadersPacket66 if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersRequest, peer) + response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer) return peer.ReplyBlockHeadersRLP(query.RequestId, response) } // ServiceGetBlockHeadersQuery assembles the response to a header query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue { +func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { if query.Skip == 0 { // The fast path: when the request is for a contiguous segment of headers. 
return serviceContiguousBlockHeaderQuery(chain, query) @@ -49,7 +51,7 @@ func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersR } } -func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue { +func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { hashMode := query.Origin.Hash != (common.Hash{}) first := true maxNonCanonical := uint64(100) @@ -138,7 +140,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc return headers } -func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue { +func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket) []rlp.RawValue { count := query.Amount if count > maxHeadersServe { count = maxHeadersServe @@ -201,19 +203,19 @@ func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHe } } -func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error { +func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { // Decode the block body retrieval message - var query GetBlockBodiesPacket + var query GetBlockBodiesPacket66 if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesRequest) + response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket) return peer.ReplyBlockBodiesRLP(query.RequestId, response) } // ServiceGetBlockBodiesQuery assembles the response to a body query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequest) []rlp.RawValue { +func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue { // Gather blocks until the fetch or network limits is reached var ( bytes int @@ -232,19 +234,60 @@ func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequ return bodies } -func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error { +func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { + // Decode the trie node data retrieval message + var query GetNodeDataPacket66 + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket) + return peer.ReplyNodeData(query.RequestId, response) +} + +// ServiceGetNodeDataQuery assembles the response to a node data query. It is +// exposed to allow external packages to test protocol behavior. +func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte { + // Request nodes by hash is not supported in path-based scheme. + if chain.TrieDB().Scheme() == rawdb.PathScheme { + return nil + } + // Gather state data until the fetch or network limits is reached + var ( + bytes int + nodes [][]byte + ) + for lookups, hash := range query { + if bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe || + lookups >= 2*maxNodeDataServe { + break + } + // Retrieve the requested state entry + entry, err := chain.TrieDB().Node(hash) + if len(entry) == 0 || err != nil { + // Read the contract code with prefix only to save unnecessary lookups. 
+ entry, err = chain.ContractCodeWithPrefix(hash) + } + if err == nil && len(entry) > 0 { + nodes = append(nodes, entry) + bytes += len(entry) + } + } + return nodes +} + +func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error { // Decode the block receipts retrieval message - var query GetReceiptsPacket + var query GetReceiptsPacket66 if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsRequest) + response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket) return peer.ReplyReceiptsRLP(query.RequestId, response) } // ServiceGetReceiptsQuery assembles the response to a receipt query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue { +func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue { // Gather state data until the fetch or network limits is reached var ( bytes int @@ -313,15 +356,15 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, ann) } -func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { +func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { // A batch of headers arrived to one of our previous requests - res := new(BlockHeadersPacket) + res := new(BlockHeadersPacket66) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { - hashes := make([]common.Hash, len(res.BlockHeadersRequest)) - for i, header := range res.BlockHeadersRequest { + hashes := make([]common.Hash, len(res.BlockHeadersPacket)) + for i, header := range res.BlockHeadersPacket { hashes[i] = header.Hash() } return hashes @@ -329,24 +372,24 @@ func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockHeadersMsg, - Res: &res.BlockHeadersRequest, + Res: &res.BlockHeadersPacket, }, metadata) } -func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error { +func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { // A batch of block bodies arrived to one of our previous requests - res := new(BlockBodiesPacket) + res := new(BlockBodiesPacket66) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { var ( - txsHashes = make([]common.Hash, len(res.BlockBodiesResponse)) - uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse)) - withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse)) + txsHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + withdrawalHashes = make([]common.Hash, len(res.BlockBodiesPacket)) ) hasher := trie.NewStackTrie(nil) - for i, body := range res.BlockBodiesResponse { + for i, body := range res.BlockBodiesPacket { txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) uncleHashes[i] = types.CalcUncleHash(body.Uncles) if body.Withdrawals != nil { @@ -358,20 +401,33 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockBodiesMsg, - Res: &res.BlockBodiesResponse, + Res: &res.BlockBodiesPacket, }, metadata) } -func 
handleReceipts(backend Backend, msg Decoder, peer *Peer) error { +func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error { + // A batch of node state data arrived to one of our previous requests + res := new(NodeDataPacket66) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return peer.dispatchResponse(&Response{ + id: res.RequestId, + code: NodeDataMsg, + Res: &res.NodeDataPacket, + }, nil) // No post-processing, we're not using this packet anymore +} + +func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { // A batch of receipts arrived to one of our previous requests - res := new(ReceiptsPacket) + res := new(ReceiptsPacket66) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { hasher := trie.NewStackTrie(nil) - hashes := make([]common.Hash, len(res.ReceiptsResponse)) - for i, receipt := range res.ReceiptsResponse { + hashes := make([]common.Hash, len(res.ReceiptsPacket)) + for i, receipt := range res.ReceiptsPacket { hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) } return hashes @@ -379,17 +435,17 @@ func handleReceipts(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: ReceiptsMsg, - Res: &res.ReceiptsResponse, + Res: &res.ReceiptsPacket, }, metadata) } -func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error { +func handleNewPooledTransactionHashes66(backend Backend, msg Decoder, peer *Peer) error { // New transaction announcement arrived, make sure we have // a valid and fresh chain to handle them if !backend.AcceptTxs() { return nil } - ann := new(NewPooledTransactionHashesPacket67) + ann := new(NewPooledTransactionHashesPacket66) if err := msg.Decode(ann); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } @@ -420,17 +476,17 @@ func handleNewPooledTransactionHashes68(backend Backend, msg Decoder, peer *Peer return backend.Handle(peer, ann) } -func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error { +func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { // Decode the pooled transactions retrieval message - var query GetPooledTransactionsPacket + var query GetPooledTransactionsPacket66 if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest) + hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer) return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs) } -func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest) ([]common.Hash, []rlp.RawValue) { +func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) { // Gather transactions until the fetch or network limits is reached var ( bytes int @@ -478,17 +534,17 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, &txs) } -func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error { +func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { // Transactions arrived, make sure we have a valid and fresh chain to handle them if !backend.AcceptTxs() { return nil } // Transactions can be processed, parse 
all of them and deliver to the pool - var txs PooledTransactionsPacket + var txs PooledTransactionsPacket66 if err := msg.Decode(&txs); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - for i, tx := range txs.PooledTransactionsResponse { + for i, tx := range txs.PooledTransactionsPacket { // Validate and mark the remote transaction if tx == nil { return fmt.Errorf("%w: transaction %d is nil", errDecode, i) @@ -497,5 +553,5 @@ func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error { } requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId) - return backend.Handle(peer, &txs.PooledTransactionsResponse) + return backend.Handle(peer, &txs.PooledTransactionsPacket) } diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go index f033139ebe33..00b3850b3556 100644 --- a/eth/protocols/eth/handshake_test.go +++ b/eth/protocols/eth/handshake_test.go @@ -27,8 +27,7 @@ import ( ) // Tests that handshake failures are detected and reported correctly. -func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) } -func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) } +func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) } func testHandshake(t *testing.T, protocol uint) { t.Parallel() diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index b839259b4d25..5f7ecad3ff4e 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -219,7 +219,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) { func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error { // Mark all the transactions as known, but ensure we don't overflow our limits p.knownTxs.Add(hashes...) - return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes)) + return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket66(hashes)) } // sendPooledTransactionHashes68 sends transaction hashes (tagged with their type @@ -248,15 +248,15 @@ func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) { } } -// ReplyPooledTransactionsRLP is the response to RequestTxs. +// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP. func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error { // Mark all the transactions as known, but ensure we don't overflow our limits p.knownTxs.Add(hashes...) - // Not packed into PooledTransactionsResponse to avoid RLP decoding - return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket{ - RequestId: id, - PooledTransactionsRLPResponse: txs, + // Not packed into PooledTransactionsPacket to avoid RLP decoding + return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{ + RequestId: id, + PooledTransactionsRLPPacket: txs, }) } @@ -309,28 +309,36 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) { } } -// ReplyBlockHeadersRLP is the response to GetBlockHeaders. +// ReplyBlockHeadersRLP is the eth/66 response to GetBlockHeaders. func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error { - return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket{ - RequestId: id, - BlockHeadersRLPResponse: headers, + return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{ + RequestId: id, + BlockHeadersRLPPacket: headers, }) } -// ReplyBlockBodiesRLP is the response to GetBlockBodies. +// ReplyBlockBodiesRLP is the eth/66 response to GetBlockBodies. 
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error { - // Not packed into BlockBodiesResponse to avoid RLP decoding - return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket{ - RequestId: id, - BlockBodiesRLPResponse: bodies, + // Not packed into BlockBodiesPacket to avoid RLP decoding + return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{ + RequestId: id, + BlockBodiesRLPPacket: bodies, }) } -// ReplyReceiptsRLP is the response to GetReceipts. +// ReplyNodeData is the eth/66 response to GetNodeData. +func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error { + return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{ + RequestId: id, + NodeDataPacket: data, + }) +} + +// ReplyReceiptsRLP is the eth/66 response to GetReceipts. func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error { - return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket{ - RequestId: id, - ReceiptsRLPResponse: receipts, + return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{ + RequestId: id, + ReceiptsRLPPacket: receipts, }) } @@ -345,9 +353,9 @@ func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket{ + data: &GetBlockHeadersPacket66{ RequestId: id, - GetBlockHeadersRequest: &GetBlockHeadersRequest{ + GetBlockHeadersPacket: &GetBlockHeadersPacket{ Origin: HashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), @@ -372,9 +380,9 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket{ + data: &GetBlockHeadersPacket66{ RequestId: id, - GetBlockHeadersRequest: &GetBlockHeadersRequest{ + GetBlockHeadersPacket: &GetBlockHeadersPacket{ Origin: HashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), @@ -399,9 +407,9 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket{ + data: &GetBlockHeadersPacket66{ RequestId: id, - GetBlockHeadersRequest: &GetBlockHeadersRequest{ + GetBlockHeadersPacket: &GetBlockHeadersPacket{ Origin: HashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), @@ -426,9 +434,31 @@ func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Reques sink: sink, code: GetBlockBodiesMsg, want: BlockBodiesMsg, - data: &GetBlockBodiesPacket{ - RequestId: id, - GetBlockBodiesRequest: hashes, + data: &GetBlockBodiesPacket66{ + RequestId: id, + GetBlockBodiesPacket: hashes, + }, + } + if err := p.dispatchRequest(req); err != nil { + return nil, err + } + return req, nil +} + +// RequestNodeData fetches a batch of arbitrary data from a node's known state +// data, corresponding to the specified hashes. 
+func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) { + p.Log().Debug("Fetching batch of state data", "count", len(hashes)) + id := rand.Uint64() + + req := &Request{ + id: id, + sink: sink, + code: GetNodeDataMsg, + want: NodeDataMsg, + data: &GetNodeDataPacket66{ + RequestId: id, + GetNodeDataPacket: hashes, }, } if err := p.dispatchRequest(req); err != nil { @@ -447,9 +477,9 @@ func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Requ sink: sink, code: GetReceiptsMsg, want: ReceiptsMsg, - data: &GetReceiptsPacket{ - RequestId: id, - GetReceiptsRequest: hashes, + data: &GetReceiptsPacket66{ + RequestId: id, + GetReceiptsPacket: hashes, }, } if err := p.dispatchRequest(req); err != nil { @@ -464,9 +494,9 @@ func (p *Peer) RequestTxs(hashes []common.Hash) error { id := rand.Uint64() requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id) - return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{ - RequestId: id, - GetPooledTransactionsRequest: hashes, + return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{ + RequestId: id, + GetPooledTransactionsPacket: hashes, }) } diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index a6b7a76e1437..1fb9dadff5e3 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -30,6 +30,7 @@ import ( // Constants to match up protocol versions and messages const ( + ETH66 = 66 ETH67 = 67 ETH68 = 68 ) @@ -40,11 +41,11 @@ const ProtocolName = "eth" // ProtocolVersions are the supported versions of the `eth` protocol (first // is primary). -var ProtocolVersions = []uint{ETH68, ETH67} +var ProtocolVersions = []uint{ETH68, ETH67, ETH66} // protocolLengths are the number of implemented message corresponding to // different protocol versions. -var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17} +var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17, ETH66: 17} // maxMessageSize is the maximum cap on the size of a protocol message. const maxMessageSize = 10 * 1024 * 1024 @@ -61,6 +62,8 @@ const ( NewPooledTransactionHashesMsg = 0x08 GetPooledTransactionsMsg = 0x09 PooledTransactionsMsg = 0x0a + GetNodeDataMsg = 0x0d + NodeDataMsg = 0x0e GetReceiptsMsg = 0x0f ReceiptsMsg = 0x10 ) @@ -82,7 +85,7 @@ type Packet interface { Kind() byte // Kind returns the message type. } -// StatusPacket is the network packet for the status message. +// StatusPacket is the network packet for the status message for eth/64 and later. type StatusPacket struct { ProtocolVersion uint32 NetworkID uint64 @@ -115,18 +118,18 @@ func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) { // TransactionsPacket is the network packet for broadcasting new transactions. type TransactionsPacket []*types.Transaction -// GetBlockHeadersRequest represents a block header query. -type GetBlockHeadersRequest struct { +// GetBlockHeadersPacket represents a block header query. +type GetBlockHeadersPacket struct { Origin HashOrNumber // Block from which to retrieve headers Amount uint64 // Maximum number of headers to retrieve Skip uint64 // Blocks to skip between consecutive headers Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) } -// GetBlockHeadersPacket represents a block header query with request ID wrapping. 
-type GetBlockHeadersPacket struct { +// GetBlockHeadersPacket66 represents a block header query over eth/66 +type GetBlockHeadersPacket66 struct { RequestId uint64 - *GetBlockHeadersRequest + *GetBlockHeadersPacket } // HashOrNumber is a combined field for specifying an origin block. @@ -165,23 +168,23 @@ func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error { } } -// BlockHeadersRequest represents a block header response. -type BlockHeadersRequest []*types.Header +// BlockHeadersPacket represents a block header response. +type BlockHeadersPacket []*types.Header -// BlockHeadersPacket represents a block header response over with request ID wrapping. -type BlockHeadersPacket struct { +// BlockHeadersPacket66 represents a block header response over eth/66. +type BlockHeadersPacket66 struct { RequestId uint64 - BlockHeadersRequest + BlockHeadersPacket } -// BlockHeadersRLPResponse represents a block header response, to use when we already +// BlockHeadersRLPPacket represents a block header response, to use when we already // have the headers rlp encoded. -type BlockHeadersRLPResponse []rlp.RawValue +type BlockHeadersRLPPacket []rlp.RawValue -// BlockHeadersRLPPacket represents a block header response with request ID wrapping. -type BlockHeadersRLPPacket struct { +// BlockHeadersRLPPacket66 represents a block header response over eth/66. +type BlockHeadersRLPPacket66 struct { RequestId uint64 - BlockHeadersRLPResponse + BlockHeadersRLPPacket } // NewBlockPacket is the network packet for the block propagation message. @@ -203,34 +206,33 @@ func (request *NewBlockPacket) sanityCheck() error { return nil } -// GetBlockBodiesRequest represents a block body query. -type GetBlockBodiesRequest []common.Hash +// GetBlockBodiesPacket represents a block body query. +type GetBlockBodiesPacket []common.Hash -// GetBlockBodiesPacket represents a block body query with request ID wrapping. -type GetBlockBodiesPacket struct { +// GetBlockBodiesPacket66 represents a block body query over eth/66. +type GetBlockBodiesPacket66 struct { RequestId uint64 - GetBlockBodiesRequest + GetBlockBodiesPacket } -// BlockBodiesResponse is the network packet for block content distribution. -type BlockBodiesResponse []*BlockBody +// BlockBodiesPacket is the network packet for block content distribution. +type BlockBodiesPacket []*BlockBody -// BlockBodiesPacket is the network packet for block content distribution with -// request ID wrapping. -type BlockBodiesPacket struct { +// BlockBodiesPacket66 is the network packet for block content distribution over eth/66. +type BlockBodiesPacket66 struct { RequestId uint64 - BlockBodiesResponse + BlockBodiesPacket } -// BlockBodiesRLPResponse is used for replying to block body requests, in cases +// BlockBodiesRLPPacket is used for replying to block body requests, in cases // where we already have them RLP-encoded, and thus can avoid the decode-encode // roundtrip. -type BlockBodiesRLPResponse []rlp.RawValue +type BlockBodiesRLPPacket []rlp.RawValue -// BlockBodiesRLPPacket is the BlockBodiesRLPResponse with request ID wrapping. -type BlockBodiesRLPPacket struct { +// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66 +type BlockBodiesRLPPacket66 struct { RequestId uint64 - BlockBodiesRLPResponse + BlockBodiesRLPPacket } // BlockBody represents the data content of a single block. 
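The *Packet66 wrappers restored above only add a RequestId on top of the otherwise unchanged payload types, so a reply can be matched back to the request that triggered it. A minimal round-trip sketch of that wrapping, assuming the scroll-tech/go-ethereum module is importable; the values mirror the fixture used in TestEth66Messages below:

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/eth/protocols/eth"
	"github.com/scroll-tech/go-ethereum/rlp"
)

func main() {
	// eth/66 wraps the plain header query in a GetBlockHeadersPacket66 so the
	// eventual BlockHeadersPacket66 reply can be paired with it by RequestId.
	req := eth.GetBlockHeadersPacket66{
		RequestId: 1111,
		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
			Origin: eth.HashOrNumber{Number: 9999},
			Amount: 5,
			Skip:   5,
		},
	}
	enc, err := rlp.EncodeToBytes(&req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", enc) // same bytes as the "ca820457c682270f050580" test fixture

	var dec eth.GetBlockHeadersPacket66
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		panic(err)
	}
	fmt.Println(dec.RequestId == req.RequestId) // true: request and reply pair up by id
}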
@@ -242,7 +244,7 @@ type BlockBody struct { // Unpack retrieves the transactions and uncles from the range packet and returns // them in a split flat format that's more consistent with the internal data structures. -func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { +func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { // TODO(matt): add support for withdrawals to fetchers var ( txset = make([][]*types.Transaction, len(*p)) @@ -255,36 +257,53 @@ func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Heade return txset, uncleset, withdrawalset } -// GetReceiptsRequest represents a block receipts query. -type GetReceiptsRequest []common.Hash +// GetNodeDataPacket represents a trie node data query. +type GetNodeDataPacket []common.Hash + +// GetNodeDataPacket66 represents a trie node data query over eth/66. +type GetNodeDataPacket66 struct { + RequestId uint64 + GetNodeDataPacket +} + +// NodeDataPacket is the network packet for trie node data distribution. +type NodeDataPacket [][]byte -// GetReceiptsPacket represents a block receipts query with request ID wrapping. -type GetReceiptsPacket struct { +// NodeDataPacket66 is the network packet for trie node data distribution over eth/66. +type NodeDataPacket66 struct { RequestId uint64 - GetReceiptsRequest + NodeDataPacket } -// ReceiptsResponse is the network packet for block receipts distribution. -type ReceiptsResponse [][]*types.Receipt +// GetReceiptsPacket represents a block receipts query. +type GetReceiptsPacket []common.Hash -// ReceiptsPacket is the network packet for block receipts distribution with -// request ID wrapping. -type ReceiptsPacket struct { +// GetReceiptsPacket66 represents a block receipts query over eth/66. +type GetReceiptsPacket66 struct { RequestId uint64 - ReceiptsResponse + GetReceiptsPacket } -// ReceiptsRLPResponse is used for receipts, when we already have it encoded -type ReceiptsRLPResponse []rlp.RawValue +// ReceiptsPacket is the network packet for block receipts distribution. +type ReceiptsPacket [][]*types.Receipt -// ReceiptsRLPPacket is ReceiptsRLPResponse with request ID wrapping. -type ReceiptsRLPPacket struct { +// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66. +type ReceiptsPacket66 struct { RequestId uint64 - ReceiptsRLPResponse + ReceiptsPacket } -// NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67. -type NewPooledTransactionHashesPacket67 []common.Hash +// ReceiptsRLPPacket is used for receipts, when we already have it encoded +type ReceiptsRLPPacket []rlp.RawValue + +// ReceiptsRLPPacket66 is the eth-66 version of ReceiptsRLPPacket +type ReceiptsRLPPacket66 struct { + RequestId uint64 + ReceiptsRLPPacket +} + +// NewPooledTransactionHashesPacket66 represents a transaction announcement packet on eth/66 and eth/67. +type NewPooledTransactionHashesPacket66 []common.Hash // NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer. type NewPooledTransactionHashesPacket68 struct { @@ -293,33 +312,31 @@ type NewPooledTransactionHashesPacket68 struct { Hashes []common.Hash } -// GetPooledTransactionsRequest represents a transaction query. -type GetPooledTransactionsRequest []common.Hash +// GetPooledTransactionsPacket represents a transaction query. 
+type GetPooledTransactionsPacket []common.Hash -// GetPooledTransactionsPacket represents a transaction query with request ID wrapping. -type GetPooledTransactionsPacket struct { +type GetPooledTransactionsPacket66 struct { RequestId uint64 - GetPooledTransactionsRequest + GetPooledTransactionsPacket } -// PooledTransactionsResponse is the network packet for transaction distribution. -type PooledTransactionsResponse []*types.Transaction +// PooledTransactionsPacket is the network packet for transaction distribution. +type PooledTransactionsPacket []*types.Transaction -// PooledTransactionsPacket is the network packet for transaction distribution -// with request ID wrapping. -type PooledTransactionsPacket struct { +// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66. +type PooledTransactionsPacket66 struct { RequestId uint64 - PooledTransactionsResponse + PooledTransactionsPacket } -// PooledTransactionsRLPResponse is the network packet for transaction distribution, used +// PooledTransactionsRLPPacket is the network packet for transaction distribution, used // in the cases we already have them in rlp-encoded form -type PooledTransactionsRLPResponse []rlp.RawValue +type PooledTransactionsRLPPacket []rlp.RawValue -// PooledTransactionsRLPPacket is PooledTransactionsRLPResponse with request ID wrapping. -type PooledTransactionsRLPPacket struct { +// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket +type PooledTransactionsRLPPacket66 struct { RequestId uint64 - PooledTransactionsRLPResponse + PooledTransactionsRLPPacket } func (*StatusPacket) Name() string { return "Status" } @@ -331,34 +348,40 @@ func (*NewBlockHashesPacket) Kind() byte { return NewBlockHashesMsg } func (*TransactionsPacket) Name() string { return "Transactions" } func (*TransactionsPacket) Kind() byte { return TransactionsMsg } -func (*GetBlockHeadersRequest) Name() string { return "GetBlockHeaders" } -func (*GetBlockHeadersRequest) Kind() byte { return GetBlockHeadersMsg } +func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" } +func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg } -func (*BlockHeadersRequest) Name() string { return "BlockHeaders" } -func (*BlockHeadersRequest) Kind() byte { return BlockHeadersMsg } +func (*BlockHeadersPacket) Name() string { return "BlockHeaders" } +func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg } -func (*GetBlockBodiesRequest) Name() string { return "GetBlockBodies" } -func (*GetBlockBodiesRequest) Kind() byte { return GetBlockBodiesMsg } +func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" } +func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg } -func (*BlockBodiesResponse) Name() string { return "BlockBodies" } -func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg } +func (*BlockBodiesPacket) Name() string { return "BlockBodies" } +func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg } func (*NewBlockPacket) Name() string { return "NewBlock" } func (*NewBlockPacket) Kind() byte { return NewBlockMsg } -func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" } -func (*NewPooledTransactionHashesPacket67) Kind() byte { return NewPooledTransactionHashesMsg } +func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" } +func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg } func 
(*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" } func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg } -func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" } -func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg } +func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" } +func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg } + +func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" } +func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg } + +func (*GetNodeDataPacket) Name() string { return "GetNodeData" } +func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg } -func (*PooledTransactionsResponse) Name() string { return "PooledTransactions" } -func (*PooledTransactionsResponse) Kind() byte { return PooledTransactionsMsg } +func (*NodeDataPacket) Name() string { return "NodeData" } +func (*NodeDataPacket) Kind() byte { return NodeDataMsg } -func (*GetReceiptsRequest) Name() string { return "GetReceipts" } -func (*GetReceiptsRequest) Kind() byte { return GetReceiptsMsg } +func (*GetReceiptsPacket) Name() string { return "GetReceipts" } +func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg } -func (*ReceiptsResponse) Name() string { return "Receipts" } -func (*ReceiptsResponse) Kind() byte { return ReceiptsMsg } +func (*ReceiptsPacket) Name() string { return "Receipts" } +func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index 5090d73e0361..0ec3ef6a5b0a 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -35,19 +35,19 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } // Assemble some table driven tests tests := []struct { - packet *GetBlockHeadersRequest + packet *GetBlockHeadersPacket fail bool }{ // Providing the origin as either a hash or a number should both work - {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}}}, - {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}}}, + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}}, + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}}, // Providing arbitrary query field should also work - {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, - {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, // Providing both the origin hash and origin number must fail - {fail: true, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash, Number: 314}}}, + {fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}}, } // Iterate over each of the tests and try to encode and then decode for i, tt := range tests { @@ -58,7 +58,7 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { t.Fatalf("test %d: encode should have failed", i) } if !tt.fail { - packet := new(GetBlockHeadersRequest) + packet := new(GetBlockHeadersPacket) if err 
:= rlp.DecodeBytes(bytes, packet); err != nil { t.Fatalf("test %d: failed to decode packet: %v", i, err) } @@ -70,40 +70,46 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } } -// TestEmptyMessages tests encoding of empty messages. -func TestEmptyMessages(t *testing.T) { +// TestEth66EmptyMessages tests encoding of empty eth66 messages +func TestEth66EmptyMessages(t *testing.T) { // All empty messages encodes to the same format want := common.FromHex("c4820457c0") for i, msg := range []interface{}{ // Headers - GetBlockHeadersPacket{1111, nil}, - BlockHeadersPacket{1111, nil}, + GetBlockHeadersPacket66{1111, nil}, + BlockHeadersPacket66{1111, nil}, // Bodies - GetBlockBodiesPacket{1111, nil}, - BlockBodiesPacket{1111, nil}, - BlockBodiesRLPPacket{1111, nil}, + GetBlockBodiesPacket66{1111, nil}, + BlockBodiesPacket66{1111, nil}, + BlockBodiesRLPPacket66{1111, nil}, + // Node data + GetNodeDataPacket66{1111, nil}, + NodeDataPacket66{1111, nil}, // Receipts - GetReceiptsPacket{1111, nil}, - ReceiptsPacket{1111, nil}, + GetReceiptsPacket66{1111, nil}, + ReceiptsPacket66{1111, nil}, // Transactions - GetPooledTransactionsPacket{1111, nil}, - PooledTransactionsPacket{1111, nil}, - PooledTransactionsRLPPacket{1111, nil}, + GetPooledTransactionsPacket66{1111, nil}, + PooledTransactionsPacket66{1111, nil}, + PooledTransactionsRLPPacket66{1111, nil}, // Headers - BlockHeadersPacket{1111, BlockHeadersRequest([]*types.Header{})}, + BlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})}, // Bodies - GetBlockBodiesPacket{1111, GetBlockBodiesRequest([]common.Hash{})}, - BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{})}, - BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{})}, + GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})}, + BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})}, + BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})}, + // Node data + GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})}, + NodeDataPacket66{1111, NodeDataPacket([][]byte{})}, // Receipts - GetReceiptsPacket{1111, GetReceiptsRequest([]common.Hash{})}, - ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{})}, + GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})}, + ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})}, // Transactions - GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest([]common.Hash{})}, - PooledTransactionsPacket{1111, PooledTransactionsResponse([]*types.Transaction{})}, - PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse([]rlp.RawValue{})}, + GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})}, + PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})}, + PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})}, } { if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) { t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want) @@ -111,8 +117,8 @@ func TestEmptyMessages(t *testing.T) { } } -// TestMessages tests the encoding of all messages. 
-func TestMessages(t *testing.T) { +// TestEth66Messages tests the encoding of all redefined eth66 messages +func TestEth66Messages(t *testing.T) { // Some basic structs used during testing var ( header *types.Header @@ -163,6 +169,10 @@ func TestMessages(t *testing.T) { common.HexToHash("deadc0de"), common.HexToHash("feedbeef"), } + byteSlices := [][]byte{ + common.FromHex("deadc0de"), + common.FromHex("feedbeef"), + } // init the receipts { receipts = []*types.Receipt{ @@ -193,51 +203,59 @@ func TestMessages(t *testing.T) { want []byte }{ { - GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{hashes[0], 0}, 5, 5, false}}, + GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}}, common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"), }, { - GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, + GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, common.FromHex("ca820457c682270f050580"), }, { - BlockHeadersPacket{1111, BlockHeadersRequest{header}}, + BlockHeadersPacket66{1111, BlockHeadersPacket{header}}, common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { - GetBlockBodiesPacket{1111, GetBlockBodiesRequest(hashes)}, + GetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{blockBody})}, + BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{blockBody})}, 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { // Identical to non-rlp-shortcut version - BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{blockBodyRlp})}, + BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})}, common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { - GetReceiptsPacket{1111, GetReceiptsRequest(hashes)}, + GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)}, + common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), + }, + { + NodeDataPacket66{1111, NodeDataPacket(byteSlices)}, + 
common.FromHex("ce820457ca84deadc0de84feedbeef"), + }, + { + GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{receipts})}, + ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})}, common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), }, { - ReceiptsRLPPacket{1111, ReceiptsRLPResponse([]rlp.RawValue{receiptsRlp})}, + ReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})}, common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), }, { - GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest(hashes)}, + GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - PooledTransactionsPacket{1111, PooledTransactionsResponse(txs)}, + PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)}, common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, { - PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse(txRlps)}, + PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)}, 
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, } { diff --git a/eth/sync_test.go b/eth/sync_test.go index bbf5d93fcf01..b99b2eaea27e 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -28,8 +28,8 @@ import ( ) // Tests that snap sync is disabled after a successful sync cycle. +func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) } func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) } -func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) } // Tests that snap sync gets disabled as soon as a real block is successfully // imported into the blockchain. From 9e69bf86ea7631e7f6109c6a333fd0a90b9c2223 Mon Sep 17 00:00:00 2001 From: HAOYUatHZ Date: Tue, 15 Oct 2024 18:14:44 +1100 Subject: [PATCH 2/3] hack shanghaiBlock --- params/config.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/params/config.go b/params/config.go index 6a81fe990a4d..61e267595e4c 100644 --- a/params/config.go +++ b/params/config.go @@ -503,12 +503,12 @@ type ChainConfig struct { // Fork scheduling was switched from blocks to timestamps here - ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) - CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) - PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) - VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) - DarwinTime *uint64 `json:"darwinTime,omitempty"` // Darwin switch time (nil = no fork, 0 = already on darwin) - DarwinV2Time *uint64 `json:"darwinv2Time,omitempty"` // DarwinV2 switch time (nil = no fork, 0 = already on darwinv2) + ShanghaiTime *uint64 `json:"shanghaiBlock,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) + CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) + PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) + VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) + DarwinTime *uint64 `json:"darwinTime,omitempty"` // Darwin switch time (nil = no fork, 0 = already on darwin) + DarwinV2Time *uint64 `json:"darwinv2Time,omitempty"` // DarwinV2 switch time (nil = no fork, 0 = already on darwinv2) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. 
From b92db10dd7fa8cf04c036ca01a9c44e3b9d0109c Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Fri, 18 Oct 2024 19:17:03 +0800 Subject: [PATCH 3/3] disable time-based forks in ForkID --- core/forkid/forkid.go | 26 +++++--------------------- core/forkid/forkid_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index 901fd8dc8c68..91b568774c68 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -77,7 +77,7 @@ func NewID(config *params.ChainConfig, genesis *types.Block, head, time uint64) hash := crc32.ChecksumIEEE(genesis.Hash().Bytes()) // Calculate the current fork checksum and the next fork block - forksByBlock, forksByTime := gatherForks(config, genesis.Time()) + forksByBlock, _ := gatherForks(config, genesis.Time()) for _, fork := range forksByBlock { if fork <= head { // Fork already passed, checksum the previous hash and the fork number @@ -86,14 +86,6 @@ func NewID(config *params.ChainConfig, genesis *types.Block, head, time uint64) } return ID{Hash: checksumToBytes(hash), Next: fork} } - for _, fork := range forksByTime { - if fork <= time { - // Fork already passed, checksum the previous hash and fork timestamp - hash = checksumUpdate(hash, fork) - continue - } - return ID{Hash: checksumToBytes(hash), Next: fork} - } return ID{Hash: checksumToBytes(hash), Next: 0} } @@ -134,9 +126,8 @@ func NewStaticFilter(config *params.ChainConfig, genesis *types.Block) Filter { func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() (uint64, uint64)) Filter { // Calculate the all the valid fork hash and fork next combos var ( - forksByBlock, forksByTime = gatherForks(config, genesis.Time()) - forks = append(append([]uint64{}, forksByBlock...), forksByTime...) - sums = make([][4]byte, len(forks)+1) // 0th is the genesis + forks, _ = gatherForks(config, genesis.Time()) + sums = make([][4]byte, len(forks)+1) // 0th is the genesis ) hash := crc32.ChecksumIEEE(genesis.Hash().Bytes()) sums[0] = checksumToBytes(hash) @@ -147,10 +138,6 @@ func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() ( // Add two sentries to simplify the fork checks and don't require special // casing the last one. forks = append(forks, math.MaxUint64) // Last fork will never be passed - if len(forksByTime) == 0 { - // In purely block based forks, avoid the sentry spilling into timestapt territory - forksByBlock = append(forksByBlock, math.MaxUint64) // Last fork will never be passed - } // Create a validator that will filter out incompatible chains return func(id ID) error { // Run the fork checksum validation ruleset: @@ -172,13 +159,10 @@ func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() ( // the remote, but at this current point in time we don't have enough // information. // 4. Reject in all other cases. - block, time := headfn() + block, _ := headfn() for i, fork := range forks { // Pick the head comparison based on fork progression head := block - if i >= len(forksByBlock) { - head = time - } // If our head is beyond this fork, continue to the next (we have a dummy // fork of maxuint64 as the last item to always fail this check eventually). if head >= fork { @@ -189,7 +173,7 @@ func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() ( if sums[i] == id.Hash { // Fork checksum matched, check if a remote future fork block already passed // locally without the local node being aware of it (rule #1a). 
- if id.Next > 0 && (head >= id.Next || (id.Next > timestampThreshold && time >= id.Next)) { + if id.Next > 0 && head >= id.Next { return ErrLocalIncompatibleOrStale } // Haven't passed locally a remote-only fork, accept the connection (rule #1b). diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 8b98cfa75fde..54d7bff8ba5d 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -448,3 +448,28 @@ func TestTimeBasedForkInGenesis(t *testing.T) { } } } + +func TestScroll(t *testing.T) { + tests := []struct { + config *params.ChainConfig + genesis *types.Block + head uint64 + time uint64 + want ID + }{ + // Scroll test cases + { + params.ScrollMainnetChainConfig, + core.DefaultScrollMainnetGenesisBlock().ToBlock(), + 10281275, + 1729250728, // omit timestamp-based forks + ID{Hash: checksumToBytes(0x18d3c8d9), Next: 0}, // 0x18d3c8d9 is fetched from develop branch + }, + } + + for i, tt := range tests { + if have := NewID(tt.config, tt.genesis, tt.head, tt.time); have != tt.want { + t.Errorf("test %d: fork ID mismatch: have %x, want %x", i, have, tt.want) + } + } +}
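
With the timestamp schedule dropped from NewID, the time argument callers pass becomes inert: only block-number forks feed the CRC32 checksum. A short sketch of that property, reusing the helpers exercised by TestScroll above and assuming the fork's usual module path github.com/scroll-tech/go-ethereum (illustrative only, not part of the patch):

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/core"
	"github.com/scroll-tech/go-ethereum/core/forkid"
	"github.com/scroll-tech/go-ethereum/params"
)

func main() {
	genesis := core.DefaultScrollMainnetGenesisBlock().ToBlock()

	// Same head block, wildly different head timestamps: after this patch the
	// resulting fork IDs are identical, because time-based forks are ignored.
	early := forkid.NewID(params.ScrollMainnetChainConfig, genesis, 10281275, 0)
	late := forkid.NewID(params.ScrollMainnetChainConfig, genesis, 10281275, 1729250728)

	fmt.Println(early == late)          // true once this patch is applied
	fmt.Printf("%x next=%d\n", early.Hash, early.Next)
}

The new case can be run in isolation with: go test ./core/forkid -run TestScroll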