Revert 'all: remove forkchoicer and reorgNeeded (#29179)'
lucca30 committed Feb 3, 2025
1 parent fedab1c commit a344268
Showing 7 changed files with 185 additions and 49 deletions.
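
This revert threads a *core.ForkChoice back through header insertion: callers build a forker with core.NewForkChoice and hand it to HeaderChain.InsertHeaderChain, which consults ForkChoice.ReorgNeeded before moving the canonical head. A minimal sketch of the restored call pattern, based on the cmd/utils/cmd.go and core/headerchain.go hunks below (the importHeaders helper and its error handling are illustrative, not part of the commit):

    package example

    import (
        "fmt"
        "time"

        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/types"
    )

    // importHeaders is a hypothetical caller showing the API shape this commit restores.
    func importHeaders(chain *core.BlockChain, headers []*types.Header) error {
        // Build the fork choicer once, as ImportHistory does in cmd/utils/cmd.go.
        forker := core.NewForkChoice(chain, nil, nil)

        // InsertHeaderChain now takes the forker and reports whether the headers
        // ended up on the canonical chain or on a side chain.
        status, err := chain.HeaderChain().InsertHeaderChain(headers, time.Now(), forker)
        if err != nil {
            return err
        }
        if status != core.CanonStatTy {
            return fmt.Errorf("headers inserted but not canonical: %v", status)
        }
        return nil
    }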
3 changes: 2 additions & 1 deletion cmd/utils/cmd.go
@@ -285,6 +285,7 @@ func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, networ
start = time.Now()
reported = time.Now()
imported = 0
forker = core.NewForkChoice(chain, nil, nil)
h = sha256.New()
buf = bytes.NewBuffer(nil)
)
@@ -327,7 +328,7 @@ func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, networ
if err != nil {
return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
}
if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start); err != nil {
if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start, forker); err != nil {
return fmt.Errorf("error inserting header %d: %w", it.Number(), err)
} else if status != core.CanonStatTy {
return fmt.Errorf("error inserting header %d, not canon: %v", it.Number(), status)
4 changes: 2 additions & 2 deletions core/blockchain.go
@@ -2075,6 +2075,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
if bc.insertStopped() {
return nil, 0, nil
}

// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)

@@ -3326,8 +3327,7 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {
return 0, errChainStopped
}
defer bc.chainmu.Unlock()
_, err := bc.hc.InsertHeaderChain(chain, start)

_, err := bc.hc.InsertHeaderChain(chain, start, bc.forker)
return 0, err
}

22 changes: 13 additions & 9 deletions core/blockchain_test.go
@@ -1576,7 +1576,7 @@ done:
timeout.Reset(timeoutDura)

case <-timeout.C:
t.Fatalf("Timeout. Possibly not all blocks were triggered for sideevent: %v", i)
t.Fatal("Timeout. Possibly not all blocks were triggered for sideevent")
}
}

@@ -1996,15 +1996,18 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
if chain.HasState(shared[len(shared)-1].Root()) {
t.Fatalf("common-but-old ancestor still cache")
}
// Import the competitor chain without exceeding the canonical's TD.
// Post-merge the side chain should be executed
// Import the competitor chain without exceeding the canonical's TD and ensure
// we have not processed any of the blocks (protection against malicious blocks)
if _, err := chain.InsertChain(competitor[:len(competitor)-2]); err != nil {
t.Fatalf("failed to insert competitor chain: %v", err)
}
if !chain.HasState(competitor[len(competitor)-3].Root()) {
t.Fatalf("failed to insert low-TD chain")
for i, block := range competitor[:len(competitor)-2] {
if chain.HasState(block.Root()) {
t.Fatalf("competitor %d: low TD chain became processed", i)
}
}
// Import the head of the competitor chain.
// Import the head of the competitor chain, triggering the reorg and ensure we
// successfully reprocess all the stashed away blocks.
if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil {
t.Fatalf("failed to finalize competitor chain: %v", err)
}
@@ -2523,10 +2526,10 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
// Post-merge the chain should change even if td is lower.
asserter(t, blocks2[len(blocks2)-1])
// The head shouldn't change.
asserter(t, blocks3[len(blocks3)-1])

// Rollback the heavier chain and re-insert the longer chain again.
// Rollback the heavier chain and re-insert the longer chain again
chain.SetHead(rollback - 1)

if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
@@ -3086,6 +3089,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
t.Errorf("Block %d pruned, scheme : %s", firstNonPrunedBlock.NumberU64(), scheme)
}
// Now re-import some old blocks
blockToReimport := blocks[5:8]

_, err = chain.InsertChain(blockToReimport)
149 changes: 140 additions & 9 deletions core/forkchoice_test.go
@@ -10,6 +10,8 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/triedb"

"github.com/stretchr/testify/require"
)

// chainValidatorFake is a mock for the chain validator service
@@ -30,6 +32,60 @@ func newChainReaderFake(getTd func(hash common.Hash, number uint64) *big.Int) *c
return &chainReaderFake{getTd: getTd}
}

// nolint: tparallel
func TestForkChoice(t *testing.T) {
t.Parallel()

// Create mocks for forker
getTd := func(hash common.Hash, number uint64) *big.Int {
if number <= 2 {
return big.NewInt(int64(number))
}

return big.NewInt(0)
}
mockChainReader := newChainReaderFake(getTd)
mockForker := NewForkChoice(mockChainReader, nil, nil)

createHeader := func(number int64, extra []byte) *types.Header {
return &types.Header{
Number: big.NewInt(number),
Extra: extra,
}
}

// Create headers for different cases
headerA := createHeader(1, []byte("A"))
headerB := createHeader(2, []byte("B"))
headerC := createHeader(3, []byte("C"))
headerD := createHeader(4, []byte("D")) // 0x96b0f70c01f4d2b1ee2df5b0202c099776f24c9375ffc89d94b880007633961b (hash)
headerE := createHeader(4, []byte("E")) // 0xdc0acf54354ff86194baeaab983098a49a40218cffcc77a583726fc06c429685 (hash)

testCases := []struct {
name string
current *types.Header
incoming *types.Header
want bool
}{
{"tdd(incoming) > tdd(current)", headerA, headerB, true},
{"tdd(current) > tdd(incoming)", headerB, headerA, false},
{"tdd(current) = tdd(incoming), number(incoming) > number(current)", headerC, headerD, false},
{"tdd(current) = tdd(incoming), number(current) > number(incoming)", headerD, headerC, true},
{"tdd(current) = tdd(incoming), number(current) = number(incoming), hash(current) > hash(incoming)", headerE, headerD, false},
{"tdd(current) = tdd(incoming), number(current) = number(incoming), hash(incoming) > hash(current)", headerD, headerE, true},
}

// nolint: paralleltest
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
res, err := mockForker.ReorgNeeded(tc.current, tc.incoming)
require.Equal(t, tc.want, res, tc.name)
require.NoError(t, err, tc.name)
})
}
}
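
Taken together, the six cases above encode a three-level preference: a strictly higher total difficulty wins; on a TD tie the lower block number wins; on a full tie the lexicographically larger header hash wins. A standalone sketch of that ordering, assuming bytes, math/big and core/types are imported (preferIncoming is a hypothetical helper that only summarizes the table, not code from this commit):

    // preferIncoming mirrors the ordering implied by the TestForkChoice cases.
    func preferIncoming(currentTd, incomingTd *big.Int, current, incoming *types.Header) bool {
        if c := incomingTd.Cmp(currentTd); c != 0 {
            return c > 0 // higher total difficulty wins outright
        }
        if c := incoming.Number.Cmp(current.Number); c != 0 {
            return c < 0 // on a TD tie, the lower block number wins
        }
        // Full tie: break deterministically on the header hash.
        return bytes.Compare(incoming.Hash().Bytes(), current.Hash().Bytes()) > 0
    }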

func TestPastChainInsert(t *testing.T) {
t.Parallel()

@@ -45,12 +101,33 @@ func TestPastChainInsert(t *testing.T) {
t.Fatal(err)
}

// Create mocks for forker
getTd := func(hash common.Hash, number uint64) *big.Int {
return big.NewInt(int64(number))
}
validate := func(currentHeader *types.Header, chain []*types.Header) (bool, error) {
// Put all explicit conditions here
// If canonical chain is empty, and we're importing a chain of 64 blocks
if currentHeader.Number.Uint64() == uint64(0) && len(chain) == 64 {
return true, nil
}
// If canonical chain is of len 64, and we're importing a past chain from 54-64, then accept it
if currentHeader.Number.Uint64() == uint64(64) && chain[0].Number.Uint64() == 55 && len(chain) == 10 {
return true, nil
}

return false, nil
}
mockChainReader := newChainReaderFake(getTd)
mockChainValidator := newChainValidatorFake(validate)
mockForker := NewForkChoice(mockChainReader, nil, mockChainValidator)

// chain A: G->A1->A2...A64
genDb, chainA := makeHeaderChainWithGenesis(gspec, 64, ethash.NewFaker(), 10)

// Inserting 64 headers on an empty chain
// expecting 1 write status with no error
testInsert(t, hc, chainA, CanonStatTy, nil)
testInsert(t, hc, chainA, CanonStatTy, nil, mockForker)

// The current chain is: G->A1->A2...A64
// chain B: G->A1->A2...A44->B45->B46...B64
@@ -60,13 +137,25 @@
// chain C: G->A1->A2...A54->C55->C56...C64
chainC := makeHeaderChain(gspec.Config, chainA[53], 10, ethash.NewFaker(), genDb, 10)

// Update the function to consider chainC with higher difficulty
getTd = func(hash common.Hash, number uint64) *big.Int {
td := big.NewInt(int64(number))
if hash == chainB[len(chainB)-1].Hash() || hash == chainC[len(chainC)-1].Hash() {
td = big.NewInt(65)
}

return td
}
mockChainReader = newChainReaderFake(getTd)
mockForker = NewForkChoice(mockChainReader, nil, mockChainValidator)

// Inserting 20 blocks from chainC on canonical chain
// expecting 2 write status with no error
testInsert(t, hc, chainB, SideStatTy, nil)
testInsert(t, hc, chainB, SideStatTy, nil, mockForker)

// Inserting 10 blocks from chainB on canonical chain
// expecting 1 write status with no error
testInsert(t, hc, chainC, CanonStatTy, nil)
testInsert(t, hc, chainC, CanonStatTy, nil, mockForker)
}

func TestFutureChainInsert(t *testing.T) {
@@ -84,28 +173,49 @@ func TestFutureChainInsert(t *testing.T) {
t.Fatal(err)
}

// Create mocks for forker
getTd := func(hash common.Hash, number uint64) *big.Int {
return big.NewInt(int64(number))
}
validate := func(currentHeader *types.Header, chain []*types.Header) (bool, error) {
// Put all explicit conditions here
// If canonical chain is empty and we're importing a chain of 64 blocks
if currentHeader.Number.Uint64() == uint64(0) && len(chain) == 64 {
return true, nil
}
// If length of future chains > some value, they should not be accepted
if currentHeader.Number.Uint64() == uint64(64) && len(chain) <= 10 {
return true, nil
}

return false, nil
}
mockChainReader := newChainReaderFake(getTd)
mockChainValidator := newChainValidatorFake(validate)
mockForker := NewForkChoice(mockChainReader, nil, mockChainValidator)

// chain A: G->A1->A2...A64
genDb, chainA := makeHeaderChainWithGenesis(gspec, 64, ethash.NewFaker(), 10)

// Inserting 64 headers on an empty chain
// expecting 1 write status with no error
testInsert(t, hc, chainA, CanonStatTy, nil)
testInsert(t, hc, chainA, CanonStatTy, nil, mockForker)

// The current chain is: G->A1->A2...A64
// chain B: G->A1->A2...A64->B65->B66...B84
chainB := makeHeaderChain(gspec.Config, chainA[63], 20, ethash.NewFaker(), genDb, 10)

// Inserting 20 headers on the canonical chain
// expecting 0 write status with no error
testInsert(t, hc, chainB, SideStatTy, nil)
testInsert(t, hc, chainB, SideStatTy, nil, mockForker)

// The current chain is: G->A1->A2...A64
// chain C: G->A1->A2...A64->C65->C66...C74
chainC := makeHeaderChain(gspec.Config, chainA[63], 10, ethash.NewFaker(), genDb, 10)

// Inserting 10 headers on the canonical chain
// expecting 0 write status with no error
testInsert(t, hc, chainC, CanonStatTy, nil)
testInsert(t, hc, chainC, CanonStatTy, nil, mockForker)
}

func TestOverlappingChainInsert(t *testing.T) {
@@ -123,28 +233,49 @@ func TestOverlappingChainInsert(t *testing.T) {
t.Fatal(err)
}

// Create mocks for forker
getTd := func(hash common.Hash, number uint64) *big.Int {
return big.NewInt(int64(number))
}
validate := func(currentHeader *types.Header, chain []*types.Header) (bool, error) {
// Put all explicit conditions here
// If canonical chain is empty and we're importing a chain of 64 blocks
if currentHeader.Number.Uint64() == uint64(0) && len(chain) == 64 {
return true, nil
}
// If length of chain is > some fixed value then don't accept it
if currentHeader.Number.Uint64() == uint64(64) && len(chain) <= 20 {
return true, nil
}

return false, nil
}
mockChainReader := newChainReaderFake(getTd)
mockChainValidator := newChainValidatorFake(validate)
mockForker := NewForkChoice(mockChainReader, nil, mockChainValidator)

// chain A: G->A1->A2...A64
genDb, chainA := makeHeaderChainWithGenesis(gspec, 64, ethash.NewFaker(), 10)

// Inserting 64 headers on an empty chain
// expecting 1 write status with no error
testInsert(t, hc, chainA, CanonStatTy, nil)
testInsert(t, hc, chainA, CanonStatTy, nil, mockForker)

// The current chain is: G->A1->A2...A64
// chain B: G->A1->A2...A54->B55->B56...B84
chainB := makeHeaderChain(gspec.Config, chainA[53], 30, ethash.NewFaker(), genDb, 10)

// Inserting 20 blocks on canonical chain
// expecting 2 write status with no error
testInsert(t, hc, chainB, SideStatTy, nil)
testInsert(t, hc, chainB, SideStatTy, nil, mockForker)

// The current chain is: G->A1->A2...A64
// chain C: G->A1->A2...A54->C55->C56...C74
chainC := makeHeaderChain(gspec.Config, chainA[53], 20, ethash.NewFaker(), genDb, 10)

// Inserting 10 blocks on canonical chain
// expecting 1 write status with no error
testInsert(t, hc, chainC, CanonStatTy, nil)
testInsert(t, hc, chainC, CanonStatTy, nil, mockForker)
}

// Mock chain reader functions
15 changes: 12 additions & 3 deletions core/headerchain.go
@@ -278,7 +278,7 @@ func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header) (*headerWriteResult, error) {
func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *ForkChoice) (*headerWriteResult, error) {
inserted, err := hc.WriteHeaders(headers)
if err != nil {
return nil, err
@@ -295,6 +295,15 @@ func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header) (*headerW
lastHeader: lastHeader,
}
)
// Ask the fork choicer if the reorg is necessary
if reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), lastHeader); err != nil {
return nil, err
} else if !reorg {
if inserted != 0 {
result.status = SideStatTy
}
return result, nil
}
// Special case, all the inserted headers are already on the canonical
// header chain, skip the reorg operation.
if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() {
@@ -354,11 +363,11 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header) (int, error) {
//
// The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain.
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) {
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, forker *ForkChoice) (WriteStatus, error) {
if hc.procInterrupt() {
return 0, errors.New("aborted")
}
res, err := hc.writeHeadersAndSetHead(chain)
res, err := hc.writeHeadersAndSetHead(chain, forker)
if err != nil {
return 0, err
}
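
With the forker restored, writeHeadersAndSetHead persists the headers first and only moves the canonical head when ReorgNeeded reports true; otherwise the batch is reported as SideStatTy. A rough caller-side sketch of handling the two write statuses (illustrative only; chain, headers and forker are assumed to be in scope):

    status, err := chain.HeaderChain().InsertHeaderChain(headers, time.Now(), forker)
    if err != nil {
        return err // headers were rejected or the write failed
    }
    switch status {
    case core.CanonStatTy:
        // ReorgNeeded returned true: the last inserted header is now the canonical head.
    case core.SideStatTy:
        // The headers were written, but the existing canonical head was kept.
    }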