From 6ecd5170326c1fa94422a1f3c9ac9f1b0ab8993d Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 18 Dec 2024 17:48:29 +0200 Subject: [PATCH 01/21] adapt integration tests for consensus v2 --- integrationTests/consensus/consensus_test.go | 74 +++-- integrationTests/testConsensusNode.go | 331 +++++++++++++++++-- integrationTests/testInitializer.go | 20 ++ integrationTests/testProcessorNode.go | 21 ++ 4 files changed, 383 insertions(+), 63 deletions(-) diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 7a480f3ecc0..4b90bb757cd 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -32,17 +32,30 @@ var ( log = logger.GetOrCreate("integrationtests/consensus") ) -func encodeAddress(address []byte) string { - return hex.EncodeToString(address) +func TestConsensusBLSFullTestSingleKeys(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + logger.SetLogLevel("*:TRACE") + + runFullConsensusTest(t, blsConsensusType, 1) } -func getPkEncoded(pubKey crypto.PublicKey) string { - pk, err := pubKey.ToByteArray() - if err != nil { - return err.Error() +func TestConsensusBLSFullTestMultiKeys(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") } - return encodeAddress(pk) + runFullConsensusTest(t, blsConsensusType, 5) +} + +func TestConsensusBLSNotEnoughValidators(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + runConsensusWithNotEnoughValidators(t, blsConsensusType) } func initNodesAndTest( @@ -219,9 +232,9 @@ func checkBlockProposedEveryRound(numCommBlock uint64, nonceForRoundMap map[uint } func runFullConsensusTest(t *testing.T, consensusType string, numKeysOnEachNode int) { - numMetaNodes := uint32(4) - numNodes := uint32(4) - consensusSize := uint32(4 * numKeysOnEachNode) + numMetaNodes := uint32(2) + numNodes := uint32(2) + consensusSize := uint32(2 * numKeysOnEachNode) numInvalid := uint32(0) roundTime := uint64(1000) numCommBlock := uint64(8) @@ -233,8 +246,8 @@ func runFullConsensusTest(t *testing.T, consensusType string, numKeysOnEachNode ) enableEpochsConfig := integrationTests.CreateEnableEpochsConfig() - enableEpochsConfig.EquivalentMessagesEnableEpoch = integrationTests.UnreachableEpoch - enableEpochsConfig.FixedOrderInConsensusEnableEpoch = integrationTests.UnreachableEpoch + enableEpochsConfig.EquivalentMessagesEnableEpoch = 0 + enableEpochsConfig.FixedOrderInConsensusEnableEpoch = 0 nodes := initNodesAndTest( numMetaNodes, numNodes, @@ -285,22 +298,6 @@ func runFullConsensusTest(t *testing.T, consensusType string, numKeysOnEachNode } } -func TestConsensusBLSFullTestSingleKeys(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - runFullConsensusTest(t, blsConsensusType, 1) -} - -func TestConsensusBLSFullTestMultiKeys(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - runFullConsensusTest(t, blsConsensusType, 5) -} - func runConsensusWithNotEnoughValidators(t *testing.T, consensusType string) { numMetaNodes := uint32(4) numNodes := uint32(4) @@ -343,14 +340,6 @@ func runConsensusWithNotEnoughValidators(t *testing.T, consensusType string) { } } -func TestConsensusBLSNotEnoughValidators(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - runConsensusWithNotEnoughValidators(t, blsConsensusType) -} - func displayAndStartNodes(shardID uint32, nodes []*integrationTests.TestConsensusNode) { for _, n := range 
nodes { skBuff, _ := n.NodeKeys.Sk.ToByteArray() @@ -365,3 +354,16 @@ func displayAndStartNodes(shardID uint32, nodes []*integrationTests.TestConsensu ) } } + +func encodeAddress(address []byte) string { + return hex.EncodeToString(address) +} + +func getPkEncoded(pubKey crypto.PublicKey) string { + pk, err := pubKey.ToByteArray() + if err != nil { + return err.Error() + } + + return encodeAddress(pk) +} diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index 8651045eb7e..a16d01a2f4b 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -17,11 +17,16 @@ import ( mclMultiSig "github.com/multiversx/mx-chain-crypto-go/signing/mcl/multisig" "github.com/multiversx/mx-chain-crypto-go/signing/multisig" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/enablers" + "github.com/multiversx/mx-chain-go/common/forking" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/round" "github.com/multiversx/mx-chain-go/dataRetriever" + epochStartDisabled "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" "github.com/multiversx/mx-chain-go/epochStart/metachain" "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/epochStart/shardchain" cryptoFactory "github.com/multiversx/mx-chain-go/factory/crypto" "github.com/multiversx/mx-chain-go/factory/peerSignatureHandler" "github.com/multiversx/mx-chain-go/integrationTests/mock" @@ -30,7 +35,14 @@ import ( "github.com/multiversx/mx-chain-go/ntp" "github.com/multiversx/mx-chain-go/p2p" p2pFactory "github.com/multiversx/mx-chain-go/p2p/factory" + "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/process/factory/interceptorscontainer" + "github.com/multiversx/mx-chain-go/process/interceptors" + disabledInterceptors "github.com/multiversx/mx-chain-go/process/interceptors/disabled" + interceptorsFactory "github.com/multiversx/mx-chain-go/process/interceptors/factory" + processMock "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/process/smartContract" syncFork "github.com/multiversx/mx-chain-go/process/sync" "github.com/multiversx/mx-chain-go/sharding" chainShardingMocks "github.com/multiversx/mx-chain-go/sharding/mock" @@ -44,6 +56,7 @@ import ( consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" testFactory "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" @@ -82,17 +95,20 @@ type ArgsTestConsensusNode struct { // TestConsensusNode represents a structure used in integration tests used for consensus tests type TestConsensusNode struct { - Node *node.Node - MainMessenger p2p.Messenger - FullArchiveMessenger p2p.Messenger - NodesCoordinator nodesCoordinator.NodesCoordinator - ShardCoordinator sharding.Coordinator - ChainHandler data.ChainHandler - BlockProcessor *mock.BlockProcessorMock - RequestersFinder dataRetriever.RequestersFinder - AccountsDB *state.AccountsDB - NodeKeys *TestKeyPair - MultiSigner *cryptoMocks.MultisignerMock + Node 
*node.Node + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + NodesCoordinator nodesCoordinator.NodesCoordinator + ShardCoordinator sharding.Coordinator + ChainHandler data.ChainHandler + BlockProcessor *mock.BlockProcessorMock + RequestersFinder dataRetriever.RequestersFinder + AccountsDB *state.AccountsDB + NodeKeys *TestKeyPair + MultiSigner *cryptoMocks.MultisignerMock + MainInterceptorsContainer process.InterceptorsContainer + DataPool dataRetriever.PoolsHolder + RequestHandler process.RequestHandler } // NewTestConsensusNode returns a new TestConsensusNode @@ -184,6 +200,8 @@ func createCustomMultiSignerMock(multiSigner crypto.MultiSigner) *cryptoMocks.Mu } func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { + var err error + testHasher := createHasher(args.ConsensusType) epochStartRegistrationHandler := notifier.NewEpochStartSubscriptionHandler() consensusCache, _ := cache.NewLRUCache(10000) @@ -198,6 +216,13 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { syncer := ntp.NewSyncTime(ntp.NewNTPGoogleConfig(), nil) syncer.StartSyncingTime() + genericEpochNotifier := forking.NewGenericEpochNotifier() + + epochsConfig := GetDefaultEnableEpochsConfig() + enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(*epochsConfig, genericEpochNotifier) + + storage := CreateStore(tcn.ShardCoordinator.NumberOfShards()) + roundHandler, _ := round.NewRound( time.Unix(args.StartTime, 0), syncer.CurrentTime(), @@ -206,22 +231,63 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { 0) dataPool := dataRetrieverMock.CreatePoolsHolder(1, 0) + tcn.DataPool = dataPool - argsNewMetaEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ - GenesisTime: time.Unix(args.StartTime, 0), - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - Settings: &config.EpochStartConfig{ - MinRoundsBetweenEpochs: 1, - RoundsPerEpoch: 1000, - }, - Epoch: 0, - Storage: createTestStore(), - Marshalizer: TestMarshalizer, - Hasher: testHasher, - AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, - DataPool: dataPool, + var epochTrigger TestEpochStartTrigger + if tcn.ShardCoordinator.SelfId() == core.MetachainShardId { + argsNewMetaEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + GenesisTime: time.Unix(args.StartTime, 0), + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 1, + RoundsPerEpoch: 1000, + }, + Epoch: 0, + Storage: createTestStore(), + Marshalizer: TestMarshalizer, + Hasher: testHasher, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + DataPool: dataPool, + } + epochStartTrigger, err := metachain.NewEpochStartTrigger(argsNewMetaEpochStart) + if err != nil { + fmt.Println(err.Error()) + } + epochTrigger = &metachain.TestTrigger{} + epochTrigger.SetTrigger(epochStartTrigger) + } else { + argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ + MiniBlocksPool: tcn.DataPool.MiniBlocks(), + ValidatorsInfoPool: tcn.DataPool.ValidatorsInfo(), + RequestHandler: &testscommon.RequestHandlerStub{}, + } + peerMiniBlockSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) + + argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + HeaderValidator: &mock.HeaderValidatorStub{}, + Uint64Converter: TestUint64Converter, + DataPool: tcn.DataPool, + Storage: storage, + RequestHandler: &testscommon.RequestHandlerStub{}, + Epoch: 0, + Validity: 1, + 
Finality: 1, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + PeerMiniBlocksSyncer: peerMiniBlockSyncer, + RoundHandler: roundHandler, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EnableEpochsHandler: enableEpochsHandler, + } + epochStartTrigger, err := shardchain.NewEpochStartTrigger(argsShardEpochStart) + if err != nil { + fmt.Println("NewEpochStartTrigger shard") + fmt.Println(err.Error()) + } + epochTrigger = &shardchain.TestTrigger{} + epochTrigger.SetTrigger(epochStartTrigger) } - epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsNewMetaEpochStart) forkDetector, _ := syncFork.NewShardForkDetector( roundHandler, @@ -259,6 +325,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { return uint32(args.ConsensusSize) }, } + coreComponents.HardforkTriggerPubKeyField = []byte("provided hardfork pub key") argsKeysHolder := keysManagement.ArgsManagedPeersHolder{ KeyGenerator: args.KeyGen, @@ -315,7 +382,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { processComponents.NodesCoord = tcn.NodesCoordinator processComponents.BlockProcess = tcn.BlockProcessor processComponents.ReqFinder = tcn.RequestersFinder - processComponents.EpochTrigger = epochStartTrigger + processComponents.EpochTrigger = epochTrigger processComponents.EpochNotifier = epochStartRegistrationHandler processComponents.BlackListHdl = &testscommon.TimeCacheStub{} processComponents.BootSore = &mock.BoostrapStorerMock{} @@ -329,6 +396,9 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { processComponents.ProcessedMiniBlocksTrackerInternal = &testscommon.ProcessedMiniBlocksTrackerStub{} processComponents.SentSignaturesTrackerInternal = &testscommon.SentSignatureTrackerStub{} + tcn.initInterceptors(coreComponents, cryptoComponents, roundHandler, enableEpochsHandler, storage, epochTrigger) + processComponents.IntContainer = tcn.MainInterceptorsContainer + dataComponents := GetDefaultDataComponents() dataComponents.BlockChain = tcn.ChainHandler dataComponents.DataPool = dataPool @@ -342,7 +412,6 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, } - var err error tcn.Node, err = node.NewNode( node.WithCoreComponents(coreComponents), node.WithStatusCoreComponents(statusCoreComponents), @@ -363,6 +432,214 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { } } +func (tcn *TestConsensusNode) initInterceptors( + coreComponents process.CoreComponentsHolder, + cryptoComponents process.CryptoComponentsHolder, + roundHandler consensus.RoundHandler, + enableEpochsHandler common.EnableEpochsHandler, + storage dataRetriever.StorageService, + epochStartTrigger TestEpochStartTrigger, +) { + interceptorDataVerifierArgs := interceptorsFactory.InterceptedDataVerifierFactoryArgs{ + CacheSpan: time.Second * 10, + CacheExpiry: time.Second * 10, + } + + accountsAdapter := epochStartDisabled.NewAccountsAdapter() + + blockBlackListHandler := cache.NewTimeCache(TimeSpanForBadHeaders) + + // argsNewEconomicsData := economics.ArgsNewEconomicsData{ + // Economics: &config.EconomicsConfig{}, + // EpochNotifier: genericEpochNotifier, + // EnableEpochsHandler: enableEpochsHandler, + // TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + // } + // economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) + + genesisBlocks := make(map[uint32]data.HeaderHandler) + blockTracker := processMock.NewBlockTrackerMock(tcn.ShardCoordinator, 
genesisBlocks) + + whiteLstHandler, _ := disabledInterceptors.NewDisabledWhiteListDataVerifier() + + cacherVerifiedCfg := storageunit.CacheConfig{Capacity: 5000, Type: storageunit.LRUCache, Shards: 1} + cacheVerified, _ := storageunit.NewCache(cacherVerifiedCfg) + whiteListerVerifiedTxs, _ := interceptors.NewWhiteListDataVerifier(cacheVerified) + + tcn.initRequesters() + + if tcn.ShardCoordinator.SelfId() == core.MetachainShardId { + metaInterceptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ + CoreComponents: coreComponents, + CryptoComponents: cryptoComponents, + Accounts: accountsAdapter, + ShardCoordinator: tcn.ShardCoordinator, + NodesCoordinator: tcn.NodesCoordinator, + MainMessenger: tcn.MainMessenger, + FullArchiveMessenger: tcn.FullArchiveMessenger, + Store: storage, + DataPool: tcn.DataPool, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, + BlockBlackList: blockBlackListHandler, + HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, + HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: whiteLstHandler, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + AntifloodHandler: &mock.NilAntifloodHandler{}, + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + SizeCheckDelta: sizeCheckDelta, + RequestHandler: &testscommon.RequestHandlerStub{}, + PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, + SignaturesHandler: &processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + NodeOperationMode: common.NormalOperation, + InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), + } + interceptorContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorContainerFactoryArgs) + if err != nil { + fmt.Println(err.Error()) + } + + tcn.MainInterceptorsContainer, _, err = interceptorContainerFactory.Create() + if err != nil { + log.Debug("interceptor container factory Create", "error", err.Error()) + } + } else { + argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ + MiniBlocksPool: tcn.DataPool.MiniBlocks(), + ValidatorsInfoPool: tcn.DataPool.ValidatorsInfo(), + RequestHandler: &testscommon.RequestHandlerStub{}, + } + peerMiniBlockSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) + argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + HeaderValidator: &mock.HeaderValidatorStub{}, + Uint64Converter: TestUint64Converter, + DataPool: tcn.DataPool, + Storage: storage, + RequestHandler: &testscommon.RequestHandlerStub{}, + Epoch: 0, + Validity: 1, + Finality: 1, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + PeerMiniBlocksSyncer: peerMiniBlockSyncer, + RoundHandler: roundHandler, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EnableEpochsHandler: enableEpochsHandler, + } + _, _ = shardchain.NewEpochStartTrigger(argsShardEpochStart) + + shardIntereptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ + CoreComponents: coreComponents, + CryptoComponents: 
cryptoComponents, + Accounts: accountsAdapter, + ShardCoordinator: tcn.ShardCoordinator, + NodesCoordinator: tcn.NodesCoordinator, + MainMessenger: tcn.MainMessenger, + FullArchiveMessenger: tcn.FullArchiveMessenger, + Store: storage, + DataPool: tcn.DataPool, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, + BlockBlackList: blockBlackListHandler, + HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, + HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: whiteLstHandler, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + AntifloodHandler: &mock.NilAntifloodHandler{}, + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + SizeCheckDelta: sizeCheckDelta, + RequestHandler: &testscommon.RequestHandlerStub{}, + PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, + SignaturesHandler: &processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + NodeOperationMode: common.NormalOperation, + InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), + } + + interceptorContainerFactory, err := interceptorscontainer.NewShardInterceptorsContainerFactory(shardIntereptorContainerFactoryArgs) + if err != nil { + fmt.Println(err.Error()) + } + + tcn.MainInterceptorsContainer, _, err = interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + // interceptorsContainer := &testscommon.InterceptorsContainerStub{ + // GetCalled: func(topic string) (process.Interceptor, error) { + // var hdl func(handler func(topic string, hash []byte, data interface{})) + // switch topic { + // case "shardBlocks_0_META": + // hdl = registerHandlerHeaders + // case "txBlockBodies_0_1": + // case "txBlockBodies_0_META": + // hdl = registerHandlerMiniblocks + // default: + // return nil, errors.New("unexpected topic") + // } + + // return &testscommon.InterceptorStub{ + // RegisterHandlerCalled: hdl, + // }, nil + // }, + // } + } +} + +func (tcn *TestConsensusNode) initRequesters() { + // whiteListHandler, _ := disabledInterceptors.NewDisabledWhiteListDataVerifier() + + // requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ + // RequesterConfig: config.RequesterConfig{ + // NumCrossShardPeers: 2, + // NumTotalPeers: 3, + // NumFullHistoryPeers: 3, + // }, + // ShardCoordinator: tcn.ShardCoordinator, + // MainMessenger: tcn.MainMessenger, + // FullArchiveMessenger: tcn.FullArchiveMessenger, + // Marshaller: TestMarshaller, + // Uint64ByteSliceConverter: TestUint64Converter, + // OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + // CurrentNetworkEpochProvider: tcn.EpochProvider, + // MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + // FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + // PeersRatingHandler: tpn.PeersRatingHandler, + // SizeCheckDelta: 0, + // } + + // if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { + // tpn.createMetaRequestersContainer(requestersContainerFactoryArgs) + // } else { + // tpn.createShardRequestersContainer(requestersContainerFactoryArgs) + // } + + // requestersFinder, _ = containers.NewRequestersFinder(tpn.RequestersContainer, 
tpn.ShardCoordinator) + // tpn.RequestHandler, _ = requestHandlers.NewResolverRequestHandler( + // requestersFinder, + // cache.NewTimeCache(time.Second), + // whiteListHandler, + // 100, + // tcn.ShardCoordinator.SelfId(), + // time.Second, + // ) +} + func (tcn *TestConsensusNode) initNodesCoordinator( consensusSize int, hasher hashing.Hasher, diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 57af859a8df..aeebb9f74af 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1161,6 +1161,26 @@ func ProposeBlock(nodes []*TestProcessorNode, leaders []*TestProcessorNode, roun log.Info("Proposed block\n" + MakeDisplayTable(nodes)) } +// ProposeEpochStartBlock proposes a block for every shard +func ProposeEpochStartBlock(nodes []*TestProcessorNode, leaders []*TestProcessorNode, round uint64, nonce uint64) { + log.Info("All shards propose blocks...") + + stepDelayAdjustment := StepDelay * time.Duration(1+len(nodes)/3) + + for _, n := range leaders { + body, header, _ := n.ProposeEpochStartBlock(round, nonce) + + n.WhiteListBody(nodes, body) + pk := n.NodeKeys.MainKey.Pk + n.BroadcastBlock(body, header, pk) + n.CommitBlock(body, header) + } + + log.Info("Delaying for disseminating headers and miniblocks...") + time.Sleep(stepDelayAdjustment) + log.Info("Proposed block\n" + MakeDisplayTable(nodes)) +} + // SyncBlock synchronizes the proposed block in all the other shard nodes func SyncBlock( t *testing.T, diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 6416f8b6c7c..6dfcbef13fc 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -2808,6 +2808,27 @@ func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce uint64) (data.Bod return blockBody, blockHeader, txHashes } +// ProposeEpochStartBlock proposes a new block +func (tpn *TestProcessorNode) ProposeEpochStartBlock(round uint64, nonce uint64) (data.BodyHandler, data.HeaderHandler, [][]byte) { + body, header, txHashes := tpn.ProposeBlock(round, nonce) + + metaBlock, ok := header.(*dataBlock.MetaBlock) + if !ok { + return nil, nil, nil + } + + metaBlock.GetEpochStartHandler().SetLastFinalizedHeaders( + []data.EpochStartShardDataHandler{ + &dataBlock.EpochStartShardData{ + ShardID: 0, + Epoch: 1, + }, + }, + ) + + return body, metaBlock, txHashes +} + // BroadcastBlock broadcasts the block and body to the connected peers func (tpn *TestProcessorNode) BroadcastBlock(body data.BodyHandler, header data.HeaderHandler, publicKey crypto.PublicKey) { _ = tpn.BroadcastMessenger.BroadcastBlock(body, header) From 30f6c12d2fce867303b8e22fb20dc9a4a8d96b20 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 23 Dec 2024 15:07:54 +0200 Subject: [PATCH 02/21] added separate single keys test with equivalent proofs --- integrationTests/consensus/consensus_test.go | 41 +++++++++++++++----- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 4b90bb757cd..5d740dc3549 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -37,9 +37,7 @@ func TestConsensusBLSFullTestSingleKeys(t *testing.T) { t.Skip("this is not a short test") } - logger.SetLogLevel("*:TRACE") - - runFullConsensusTest(t, blsConsensusType, 1) + runFullConsensusTest(t, blsConsensusType, 1, false) } func TestConsensusBLSFullTestMultiKeys(t *testing.T) { @@ -47,7 +45,18 @@ 
func TestConsensusBLSFullTestMultiKeys(t *testing.T) { t.Skip("this is not a short test") } - runFullConsensusTest(t, blsConsensusType, 5) + runFullConsensusTest(t, blsConsensusType, 5, false) +} + +func TestConsensusBLSFullTestSingleKeys_WithEquivalentProofs(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + logger.ToggleLoggerName(true) + logger.SetLogLevel("*:DEBUG,consensus:TRACE") + + runFullConsensusTest(t, blsConsensusType, 1, true) } func TestConsensusBLSNotEnoughValidators(t *testing.T) { @@ -231,10 +240,15 @@ func checkBlockProposedEveryRound(numCommBlock uint64, nonceForRoundMap map[uint } } -func runFullConsensusTest(t *testing.T, consensusType string, numKeysOnEachNode int) { - numMetaNodes := uint32(2) - numNodes := uint32(2) - consensusSize := uint32(2 * numKeysOnEachNode) +func runFullConsensusTest( + t *testing.T, + consensusType string, + numKeysOnEachNode int, + withEquivalentProofs bool, +) { + numMetaNodes := uint32(4) + numNodes := uint32(4) + consensusSize := uint32(3 * numKeysOnEachNode) numInvalid := uint32(0) roundTime := uint64(1000) numCommBlock := uint64(8) @@ -246,8 +260,15 @@ func runFullConsensusTest(t *testing.T, consensusType string, numKeysOnEachNode ) enableEpochsConfig := integrationTests.CreateEnableEpochsConfig() - enableEpochsConfig.EquivalentMessagesEnableEpoch = 0 - enableEpochsConfig.FixedOrderInConsensusEnableEpoch = 0 + + equivalentProodsActivationEpoch := integrationTests.UnreachableEpoch + if withEquivalentProofs { + equivalentProodsActivationEpoch = 0 + } + + enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProodsActivationEpoch + enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProodsActivationEpoch + nodes := initNodesAndTest( numMetaNodes, numNodes, From 4a4f9b29f63206a29df5e631408ec9593c133e13 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 10 Jan 2025 01:10:04 +0200 Subject: [PATCH 03/21] remove unused code --- integrationTests/testConsensusNode.go | 67 --------------------------- integrationTests/testInitializer.go | 20 -------- integrationTests/testProcessorNode.go | 21 --------- 3 files changed, 108 deletions(-) diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index a16d01a2f4b..b62c7f869be 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -449,14 +449,6 @@ func (tcn *TestConsensusNode) initInterceptors( blockBlackListHandler := cache.NewTimeCache(TimeSpanForBadHeaders) - // argsNewEconomicsData := economics.ArgsNewEconomicsData{ - // Economics: &config.EconomicsConfig{}, - // EpochNotifier: genericEpochNotifier, - // EnableEpochsHandler: enableEpochsHandler, - // TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - // } - // economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) - genesisBlocks := make(map[uint32]data.HeaderHandler) blockTracker := processMock.NewBlockTrackerMock(tcn.ShardCoordinator, genesisBlocks) @@ -466,8 +458,6 @@ func (tcn *TestConsensusNode) initInterceptors( cacheVerified, _ := storageunit.NewCache(cacherVerifiedCfg) whiteListerVerifiedTxs, _ := interceptors.NewWhiteListDataVerifier(cacheVerified) - tcn.initRequesters() - if tcn.ShardCoordinator.SelfId() == core.MetachainShardId { metaInterceptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ CoreComponents: coreComponents, @@ -580,66 +570,9 @@ func (tcn *TestConsensusNode) initInterceptors( if err != nil { fmt.Println(err.Error()) } - // 
interceptorsContainer := &testscommon.InterceptorsContainerStub{ - // GetCalled: func(topic string) (process.Interceptor, error) { - // var hdl func(handler func(topic string, hash []byte, data interface{})) - // switch topic { - // case "shardBlocks_0_META": - // hdl = registerHandlerHeaders - // case "txBlockBodies_0_1": - // case "txBlockBodies_0_META": - // hdl = registerHandlerMiniblocks - // default: - // return nil, errors.New("unexpected topic") - // } - - // return &testscommon.InterceptorStub{ - // RegisterHandlerCalled: hdl, - // }, nil - // }, - // } } } -func (tcn *TestConsensusNode) initRequesters() { - // whiteListHandler, _ := disabledInterceptors.NewDisabledWhiteListDataVerifier() - - // requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ - // RequesterConfig: config.RequesterConfig{ - // NumCrossShardPeers: 2, - // NumTotalPeers: 3, - // NumFullHistoryPeers: 3, - // }, - // ShardCoordinator: tcn.ShardCoordinator, - // MainMessenger: tcn.MainMessenger, - // FullArchiveMessenger: tcn.FullArchiveMessenger, - // Marshaller: TestMarshaller, - // Uint64ByteSliceConverter: TestUint64Converter, - // OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - // CurrentNetworkEpochProvider: tcn.EpochProvider, - // MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - // FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - // PeersRatingHandler: tpn.PeersRatingHandler, - // SizeCheckDelta: 0, - // } - - // if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { - // tpn.createMetaRequestersContainer(requestersContainerFactoryArgs) - // } else { - // tpn.createShardRequestersContainer(requestersContainerFactoryArgs) - // } - - // requestersFinder, _ = containers.NewRequestersFinder(tpn.RequestersContainer, tpn.ShardCoordinator) - // tpn.RequestHandler, _ = requestHandlers.NewResolverRequestHandler( - // requestersFinder, - // cache.NewTimeCache(time.Second), - // whiteListHandler, - // 100, - // tcn.ShardCoordinator.SelfId(), - // time.Second, - // ) -} - func (tcn *TestConsensusNode) initNodesCoordinator( consensusSize int, hasher hashing.Hasher, diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index aeebb9f74af..57af859a8df 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1161,26 +1161,6 @@ func ProposeBlock(nodes []*TestProcessorNode, leaders []*TestProcessorNode, roun log.Info("Proposed block\n" + MakeDisplayTable(nodes)) } -// ProposeEpochStartBlock proposes a block for every shard -func ProposeEpochStartBlock(nodes []*TestProcessorNode, leaders []*TestProcessorNode, round uint64, nonce uint64) { - log.Info("All shards propose blocks...") - - stepDelayAdjustment := StepDelay * time.Duration(1+len(nodes)/3) - - for _, n := range leaders { - body, header, _ := n.ProposeEpochStartBlock(round, nonce) - - n.WhiteListBody(nodes, body) - pk := n.NodeKeys.MainKey.Pk - n.BroadcastBlock(body, header, pk) - n.CommitBlock(body, header) - } - - log.Info("Delaying for disseminating headers and miniblocks...") - time.Sleep(stepDelayAdjustment) - log.Info("Proposed block\n" + MakeDisplayTable(nodes)) -} - // SyncBlock synchronizes the proposed block in all the other shard nodes func SyncBlock( t *testing.T, diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 6dfcbef13fc..6416f8b6c7c 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -2808,27 +2808,6 @@ func (tpn *TestProcessorNode) 
ProposeBlock(round uint64, nonce uint64) (data.Bod return blockBody, blockHeader, txHashes } -// ProposeEpochStartBlock proposes a new block -func (tpn *TestProcessorNode) ProposeEpochStartBlock(round uint64, nonce uint64) (data.BodyHandler, data.HeaderHandler, [][]byte) { - body, header, txHashes := tpn.ProposeBlock(round, nonce) - - metaBlock, ok := header.(*dataBlock.MetaBlock) - if !ok { - return nil, nil, nil - } - - metaBlock.GetEpochStartHandler().SetLastFinalizedHeaders( - []data.EpochStartShardDataHandler{ - &dataBlock.EpochStartShardData{ - ShardID: 0, - Epoch: 1, - }, - }, - ) - - return body, metaBlock, txHashes -} - // BroadcastBlock broadcasts the block and body to the connected peers func (tpn *TestProcessorNode) BroadcastBlock(body data.BodyHandler, header data.HeaderHandler, publicKey crypto.PublicKey) { _ = tpn.BroadcastMessenger.BroadcastBlock(body, header) From 6f29b71ba394304293144d52acc7471b1f9298e3 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Sun, 26 Jan 2025 12:32:45 +0200 Subject: [PATCH 04/21] added full integration test - without equivalent proofs activation --- consensus/spos/bls/v1/subroundEndRound.go | 1 + integrationTests/consensus/consensus_test.go | 272 +++++ integrationTests/testFullNode.go | 1144 ++++++++++++++++++ integrationTests/testProcessorNode.go | 16 +- 4 files changed, 1430 insertions(+), 3 deletions(-) create mode 100644 integrationTests/testFullNode.go diff --git a/consensus/spos/bls/v1/subroundEndRound.go b/consensus/spos/bls/v1/subroundEndRound.go index c591c736aca..51c1f4a1af3 100644 --- a/consensus/spos/bls/v1/subroundEndRound.go +++ b/consensus/spos/bls/v1/subroundEndRound.go @@ -321,6 +321,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { return false } + log.Error("doEndRoundJobByLeader.SetSignature", "set sig", "sig", sig) err = header.SetSignature(sig) if err != nil { log.Debug("doEndRoundJobByLeader.SetSignature", "error", err.Error()) diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 5d740dc3549..55faae12bac 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -8,11 +8,13 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" "github.com/multiversx/mx-chain-core-go/data" crypto "github.com/multiversx/mx-chain-crypto-go" logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/multiversx/mx-chain-go/config" consensusComp "github.com/multiversx/mx-chain-go/factory/consensus" @@ -67,6 +69,276 @@ func TestConsensusBLSNotEnoughValidators(t *testing.T) { runConsensusWithNotEnoughValidators(t, blsConsensusType) } +func TestConsensusBLSWithProcessing(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + _ = logger.SetLogLevel("*:DEBUG,process:TRACE,consensus:TRACE") + logger.ToggleLoggerName(true) + + numKeysOnEachNode := 1 + numMetaNodes := uint32(2) + numNodes := uint32(2) + consensusSize := uint32(2 * numKeysOnEachNode) + roundTime := uint64(1000) + + log.Info("runFullConsensusTest", + "numNodes", numNodes, + "numKeysOnEachNode", numKeysOnEachNode, + "consensusSize", consensusSize, + ) + + enableEpochsConfig := integrationTests.CreateEnableEpochsConfig() + + equivalentProodsActivationEpoch := uint32(0) + + enableEpochsConfig.EquivalentMessagesEnableEpoch = 
equivalentProodsActivationEpoch + enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProodsActivationEpoch + + fmt.Println("Step 1. Setup nodes...") + + nodes := integrationTests.CreateNodesWithTestConsensusNode( + int(numMetaNodes), + int(numNodes), + int(consensusSize), + roundTime, + blsConsensusType, + numKeysOnEachNode, + enableEpochsConfig, + ) + + // leaders := []*integrationTests.TestConsensusNode{} + for shardID, nodesList := range nodes { + // leaders = append(leaders, nodesList[0]) + + displayAndStartNodes(shardID, nodesList) + } + + time.Sleep(p2pBootstrapDelay) + + // round := uint64(0) + // nonce := uint64(0) + // round = integrationTests.IncrementAndPrintRound(round) + // integrationTests.UpdateRound(nodes, round) + // nonce++ + + // numRoundsToTest := 5 + // for i := 0; i < numRoundsToTest; i++ { + // integrationTests.ProposeBlock(nodes, leaders, round, nonce) + + // time.Sleep(integrationTests.SyncDelay) + + // round = integrationTests.IncrementAndPrintRound(round) + // integrationTests.UpdateRound(nodes, round) + // nonce++ + // } + + for _, nodesList := range nodes { + for _, n := range nodesList { + statusComponents := integrationTests.GetDefaultStatusComponents() + + consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{ + Config: config.Config{ + Consensus: config.ConsensusConfig{ + Type: blsConsensusType, + }, + ValidatorPubkeyConverter: config.PubkeyConfig{ + Length: 96, + Type: "bls", + SignatureLength: 48, + }, + TrieSync: config.TrieSyncConfig{ + NumConcurrentTrieSyncers: 5, + MaxHardCapForMissingNodes: 5, + TrieSyncerVersion: 2, + CheckNodesOnDisk: false, + }, + GeneralSettings: config.GeneralSettingsConfig{ + SyncProcessTimeInMillis: 6000, + }, + }, + BootstrapRoundIndex: 0, + CoreComponents: n.Node.GetCoreComponents(), + NetworkComponents: n.Node.GetNetworkComponents(), + CryptoComponents: n.Node.GetCryptoComponents(), + DataComponents: n.Node.GetDataComponents(), + ProcessComponents: n.Node.GetProcessComponents(), + StateComponents: n.Node.GetStateComponents(), + StatusComponents: statusComponents, + StatusCoreComponents: n.Node.GetStatusCoreComponents(), + ScheduledProcessor: &consensusMocks.ScheduledProcessorStub{}, + IsInImportMode: n.Node.IsInImportMode(), + } + + consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs) + require.Nil(t, err) + + managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusFactory) + require.Nil(t, err) + + err = managedConsensusComponents.Create() + require.Nil(t, err) + } + } + + time.Sleep(100 * time.Second) + + fmt.Println("Checking shards...") + + for _, nodesList := range nodes { + // expectedNonce := nodesList[0].Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce() + expectedNonce := 1 + for _, n := range nodesList { + for i := 1; i < len(nodes); i++ { + if check.IfNil(n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) { + assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) + } else { + assert.Equal(t, expectedNonce, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce()) + } + } + } + } +} + +func TestConsensusBLSWithFullProcessing(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + _ = logger.SetLogLevel("*:DEBUG,process:TRACE,consensus:TRACE") + logger.ToggleLoggerName(true) + + numKeysOnEachNode := 1 + numMetaNodes := uint32(4) + numNodes := uint32(4) + consensusSize := uint32(4 * numKeysOnEachNode) + roundTime := 
uint64(1000) + + // maxShards := uint32(1) + // shardId := uint32(0) + // numNodesPerShard := 3 + + log.Info("runFullNodesTest", + "numNodes", numNodes, + "numKeysOnEachNode", numKeysOnEachNode, + "consensusSize", consensusSize, + ) + + enableEpochsConfig := integrationTests.CreateEnableEpochsConfig() + + equivalentProodsActivationEpoch := uint32(10) + + enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProodsActivationEpoch + enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProodsActivationEpoch + + fmt.Println("Step 1. Setup nodes...") + + nodes := integrationTests.CreateNodesWithTestFullNode( + int(numMetaNodes), + int(numNodes), + int(consensusSize), + roundTime, + blsConsensusType, + numKeysOnEachNode, + enableEpochsConfig, + ) + + for shardID, nodesList := range nodes { + for _, n := range nodesList { + skBuff, _ := n.NodeKeys.MainKey.Sk.ToByteArray() + pkBuff, _ := n.NodeKeys.MainKey.Pk.ToByteArray() + + encodedNodePkBuff := testPubkeyConverter.SilentEncode(pkBuff, log) + + fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", + shardID, + hex.EncodeToString(skBuff), + encodedNodePkBuff, + ) + } + } + + time.Sleep(p2pBootstrapDelay) + + defer func() { + for _, nodesList := range nodes { + for _, n := range nodesList { + n.Close() + } + } + }() + + for _, nodesList := range nodes { + for _, n := range nodesList { + statusComponents := integrationTests.GetDefaultStatusComponents() + + consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{ + Config: config.Config{ + Consensus: config.ConsensusConfig{ + Type: blsConsensusType, + }, + ValidatorPubkeyConverter: config.PubkeyConfig{ + Length: 96, + Type: "bls", + SignatureLength: 48, + }, + TrieSync: config.TrieSyncConfig{ + NumConcurrentTrieSyncers: 5, + MaxHardCapForMissingNodes: 5, + TrieSyncerVersion: 2, + CheckNodesOnDisk: false, + }, + GeneralSettings: config.GeneralSettingsConfig{ + SyncProcessTimeInMillis: 6000, + }, + }, + BootstrapRoundIndex: 0, + CoreComponents: n.Node.GetCoreComponents(), + NetworkComponents: n.Node.GetNetworkComponents(), + CryptoComponents: n.Node.GetCryptoComponents(), + DataComponents: n.Node.GetDataComponents(), + ProcessComponents: n.Node.GetProcessComponents(), + StateComponents: n.Node.GetStateComponents(), + StatusComponents: statusComponents, + StatusCoreComponents: n.Node.GetStatusCoreComponents(), + ScheduledProcessor: &consensusMocks.ScheduledProcessorStub{}, + IsInImportMode: n.Node.IsInImportMode(), + } + + consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs) + require.Nil(t, err) + + managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusFactory) + require.Nil(t, err) + + err = managedConsensusComponents.Create() + require.Nil(t, err) + } + } + + time.Sleep(10 * time.Second) + + fmt.Println("Checking shards...") + + for _, nodesList := range nodes { + expectedNonce := uint64(0) + if !check.IfNil(nodesList[0].Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) { + expectedNonce = nodesList[0].Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce() + } + for _, n := range nodesList { + for i := 1; i < len(nodes); i++ { + if check.IfNil(n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) { + // assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) + } else { + fmt.Println("FOUND") + assert.Equal(t, expectedNonce, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce()) + } + } + } + } +} + func initNodesAndTest( numMetaNodes, 
numNodes, diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go new file mode 100644 index 00000000000..cf841e4c54e --- /dev/null +++ b/integrationTests/testFullNode.go @@ -0,0 +1,1144 @@ +package integrationTests + +import ( + "encoding/hex" + "fmt" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/core/versioning" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/hashing" + crypto "github.com/multiversx/mx-chain-crypto-go" + mclMultiSig "github.com/multiversx/mx-chain-crypto-go/signing/mcl/multisig" + "github.com/multiversx/mx-chain-crypto-go/signing/multisig" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/enablers" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/round" + "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + epochStartDisabled "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/epochStart/shardchain" + cryptoFactory "github.com/multiversx/mx-chain-go/factory/crypto" + "github.com/multiversx/mx-chain-go/factory/peerSignatureHandler" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/keysManagement" + "github.com/multiversx/mx-chain-go/node" + "github.com/multiversx/mx-chain-go/node/nodeDebugFactory" + "github.com/multiversx/mx-chain-go/ntp" + p2pFactory "github.com/multiversx/mx-chain-go/p2p/factory" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/process/factory/interceptorscontainer" + "github.com/multiversx/mx-chain-go/process/interceptors" + disabledInterceptors "github.com/multiversx/mx-chain-go/process/interceptors/disabled" + interceptorsFactory "github.com/multiversx/mx-chain-go/process/interceptors/factory" + processMock "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/process/scToProtocol" + "github.com/multiversx/mx-chain-go/process/smartContract" + processSync "github.com/multiversx/mx-chain-go/process/sync" + "github.com/multiversx/mx-chain-go/process/track" + chainShardingMocks "github.com/multiversx/mx-chain-go/sharding/mock" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/blockInfoProviders" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/storage/cache" + "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/chainParameters" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + 
"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + testFactory "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" + logger "github.com/multiversx/mx-chain-logger-go" + wasmConfig "github.com/multiversx/mx-chain-vm-go/config" +) + +func CreateNodesWithTestFullNode( + numMetaNodes int, + nodesPerShard int, + consensusSize int, + roundTime uint64, + consensusType string, + numKeysOnEachNode int, + enableEpochsConfig config.EnableEpochs, +) map[uint32][]*TestFullNode { + + nodes := make(map[uint32][]*TestFullNode, nodesPerShard) + cp := CreateCryptoParams(nodesPerShard, numMetaNodes, maxShards, numKeysOnEachNode) + keysMap := PubKeysMapFromNodesKeysMap(cp.NodesKeys) + validatorsMap := GenValidatorsFromPubKeys(keysMap, maxShards) + eligibleMap, _ := nodesCoordinator.NodesInfoToValidators(validatorsMap) + waitingMap := make(map[uint32][]nodesCoordinator.Validator) + connectableNodes := make(map[uint32][]Connectable, 0) + + startTime := time.Now().Unix() + testHasher := createHasher(consensusType) + + for shardID := range cp.NodesKeys { + for _, keysPair := range cp.NodesKeys[shardID] { + multiSigner, _ := multisig.NewBLSMultisig(&mclMultiSig.BlsMultiSigner{Hasher: testHasher}, cp.KeyGen) + multiSignerMock := createCustomMultiSignerMock(multiSigner) + + args := ArgsTestFullNode{ + ArgTestProcessorNode: &ArgTestProcessorNode{ + MaxShards: 2, + NodeShardId: 0, + TxSignPrivKeyShardId: 0, + WithSync: false, + EpochsConfig: &enableEpochsConfig, + NodeKeys: keysPair, + }, + ConsensusSize: consensusSize, + RoundTime: roundTime, + ConsensusType: consensusType, + EligibleMap: eligibleMap, + WaitingMap: waitingMap, + KeyGen: cp.KeyGen, + P2PKeyGen: cp.P2PKeyGen, + MultiSigner: multiSignerMock, + StartTime: startTime, + } + + tfn := NewTestFullNode(args) + nodes[shardID] = append(nodes[shardID], tfn) + connectableNodes[shardID] = append(connectableNodes[shardID], tfn) + } + } + + for shardID := range nodes { + ConnectNodes(connectableNodes[shardID]) + } + + return nodes +} + +type ArgsTestFullNode struct { + *ArgTestProcessorNode + + ConsensusSize int + RoundTime uint64 + ConsensusType string + EligibleMap map[uint32][]nodesCoordinator.Validator + WaitingMap map[uint32][]nodesCoordinator.Validator + KeyGen crypto.KeyGenerator + P2PKeyGen crypto.KeyGenerator + MultiSigner *cryptoMocks.MultisignerMock + StartTime int64 +} + +type TestFullNode struct { + *TestProcessorNode +} + +func NewTestFullNode(args ArgsTestFullNode) *TestFullNode { + tpn := newBaseTestProcessorNode(*args.ArgTestProcessorNode) + + tfn := &TestFullNode{ + TestProcessorNode: tpn, + } + + tfn.initTestNodeWithArgs(*args.ArgTestProcessorNode, args) + + return tfn +} + +func (tfn *TestFullNode) initNodesCoordinator( + consensusSize int, + hasher hashing.Hasher, + epochStartRegistrationHandler notifier.EpochStartNotifier, + eligibleMap map[uint32][]nodesCoordinator.Validator, + waitingMap map[uint32][]nodesCoordinator.Validator, + pkBytes []byte, 
+ cache storage.Cacher, +) { + argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ + ChainParametersHandler: &chainParameters.ChainParametersHandlerStub{ + ChainParametersForEpochCalled: func(_ uint32) (config.ChainParametersByEpochConfig, error) { + return config.ChainParametersByEpochConfig{ + ShardConsensusGroupSize: uint32(consensusSize), + MetachainConsensusGroupSize: uint32(consensusSize), + }, nil + }, + }, + Marshalizer: TestMarshalizer, + Hasher: hasher, + Shuffler: &shardingMocks.NodeShufflerMock{}, + EpochStartNotifier: epochStartRegistrationHandler, + BootStorer: CreateMemUnit(), + NbShards: maxShards, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: pkBytes, + ConsensusGroupCache: cache, + ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardIDAsObserver: tfn.ShardCoordinator.SelfId(), + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + } + + tfn.NodesCoordinator, _ = nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) +} + +func (tpn *TestFullNode) initTestNodeWithArgs(args ArgTestProcessorNode, fullArgs ArgsTestFullNode) { + tpn.AppStatusHandler = args.AppStatusHandler + if check.IfNil(args.AppStatusHandler) { + tpn.AppStatusHandler = TestAppStatusHandler + } + + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } + + tpn.MainMessenger = CreateMessengerWithNoDiscovery() + + tpn.StatusMetrics = args.StatusMetrics + if check.IfNil(args.StatusMetrics) { + args.StatusMetrics = &testscommon.StatusMetricsStub{} + } + + tpn.initChainHandler() + tpn.initHeaderValidator() + + syncer := ntp.NewSyncTime(ntp.NewNTPGoogleConfig(), nil) + syncer.StartSyncingTime() + + roundHandler, _ := round.NewRound( + time.Unix(fullArgs.StartTime, 0), + syncer.CurrentTime(), + time.Millisecond*time.Duration(fullArgs.RoundTime), + syncer, + 0) + + tpn.NetworkShardingCollector = mock.NewNetworkShardingCollectorMock() + if check.IfNil(tpn.EpochNotifier) { + tpn.EpochStartNotifier = notifier.NewEpochStartSubscriptionHandler() + } + tpn.initStorage() + if check.IfNil(args.TrieStore) { + tpn.initAccountDBsWithPruningStorer() + } else { + tpn.initAccountDBs(args.TrieStore) + } + + economicsConfig := args.EconomicsConfig + if economicsConfig == nil { + economicsConfig = createDefaultEconomicsConfig() + } + + tpn.initEconomicsData(economicsConfig) + tpn.initRatingsData() + tpn.initRequestedItemsHandler() + tpn.initResolvers() + tpn.initRequesters() + tpn.initValidatorStatistics() + tpn.initGenesisBlocks(args) + tpn.initBlockTracker(roundHandler) + + gasMap := wasmConfig.MakeGasMapForTests() + defaults.FillGasMapInternal(gasMap, 1) + if args.GasScheduleMap != nil { + gasMap = args.GasScheduleMap + } + vmConfig := getDefaultVMConfig() + if args.VMConfig != nil { + vmConfig = args.VMConfig + } + tpn.initInnerProcessors(gasMap, vmConfig) + + if check.IfNil(args.TrieStore) { + var apiBlockchain data.ChainHandler + if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { + apiBlockchain, _ = blockchain.NewMetaChain(statusHandlerMock.NewAppStatusHandlerMock()) + } else { + apiBlockchain, _ = 
blockchain.NewBlockChain(statusHandlerMock.NewAppStatusHandlerMock()) + } + argsNewScQueryService := smartContract.ArgsNewSCQueryService{ + VmContainer: tpn.VMContainer, + EconomicsFee: tpn.EconomicsData, + BlockChainHook: tpn.BlockchainHook, + MainBlockChain: tpn.BlockChain, + APIBlockChain: apiBlockchain, + WasmVMChangeLocker: tpn.WasmVMChangeLocker, + Bootstrapper: tpn.Bootstrapper, + AllowExternalQueriesChan: common.GetClosedUnbufferedChannel(), + HistoryRepository: tpn.HistoryRepository, + ShardCoordinator: tpn.ShardCoordinator, + StorageService: tpn.Storage, + Marshaller: TestMarshaller, + Hasher: TestHasher, + Uint64ByteSliceConverter: TestUint64Converter, + } + tpn.SCQueryService, _ = smartContract.NewSCQueryService(argsNewScQueryService) + } else { + tpn.createFullSCQueryService(gasMap, vmConfig) + } + + testHasher := createHasher(fullArgs.ConsensusType) + epochStartRegistrationHandler := notifier.NewEpochStartSubscriptionHandler() + pkBytes, _ := tpn.NodeKeys.MainKey.Pk.ToByteArray() + consensusCache, _ := cache.NewLRUCache(10000) + + tpn.initNodesCoordinator( + fullArgs.ConsensusSize, + testHasher, + epochStartRegistrationHandler, + fullArgs.EligibleMap, + fullArgs.WaitingMap, + pkBytes, + consensusCache, + ) + + tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( + TestMarshalizer, + TestHasher, + tpn.MainMessenger, + tpn.ShardCoordinator, + tpn.OwnAccount.PeerSigHandler, + tpn.DataPool.Headers(), + tpn.MainInterceptorsContainer, + &testscommon.AlarmSchedulerStub{}, + testscommon.NewKeysHandlerSingleSignerMock( + tpn.NodeKeys.MainKey.Sk, + tpn.MainMessenger.ID(), + ), + config.ConsensusGradualBroadcastConfig{GradualIndexBroadcastDelay: []config.IndexBroadcastDelay{}}, + ) + + if args.WithSync { + tpn.initBootstrapper() + } + tpn.setGenesisBlock() + tpn.initNode(fullArgs, syncer, roundHandler) + tpn.addHandlersForCounters() + tpn.addGenesisBlocksIntoStorage() + + if args.GenesisFile != "" { + tpn.createHeartbeatWithHardforkTrigger() + } +} + +func (tpn *TestFullNode) initNode( + args ArgsTestFullNode, + syncer ntp.SyncTimer, + roundHandler consensus.RoundHandler, +) { + var err error + + statusCoreComponents := &testFactory.StatusCoreComponentsStub{ + StatusMetricsField: tpn.StatusMetrics, + AppStatusHandlerField: tpn.AppStatusHandler, + } + if tpn.EpochNotifier == nil { + tpn.EpochNotifier = forking.NewGenericEpochNotifier() + } + if tpn.EnableEpochsHandler == nil { + tpn.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(CreateEnableEpochsConfig(), tpn.EpochNotifier) + } + + var epochTrigger TestEpochStartTrigger + if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { + argsNewMetaEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + GenesisTime: time.Unix(args.StartTime, 0), + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 1, + RoundsPerEpoch: 1000, + }, + Epoch: 0, + Storage: createTestStore(), + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + DataPool: tpn.DataPool, + } + epochStartTrigger, err := metachain.NewEpochStartTrigger(argsNewMetaEpochStart) + if err != nil { + fmt.Println(err.Error()) + } + epochTrigger = &metachain.TestTrigger{} + epochTrigger.SetTrigger(epochStartTrigger) + } else { + argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ + MiniBlocksPool: tpn.DataPool.MiniBlocks(), + ValidatorsInfoPool: tpn.DataPool.ValidatorsInfo(), + RequestHandler: 
&testscommon.RequestHandlerStub{}, + } + peerMiniBlockSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) + + argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + HeaderValidator: &mock.HeaderValidatorStub{}, + Uint64Converter: TestUint64Converter, + DataPool: tpn.DataPool, + Storage: tpn.Storage, + RequestHandler: &testscommon.RequestHandlerStub{}, + Epoch: 0, + Validity: 1, + Finality: 1, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + PeerMiniBlocksSyncer: peerMiniBlockSyncer, + RoundHandler: roundHandler, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EnableEpochsHandler: tpn.EnableEpochsHandler, + } + epochStartTrigger, err := shardchain.NewEpochStartTrigger(argsShardEpochStart) + if err != nil { + fmt.Println("NewEpochStartTrigger shard") + fmt.Println(err.Error()) + } + epochTrigger = &shardchain.TestTrigger{} + epochTrigger.SetTrigger(epochStartTrigger) + } + + tpn.EpochStartTrigger = epochTrigger + + strPk := "" + if !check.IfNil(args.HardforkPk) { + buff, err := args.HardforkPk.ToByteArray() + log.LogIfError(err) + + strPk = hex.EncodeToString(buff) + } + _ = tpn.createHardforkTrigger(strPk) + + coreComponents := GetDefaultCoreComponents(tpn.EnableEpochsHandler, tpn.EpochNotifier) + coreComponents.SyncTimerField = syncer + coreComponents.RoundHandlerField = roundHandler + + coreComponents.InternalMarshalizerField = TestMarshalizer + coreComponents.VmMarshalizerField = TestVmMarshalizer + coreComponents.TxMarshalizerField = TestTxSignMarshalizer + coreComponents.HasherField = TestHasher + coreComponents.AddressPubKeyConverterField = TestAddressPubkeyConverter + coreComponents.ValidatorPubKeyConverterField = TestValidatorPubkeyConverter + coreComponents.ChainIdCalled = func() string { + return string(tpn.ChainID) + } + coreComponents.GenesisTimeField = time.Unix(args.StartTime, 0) + coreComponents.GenesisNodesSetupField = &genesisMocks.NodesSetupStub{ + GetShardConsensusGroupSizeCalled: func() uint32 { + return uint32(args.ConsensusSize) + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return uint32(args.ConsensusSize) + }, + } + coreComponents.MinTransactionVersionCalled = func() uint32 { + return tpn.MinTransactionVersion + } + coreComponents.TxVersionCheckField = versioning.NewTxVersionChecker(tpn.MinTransactionVersion) + hardforkPubKeyBytes, _ := coreComponents.ValidatorPubKeyConverterField.Decode(hardforkPubKey) + coreComponents.HardforkTriggerPubKeyField = hardforkPubKeyBytes + coreComponents.Uint64ByteSliceConverterField = TestUint64Converter + coreComponents.EconomicsDataField = tpn.EconomicsData + coreComponents.APIEconomicsHandler = tpn.EconomicsData + coreComponents.EnableEpochsHandlerField = tpn.EnableEpochsHandler + coreComponents.EpochNotifierField = tpn.EpochNotifier + coreComponents.RoundNotifierField = tpn.RoundNotifier + coreComponents.WasmVMChangeLockerInternal = tpn.WasmVMChangeLocker + coreComponents.EconomicsDataField = tpn.EconomicsData + + dataComponents := GetDefaultDataComponents() + dataComponents.BlockChain = tpn.BlockChain + dataComponents.DataPool = tpn.DataPool + dataComponents.Store = tpn.Storage + + bootstrapComponents := getDefaultBootstrapComponents(tpn.ShardCoordinator, tpn.EnableEpochsHandler) + + tpn.BlockBlackListHandler = cache.NewTimeCache(TimeSpanForBadHeaders) + + if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { + tpn.ForkDetector, err = processSync.NewShardForkDetector( + roundHandler, + 
tpn.BlockBlackListHandler, + tpn.BlockTracker, + args.StartTime, + tpn.EnableEpochsHandler, + tpn.DataPool.Proofs()) + } else { + tpn.ForkDetector, err = processSync.NewMetaForkDetector( + roundHandler, + tpn.BlockBlackListHandler, + tpn.BlockTracker, + args.StartTime, + tpn.EnableEpochsHandler, + tpn.DataPool.Proofs()) + } + if err != nil { + panic(err.Error()) + } + + argsKeysHolder := keysManagement.ArgsManagedPeersHolder{ + KeyGenerator: args.KeyGen, + P2PKeyGenerator: args.P2PKeyGen, + MaxRoundsOfInactivity: 10, + PrefsConfig: config.Preferences{}, + P2PKeyConverter: p2pFactory.NewP2PKeyConverter(), + } + keysHolder, _ := keysManagement.NewManagedPeersHolder(argsKeysHolder) + + // adding provided handled keys + for _, key := range args.NodeKeys.HandledKeys { + skBytes, _ := key.Sk.ToByteArray() + _ = keysHolder.AddManagedPeer(skBytes) + } + + multiSigContainer := cryptoMocks.NewMultiSignerContainerMock(args.MultiSigner) + pubKey := tpn.NodeKeys.MainKey.Sk.GeneratePublic() + pubKeyBytes, _ := pubKey.ToByteArray() + pubKeyString := coreComponents.ValidatorPubKeyConverterField.SilentEncode(pubKeyBytes, log) + argsKeysHandler := keysManagement.ArgsKeysHandler{ + ManagedPeersHolder: keysHolder, + PrivateKey: tpn.NodeKeys.MainKey.Sk, + Pid: tpn.MainMessenger.ID(), + } + keysHandler, _ := keysManagement.NewKeysHandler(argsKeysHandler) + + signingHandlerArgs := cryptoFactory.ArgsSigningHandler{ + PubKeys: []string{pubKeyString}, + MultiSignerContainer: multiSigContainer, + KeyGenerator: args.KeyGen, + KeysHandler: keysHandler, + SingleSigner: TestSingleBlsSigner, + } + sigHandler, _ := cryptoFactory.NewSigningHandler(signingHandlerArgs) + + cryptoComponents := GetDefaultCryptoComponents() + cryptoComponents.PrivKey = tpn.NodeKeys.MainKey.Sk + cryptoComponents.PubKey = tpn.NodeKeys.MainKey.Pk + cryptoComponents.TxSig = tpn.OwnAccount.SingleSigner + cryptoComponents.BlockSig = tpn.OwnAccount.SingleSigner + cryptoComponents.MultiSigContainer = cryptoMocks.NewMultiSignerContainerMock(tpn.MultiSigner) + cryptoComponents.BlKeyGen = tpn.OwnAccount.KeygenTxSign + cryptoComponents.TxKeyGen = TestKeyGenForAccounts + + peerSigCache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) + peerSigHandler, _ := peerSignatureHandler.NewPeerSignatureHandler(peerSigCache, TestSingleBlsSigner, args.KeyGen) + cryptoComponents.PeerSignHandler = peerSigHandler + cryptoComponents.SigHandler = sigHandler + cryptoComponents.KeysHandlerField = keysHandler + + tpn.initInterceptors(coreComponents, cryptoComponents, roundHandler, tpn.EnableEpochsHandler, tpn.Storage, epochTrigger) + + if args.WithSync { + tpn.initBlockProcessorWithSync() + } else { + tpn.initBlockProcessor(coreComponents, dataComponents, args, roundHandler) + } + + processComponents := GetDefaultProcessComponents() + processComponents.ForkDetect = tpn.ForkDetector + processComponents.BlockProcess = tpn.BlockProcessor + processComponents.ReqFinder = tpn.RequestersFinder + processComponents.HeaderIntegrVerif = tpn.HeaderIntegrityVerifier + processComponents.HeaderSigVerif = tpn.HeaderSigVerifier + processComponents.BlackListHdl = tpn.BlockBlackListHandler + processComponents.NodesCoord = tpn.NodesCoordinator + processComponents.ShardCoord = tpn.ShardCoordinator + processComponents.IntContainer = tpn.MainInterceptorsContainer + processComponents.FullArchiveIntContainer = tpn.FullArchiveInterceptorsContainer + processComponents.HistoryRepositoryInternal = tpn.HistoryRepository + 
processComponents.WhiteListHandlerInternal = tpn.WhiteListHandler + processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs + processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.MainMessenger) + processComponents.HardforkTriggerField = tpn.HardforkTrigger + processComponents.ScheduledTxsExecutionHandlerInternal = &testscommon.ScheduledTxsExecutionStub{} + processComponents.ProcessedMiniBlocksTrackerInternal = &testscommon.ProcessedMiniBlocksTrackerStub{} + processComponents.SentSignaturesTrackerInternal = &testscommon.SentSignatureTrackerStub{} + + processComponents.RoundHandlerField = roundHandler + processComponents.EpochNotifier = tpn.EpochStartNotifier + + stateComponents := GetDefaultStateComponents() + stateComponents.Accounts = tpn.AccntState + stateComponents.AccountsAPI = tpn.AccntState + + finalProvider, _ := blockInfoProviders.NewFinalBlockInfo(dataComponents.BlockChain) + finalAccountsApi, _ := state.NewAccountsDBApi(tpn.AccntState, finalProvider) + + currentProvider, _ := blockInfoProviders.NewCurrentBlockInfo(dataComponents.BlockChain) + currentAccountsApi, _ := state.NewAccountsDBApi(tpn.AccntState, currentProvider) + + historicalAccountsApi, _ := state.NewAccountsDBApiWithHistory(tpn.AccntState) + + argsAccountsRepo := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: finalAccountsApi, + CurrentStateAccountsWrapper: currentAccountsApi, + HistoricalStateAccountsWrapper: historicalAccountsApi, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(argsAccountsRepo) + + networkComponents := GetDefaultNetworkComponents() + networkComponents.Messenger = tpn.MainMessenger + networkComponents.FullArchiveNetworkMessengerField = tpn.FullArchiveMessenger + networkComponents.PeersRatingHandlerField = tpn.PeersRatingHandler + networkComponents.PeersRatingMonitorField = tpn.PeersRatingMonitor + networkComponents.InputAntiFlood = &mock.NilAntifloodHandler{} + networkComponents.PeerHonesty = &mock.PeerHonestyHandlerStub{} + + tpn.Node, err = node.NewNode( + node.WithAddressSignatureSize(64), + node.WithValidatorSignatureSize(48), + node.WithBootstrapComponents(bootstrapComponents), + node.WithCoreComponents(coreComponents), + node.WithStatusCoreComponents(statusCoreComponents), + node.WithDataComponents(dataComponents), + node.WithProcessComponents(processComponents), + node.WithCryptoComponents(cryptoComponents), + node.WithNetworkComponents(networkComponents), + node.WithStateComponents(stateComponents), + node.WithPeerDenialEvaluator(&mock.PeerDenialEvaluatorStub{}), + node.WithStatusCoreComponents(statusCoreComponents), + node.WithGenesisTime(time.Unix(args.StartTime, 0)), + node.WithRoundDuration(args.RoundTime), + node.WithPublicKeySize(publicKeySize), + ) + log.LogIfError(err) + + err = nodeDebugFactory.CreateInterceptedDebugHandler( + tpn.Node, + tpn.MainInterceptorsContainer, + tpn.ResolversContainer, + tpn.RequestersFinder, + config.InterceptorResolverDebugConfig{ + Enabled: true, + CacheSize: 1000, + EnablePrint: true, + IntervalAutoPrintInSeconds: 1, + NumRequestsThreshold: 1, + NumResolveFailureThreshold: 1, + DebugLineExpiration: 1000, + }, + ) + log.LogIfError(err) +} + +func (tcn *TestFullNode) initInterceptors( + coreComponents process.CoreComponentsHolder, + cryptoComponents process.CryptoComponentsHolder, + roundHandler consensus.RoundHandler, + enableEpochsHandler common.EnableEpochsHandler, + storage dataRetriever.StorageService, + epochStartTrigger TestEpochStartTrigger, +) { + 
interceptorDataVerifierArgs := interceptorsFactory.InterceptedDataVerifierFactoryArgs{ + CacheSpan: time.Second * 10, + CacheExpiry: time.Second * 10, + } + + accountsAdapter := epochStartDisabled.NewAccountsAdapter() + + blockBlackListHandler := cache.NewTimeCache(TimeSpanForBadHeaders) + + genesisBlocks := make(map[uint32]data.HeaderHandler) + blockTracker := processMock.NewBlockTrackerMock(tcn.ShardCoordinator, genesisBlocks) + + whiteLstHandler, _ := disabledInterceptors.NewDisabledWhiteListDataVerifier() + + cacherVerifiedCfg := storageunit.CacheConfig{Capacity: 5000, Type: storageunit.LRUCache, Shards: 1} + cacheVerified, _ := storageunit.NewCache(cacherVerifiedCfg) + whiteListerVerifiedTxs, _ := interceptors.NewWhiteListDataVerifier(cacheVerified) + + if tcn.ShardCoordinator.SelfId() == core.MetachainShardId { + metaInterceptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ + CoreComponents: coreComponents, + CryptoComponents: cryptoComponents, + Accounts: accountsAdapter, + ShardCoordinator: tcn.ShardCoordinator, + NodesCoordinator: tcn.NodesCoordinator, + MainMessenger: tcn.MainMessenger, + FullArchiveMessenger: tcn.FullArchiveMessenger, + Store: storage, + DataPool: tcn.DataPool, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, + BlockBlackList: blockBlackListHandler, + HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, + HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: whiteLstHandler, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + AntifloodHandler: &mock.NilAntifloodHandler{}, + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + SizeCheckDelta: sizeCheckDelta, + RequestHandler: &testscommon.RequestHandlerStub{}, + PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, + SignaturesHandler: &processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + NodeOperationMode: common.NormalOperation, + InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), + } + interceptorContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorContainerFactoryArgs) + if err != nil { + fmt.Println(err.Error()) + } + + tcn.MainInterceptorsContainer, _, err = interceptorContainerFactory.Create() + if err != nil { + log.Debug("interceptor container factory Create", "error", err.Error()) + } + } else { + argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ + MiniBlocksPool: tcn.DataPool.MiniBlocks(), + ValidatorsInfoPool: tcn.DataPool.ValidatorsInfo(), + RequestHandler: &testscommon.RequestHandlerStub{}, + } + peerMiniBlockSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) + argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + HeaderValidator: &mock.HeaderValidatorStub{}, + Uint64Converter: TestUint64Converter, + DataPool: tcn.DataPool, + Storage: storage, + RequestHandler: &testscommon.RequestHandlerStub{}, + Epoch: 0, + Validity: 1, + Finality: 1, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + 
PeerMiniBlocksSyncer: peerMiniBlockSyncer, + RoundHandler: roundHandler, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EnableEpochsHandler: enableEpochsHandler, + } + _, _ = shardchain.NewEpochStartTrigger(argsShardEpochStart) + + shardIntereptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ + CoreComponents: coreComponents, + CryptoComponents: cryptoComponents, + Accounts: accountsAdapter, + ShardCoordinator: tcn.ShardCoordinator, + NodesCoordinator: tcn.NodesCoordinator, + MainMessenger: tcn.MainMessenger, + FullArchiveMessenger: tcn.FullArchiveMessenger, + Store: storage, + DataPool: tcn.DataPool, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, + BlockBlackList: blockBlackListHandler, + HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, + HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: whiteLstHandler, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + AntifloodHandler: &mock.NilAntifloodHandler{}, + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + SizeCheckDelta: sizeCheckDelta, + RequestHandler: &testscommon.RequestHandlerStub{}, + PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, + SignaturesHandler: &processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + NodeOperationMode: common.NormalOperation, + InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), + } + + interceptorContainerFactory, err := interceptorscontainer.NewShardInterceptorsContainerFactory(shardIntereptorContainerFactoryArgs) + if err != nil { + fmt.Println(err.Error()) + } + + tcn.MainInterceptorsContainer, _, err = interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + } +} + +func (tpn *TestFullNode) initBlockProcessor( + coreComponents *mock.CoreComponentsStub, + dataComponents *mock.DataComponentsStub, + args ArgsTestFullNode, + roundHandler consensus.RoundHandler, +) { + var err error + + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } + + log := logger.GetOrCreate(fmt.Sprintf("p/sync/%s", id)) + + accountsDb := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) + accountsDb[state.UserAccountsState] = tpn.AccntState + accountsDb[state.PeerAccountsState] = tpn.PeerState + + if tpn.EpochNotifier == nil { + tpn.EpochNotifier = forking.NewGenericEpochNotifier() + } + if tpn.EnableEpochsHandler == nil { + tpn.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(CreateEnableEpochsConfig(), tpn.EpochNotifier) + } + + // if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { + // tpn.ForkDetector, _ = processSync.NewShardForkDetector( + // log, + // tpn.RoundHandler, + // tpn.BlockBlackListHandler, + // tpn.BlockTracker, + // tpn.NodesSetup.GetStartTime(), + // tpn.EnableEpochsHandler, + // tpn.DataPool.Proofs()) + // } else { + // tpn.ForkDetector, _ = processSync.NewMetaForkDetector( + // log, + // tpn.RoundHandler, + // tpn.BlockBlackListHandler, + // tpn.BlockTracker, + // tpn.NodesSetup.GetStartTime(), + // tpn.EnableEpochsHandler, + // 
tpn.DataPool.Proofs()) + // } + + // if tpn.ForkDetector == nil { + // panic("AAAAAAAAAAAAAAAAA") + // } + + bootstrapComponents := getDefaultBootstrapComponents(tpn.ShardCoordinator, tpn.EnableEpochsHandler) + bootstrapComponents.HdrIntegrityVerifier = tpn.HeaderIntegrityVerifier + + statusComponents := GetDefaultStatusComponents() + + statusCoreComponents := &testFactory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, + } + + argumentsBase := block.ArgBaseProcessor{ + CoreComponents: coreComponents, + DataComponents: dataComponents, + BootstrapComponents: bootstrapComponents, + StatusComponents: statusComponents, + StatusCoreComponents: statusCoreComponents, + Config: config.Config{}, + AccountsDB: accountsDb, + ForkDetector: tpn.ForkDetector, + NodesCoordinator: tpn.NodesCoordinator, + FeeHandler: tpn.FeeAccumulator, + RequestHandler: tpn.RequestHandler, + BlockChainHook: tpn.BlockchainHook, + HeaderValidator: tpn.HeaderValidator, + BootStorer: &mock.BoostrapStorerMock{ + PutCalled: func(round int64, bootData bootstrapStorage.BootstrapData) error { + return nil + }, + }, + BlockTracker: tpn.BlockTracker, + BlockSizeThrottler: TestBlockSizeThrottler, + HistoryRepository: tpn.HistoryRepository, + GasHandler: tpn.GasHandler, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, + ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + OutportDataProvider: &outport.OutportDataProviderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, + } + + if check.IfNil(tpn.EpochStartNotifier) { + tpn.EpochStartNotifier = notifier.NewEpochStartSubscriptionHandler() + } + + if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { + if check.IfNil(tpn.EpochStartTrigger) { + argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + GenesisTime: time.Unix(args.StartTime, 0), + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 1000, + RoundsPerEpoch: 10000, + }, + Epoch: 0, + EpochStartNotifier: tpn.EpochStartNotifier, + Storage: tpn.Storage, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + DataPool: tpn.DataPool, + } + epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) + tpn.EpochStartTrigger = &metachain.TestTrigger{} + tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) + } + + argumentsBase.EpochStartTrigger = tpn.EpochStartTrigger + argumentsBase.TxCoordinator = tpn.TxCoordinator + + argsStakingToPeer := scToProtocol.ArgStakingToPeer{ + PubkeyConv: TestValidatorPubkeyConverter, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + PeerState: tpn.PeerState, + BaseState: tpn.AccntState, + ArgParser: tpn.ArgsParser, + CurrTxs: tpn.DataPool.CurrentBlockTxs(), + RatingsData: tpn.RatingsData, + EnableEpochsHandler: tpn.EnableEpochsHandler, + } + scToProtocolInstance, _ := scToProtocol.NewStakingToPeer(argsStakingToPeer) + + argsEpochStartData := metachain.ArgsNewEpochStartData{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Store: tpn.Storage, + DataPool: tpn.DataPool, + BlockTracker: tpn.BlockTracker, + ShardCoordinator: tpn.ShardCoordinator, + EpochStartTrigger: tpn.EpochStartTrigger, + RequestHandler: tpn.RequestHandler, + EnableEpochsHandler: tpn.EnableEpochsHandler, + } + 
epochStartDataCreator, _ := metachain.NewEpochStartData(argsEpochStartData) + + economicsDataProvider := metachain.NewEpochEconomicsStatistics() + argsEpochEconomics := metachain.ArgsNewEpochEconomics{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Store: tpn.Storage, + ShardCoordinator: tpn.ShardCoordinator, + RewardsHandler: tpn.EconomicsData, + RoundTime: roundHandler, + GenesisTotalSupply: tpn.EconomicsData.GenesisTotalSupply(), + EconomicsDataNotified: economicsDataProvider, + StakingV2EnableEpoch: tpn.EnableEpochs.StakingV2EnableEpoch, + } + epochEconomics, _ := metachain.NewEndOfEpochEconomicsDataCreator(argsEpochEconomics) + + systemVM, errGet := tpn.VMContainer.Get(factory.SystemVirtualMachine) + if errGet != nil { + log.Error("initBlockProcessor tpn.VMContainer.Get", "error", errGet) + } + + argsStakingDataProvider := metachain.StakingDataProviderArgs{ + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + SystemVM: systemVM, + MinNodePrice: "1000", + } + stakingDataProvider, errRsp := metachain.NewStakingDataProvider(argsStakingDataProvider) + if errRsp != nil { + log.Error("initBlockProcessor NewRewardsStakingProvider", "error", errRsp) + } + + rewardsStorage, _ := tpn.Storage.GetStorer(dataRetriever.RewardTransactionUnit) + miniBlockStorage, _ := tpn.Storage.GetStorer(dataRetriever.MiniBlockUnit) + argsEpochRewards := metachain.RewardsCreatorProxyArgs{ + BaseRewardsCreatorArgs: metachain.BaseRewardsCreatorArgs{ + ShardCoordinator: tpn.ShardCoordinator, + PubkeyConverter: TestAddressPubkeyConverter, + RewardsStorage: rewardsStorage, + MiniBlockStorage: miniBlockStorage, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + DataPool: tpn.DataPool, + ProtocolSustainabilityAddress: testProtocolSustainabilityAddress, + NodesConfigProvider: tpn.NodesCoordinator, + UserAccountsDB: tpn.AccntState, + EnableEpochsHandler: tpn.EnableEpochsHandler, + ExecutionOrderHandler: tpn.TxExecutionOrderHandler, + }, + StakingDataProvider: stakingDataProvider, + RewardsHandler: tpn.EconomicsData, + EconomicsDataProvider: economicsDataProvider, + } + epochStartRewards, _ := metachain.NewRewardsCreatorProxy(argsEpochRewards) + + validatorInfoStorage, _ := tpn.Storage.GetStorer(dataRetriever.UnsignedTransactionUnit) + argsEpochValidatorInfo := metachain.ArgsNewValidatorInfoCreator{ + ShardCoordinator: tpn.ShardCoordinator, + ValidatorInfoStorage: validatorInfoStorage, + MiniBlockStorage: miniBlockStorage, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + DataPool: tpn.DataPool, + EnableEpochsHandler: tpn.EnableEpochsHandler, + } + epochStartValidatorInfo, _ := metachain.NewValidatorInfoCreator(argsEpochValidatorInfo) + + maxNodesChangeConfigProvider, _ := notifier.NewNodesConfigProvider( + tpn.EpochNotifier, + nil, + ) + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := metachain.NewAuctionListDisplayer(metachain.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachain.NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + }) + + argsAuctionListSelector := metachain.AuctionListSelectorArgs{ + ShardCoordinator: tpn.ShardCoordinator, + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + auctionListSelector, _ := 
metachain.NewAuctionListSelector(argsAuctionListSelector) + + argsEpochSystemSC := metachain.ArgsNewEpochStartSystemSCProcessing{ + SystemVM: systemVM, + UserAccountsDB: tpn.AccntState, + PeerAccountsDB: tpn.PeerState, + Marshalizer: TestMarshalizer, + StartRating: tpn.RatingsData.StartRating(), + ValidatorInfoCreator: tpn.ValidatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: tpn.NodesCoordinator, + EpochNotifier: tpn.EpochNotifier, + GenesisNodesConfig: tpn.NodesSetup, + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: tpn.NodesCoordinator, + ShardCoordinator: tpn.ShardCoordinator, + ESDTOwnerAddressBytes: vm.EndOfEpochAddress, + EnableEpochsHandler: tpn.EnableEpochsHandler, + AuctionListSelector: auctionListSelector, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + } + epochStartSystemSCProcessor, _ := metachain.NewSystemSCProcessor(argsEpochSystemSC) + tpn.EpochStartSystemSCProcessor = epochStartSystemSCProcessor + + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: argumentsBase, + SCToProtocol: scToProtocolInstance, + PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, + EpochEconomics: epochEconomics, + EpochStartDataCreator: epochStartDataCreator, + EpochRewardsCreator: epochStartRewards, + EpochValidatorInfoCreator: epochStartValidatorInfo, + ValidatorStatisticsProcessor: tpn.ValidatorStatisticsProcessor, + EpochSystemSCProcessor: epochStartSystemSCProcessor, + } + + tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) + } else { + if check.IfNil(tpn.EpochStartTrigger) { + argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ + MiniBlocksPool: tpn.DataPool.MiniBlocks(), + ValidatorsInfoPool: tpn.DataPool.ValidatorsInfo(), + RequestHandler: tpn.RequestHandler, + } + peerMiniBlocksSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) + argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + HeaderValidator: tpn.HeaderValidator, + Uint64Converter: TestUint64Converter, + DataPool: tpn.DataPool, + Storage: tpn.Storage, + RequestHandler: tpn.RequestHandler, + Epoch: 0, + Validity: 1, + Finality: 1, + EpochStartNotifier: tpn.EpochStartNotifier, + PeerMiniBlocksSyncer: peerMiniBlocksSyncer, + RoundHandler: tpn.RoundHandler, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EnableEpochsHandler: tpn.EnableEpochsHandler, + } + epochStartTrigger, _ := shardchain.NewEpochStartTrigger(argsShardEpochStart) + tpn.EpochStartTrigger = &shardchain.TestTrigger{} + tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) + } + + argumentsBase.EpochStartTrigger = tpn.EpochStartTrigger + argumentsBase.BlockChainHook = tpn.BlockchainHook + argumentsBase.TxCoordinator = tpn.TxCoordinator + argumentsBase.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{} + + arguments := block.ArgShardProcessor{ + ArgBaseProcessor: argumentsBase, + } + + tpn.BlockProcessor, err = block.NewShardProcessor(arguments) + } + + if err != nil { + panic(fmt.Sprintf("error creating blockprocessor: %s", err.Error())) + } +} + +func (tpn *TestFullNode) initBlockTracker( + roundHandler consensus.RoundHandler, +) { + + argBaseTracker := track.ArgBaseTracker{ + Hasher: TestHasher, + HeaderValidator: tpn.HeaderValidator, + Marshalizer: TestMarshalizer, + RequestHandler: tpn.RequestHandler, + RoundHandler: roundHandler, + ShardCoordinator: tpn.ShardCoordinator, + Store: 
tpn.Storage, + StartHeaders: tpn.GenesisBlocks, + PoolsHolder: tpn.DataPool, + WhitelistHandler: tpn.WhiteListHandler, + FeeHandler: tpn.EconomicsData, + EnableEpochsHandler: tpn.EnableEpochsHandler, + ProofsPool: tpn.DataPool.Proofs(), + } + + var err error + if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { + arguments := track.ArgShardTracker{ + ArgBaseTracker: argBaseTracker, + } + + tpn.BlockTracker, err = track.NewShardBlockTrack(arguments) + if err != nil { + panic(err.Error()) + } + } else { + arguments := track.ArgMetaTracker{ + ArgBaseTracker: argBaseTracker, + } + + tpn.BlockTracker, err = track.NewMetaBlockTrack(arguments) + if err != nil { + panic(err.Error()) + } + } +} diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 2918d7e826f..56924a10de8 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -1775,7 +1775,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u ) processedMiniBlocksTracker := processedMb.NewProcessedMiniBlocksTracker() - fact, _ := shard.NewPreProcessorsContainerFactory( + fact, err := shard.NewPreProcessorsContainerFactory( tpn.ShardCoordinator, tpn.Storage, TestMarshalizer, @@ -1799,6 +1799,9 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u processedMiniBlocksTracker, tpn.TxExecutionOrderHandler, ) + if err != nil { + panic(err.Error()) + } tpn.PreProcessorsContainer, _ = fact.Create() argsTransactionCoordinator := coordinator.ArgTransactionCoordinator{ @@ -3121,18 +3124,25 @@ func (tpn *TestProcessorNode) initBlockTracker() { ProofsPool: tpn.DataPool.Proofs(), } + var err error if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { arguments := track.ArgShardTracker{ ArgBaseTracker: argBaseTracker, } - tpn.BlockTracker, _ = track.NewShardBlockTrack(arguments) + tpn.BlockTracker, err = track.NewShardBlockTrack(arguments) + if err != nil { + panic(err.Error()) + } } else { arguments := track.ArgMetaTracker{ ArgBaseTracker: argBaseTracker, } - tpn.BlockTracker, _ = track.NewMetaBlockTrack(arguments) + tpn.BlockTracker, err = track.NewMetaBlockTrack(arguments) + if err != nil { + panic(err.Error()) + } } } From e45434ef5856ee539d162aa6fa6ae84319ae56a9 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 28 Jan 2025 16:48:38 +0200 Subject: [PATCH 05/21] integration tests meta chain fixes --- integrationTests/consensus/consensus_test.go | 14 ++++---- integrationTests/testFullNode.go | 36 +++++++++++++++++++- 2 files changed, 42 insertions(+), 8 deletions(-) diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 55faae12bac..2ef84960446 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -210,9 +210,9 @@ func TestConsensusBLSWithFullProcessing(t *testing.T) { logger.ToggleLoggerName(true) numKeysOnEachNode := 1 - numMetaNodes := uint32(4) - numNodes := uint32(4) - consensusSize := uint32(4 * numKeysOnEachNode) + numMetaNodes := uint32(2) + numNodes := uint32(2) + consensusSize := uint32(2 * numKeysOnEachNode) roundTime := uint64(1000) // maxShards := uint32(1) @@ -227,10 +227,10 @@ func TestConsensusBLSWithFullProcessing(t *testing.T) { enableEpochsConfig := integrationTests.CreateEnableEpochsConfig() - equivalentProodsActivationEpoch := uint32(10) + equivalentProofsActivationEpoch := uint32(0) - enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProodsActivationEpoch - 
enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProodsActivationEpoch + enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProofsActivationEpoch + enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProofsActivationEpoch fmt.Println("Step 1. Setup nodes...") @@ -332,7 +332,7 @@ func TestConsensusBLSWithFullProcessing(t *testing.T) { // assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) } else { fmt.Println("FOUND") - assert.Equal(t, expectedNonce, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce()) + assert.GreaterOrEqual(t, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce(), expectedNonce) } } } diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go index cf841e4c54e..55bf1c2a5ec 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -48,6 +48,7 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract" processSync "github.com/multiversx/mx-chain-go/process/sync" "github.com/multiversx/mx-chain-go/process/track" + "github.com/multiversx/mx-chain-go/sharding" chainShardingMocks "github.com/multiversx/mx-chain-go/sharding/mock" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" @@ -110,6 +111,7 @@ func CreateNodesWithTestFullNode( EpochsConfig: &enableEpochsConfig, NodeKeys: keysPair, }, + ShardID: shardID, ConsensusSize: consensusSize, RoundTime: roundTime, ConsensusType: consensusType, @@ -137,6 +139,7 @@ func CreateNodesWithTestFullNode( type ArgsTestFullNode struct { *ArgTestProcessorNode + ShardID uint32 ConsensusSize int RoundTime uint64 ConsensusType string @@ -150,13 +153,18 @@ type ArgsTestFullNode struct { type TestFullNode struct { *TestProcessorNode + + ShardCoordinator sharding.Coordinator } func NewTestFullNode(args ArgsTestFullNode) *TestFullNode { tpn := newBaseTestProcessorNode(*args.ArgTestProcessorNode) + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, args.ShardID) + tfn := &TestFullNode{ TestProcessorNode: tpn, + ShardCoordinator: shardCoordinator, } tfn.initTestNodeWithArgs(*args.ArgTestProcessorNode, args) @@ -345,6 +353,25 @@ func (tpn *TestFullNode) initTestNodeWithArgs(args ArgTestProcessorNode, fullArg } } +func (tpn *TestFullNode) setGenesisBlock() { + genesisBlock := tpn.GenesisBlocks[tpn.ShardCoordinator.SelfId()] + _ = tpn.BlockChain.SetGenesisHeader(genesisBlock) + hash, _ := core.CalculateHash(TestMarshalizer, TestHasher, genesisBlock) + tpn.BlockChain.SetGenesisHeaderHash(hash) + log.Info("set genesis", + "shard ID", tpn.ShardCoordinator.SelfId(), + "hash", hex.EncodeToString(hash), + ) +} + +func (tpn *TestFullNode) initChainHandler() { + if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { + tpn.BlockChain = CreateMetaChain() + } else { + tpn.BlockChain = CreateShardChain() + } +} + func (tpn *TestFullNode) initNode( args ArgsTestFullNode, syncer ntp.SyncTimer, @@ -947,6 +974,10 @@ func (tpn *TestFullNode) initBlockProcessor( log.Error("initBlockProcessor tpn.VMContainer.Get", "error", errGet) } + if systemVM == nil { + systemVM, _ = mock.NewOneSCExecutorMockVM(tpn.BlockchainHook, TestHasher) + } + argsStakingDataProvider := metachain.StakingDataProviderArgs{ EnableEpochsHandler: coreComponents.EnableEpochsHandler(), SystemVM: systemVM, @@ -978,7 +1009,10 @@ func (tpn *TestFullNode) initBlockProcessor( RewardsHandler: tpn.EconomicsData, EconomicsDataProvider: economicsDataProvider, } - 
epochStartRewards, _ := metachain.NewRewardsCreatorProxy(argsEpochRewards) + epochStartRewards, err := metachain.NewRewardsCreatorProxy(argsEpochRewards) + if err != nil { + panic(fmt.Sprintf("error creating rewards creator proxy: %s", err.Error())) + } validatorInfoStorage, _ := tpn.Storage.GetStorer(dataRetriever.UnsignedTransactionUnit) argsEpochValidatorInfo := metachain.ArgsNewValidatorInfoCreator{ From bba01654df8c1d56e3c578ca0e6f3a2aab38d819 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 28 Jan 2025 16:48:13 +0200 Subject: [PATCH 06/21] check for genesis block --- consensus/spos/bls/v2/subroundBlock.go | 2 +- process/block/baseProcess.go | 2 +- process/block/metablock.go | 4 ++-- process/block/shardblock.go | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/spos/bls/v2/subroundBlock.go b/consensus/spos/bls/v2/subroundBlock.go index 0c24c834070..bc8e606175f 100644 --- a/consensus/spos/bls/v2/subroundBlock.go +++ b/consensus/spos/bls/v2/subroundBlock.go @@ -364,7 +364,7 @@ func (sr *subroundBlock) saveProofForPreviousHeaderIfNeeded(header data.HeaderHa proof := header.GetPreviousProof() err := common.VerifyProofAgainstHeader(proof, prevHeader) if err != nil { - log.Debug("saveProofForPreviousHeaderIfNeeded: invalid proof, %s", err.Error()) + log.Debug("saveProofForPreviousHeaderIfNeeded: invalid proof", "error", err) return } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 5b56ae4e7b6..984cfed2335 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -746,7 +746,7 @@ func (bp *baseProcessor) sortHeaderHashesForCurrentBlockByNonce(usedInBlock bool } func (bp *baseProcessor) hasMissingProof(headerInfo *hdrInfo, hdrHash string) bool { - isFlagEnabledForHeader := bp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, headerInfo.hdr.GetEpoch()) + isFlagEnabledForHeader := common.ShouldBlockHavePrevProof(headerInfo.hdr, bp.enableEpochsHandler, common.EquivalentMessagesFlag) if !isFlagEnabledForHeader { return false } diff --git a/process/block/metablock.go b/process/block/metablock.go index 868713789f2..57949e4b10c 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -437,7 +437,7 @@ func (mp *metaProcessor) checkProofsForShardData(header *block.MetaBlock) error continue } - if !mp.proofsPool.HasProof(shardData.ShardID, shardData.HeaderHash) { + if !mp.proofsPool.HasProof(shardData.ShardID, shardData.HeaderHash) && shardData.GetNonce() > 1 { return fmt.Errorf("%w for header hash %s", process.ErrMissingHeaderProof, hex.EncodeToString(shardData.HeaderHash)) } @@ -2234,7 +2234,7 @@ func (mp *metaProcessor) createShardInfo() ([]data.ShardDataHandler, error) { } isBlockAfterEquivalentMessagesFlag := !check.IfNil(headerInfo.hdr) && - mp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, headerInfo.hdr.GetEpoch()) + mp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, headerInfo.hdr.GetEpoch()) && headerInfo.hdr.GetNonce() > 1 hasMissingShardHdrProof := isBlockAfterEquivalentMessagesFlag && !mp.proofsPool.HasProof(headerInfo.hdr.GetShardID(), []byte(hdrHash)) if hasMissingShardHdrProof { return nil, fmt.Errorf("%w for shard header with hash %s", process.ErrMissingHeaderProof, hex.EncodeToString([]byte(hdrHash))) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 830a4f46d3c..b58afe1f70e 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -305,7 +305,7 @@ func (sp 
*shardProcessor) ProcessBlock( continue } - if !sp.proofsPool.HasProof(core.MetachainShardId, metaBlockHash) { + if !sp.proofsPool.HasProof(core.MetachainShardId, metaBlockHash) && header.GetNonce() > 1 { return fmt.Errorf("%w for header hash %s", process.ErrMissingHeaderProof, hex.EncodeToString(metaBlockHash)) } } From 06ff80ed3bc1841503397fb0ba5a6978d5afe087 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 28 Jan 2025 16:48:38 +0200 Subject: [PATCH 07/21] integration tests meta chain fixes From 22598c34f60d048b5640e15a5dab576486dd2715 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 28 Jan 2025 18:05:57 +0200 Subject: [PATCH 08/21] sync test fixes --- consensus/spos/bls/v2/subroundBlock.go | 5 ++++- integrationTests/sync/basicSync/basicSync_test.go | 5 ++--- integrationTests/testProcessorNode.go | 6 +++++- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/consensus/spos/bls/v2/subroundBlock.go b/consensus/spos/bls/v2/subroundBlock.go index bc8e606175f..f6566153f72 100644 --- a/consensus/spos/bls/v2/subroundBlock.go +++ b/consensus/spos/bls/v2/subroundBlock.go @@ -444,7 +444,10 @@ func (sr *subroundBlock) isHeaderForCurrentConsensus(header data.HeaderHandler) func (sr *subroundBlock) getLeaderForHeader(headerHandler data.HeaderHandler) ([]byte, error) { nc := sr.NodesCoordinator() - prevBlockEpoch := sr.Blockchain().GetCurrentBlockHeader().GetEpoch() + prevBlockEpoch := uint32(0) + if sr.Blockchain().GetCurrentBlockHeader() != nil { + prevBlockEpoch = sr.Blockchain().GetCurrentBlockHeader().GetEpoch() + } // TODO: remove this if first block in new epoch will be validated by epoch validators // first block in epoch is validated by previous epoch validators selectionEpoch := headerHandler.GetEpoch() diff --git a/integrationTests/sync/basicSync/basicSync_test.go b/integrationTests/sync/basicSync/basicSync_test.go index 408262f2297..1d08f51b30e 100644 --- a/integrationTests/sync/basicSync/basicSync_test.go +++ b/integrationTests/sync/basicSync/basicSync_test.go @@ -199,13 +199,12 @@ func testAllNodesHaveSameLastBlock(t *testing.T, nodes []*integrationTests.TestP } func TestSyncWorksInShard_EmptyBlocksNoForks_With_EquivalentProofs(t *testing.T) { - // TODO: remove skip after test is fixed - t.Skip("will be fixed in another PR") - if testing.Short() { t.Skip("this is not a short test") } + logger.SetLogLevel("*:TRACE") + // 3 shard nodes and 1 metachain node maxShards := uint32(1) shardId := uint32(0) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 56924a10de8..d6d9a7572a7 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -2833,6 +2833,7 @@ func (tpn *TestProcessorNode) setBlockSignatures(blockHeader data.HeaderHandler) HeaderEpoch: currHdr.GetEpoch(), HeaderNonce: currHdr.GetNonce(), HeaderShardId: currHdr.GetShardID(), + HeaderRound: currHdr.GetRound(), IsStartOfEpoch: blockHeader.IsStartOfEpochBlock(), } blockHeader.SetPreviousProof(previousProof) @@ -2899,7 +2900,10 @@ func (tpn *TestProcessorNode) WhiteListBody(nodes []*TestProcessorNode, bodyHand // CommitBlock commits the block and body func (tpn *TestProcessorNode) CommitBlock(body data.BodyHandler, header data.HeaderHandler) { - _ = tpn.BlockProcessor.CommitBlock(header, body) + err := tpn.BlockProcessor.CommitBlock(header, body) + if err != nil { + log.Error("CommitBlock", "error", err) + } } // GetShardHeader returns the first *dataBlock.Header stored in datapools having the nonce provided as parameter From 
14514986cc6087a0a94de03be44e34bb5d3289fe Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 28 Jan 2025 21:57:12 +0200 Subject: [PATCH 09/21] cleanup unused code --- consensus/spos/bls/v1/subroundEndRound.go | 1 - integrationTests/consensus/consensus_test.go | 152 ++----------------- integrationTests/testFullNode.go | 52 +------ 3 files changed, 18 insertions(+), 187 deletions(-) diff --git a/consensus/spos/bls/v1/subroundEndRound.go b/consensus/spos/bls/v1/subroundEndRound.go index 51c1f4a1af3..c591c736aca 100644 --- a/consensus/spos/bls/v1/subroundEndRound.go +++ b/consensus/spos/bls/v1/subroundEndRound.go @@ -321,7 +321,6 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { return false } - log.Error("doEndRoundJobByLeader.SetSignature", "set sig", "sig", sig) err = header.SetSignature(sig) if err != nil { log.Debug("doEndRoundJobByLeader.SetSignature", "error", err.Error()) diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 2ef84960446..604b41c93ad 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -69,143 +69,23 @@ func TestConsensusBLSNotEnoughValidators(t *testing.T) { runConsensusWithNotEnoughValidators(t, blsConsensusType) } -func TestConsensusBLSWithProcessing(t *testing.T) { +func TestConsensusBLSWithFullProcessing_BeforeEquivalentProofs(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } - _ = logger.SetLogLevel("*:DEBUG,process:TRACE,consensus:TRACE") - logger.ToggleLoggerName(true) - - numKeysOnEachNode := 1 - numMetaNodes := uint32(2) - numNodes := uint32(2) - consensusSize := uint32(2 * numKeysOnEachNode) - roundTime := uint64(1000) - - log.Info("runFullConsensusTest", - "numNodes", numNodes, - "numKeysOnEachNode", numKeysOnEachNode, - "consensusSize", consensusSize, - ) - - enableEpochsConfig := integrationTests.CreateEnableEpochsConfig() - - equivalentProodsActivationEpoch := uint32(0) - - enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProodsActivationEpoch - enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProodsActivationEpoch - - fmt.Println("Step 1. 
Setup nodes...") - - nodes := integrationTests.CreateNodesWithTestConsensusNode( - int(numMetaNodes), - int(numNodes), - int(consensusSize), - roundTime, - blsConsensusType, - numKeysOnEachNode, - enableEpochsConfig, - ) - - // leaders := []*integrationTests.TestConsensusNode{} - for shardID, nodesList := range nodes { - // leaders = append(leaders, nodesList[0]) - - displayAndStartNodes(shardID, nodesList) - } - - time.Sleep(p2pBootstrapDelay) - - // round := uint64(0) - // nonce := uint64(0) - // round = integrationTests.IncrementAndPrintRound(round) - // integrationTests.UpdateRound(nodes, round) - // nonce++ - - // numRoundsToTest := 5 - // for i := 0; i < numRoundsToTest; i++ { - // integrationTests.ProposeBlock(nodes, leaders, round, nonce) - - // time.Sleep(integrationTests.SyncDelay) - - // round = integrationTests.IncrementAndPrintRound(round) - // integrationTests.UpdateRound(nodes, round) - // nonce++ - // } - - for _, nodesList := range nodes { - for _, n := range nodesList { - statusComponents := integrationTests.GetDefaultStatusComponents() - - consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{ - Config: config.Config{ - Consensus: config.ConsensusConfig{ - Type: blsConsensusType, - }, - ValidatorPubkeyConverter: config.PubkeyConfig{ - Length: 96, - Type: "bls", - SignatureLength: 48, - }, - TrieSync: config.TrieSyncConfig{ - NumConcurrentTrieSyncers: 5, - MaxHardCapForMissingNodes: 5, - TrieSyncerVersion: 2, - CheckNodesOnDisk: false, - }, - GeneralSettings: config.GeneralSettingsConfig{ - SyncProcessTimeInMillis: 6000, - }, - }, - BootstrapRoundIndex: 0, - CoreComponents: n.Node.GetCoreComponents(), - NetworkComponents: n.Node.GetNetworkComponents(), - CryptoComponents: n.Node.GetCryptoComponents(), - DataComponents: n.Node.GetDataComponents(), - ProcessComponents: n.Node.GetProcessComponents(), - StateComponents: n.Node.GetStateComponents(), - StatusComponents: statusComponents, - StatusCoreComponents: n.Node.GetStatusCoreComponents(), - ScheduledProcessor: &consensusMocks.ScheduledProcessorStub{}, - IsInImportMode: n.Node.IsInImportMode(), - } - - consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs) - require.Nil(t, err) - - managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusFactory) - require.Nil(t, err) - - err = managedConsensusComponents.Create() - require.Nil(t, err) - } - } - - time.Sleep(100 * time.Second) - - fmt.Println("Checking shards...") - - for _, nodesList := range nodes { - // expectedNonce := nodesList[0].Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce() - expectedNonce := 1 - for _, n := range nodesList { - for i := 1; i < len(nodes); i++ { - if check.IfNil(n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) { - assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) - } else { - assert.Equal(t, expectedNonce, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce()) - } - } - } - } + testConsensusBLSWithFullProcessing(t, integrationTests.UnreachableEpoch) } -func TestConsensusBLSWithFullProcessing(t *testing.T) { +func TestConsensusBLSWithFullProcessing_WithEquivalentProofs(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } + testConsensusBLSWithFullProcessing(t, uint32(0)) +} + +func testConsensusBLSWithFullProcessing(t *testing.T, equivalentProofsActivationEpoch uint32) { _ = logger.SetLogLevel("*:DEBUG,process:TRACE,consensus:TRACE") logger.ToggleLoggerName(true) @@ 
-215,10 +95,6 @@ func TestConsensusBLSWithFullProcessing(t *testing.T) { consensusSize := uint32(2 * numKeysOnEachNode) roundTime := uint64(1000) - // maxShards := uint32(1) - // shardId := uint32(0) - // numNodesPerShard := 3 - log.Info("runFullNodesTest", "numNodes", numNodes, "numKeysOnEachNode", numKeysOnEachNode, @@ -227,8 +103,6 @@ func TestConsensusBLSWithFullProcessing(t *testing.T) { enableEpochsConfig := integrationTests.CreateEnableEpochsConfig() - equivalentProofsActivationEpoch := uint32(0) - enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProofsActivationEpoch enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProofsActivationEpoch @@ -317,21 +191,19 @@ func TestConsensusBLSWithFullProcessing(t *testing.T) { } } - time.Sleep(10 * time.Second) + fmt.Println("Wait for several rounds...") + + time.Sleep(15 * time.Second) fmt.Println("Checking shards...") + expectedNonce := uint64(10) for _, nodesList := range nodes { - expectedNonce := uint64(0) - if !check.IfNil(nodesList[0].Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) { - expectedNonce = nodesList[0].Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce() - } for _, n := range nodesList { for i := 1; i < len(nodes); i++ { if check.IfNil(n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) { - // assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) + assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) } else { - fmt.Println("FOUND") assert.GreaterOrEqual(t, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce(), expectedNonce) } } diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go index 55bf1c2a5ec..4dabf294651 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -38,7 +38,6 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/process/factory/interceptorscontainer" "github.com/multiversx/mx-chain-go/process/interceptors" disabledInterceptors "github.com/multiversx/mx-chain-go/process/interceptors/disabled" @@ -72,7 +71,6 @@ import ( vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" - logger "github.com/multiversx/mx-chain-logger-go" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" ) @@ -521,7 +519,7 @@ func (tpn *TestFullNode) initNode( tpn.DataPool.Proofs()) } if err != nil { - panic(err.Error()) + log.Error("error creating fork detector", "error", err) } argsKeysHolder := keysManagement.ArgsManagedPeersHolder{ @@ -818,13 +816,6 @@ func (tpn *TestFullNode) initBlockProcessor( ) { var err error - id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) - if len(id) > 8 { - id = id[0:8] - } - - log := logger.GetOrCreate(fmt.Sprintf("p/sync/%s", id)) - accountsDb := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) accountsDb[state.UserAccountsState] = tpn.AccntState accountsDb[state.PeerAccountsState] = tpn.PeerState @@ -836,30 +827,6 @@ func (tpn *TestFullNode) initBlockProcessor( tpn.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(CreateEnableEpochsConfig(), tpn.EpochNotifier) } - // if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { - // 
tpn.ForkDetector, _ = processSync.NewShardForkDetector( - // log, - // tpn.RoundHandler, - // tpn.BlockBlackListHandler, - // tpn.BlockTracker, - // tpn.NodesSetup.GetStartTime(), - // tpn.EnableEpochsHandler, - // tpn.DataPool.Proofs()) - // } else { - // tpn.ForkDetector, _ = processSync.NewMetaForkDetector( - // log, - // tpn.RoundHandler, - // tpn.BlockBlackListHandler, - // tpn.BlockTracker, - // tpn.NodesSetup.GetStartTime(), - // tpn.EnableEpochsHandler, - // tpn.DataPool.Proofs()) - // } - - // if tpn.ForkDetector == nil { - // panic("AAAAAAAAAAAAAAAAA") - // } - bootstrapComponents := getDefaultBootstrapComponents(tpn.ShardCoordinator, tpn.EnableEpochsHandler) bootstrapComponents.HdrIntegrityVerifier = tpn.HeaderIntegrityVerifier @@ -969,14 +936,7 @@ func (tpn *TestFullNode) initBlockProcessor( } epochEconomics, _ := metachain.NewEndOfEpochEconomicsDataCreator(argsEpochEconomics) - systemVM, errGet := tpn.VMContainer.Get(factory.SystemVirtualMachine) - if errGet != nil { - log.Error("initBlockProcessor tpn.VMContainer.Get", "error", errGet) - } - - if systemVM == nil { - systemVM, _ = mock.NewOneSCExecutorMockVM(tpn.BlockchainHook, TestHasher) - } + systemVM, _ := mock.NewOneSCExecutorMockVM(tpn.BlockchainHook, TestHasher) argsStakingDataProvider := metachain.StakingDataProviderArgs{ EnableEpochsHandler: coreComponents.EnableEpochsHandler(), @@ -1011,7 +971,7 @@ func (tpn *TestFullNode) initBlockProcessor( } epochStartRewards, err := metachain.NewRewardsCreatorProxy(argsEpochRewards) if err != nil { - panic(fmt.Sprintf("error creating rewards creator proxy: %s", err.Error())) + log.Error("error creating rewards proxy", "error", err) } validatorInfoStorage, _ := tpn.Storage.GetStorer(dataRetriever.UnsignedTransactionUnit) @@ -1131,7 +1091,7 @@ func (tpn *TestFullNode) initBlockProcessor( } if err != nil { - panic(fmt.Sprintf("error creating blockprocessor: %s", err.Error())) + log.Error("error creating blockprocessor", "error", err) } } @@ -1163,7 +1123,7 @@ func (tpn *TestFullNode) initBlockTracker( tpn.BlockTracker, err = track.NewShardBlockTrack(arguments) if err != nil { - panic(err.Error()) + log.Error("NewShardBlockTrack", "error", err) } } else { arguments := track.ArgMetaTracker{ @@ -1172,7 +1132,7 @@ func (tpn *TestFullNode) initBlockTracker( tpn.BlockTracker, err = track.NewMetaBlockTrack(arguments) if err != nil { - panic(err.Error()) + log.Error("NewMetaBlockTrack", "error", err) } } } From 9f6e6669f6cc9d9cc8179b8d486d61227cdf3ff8 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 28 Jan 2025 21:59:23 +0200 Subject: [PATCH 10/21] include genesis header check --- process/block/metablock.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/process/block/metablock.go b/process/block/metablock.go index 57949e4b10c..ec89a7cc6b6 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -419,7 +419,7 @@ func (mp *metaProcessor) ProcessBlock( } func (mp *metaProcessor) checkProofsForShardData(header *block.MetaBlock) error { - if !mp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, header.Epoch) { + if !common.ShouldBlockHavePrevProof(header, mp.enableEpochsHandler, common.EquivalentMessagesFlag) { return nil } From ad69364656edcdab23c0802841bd2c30ab001649 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 31 Jan 2025 12:33:21 +0200 Subject: [PATCH 11/21] update integration test with invalid signers --- .../consensus/consensusSigning_test.go | 112 +++++++++++----- integrationTests/consensus/consensus_test.go | 1 + 
integrationTests/testFullNode.go | 121 +++++++++++++++++- 3 files changed, 202 insertions(+), 32 deletions(-) diff --git a/integrationTests/consensus/consensusSigning_test.go b/integrationTests/consensus/consensusSigning_test.go index dfa6966f1f0..e747587adfb 100644 --- a/integrationTests/consensus/consensusSigning_test.go +++ b/integrationTests/consensus/consensusSigning_test.go @@ -4,13 +4,18 @@ import ( "bytes" "encoding/hex" "fmt" - "sync" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" + consensusComp "github.com/multiversx/mx-chain-go/factory/consensus" "github.com/multiversx/mx-chain-go/integrationTests" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + logger "github.com/multiversx/mx-chain-logger-go" ) func initNodesWithTestSigner( @@ -20,12 +25,17 @@ func initNodesWithTestSigner( numInvalid uint32, roundTime uint64, consensusType string, -) map[uint32][]*integrationTests.TestConsensusNode { +) map[uint32][]*integrationTests.TestFullNode { fmt.Println("Step 1. Setup nodes...") + equivalentProofsActivationEpoch := uint32(0) + enableEpochsConfig := integrationTests.CreateEnableEpochsConfig() - nodes := integrationTests.CreateNodesWithTestConsensusNode( + enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProofsActivationEpoch + enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProofsActivationEpoch + + nodes := integrationTests.CreateNodesWithTestFullNode( int(numMetaNodes), int(numNodes), int(consensusSize), @@ -33,12 +43,9 @@ func initNodesWithTestSigner( consensusType, 1, enableEpochsConfig, + false, ) - for shardID, nodesList := range nodes { - displayAndStartNodes(shardID, nodesList) - } - time.Sleep(p2pBootstrapDelay) for shardID := range nodes { @@ -54,7 +61,7 @@ func initNodesWithTestSigner( // sig share with invalid size invalidSigShare = bytes.Repeat([]byte("a"), 3) } - log.Warn("invalid sig share from ", "pk", getPkEncoded(nodes[shardID][ii].NodeKeys.Pk), "sig", invalidSigShare) + log.Warn("invalid sig share from ", "pk", getPkEncoded(nodes[shardID][ii].NodeKeys.MainKey.Pk), "sig", invalidSigShare) return invalidSigShare, nil } @@ -70,12 +77,14 @@ func TestConsensusWithInvalidSigners(t *testing.T) { t.Skip("this is not a short test") } + logger.ToggleLoggerName(true) + logger.SetLogLevel("*:TRACE,consensus:TRACE") + numMetaNodes := uint32(4) numNodes := uint32(4) consensusSize := uint32(4) numInvalid := uint32(1) roundTime := uint64(1000) - numCommBlock := uint64(8) nodes := initNodesWithTestSigner(numMetaNodes, numNodes, consensusSize, numInvalid, roundTime, blsConsensusType) @@ -92,27 +101,70 @@ func TestConsensusWithInvalidSigners(t *testing.T) { fmt.Println("Start consensus...") time.Sleep(time.Second) - for shardID := range nodes { - mutex := &sync.Mutex{} - nonceForRoundMap := make(map[uint64]uint64) - totalCalled := 0 - - err := startNodesWithCommitBlock(nodes[shardID], mutex, nonceForRoundMap, &totalCalled) - assert.Nil(t, err) - - chDone := make(chan bool) - go checkBlockProposedEveryRound(numCommBlock, nonceForRoundMap, mutex, chDone, t) - - extraTime := uint64(2) - endTime := time.Duration(roundTime)*time.Duration(numCommBlock+extraTime)*time.Millisecond + time.Minute - select { - case <-chDone: - case <-time.After(endTime): - mutex.Lock() - log.Error("currently saved nonces for rounds", "nonceForRoundMap", nonceForRoundMap) - assert.Fail(t, "consensus too slow, not 
working.") - mutex.Unlock() - return + for _, nodesList := range nodes { + for _, n := range nodesList { + statusComponents := integrationTests.GetDefaultStatusComponents() + + consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{ + Config: config.Config{ + Consensus: config.ConsensusConfig{ + Type: blsConsensusType, + }, + ValidatorPubkeyConverter: config.PubkeyConfig{ + Length: 96, + Type: "bls", + SignatureLength: 48, + }, + TrieSync: config.TrieSyncConfig{ + NumConcurrentTrieSyncers: 5, + MaxHardCapForMissingNodes: 5, + TrieSyncerVersion: 2, + CheckNodesOnDisk: false, + }, + GeneralSettings: config.GeneralSettingsConfig{ + SyncProcessTimeInMillis: 6000, + }, + }, + BootstrapRoundIndex: 0, + CoreComponents: n.Node.GetCoreComponents(), + NetworkComponents: n.Node.GetNetworkComponents(), + CryptoComponents: n.Node.GetCryptoComponents(), + DataComponents: n.Node.GetDataComponents(), + ProcessComponents: n.Node.GetProcessComponents(), + StateComponents: n.Node.GetStateComponents(), + StatusComponents: statusComponents, + StatusCoreComponents: n.Node.GetStatusCoreComponents(), + ScheduledProcessor: &consensusMocks.ScheduledProcessorStub{}, + IsInImportMode: n.Node.IsInImportMode(), + } + + consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs) + require.Nil(t, err) + + managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusFactory) + require.Nil(t, err) + + err = managedConsensusComponents.Create() + require.Nil(t, err) + } + } + + fmt.Println("Wait for several rounds...") + + time.Sleep(15 * time.Second) + + fmt.Println("Checking shards...") + + expectedNonce := uint64(10) + for _, nodesList := range nodes { + for _, n := range nodesList { + for i := 1; i < len(nodes); i++ { + if check.IfNil(n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) { + assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) + } else { + assert.GreaterOrEqual(t, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce(), expectedNonce) + } + } } } } diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 604b41c93ad..b6ad9b1cd7b 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -116,6 +116,7 @@ func testConsensusBLSWithFullProcessing(t *testing.T, equivalentProofsActivation blsConsensusType, numKeysOnEachNode, enableEpochsConfig, + true, ) for shardID, nodesList := range nodes { diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go index 4dabf294651..06100621cd0 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -45,6 +45,7 @@ import ( processMock "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/process/scToProtocol" "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/process/sync" processSync "github.com/multiversx/mx-chain-go/process/sync" "github.com/multiversx/mx-chain-go/process/track" "github.com/multiversx/mx-chain-go/sharding" @@ -61,6 +62,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/factory" testFactory "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" 
"github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" @@ -82,6 +84,7 @@ func CreateNodesWithTestFullNode( consensusType string, numKeysOnEachNode int, enableEpochsConfig config.EnableEpochs, + withSync bool, ) map[uint32][]*TestFullNode { nodes := make(map[uint32][]*TestFullNode, nodesPerShard) @@ -105,7 +108,7 @@ func CreateNodesWithTestFullNode( MaxShards: 2, NodeShardId: 0, TxSignPrivKeyShardId: 0, - WithSync: false, + WithSync: withSync, EpochsConfig: &enableEpochsConfig, NodeKeys: keysPair, }, @@ -153,6 +156,7 @@ type TestFullNode struct { *TestProcessorNode ShardCoordinator sharding.Coordinator + MultiSigner *cryptoMocks.MultisignerMock } func NewTestFullNode(args ArgsTestFullNode) *TestFullNode { @@ -163,6 +167,7 @@ func NewTestFullNode(args ArgsTestFullNode) *TestFullNode { tfn := &TestFullNode{ TestProcessorNode: tpn, ShardCoordinator: shardCoordinator, + MultiSigner: args.MultiSigner, } tfn.initTestNodeWithArgs(*args.ArgTestProcessorNode, args) @@ -232,6 +237,7 @@ func (tpn *TestFullNode) initTestNodeWithArgs(args ArgTestProcessorNode, fullArg tpn.initChainHandler() tpn.initHeaderValidator() + tpn.initRoundHandler() syncer := ntp.NewSyncTime(ntp.NewNTPGoogleConfig(), nil) syncer.StartSyncingTime() @@ -575,7 +581,7 @@ func (tpn *TestFullNode) initNode( tpn.initInterceptors(coreComponents, cryptoComponents, roundHandler, tpn.EnableEpochsHandler, tpn.Storage, epochTrigger) if args.WithSync { - tpn.initBlockProcessorWithSync() + tpn.initBlockProcessorWithSync(coreComponents, dataComponents, roundHandler) } else { tpn.initBlockProcessor(coreComponents, dataComponents, args, roundHandler) } @@ -1095,6 +1101,117 @@ func (tpn *TestFullNode) initBlockProcessor( } } +func (tpn *TestFullNode) initBlockProcessorWithSync( + coreComponents *mock.CoreComponentsStub, + dataComponents *mock.DataComponentsStub, + roundHandler consensus.RoundHandler, +) { + var err error + + accountsDb := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) + accountsDb[state.UserAccountsState] = tpn.AccntState + accountsDb[state.PeerAccountsState] = tpn.PeerState + + if tpn.EpochNotifier == nil { + tpn.EpochNotifier = forking.NewGenericEpochNotifier() + } + if tpn.EnableEpochsHandler == nil { + tpn.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(CreateEnableEpochsConfig(), tpn.EpochNotifier) + } + + bootstrapComponents := getDefaultBootstrapComponents(tpn.ShardCoordinator, tpn.EnableEpochsHandler) + bootstrapComponents.HdrIntegrityVerifier = tpn.HeaderIntegrityVerifier + + statusComponents := GetDefaultStatusComponents() + + statusCoreComponents := &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, + } + + argumentsBase := block.ArgBaseProcessor{ + CoreComponents: coreComponents, + DataComponents: dataComponents, + BootstrapComponents: bootstrapComponents, + StatusComponents: statusComponents, + StatusCoreComponents: statusCoreComponents, + Config: config.Config{}, + AccountsDB: accountsDb, + ForkDetector: nil, + NodesCoordinator: tpn.NodesCoordinator, + FeeHandler: tpn.FeeAccumulator, + RequestHandler: tpn.RequestHandler, + BlockChainHook: &testscommon.BlockChainHookStub{}, + EpochStartTrigger: &mock.EpochStartTriggerStub{}, + HeaderValidator: tpn.HeaderValidator, + BootStorer: &mock.BoostrapStorerMock{ + PutCalled: func(round int64, bootData bootstrapStorage.BootstrapData) error { + return nil + }, + }, + BlockTracker: tpn.BlockTracker, + BlockSizeThrottler: TestBlockSizeThrottler, + HistoryRepository: tpn.HistoryRepository, + 
GasHandler: tpn.GasHandler, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, + ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + OutportDataProvider: &outport.OutportDataProviderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, + } + + if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { + tpn.ForkDetector, _ = sync.NewMetaForkDetector( + roundHandler, + tpn.BlockBlackListHandler, + tpn.BlockTracker, + 0, + tpn.EnableEpochsHandler, + tpn.DataPool.Proofs()) + argumentsBase.ForkDetector = tpn.ForkDetector + argumentsBase.TxCoordinator = &mock.TransactionCoordinatorMock{} + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: argumentsBase, + SCToProtocol: &mock.SCToProtocolStub{}, + PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, + EpochStartDataCreator: &mock.EpochStartDataCreatorStub{}, + EpochEconomics: &mock.EpochEconomicsStub{}, + EpochRewardsCreator: &testscommon.RewardsCreatorStub{}, + EpochValidatorInfoCreator: &testscommon.EpochValidatorInfoCreatorStub{}, + ValidatorStatisticsProcessor: &testscommon.ValidatorStatisticsProcessorStub{ + UpdatePeerStateCalled: func(header data.MetaHeaderHandler) ([]byte, error) { + return []byte("validator stats root hash"), nil + }, + }, + EpochSystemSCProcessor: &testscommon.EpochStartSystemSCStub{}, + } + + tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) + } else { + tpn.ForkDetector, _ = sync.NewShardForkDetector( + roundHandler, + tpn.BlockBlackListHandler, + tpn.BlockTracker, + 0, + tpn.EnableEpochsHandler, + tpn.DataPool.Proofs()) + argumentsBase.ForkDetector = tpn.ForkDetector + argumentsBase.BlockChainHook = tpn.BlockchainHook + argumentsBase.TxCoordinator = tpn.TxCoordinator + argumentsBase.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{} + arguments := block.ArgShardProcessor{ + ArgBaseProcessor: argumentsBase, + } + + tpn.BlockProcessor, err = block.NewShardProcessor(arguments) + } + + if err != nil { + panic(fmt.Sprintf("Error creating blockprocessor: %s", err.Error())) + } +} + func (tpn *TestFullNode) initBlockTracker( roundHandler consensus.RoundHandler, ) { From 8b53a2892746e1f8cdd4637d0f426d8147cdd0a1 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 31 Jan 2025 12:33:34 +0200 Subject: [PATCH 12/21] added trace log messages --- consensus/spos/bls/v2/subroundEndRound.go | 8 +++++++- consensus/spos/worker.go | 5 +++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/consensus/spos/bls/v2/subroundEndRound.go b/consensus/spos/bls/v2/subroundEndRound.go index b02d6015a07..a6d763d28c4 100644 --- a/consensus/spos/bls/v2/subroundEndRound.go +++ b/consensus/spos/bls/v2/subroundEndRound.go @@ -373,7 +373,7 @@ func (sr *subroundEndRound) checkGoRoutinesThrottler(ctx context.Context) error func (sr *subroundEndRound) verifySignature(i int, pk string, sigShare []byte) error { err := sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), sr.GetHeader().GetEpoch()) if err != nil { - log.Trace("VerifySignatureShare returned an error: ", err) + log.Trace("VerifySignatureShare returned an error: ", "error", err) errSetJob := sr.SetJobDone(pk, bls.SrSignature, false) if errSetJob != nil { return errSetJob @@ -521,6 +521,12 @@ func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, 
[]byte, error) return nil, nil, err } + log.Trace("computeAggSigOnValidNodes", + "bitmap", bitmap, + "threshold", threshold, + "numValidSigShares", numValidSigShares, + ) + return bitmap, sig, nil } diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 804bec83715..c4511ebb246 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -580,6 +580,11 @@ func (wrk *Worker) doJobOnMessageWithSignature(cnsMsg *consensus.Message, p2pMsg wrk.mapDisplayHashConsensusMessage[hash] = append(wrk.mapDisplayHashConsensusMessage[hash], cnsMsg) wrk.consensusState.AddMessageWithSignature(string(cnsMsg.PubKey), p2pMsg) + + log.Trace("received message with signature", + "from", core.GetTrimmedPk(hex.EncodeToString(cnsMsg.PubKey)), + "header hash", cnsMsg.BlockHeaderHash, + ) } func (wrk *Worker) addBlockToPool(bodyBytes []byte) { From 1887e158175c1aa65be203f2f3d7416b480dc3fa Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 31 Jan 2025 14:51:19 +0200 Subject: [PATCH 13/21] added sync test with more meta nodes --- .../sync/basicSync/basicSync_test.go | 94 +++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/integrationTests/sync/basicSync/basicSync_test.go b/integrationTests/sync/basicSync/basicSync_test.go index 1d08f51b30e..e2ebfae5fab 100644 --- a/integrationTests/sync/basicSync/basicSync_test.go +++ b/integrationTests/sync/basicSync/basicSync_test.go @@ -288,3 +288,97 @@ func TestSyncWorksInShard_EmptyBlocksNoForks_With_EquivalentProofs(t *testing.T) } } } + +func TestSyncMetaAndShard_With_EquivalentProofs(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + // 3 shard nodes and 3 metachain nodes + maxShards := uint32(1) + shardId := uint32(0) + numNodesPerShard := 3 + + enableEpochs := integrationTests.CreateEnableEpochsConfig() + enableEpochs.EquivalentMessagesEnableEpoch = uint32(0) + enableEpochs.FixedOrderInConsensusEnableEpoch = uint32(0) + + nodes := make([]*integrationTests.TestProcessorNode, 2*numNodesPerShard) + leaders := make([]*integrationTests.TestProcessorNode, 0) + connectableNodes := make([]integrationTests.Connectable, 0) + + for i := 0; i < numNodesPerShard; i++ { + nodes[i] = integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ + MaxShards: maxShards, + NodeShardId: shardId, + TxSignPrivKeyShardId: shardId, + WithSync: true, + EpochsConfig: &enableEpochs, + }) + connectableNodes = append(connectableNodes, nodes[i]) + } + + idxProposerShard0 := 0 + leaders = append(leaders, nodes[idxProposerShard0]) + + idxProposerMeta := numNodesPerShard + for i := 0; i < numNodesPerShard; i++ { + metachainNode := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ + MaxShards: maxShards, + NodeShardId: core.MetachainShardId, + TxSignPrivKeyShardId: shardId, + WithSync: true, + EpochsConfig: &enableEpochs, + }) + nodes[idxProposerMeta+i] = metachainNode + connectableNodes = append(connectableNodes, metachainNode) + } + leaders = append(leaders, nodes[idxProposerMeta]) + + integrationTests.ConnectNodes(connectableNodes) + + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + for _, n := range nodes { + _ = n.StartSync() + } + + fmt.Println("Delaying for nodes p2p bootstrap...") + time.Sleep(integrationTests.P2pBootstrapDelay) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + integrationTests.UpdateRound(nodes, round) + nonce++ + + numRoundsToTest := 5 + for i := 0; i < numRoundsToTest; i++ {
integrationTests.ProposeBlock(nodes, leaders, round, nonce) + + time.Sleep(integrationTests.SyncDelay) + + round = integrationTests.IncrementAndPrintRound(round) + integrationTests.UpdateRound(nodes, round) + nonce++ + } + + time.Sleep(integrationTests.SyncDelay) + + expectedNonce := nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce() + for i := 1; i < len(nodes); i++ { + if check.IfNil(nodes[i].BlockChain.GetCurrentBlockHeader()) { + assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) + } else { + if i == idxProposerMeta { // metachain node has the highest nonce since it is a single node and it did not sync the header + assert.Equal(t, expectedNonce, nodes[i].BlockChain.GetCurrentBlockHeader().GetNonce()) + } else { // shard nodes have not managed to sync the last header since there is no proof for it; in the complete flow, when nodes are fully synced they will get the current header directly from consensus, so they will receive the proof for the header + assert.Equal(t, expectedNonce-1, nodes[i].BlockChain.GetCurrentBlockHeader().GetNonce()) + } + } + } +} From 84b22558fa4f388b8ece69e01d3a346548342e20 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 31 Jan 2025 15:04:14 +0200 Subject: [PATCH 14/21] fix linter issues --- integrationTests/consensus/consensus_test.go | 6 ------ integrationTests/sync/basicSync/basicSync_test.go | 2 -- 2 files changed, 8 deletions(-) diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index b6ad9b1cd7b..5a5779fa2d8 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -55,9 +55,6 @@ func TestConsensusBLSFullTestSingleKeys_WithEquivalentProofs(t *testing.T) { t.Skip("this is not a short test") } - logger.ToggleLoggerName(true) - logger.SetLogLevel("*:DEBUG,consensus:TRACE") - runFullConsensusTest(t, blsConsensusType, 1, true) } @@ -86,9 +83,6 @@ func TestConsensusBLSWithFullProcessing_WithEquivalentProofs(t *testing.T) { } func testConsensusBLSWithFullProcessing(t *testing.T, equivalentProofsActivationEpoch uint32) { - _ = logger.SetLogLevel("*:DEBUG,process:TRACE,consensus:TRACE") - logger.ToggleLoggerName(true) - numKeysOnEachNode := 1 numMetaNodes := uint32(2) numNodes := uint32(2) diff --git a/integrationTests/sync/basicSync/basicSync_test.go b/integrationTests/sync/basicSync/basicSync_test.go index e2ebfae5fab..7d3563a997c 100644 --- a/integrationTests/sync/basicSync/basicSync_test.go +++ b/integrationTests/sync/basicSync/basicSync_test.go @@ -203,8 +203,6 @@ func TestSyncWorksInShard_EmptyBlocksNoForks_With_EquivalentProofs(t *testing.T) t.Skip("this is not a short test") } - logger.SetLogLevel("*:TRACE") - // 3 shard nodes and 1 metachain node maxShards := uint32(1) shardId := uint32(0) From 92c1f0b4d03b4e356c3cdfe1a57bb41cb02efbe8 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 31 Jan 2025 15:39:34 +0200 Subject: [PATCH 15/21] fix config after merge --- integrationTests/consensus/consensusSigning_test.go | 4 ---- integrationTests/testFullNode.go | 1 - 2 files changed, 5 deletions(-) diff --git a/integrationTests/consensus/consensusSigning_test.go b/integrationTests/consensus/consensusSigning_test.go index e747587adfb..56059dd0136 100644 --- a/integrationTests/consensus/consensusSigning_test.go +++ b/integrationTests/consensus/consensusSigning_test.go @@ -15,7 +15,6 @@ import ( consensusComp "github.com/multiversx/mx-chain-go/factory/consensus" "github.com/multiversx/mx-chain-go/integrationTests" consensusMocks
"github.com/multiversx/mx-chain-go/testscommon/consensus" - logger "github.com/multiversx/mx-chain-logger-go" ) func initNodesWithTestSigner( @@ -77,9 +76,6 @@ func TestConsensusWithInvalidSigners(t *testing.T) { t.Skip("this is not a short test") } - logger.ToggleLoggerName(true) - logger.SetLogLevel("*:TRACE,consensus:TRACE") - numMetaNodes := uint32(4) numNodes := uint32(4) consensusSize := uint32(4) diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go index 06100621cd0..fd5eb30bb96 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -341,7 +341,6 @@ func (tpn *TestFullNode) initTestNodeWithArgs(args ArgTestProcessorNode, fullArg tpn.NodeKeys.MainKey.Sk, tpn.MainMessenger.ID(), ), - config.ConsensusGradualBroadcastConfig{GradualIndexBroadcastDelay: []config.IndexBroadcastDelay{}}, ) if args.WithSync { From a32849b36f4b0981198c2900ca24407af373bd67 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 31 Jan 2025 15:50:47 +0200 Subject: [PATCH 16/21] use common function for full node start --- .../consensus/consensusSigning_test.go | 46 +-------- integrationTests/consensus/consensus_test.go | 94 ++++++++++--------- integrationTests/testFullNode.go | 5 - 3 files changed, 53 insertions(+), 92 deletions(-) diff --git a/integrationTests/consensus/consensusSigning_test.go b/integrationTests/consensus/consensusSigning_test.go index 56059dd0136..c569a0c789b 100644 --- a/integrationTests/consensus/consensusSigning_test.go +++ b/integrationTests/consensus/consensusSigning_test.go @@ -11,10 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/config" - consensusComp "github.com/multiversx/mx-chain-go/factory/consensus" "github.com/multiversx/mx-chain-go/integrationTests" - consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" ) func initNodesWithTestSigner( @@ -99,48 +96,7 @@ func TestConsensusWithInvalidSigners(t *testing.T) { for _, nodesList := range nodes { for _, n := range nodesList { - statusComponents := integrationTests.GetDefaultStatusComponents() - - consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{ - Config: config.Config{ - Consensus: config.ConsensusConfig{ - Type: blsConsensusType, - }, - ValidatorPubkeyConverter: config.PubkeyConfig{ - Length: 96, - Type: "bls", - SignatureLength: 48, - }, - TrieSync: config.TrieSyncConfig{ - NumConcurrentTrieSyncers: 5, - MaxHardCapForMissingNodes: 5, - TrieSyncerVersion: 2, - CheckNodesOnDisk: false, - }, - GeneralSettings: config.GeneralSettingsConfig{ - SyncProcessTimeInMillis: 6000, - }, - }, - BootstrapRoundIndex: 0, - CoreComponents: n.Node.GetCoreComponents(), - NetworkComponents: n.Node.GetNetworkComponents(), - CryptoComponents: n.Node.GetCryptoComponents(), - DataComponents: n.Node.GetDataComponents(), - ProcessComponents: n.Node.GetProcessComponents(), - StateComponents: n.Node.GetStateComponents(), - StatusComponents: statusComponents, - StatusCoreComponents: n.Node.GetStatusCoreComponents(), - ScheduledProcessor: &consensusMocks.ScheduledProcessorStub{}, - IsInImportMode: n.Node.IsInImportMode(), - } - - consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs) - require.Nil(t, err) - - managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusFactory) - require.Nil(t, err) - - err = managedConsensusComponents.Create() + err := startFullConsensusNode(n) require.Nil(t, err) } } diff --git 
a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 5a5779fa2d8..e281efb0414 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -140,48 +140,7 @@ func testConsensusBLSWithFullProcessing(t *testing.T, equivalentProofsActivation for _, nodesList := range nodes { for _, n := range nodesList { - statusComponents := integrationTests.GetDefaultStatusComponents() - - consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{ - Config: config.Config{ - Consensus: config.ConsensusConfig{ - Type: blsConsensusType, - }, - ValidatorPubkeyConverter: config.PubkeyConfig{ - Length: 96, - Type: "bls", - SignatureLength: 48, - }, - TrieSync: config.TrieSyncConfig{ - NumConcurrentTrieSyncers: 5, - MaxHardCapForMissingNodes: 5, - TrieSyncerVersion: 2, - CheckNodesOnDisk: false, - }, - GeneralSettings: config.GeneralSettingsConfig{ - SyncProcessTimeInMillis: 6000, - }, - }, - BootstrapRoundIndex: 0, - CoreComponents: n.Node.GetCoreComponents(), - NetworkComponents: n.Node.GetNetworkComponents(), - CryptoComponents: n.Node.GetCryptoComponents(), - DataComponents: n.Node.GetDataComponents(), - ProcessComponents: n.Node.GetProcessComponents(), - StateComponents: n.Node.GetStateComponents(), - StatusComponents: statusComponents, - StatusCoreComponents: n.Node.GetStatusCoreComponents(), - ScheduledProcessor: &consensusMocks.ScheduledProcessorStub{}, - IsInImportMode: n.Node.IsInImportMode(), - } - - consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs) - require.Nil(t, err) - - managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusFactory) - require.Nil(t, err) - - err = managedConsensusComponents.Create() + err := startFullConsensusNode(n) require.Nil(t, err) } } @@ -206,6 +165,57 @@ func testConsensusBLSWithFullProcessing(t *testing.T, equivalentProofsActivation } } +func startFullConsensusNode( + n *integrationTests.TestFullNode, +) error { + statusComponents := integrationTests.GetDefaultStatusComponents() + + consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{ + Config: config.Config{ + Consensus: config.ConsensusConfig{ + Type: blsConsensusType, + }, + ValidatorPubkeyConverter: config.PubkeyConfig{ + Length: 96, + Type: "bls", + SignatureLength: 48, + }, + TrieSync: config.TrieSyncConfig{ + NumConcurrentTrieSyncers: 5, + MaxHardCapForMissingNodes: 5, + TrieSyncerVersion: 2, + CheckNodesOnDisk: false, + }, + GeneralSettings: config.GeneralSettingsConfig{ + SyncProcessTimeInMillis: 6000, + }, + }, + BootstrapRoundIndex: 0, + CoreComponents: n.Node.GetCoreComponents(), + NetworkComponents: n.Node.GetNetworkComponents(), + CryptoComponents: n.Node.GetCryptoComponents(), + DataComponents: n.Node.GetDataComponents(), + ProcessComponents: n.Node.GetProcessComponents(), + StateComponents: n.Node.GetStateComponents(), + StatusComponents: statusComponents, + StatusCoreComponents: n.Node.GetStatusCoreComponents(), + ScheduledProcessor: &consensusMocks.ScheduledProcessorStub{}, + IsInImportMode: n.Node.IsInImportMode(), + } + + consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs) + if err != nil { + return err + } + + managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusFactory) + if err != nil { + return err + } + + return managedConsensusComponents.Create() +} + func initNodesAndTest( numMetaNodes, numNodes, diff --git a/integrationTests/testFullNode.go 
b/integrationTests/testFullNode.go index fd5eb30bb96..9904b4722a7 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -223,11 +223,6 @@ func (tpn *TestFullNode) initTestNodeWithArgs(args ArgTestProcessorNode, fullArg tpn.AppStatusHandler = TestAppStatusHandler } - id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) - if len(id) > 8 { - id = id[0:8] - } - tpn.MainMessenger = CreateMessengerWithNoDiscovery() tpn.StatusMetrics = args.StatusMetrics From 22edddc528f2c941a30181d0daa50c7bff275766 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 31 Jan 2025 15:54:46 +0200 Subject: [PATCH 17/21] fix linter issue --- integrationTests/testFullNode.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go index 9904b4722a7..108a3ef61e8 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -1048,6 +1048,9 @@ func (tpn *TestFullNode) initBlockProcessor( } tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) + if err != nil { + log.Error("error creating meta blockprocessor", "error", err) + } } else { if check.IfNil(tpn.EpochStartTrigger) { argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ @@ -1088,11 +1091,11 @@ func (tpn *TestFullNode) initBlockProcessor( } tpn.BlockProcessor, err = block.NewShardProcessor(arguments) + if err != nil { + log.Error("error creating shard blockprocessor", "error", err) + } } - if err != nil { - log.Error("error creating blockprocessor", "error", err) - } } func (tpn *TestFullNode) initBlockProcessorWithSync( From 2716384ecb1cc04ec0241ab1bf3690e621451513 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 5 Feb 2025 09:31:08 +0200 Subject: [PATCH 18/21] fixes after review --- integrationTests/testConsensusNode.go | 104 +++++++++---------------- integrationTests/testFullNode.go | 108 ++++++++++---------------- 2 files changed, 74 insertions(+), 138 deletions(-) diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index 1fb8d975150..0725b0000f7 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -463,41 +463,41 @@ func (tcn *TestConsensusNode) initInterceptors( cacheVerified, _ := storageunit.NewCache(cacherVerifiedCfg) whiteListerVerifiedTxs, _ := interceptors.NewWhiteListDataVerifier(cacheVerified) + interceptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ + CoreComponents: coreComponents, + CryptoComponents: cryptoComponents, + Accounts: accountsAdapter, + ShardCoordinator: tcn.ShardCoordinator, + NodesCoordinator: tcn.NodesCoordinator, + MainMessenger: tcn.MainMessenger, + FullArchiveMessenger: tcn.FullArchiveMessenger, + Store: storage, + DataPool: tcn.DataPool, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, + BlockBlackList: blockBlackListHandler, + HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, + HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: whiteLstHandler, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + AntifloodHandler: &mock.NilAntifloodHandler{}, + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + SizeCheckDelta: sizeCheckDelta, + RequestHandler: &testscommon.RequestHandlerStub{}, + PeerSignatureHandler: 
&processMock.PeerSignatureHandlerStub{}, + SignaturesHandler: &processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + NodeOperationMode: common.NormalOperation, + InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), + } if tcn.ShardCoordinator.SelfId() == core.MetachainShardId { - metaInterceptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: coreComponents, - CryptoComponents: cryptoComponents, - Accounts: accountsAdapter, - ShardCoordinator: tcn.ShardCoordinator, - NodesCoordinator: tcn.NodesCoordinator, - MainMessenger: tcn.MainMessenger, - FullArchiveMessenger: tcn.FullArchiveMessenger, - Store: storage, - DataPool: tcn.DataPool, - MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, - TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, - BlockBlackList: blockBlackListHandler, - HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, - HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), - ValidityAttester: blockTracker, - EpochStartTrigger: epochStartTrigger, - WhiteListHandler: whiteLstHandler, - WhiteListerVerifiedTxs: whiteListerVerifiedTxs, - AntifloodHandler: &mock.NilAntifloodHandler{}, - ArgumentsParser: smartContract.NewArgumentParser(), - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - SizeCheckDelta: sizeCheckDelta, - RequestHandler: &testscommon.RequestHandlerStub{}, - PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, - SignaturesHandler: &processMock.SignaturesHandlerStub{}, - HeartbeatExpiryTimespanInSec: 30, - MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), - FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), - HardforkTrigger: &testscommon.HardforkTriggerStub{}, - NodeOperationMode: common.NormalOperation, - InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), - } - interceptorContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorContainerFactoryArgs) + interceptorContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(interceptorContainerFactoryArgs) if err != nil { fmt.Println(err.Error()) } @@ -532,41 +532,7 @@ func (tcn *TestConsensusNode) initInterceptors( } _, _ = shardchain.NewEpochStartTrigger(argsShardEpochStart) - shardIntereptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: coreComponents, - CryptoComponents: cryptoComponents, - Accounts: accountsAdapter, - ShardCoordinator: tcn.ShardCoordinator, - NodesCoordinator: tcn.NodesCoordinator, - MainMessenger: tcn.MainMessenger, - FullArchiveMessenger: tcn.FullArchiveMessenger, - Store: storage, - DataPool: tcn.DataPool, - MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, - TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, - BlockBlackList: blockBlackListHandler, - HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, - HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), - ValidityAttester: blockTracker, - EpochStartTrigger: epochStartTrigger, - WhiteListHandler: whiteLstHandler, - WhiteListerVerifiedTxs: whiteListerVerifiedTxs, - AntifloodHandler: &mock.NilAntifloodHandler{}, - ArgumentsParser: 
smartContract.NewArgumentParser(), - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - SizeCheckDelta: sizeCheckDelta, - RequestHandler: &testscommon.RequestHandlerStub{}, - PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, - SignaturesHandler: &processMock.SignaturesHandlerStub{}, - HeartbeatExpiryTimespanInSec: 30, - MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), - FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), - HardforkTrigger: &testscommon.HardforkTriggerStub{}, - NodeOperationMode: common.NormalOperation, - InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), - } - - interceptorContainerFactory, err := interceptorscontainer.NewShardInterceptorsContainerFactory(shardIntereptorContainerFactoryArgs) + interceptorContainerFactory, err := interceptorscontainer.NewShardInterceptorsContainerFactory(interceptorContainerFactoryArgs) if err != nil { fmt.Println(err.Error()) } diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go index 108a3ef61e8..999898cb805 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -76,6 +76,7 @@ import ( wasmConfig "github.com/multiversx/mx-chain-vm-go/config" ) +// CreateNodesWithTestFullNode will create a set of nodes with full consensus and processing components func CreateNodesWithTestFullNode( numMetaNodes int, nodesPerShard int, @@ -137,6 +138,7 @@ func CreateNodesWithTestFullNode( return nodes } +// ArgsTestFullNode defines arguments for test full node type ArgsTestFullNode struct { *ArgTestProcessorNode @@ -152,6 +154,7 @@ type ArgsTestFullNode struct { StartTime int64 } +// TestFullNode defines the structure for testing node with full processing and consensus components type TestFullNode struct { *TestProcessorNode @@ -159,6 +162,7 @@ type TestFullNode struct { MultiSigner *cryptoMocks.MultisignerMock } +// NewTestFullNode will create a new instance of full testing node func NewTestFullNode(args ArgsTestFullNode) *TestFullNode { tpn := newBaseTestProcessorNode(*args.ArgTestProcessorNode) @@ -693,41 +697,41 @@ func (tcn *TestFullNode) initInterceptors( cacheVerified, _ := storageunit.NewCache(cacherVerifiedCfg) whiteListerVerifiedTxs, _ := interceptors.NewWhiteListDataVerifier(cacheVerified) + interceptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ + CoreComponents: coreComponents, + CryptoComponents: cryptoComponents, + Accounts: accountsAdapter, + ShardCoordinator: tcn.ShardCoordinator, + NodesCoordinator: tcn.NodesCoordinator, + MainMessenger: tcn.MainMessenger, + FullArchiveMessenger: tcn.FullArchiveMessenger, + Store: storage, + DataPool: tcn.DataPool, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, + BlockBlackList: blockBlackListHandler, + HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, + HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: whiteLstHandler, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + AntifloodHandler: &mock.NilAntifloodHandler{}, + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + SizeCheckDelta: sizeCheckDelta, + RequestHandler: &testscommon.RequestHandlerStub{}, + PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, + SignaturesHandler: 
&processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + NodeOperationMode: common.NormalOperation, + InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), + } if tcn.ShardCoordinator.SelfId() == core.MetachainShardId { - metaInterceptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: coreComponents, - CryptoComponents: cryptoComponents, - Accounts: accountsAdapter, - ShardCoordinator: tcn.ShardCoordinator, - NodesCoordinator: tcn.NodesCoordinator, - MainMessenger: tcn.MainMessenger, - FullArchiveMessenger: tcn.FullArchiveMessenger, - Store: storage, - DataPool: tcn.DataPool, - MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, - TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, - BlockBlackList: blockBlackListHandler, - HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, - HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), - ValidityAttester: blockTracker, - EpochStartTrigger: epochStartTrigger, - WhiteListHandler: whiteLstHandler, - WhiteListerVerifiedTxs: whiteListerVerifiedTxs, - AntifloodHandler: &mock.NilAntifloodHandler{}, - ArgumentsParser: smartContract.NewArgumentParser(), - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - SizeCheckDelta: sizeCheckDelta, - RequestHandler: &testscommon.RequestHandlerStub{}, - PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, - SignaturesHandler: &processMock.SignaturesHandlerStub{}, - HeartbeatExpiryTimespanInSec: 30, - MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), - FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), - HardforkTrigger: &testscommon.HardforkTriggerStub{}, - NodeOperationMode: common.NormalOperation, - InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), - } - interceptorContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorContainerFactoryArgs) + interceptorContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(interceptorContainerFactoryArgs) if err != nil { fmt.Println(err.Error()) } @@ -762,41 +766,7 @@ func (tcn *TestFullNode) initInterceptors( } _, _ = shardchain.NewEpochStartTrigger(argsShardEpochStart) - shardIntereptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: coreComponents, - CryptoComponents: cryptoComponents, - Accounts: accountsAdapter, - ShardCoordinator: tcn.ShardCoordinator, - NodesCoordinator: tcn.NodesCoordinator, - MainMessenger: tcn.MainMessenger, - FullArchiveMessenger: tcn.FullArchiveMessenger, - Store: storage, - DataPool: tcn.DataPool, - MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, - TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, - BlockBlackList: blockBlackListHandler, - HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, - HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), - ValidityAttester: blockTracker, - EpochStartTrigger: epochStartTrigger, - WhiteListHandler: whiteLstHandler, - WhiteListerVerifiedTxs: whiteListerVerifiedTxs, - AntifloodHandler: &mock.NilAntifloodHandler{}, - ArgumentsParser: smartContract.NewArgumentParser(), - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - 
SizeCheckDelta: sizeCheckDelta, - RequestHandler: &testscommon.RequestHandlerStub{}, - PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, - SignaturesHandler: &processMock.SignaturesHandlerStub{}, - HeartbeatExpiryTimespanInSec: 30, - MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), - FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), - HardforkTrigger: &testscommon.HardforkTriggerStub{}, - NodeOperationMode: common.NormalOperation, - InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), - } - - interceptorContainerFactory, err := interceptorscontainer.NewShardInterceptorsContainerFactory(shardIntereptorContainerFactoryArgs) + interceptorContainerFactory, err := interceptorscontainer.NewShardInterceptorsContainerFactory(interceptorContainerFactoryArgs) if err != nil { fmt.Println(err.Error()) } From 8a3ca7aae063ea3c5e8b31f1484897db49441a6b Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 6 Feb 2025 15:31:00 +0200 Subject: [PATCH 19/21] remove duplicated code --- integrationTests/testFullNode.go | 216 +++++++++++++------------------ 1 file changed, 90 insertions(+), 126 deletions(-) diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go index 999898cb805..4f24865de35 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -392,62 +392,7 @@ func (tpn *TestFullNode) initNode( tpn.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(CreateEnableEpochsConfig(), tpn.EpochNotifier) } - var epochTrigger TestEpochStartTrigger - if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { - argsNewMetaEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ - GenesisTime: time.Unix(args.StartTime, 0), - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - Settings: &config.EpochStartConfig{ - MinRoundsBetweenEpochs: 1, - RoundsPerEpoch: 1000, - }, - Epoch: 0, - Storage: createTestStore(), - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, - DataPool: tpn.DataPool, - } - epochStartTrigger, err := metachain.NewEpochStartTrigger(argsNewMetaEpochStart) - if err != nil { - fmt.Println(err.Error()) - } - epochTrigger = &metachain.TestTrigger{} - epochTrigger.SetTrigger(epochStartTrigger) - } else { - argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ - MiniBlocksPool: tpn.DataPool.MiniBlocks(), - ValidatorsInfoPool: tpn.DataPool.ValidatorsInfo(), - RequestHandler: &testscommon.RequestHandlerStub{}, - } - peerMiniBlockSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) - - argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - HeaderValidator: &mock.HeaderValidatorStub{}, - Uint64Converter: TestUint64Converter, - DataPool: tpn.DataPool, - Storage: tpn.Storage, - RequestHandler: &testscommon.RequestHandlerStub{}, - Epoch: 0, - Validity: 1, - Finality: 1, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - PeerMiniBlocksSyncer: peerMiniBlockSyncer, - RoundHandler: roundHandler, - AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, - EnableEpochsHandler: tpn.EnableEpochsHandler, - } - epochStartTrigger, err := shardchain.NewEpochStartTrigger(argsShardEpochStart) - if err != nil { - fmt.Println("NewEpochStartTrigger shard") - fmt.Println(err.Error()) - } - epochTrigger = &shardchain.TestTrigger{} - epochTrigger.SetTrigger(epochStartTrigger) - } - + 
epochTrigger := tpn.createEpochStartTrigger(args.StartTime) tpn.EpochStartTrigger = epochTrigger strPk := "" @@ -504,27 +449,7 @@ func (tpn *TestFullNode) initNode( bootstrapComponents := getDefaultBootstrapComponents(tpn.ShardCoordinator, tpn.EnableEpochsHandler) tpn.BlockBlackListHandler = cache.NewTimeCache(TimeSpanForBadHeaders) - - if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { - tpn.ForkDetector, err = processSync.NewShardForkDetector( - roundHandler, - tpn.BlockBlackListHandler, - tpn.BlockTracker, - args.StartTime, - tpn.EnableEpochsHandler, - tpn.DataPool.Proofs()) - } else { - tpn.ForkDetector, err = processSync.NewMetaForkDetector( - roundHandler, - tpn.BlockBlackListHandler, - tpn.BlockTracker, - args.StartTime, - tpn.EnableEpochsHandler, - tpn.DataPool.Proofs()) - } - if err != nil { - log.Error("error creating fork detector", "error", err) - } + tpn.ForkDetector = tpn.createForkDetector(args.StartTime) argsKeysHolder := keysManagement.ArgsManagedPeersHolder{ KeyGenerator: args.KeyGen, @@ -671,6 +596,94 @@ func (tpn *TestFullNode) initNode( log.LogIfError(err) } +func (tfn *TestFullNode) createForkDetector(startTime int64) process.ForkDetector { + var err error + var forkDetector process.ForkDetector + + if tfn.ShardCoordinator.SelfId() != core.MetachainShardId { + forkDetector, err = processSync.NewShardForkDetector( + tfn.RoundHandler, + tfn.BlockBlackListHandler, + tfn.BlockTracker, + startTime, + tfn.EnableEpochsHandler, + tfn.DataPool.Proofs()) + } else { + forkDetector, err = processSync.NewMetaForkDetector( + tfn.RoundHandler, + tfn.BlockBlackListHandler, + tfn.BlockTracker, + startTime, + tfn.EnableEpochsHandler, + tfn.DataPool.Proofs()) + } + if err != nil { + log.Error("error creating fork detector", "error", err) + return nil + } + + return forkDetector +} + +func (tfn *TestFullNode) createEpochStartTrigger(startTime int64) TestEpochStartTrigger { + var epochTrigger TestEpochStartTrigger + if tfn.ShardCoordinator.SelfId() == core.MetachainShardId { + argsNewMetaEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + GenesisTime: time.Unix(startTime, 0), + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 1, + RoundsPerEpoch: 1000, + }, + Epoch: 0, + Storage: createTestStore(), + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + DataPool: tfn.DataPool, + } + epochStartTrigger, err := metachain.NewEpochStartTrigger(argsNewMetaEpochStart) + if err != nil { + fmt.Println(err.Error()) + } + epochTrigger = &metachain.TestTrigger{} + epochTrigger.SetTrigger(epochStartTrigger) + } else { + argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ + MiniBlocksPool: tfn.DataPool.MiniBlocks(), + ValidatorsInfoPool: tfn.DataPool.ValidatorsInfo(), + RequestHandler: &testscommon.RequestHandlerStub{}, + } + peerMiniBlockSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) + + argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + HeaderValidator: &mock.HeaderValidatorStub{}, + Uint64Converter: TestUint64Converter, + DataPool: tfn.DataPool, + Storage: tfn.Storage, + RequestHandler: &testscommon.RequestHandlerStub{}, + Epoch: 0, + Validity: 1, + Finality: 1, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + PeerMiniBlocksSyncer: peerMiniBlockSyncer, + RoundHandler: tfn.RoundHandler, + AppStatusHandler: 
&statusHandlerMock.AppStatusHandlerStub{}, + EnableEpochsHandler: tfn.EnableEpochsHandler, + } + epochStartTrigger, err := shardchain.NewEpochStartTrigger(argsShardEpochStart) + if err != nil { + fmt.Println(err.Error()) + } + epochTrigger = &shardchain.TestTrigger{} + epochTrigger.SetTrigger(epochStartTrigger) + } + + return epochTrigger +} + func (tcn *TestFullNode) initInterceptors( coreComponents process.CoreComponentsHolder, cryptoComponents process.CryptoComponentsHolder, @@ -843,26 +856,6 @@ func (tpn *TestFullNode) initBlockProcessor( } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { - if check.IfNil(tpn.EpochStartTrigger) { - argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ - GenesisTime: time.Unix(args.StartTime, 0), - Settings: &config.EpochStartConfig{ - MinRoundsBetweenEpochs: 1000, - RoundsPerEpoch: 10000, - }, - Epoch: 0, - EpochStartNotifier: tpn.EpochStartNotifier, - Storage: tpn.Storage, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, - DataPool: tpn.DataPool, - } - epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) - tpn.EpochStartTrigger = &metachain.TestTrigger{} - tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) - } - argumentsBase.EpochStartTrigger = tpn.EpochStartTrigger argumentsBase.TxCoordinator = tpn.TxCoordinator @@ -1022,35 +1015,6 @@ func (tpn *TestFullNode) initBlockProcessor( log.Error("error creating meta blockprocessor", "error", err) } } else { - if check.IfNil(tpn.EpochStartTrigger) { - argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ - MiniBlocksPool: tpn.DataPool.MiniBlocks(), - ValidatorsInfoPool: tpn.DataPool.ValidatorsInfo(), - RequestHandler: tpn.RequestHandler, - } - peerMiniBlocksSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) - argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - HeaderValidator: tpn.HeaderValidator, - Uint64Converter: TestUint64Converter, - DataPool: tpn.DataPool, - Storage: tpn.Storage, - RequestHandler: tpn.RequestHandler, - Epoch: 0, - Validity: 1, - Finality: 1, - EpochStartNotifier: tpn.EpochStartNotifier, - PeerMiniBlocksSyncer: peerMiniBlocksSyncer, - RoundHandler: tpn.RoundHandler, - AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, - EnableEpochsHandler: tpn.EnableEpochsHandler, - } - epochStartTrigger, _ := shardchain.NewEpochStartTrigger(argsShardEpochStart) - tpn.EpochStartTrigger = &shardchain.TestTrigger{} - tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) - } - argumentsBase.EpochStartTrigger = tpn.EpochStartTrigger argumentsBase.BlockChainHook = tpn.BlockchainHook argumentsBase.TxCoordinator = tpn.TxCoordinator From e5eb36a4ce6272da40bf7238128e1b623652f7a0 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 6 Feb 2025 15:36:57 +0200 Subject: [PATCH 20/21] update check + additional check for genesis block --- process/block/baseProcess.go | 2 +- process/block/metablock.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index be00f2f8a91..0d9d13b4ced 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -747,7 +747,7 @@ func (bp *baseProcessor) sortHeaderHashesForCurrentBlockByNonce(usedInBlock bool } func (bp *baseProcessor) hasMissingProof(headerInfo *hdrInfo, hdrHash string) bool { - isFlagEnabledForHeader := common.ShouldBlockHavePrevProof(headerInfo.hdr, bp.enableEpochsHandler, 
common.EquivalentMessagesFlag) + isFlagEnabledForHeader := bp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, headerInfo.hdr.GetEpoch()) && headerInfo.hdr.GetNonce() > 1 if !isFlagEnabledForHeader { return false } diff --git a/process/block/metablock.go b/process/block/metablock.go index 63180536e43..5e998ad27cc 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -419,7 +419,7 @@ func (mp *metaProcessor) ProcessBlock( } func (mp *metaProcessor) checkProofsForShardData(header *block.MetaBlock) error { - if !common.ShouldBlockHavePrevProof(header, mp.enableEpochsHandler, common.EquivalentMessagesFlag) { + if !(mp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, header.Epoch) && header.GetNonce() > 1) { return nil } From ecbb55f770a7d3c025fe44a0b4368a9759f49dd0 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 6 Feb 2025 17:24:12 +0200 Subject: [PATCH 21/21] fix genesis time --- integrationTests/testFullNode.go | 40 ++++++++++++-------------------- 1 file changed, 15 insertions(+), 25 deletions(-) diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go index 4f24865de35..2afc20ab4be 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -45,7 +45,6 @@ import ( processMock "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/process/scToProtocol" "github.com/multiversx/mx-chain-go/process/smartContract" - "github.com/multiversx/mx-chain-go/process/sync" processSync "github.com/multiversx/mx-chain-go/process/sync" "github.com/multiversx/mx-chain-go/process/track" "github.com/multiversx/mx-chain-go/sharding" @@ -160,6 +159,7 @@ type TestFullNode struct { ShardCoordinator sharding.Coordinator MultiSigner *cryptoMocks.MultisignerMock + GenesisTimeField time.Time } // NewTestFullNode will create a new instance of full testing node @@ -240,9 +240,10 @@ func (tpn *TestFullNode) initTestNodeWithArgs(args ArgTestProcessorNode, fullArg syncer := ntp.NewSyncTime(ntp.NewNTPGoogleConfig(), nil) syncer.StartSyncingTime() + tpn.GenesisTimeField = time.Unix(fullArgs.StartTime, 0) roundHandler, _ := round.NewRound( - time.Unix(fullArgs.StartTime, 0), + tpn.GenesisTimeField, syncer.CurrentTime(), time.Millisecond*time.Duration(fullArgs.RoundTime), syncer, @@ -417,7 +418,8 @@ func (tpn *TestFullNode) initNode( coreComponents.ChainIdCalled = func() string { return string(tpn.ChainID) } - coreComponents.GenesisTimeField = time.Unix(args.StartTime, 0) + + coreComponents.GenesisTimeField = tpn.GenesisTimeField coreComponents.GenesisNodesSetupField = &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return uint32(args.ConsensusSize) @@ -449,7 +451,7 @@ func (tpn *TestFullNode) initNode( bootstrapComponents := getDefaultBootstrapComponents(tpn.ShardCoordinator, tpn.EnableEpochsHandler) tpn.BlockBlackListHandler = cache.NewTimeCache(TimeSpanForBadHeaders) - tpn.ForkDetector = tpn.createForkDetector(args.StartTime) + tpn.ForkDetector = tpn.createForkDetector(args.StartTime, roundHandler) argsKeysHolder := keysManagement.ArgsManagedPeersHolder{ KeyGenerator: args.KeyGen, @@ -572,7 +574,6 @@ func (tpn *TestFullNode) initNode( node.WithStateComponents(stateComponents), node.WithPeerDenialEvaluator(&mock.PeerDenialEvaluatorStub{}), node.WithStatusCoreComponents(statusCoreComponents), - node.WithGenesisTime(time.Unix(args.StartTime, 0)), node.WithRoundDuration(args.RoundTime), node.WithPublicKeySize(publicKeySize), ) @@ -596,24 +597,27 @@ 
func (tpn *TestFullNode) initNode( log.LogIfError(err) } -func (tfn *TestFullNode) createForkDetector(startTime int64) process.ForkDetector { +func (tfn *TestFullNode) createForkDetector( + startTime int64, + roundHandler consensus.RoundHandler, +) process.ForkDetector { var err error var forkDetector process.ForkDetector if tfn.ShardCoordinator.SelfId() != core.MetachainShardId { forkDetector, err = processSync.NewShardForkDetector( - tfn.RoundHandler, + roundHandler, tfn.BlockBlackListHandler, tfn.BlockTracker, - startTime, + tfn.GenesisTimeField.Unix(), tfn.EnableEpochsHandler, tfn.DataPool.Proofs()) } else { forkDetector, err = processSync.NewMetaForkDetector( - tfn.RoundHandler, + roundHandler, tfn.BlockBlackListHandler, tfn.BlockTracker, - startTime, + tfn.GenesisTimeField.Unix(), tfn.EnableEpochsHandler, tfn.DataPool.Proofs()) } @@ -629,7 +633,7 @@ func (tfn *TestFullNode) createEpochStartTrigger(startTime int64) TestEpochStart var epochTrigger TestEpochStartTrigger if tfn.ShardCoordinator.SelfId() == core.MetachainShardId { argsNewMetaEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ - GenesisTime: time.Unix(startTime, 0), + GenesisTime: tfn.GenesisTimeField, EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), Settings: &config.EpochStartConfig{ MinRoundsBetweenEpochs: 1, @@ -1093,13 +1097,6 @@ func (tpn *TestFullNode) initBlockProcessorWithSync( } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { - tpn.ForkDetector, _ = sync.NewMetaForkDetector( - roundHandler, - tpn.BlockBlackListHandler, - tpn.BlockTracker, - 0, - tpn.EnableEpochsHandler, - tpn.DataPool.Proofs()) argumentsBase.ForkDetector = tpn.ForkDetector argumentsBase.TxCoordinator = &mock.TransactionCoordinatorMock{} arguments := block.ArgMetaProcessor{ @@ -1120,13 +1117,6 @@ func (tpn *TestFullNode) initBlockProcessorWithSync( tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) } else { - tpn.ForkDetector, _ = sync.NewShardForkDetector( - roundHandler, - tpn.BlockBlackListHandler, - tpn.BlockTracker, - 0, - tpn.EnableEpochsHandler, - tpn.DataPool.Proofs()) argumentsBase.ForkDetector = tpn.ForkDetector argumentsBase.BlockChainHook = tpn.BlockchainHook argumentsBase.TxCoordinator = tpn.TxCoordinator