From 9f69a3e71cc71a5ce2542b7c704685a9c716e7fc Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 8 Jan 2025 18:48:41 +0200 Subject: [PATCH 01/10] add custom logging --- cmd/node/config/config.toml | 4 +- consensus/chronology/argChronology.go | 2 + consensus/chronology/chronology.go | 18 ++- consensus/spos/bls/proxy/subroundsHandler.go | 5 + consensus/spos/bls/v1/blsSubroundsFactory.go | 7 ++ consensus/spos/bls/v2/blsSubroundsFactory.go | 8 ++ consensus/spos/bls/v2/subroundBlock.go | 100 ++++++++++++----- consensus/spos/bls/v2/subroundEndRound.go | 78 ++++++------- consensus/spos/bls/v2/subroundSignature.go | 34 ++++-- consensus/spos/bls/v2/subroundStartRound.go | 32 +++--- consensus/spos/subround.go | 5 + .../dataPool/proofsCache/proofsPool.go | 2 + dataRetriever/resolvers/headerResolver.go | 4 +- factory/consensus/consensusComponents.go | 21 ++++ factory/network/networkComponents.go | 4 +- factory/processing/processComponents.go | 10 +- .../startInEpoch/startInEpoch_test.go | 2 +- integrationTests/testConsensusNode.go | 12 +- integrationTests/testHeartbeatNode.go | 2 +- integrationTests/testInitializer.go | 10 +- integrationTests/testProcessorNode.go | 9 +- integrationTests/testSyncNode.go | 4 +- node/nodeRunner.go | 1 + .../interceptedBlockHeader.go | 3 + .../interceptedMetaBlockHeader.go | 4 + .../interceptors/interceptedDataVerifier.go | 2 + .../equivalentProofsInterceptorProcessor.go | 8 +- process/sync/argBootstrapper.go | 2 + process/sync/baseForkDetector.go | 20 ++-- process/sync/baseSync.go | 103 +++++++++--------- process/sync/metaForkDetector.go | 7 ++ process/sync/metablock.go | 22 ++-- process/sync/shardForkDetector.go | 9 +- process/sync/shardblock.go | 20 +++- process/sync/testMetaBootstrap.go | 2 +- process/sync/testShardBootstrap.go | 2 +- 36 files changed, 381 insertions(+), 197 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 688f688b7e2..41213ccd15a 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -42,7 +42,7 @@ # ChainParametersByEpoch defines chain operation configurable values that can be modified based on epochs ChainParametersByEpoch = [ - { EnableEpoch = 0, RoundDuration = 6000, ShardConsensusGroupSize = 7, ShardMinNumNodes = 10, MetachainConsensusGroupSize = 10, MetachainMinNumNodes = 10, Hysteresis = 0.2, Adaptivity = false } + { EnableEpoch = 0, RoundDuration = 6000, ShardConsensusGroupSize = 3, ShardMinNumNodes = 3, MetachainConsensusGroupSize = 3, MetachainMinNumNodes = 3, Hysteresis = 0.2, Adaptivity = false } ] [HardwareRequirements] @@ -628,7 +628,7 @@ [EpochStartConfig] GenesisEpoch = 0 MinRoundsBetweenEpochs = 20 - RoundsPerEpoch = 200 + RoundsPerEpoch = 21 # Min and Max ShuffledOutRestartThreshold represents the minimum and maximum duration of an epoch (in percentage) after a node which # has been shuffled out has to restart its process in order to start in a new shard MinShuffledOutRestartThreshold = 0.05 diff --git a/consensus/chronology/argChronology.go b/consensus/chronology/argChronology.go index 79b012e55b6..387c713289c 100644 --- a/consensus/chronology/argChronology.go +++ b/consensus/chronology/argChronology.go @@ -6,10 +6,12 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/ntp" + logger "github.com/multiversx/mx-chain-logger-go" ) // ArgChronology holds all dependencies required by the chronology component type ArgChronology struct { + Logger logger.Logger GenesisTime time.Time RoundHandler 
consensus.RoundHandler SyncTimer ntp.SyncTimer diff --git a/consensus/chronology/chronology.go b/consensus/chronology/chronology.go index 0c195c2e31a..828370445d6 100644 --- a/consensus/chronology/chronology.go +++ b/consensus/chronology/chronology.go @@ -10,7 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/closing" "github.com/multiversx/mx-chain-core-go/display" - "github.com/multiversx/mx-chain-logger-go" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" @@ -20,8 +20,6 @@ import ( var _ consensus.ChronologyHandler = (*chronology)(nil) var _ closing.Closer = (*chronology)(nil) -var log = logger.GetOrCreate("consensus/chronology") - // srBeforeStartRound defines the state which exist before the start of the round const srBeforeStartRound = -1 @@ -30,6 +28,7 @@ const chronologyAlarmID = "chronology" // chronology defines the data needed by the chronology type chronology struct { + log logger.Logger genesisTime time.Time roundHandler consensus.RoundHandler @@ -48,6 +47,11 @@ type chronology struct { // NewChronology creates a new chronology object func NewChronology(arg ArgChronology) (*chronology, error) { + var log logger.Logger + log = logger.GetOrCreate("consensus/chronology") + if arg.Logger != nil { + log = arg.Logger + } err := checkNewChronologyParams(arg) if err != nil { @@ -55,6 +59,7 @@ func NewChronology(arg ArgChronology) (*chronology, error) { } chr := chronology{ + log: log, genesisTime: arg.GenesisTime, roundHandler: arg.RoundHandler, syncTimer: arg.SyncTimer, @@ -122,7 +127,7 @@ func (chr *chronology) startRounds(ctx context.Context) { for { select { case <-ctx.Done(): - log.Debug("chronology's go routine is stopping...") + chr.log.Debug("chronology's go routine is stopping...") return case <-time.After(time.Millisecond): } @@ -143,11 +148,12 @@ func (chr *chronology) startRound(ctx context.Context) { sr := chr.loadSubroundHandler(chr.subroundId) if sr == nil { + // chr.log.Trace("chronology: nil subround handler", "subroundId", chr.subroundId) return } msg := fmt.Sprintf("SUBROUND %s BEGINS", sr.Name()) - log.Debug(display.Headline(msg, chr.syncTimer.FormattedCurrentTime(), ".")) + chr.log.Debug(display.Headline(msg, chr.syncTimer.FormattedCurrentTime(), ".")) logger.SetCorrelationSubround(sr.Name()) if !sr.DoWork(ctx, chr.roundHandler) { @@ -166,7 +172,7 @@ func (chr *chronology) updateRound() { if oldRoundIndex != chr.roundHandler.Index() { chr.watchdog.Reset(chronologyAlarmID) msg := fmt.Sprintf("ROUND %d BEGINS (%d)", chr.roundHandler.Index(), chr.roundHandler.TimeStamp().Unix()) - log.Debug(display.Headline(msg, chr.syncTimer.FormattedCurrentTime(), "#")) + chr.log.Debug(display.Headline(msg, chr.syncTimer.FormattedCurrentTime(), "#")) logger.SetCorrelationRound(chr.roundHandler.Index()) chr.initRound() diff --git a/consensus/spos/bls/proxy/subroundsHandler.go b/consensus/spos/bls/proxy/subroundsHandler.go index dec18fea1e6..b650d828dd7 100644 --- a/consensus/spos/bls/proxy/subroundsHandler.go +++ b/consensus/spos/bls/proxy/subroundsHandler.go @@ -18,6 +18,7 @@ var log = logger.GetOrCreate("consensus/spos/bls/proxy") // SubroundsHandlerArgs struct contains the needed data for the SubroundsHandler type SubroundsHandlerArgs struct { + Logger logger.Logger Chronology consensus.ChronologyHandler ConsensusCoreHandler spos.ConsensusCoreHandler ConsensusState spos.ConsensusStateHandler @@ -42,6 +43,7 @@ type 
consensusStateMachineType int // SubroundsHandler struct contains the needed data for the SubroundsHandler type SubroundsHandler struct { + log logger.Logger chronology consensus.ChronologyHandler consensusCoreHandler spos.ConsensusCoreHandler consensusState spos.ConsensusStateHandler @@ -77,6 +79,7 @@ func NewSubroundsHandler(args *SubroundsHandlerArgs) (*SubroundsHandler, error) } subroundHandler := &SubroundsHandler{ + log: args.Logger, chronology: args.Chronology, consensusCoreHandler: args.ConsensusCoreHandler, consensusState: args.ConsensusState, @@ -150,6 +153,7 @@ func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { s.currentConsensusType = consensusV2 fct, err = v2.NewSubroundsFactory( + s.log, s.consensusCoreHandler, s.consensusState, s.worker, @@ -175,6 +179,7 @@ func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { s.appStatusHandler, s.sentSignatureTracker, s.outportHandler, + s.log, ) } if err != nil { diff --git a/consensus/spos/bls/v1/blsSubroundsFactory.go b/consensus/spos/bls/v1/blsSubroundsFactory.go index 9ece08a248d..a9f31048f8a 100644 --- a/consensus/spos/bls/v1/blsSubroundsFactory.go +++ b/consensus/spos/bls/v1/blsSubroundsFactory.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" @@ -23,6 +24,7 @@ type factory struct { sentSignaturesTracker spos.SentSignaturesTracker chainID []byte currentPid core.PeerID + log logger.Logger } // NewSubroundsFactory creates a new consensusState object @@ -35,6 +37,7 @@ func NewSubroundsFactory( appStatusHandler core.AppStatusHandler, sentSignaturesTracker spos.SentSignaturesTracker, outportHandler outport.OutportHandler, + logger logger.Logger, ) (*factory, error) { // no need to check the outportHandler, it can be nil err := checkNewFactoryParams( @@ -147,6 +150,7 @@ func (fct *factory) generateStartRoundSubround() error { fct.chainID, fct.currentPid, fct.appStatusHandler, + fct.log, ) if err != nil { return err @@ -189,6 +193,7 @@ func (fct *factory) generateBlockSubround() error { fct.chainID, fct.currentPid, fct.appStatusHandler, + fct.log, ) if err != nil { return err @@ -227,6 +232,7 @@ func (fct *factory) generateSignatureSubround() error { fct.chainID, fct.currentPid, fct.appStatusHandler, + fct.log, ) if err != nil { return err @@ -263,6 +269,7 @@ func (fct *factory) generateEndRoundSubround() error { fct.chainID, fct.currentPid, fct.appStatusHandler, + fct.log, ) if err != nil { return err diff --git a/consensus/spos/bls/v2/blsSubroundsFactory.go b/consensus/spos/bls/v2/blsSubroundsFactory.go index 2c9ade325a0..e40e1a2db82 100644 --- a/consensus/spos/bls/v2/blsSubroundsFactory.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" @@ -14,6 +15,7 @@ import ( // factory defines the data needed by this factory to create all the subrounds and give them their specific // functionality type factory struct { + log logger.Logger consensusCore spos.ConsensusCoreHandler consensusState spos.ConsensusStateHandler worker spos.WorkerHandler @@ -28,6 +30,7 @@ type factory struct { // 
NewSubroundsFactory creates a new consensusState object func NewSubroundsFactory( + logger logger.Logger, consensusDataContainer spos.ConsensusCoreHandler, consensusState spos.ConsensusStateHandler, worker spos.WorkerHandler, @@ -53,6 +56,7 @@ func NewSubroundsFactory( } fct := factory{ + log: logger, consensusCore: consensusDataContainer, consensusState: consensusState, worker: worker, @@ -155,6 +159,7 @@ func (fct *factory) generateStartRoundSubround() error { fct.chainID, fct.currentPid, fct.appStatusHandler, + fct.log, ) if err != nil { return err @@ -195,6 +200,7 @@ func (fct *factory) generateBlockSubround() error { fct.chainID, fct.currentPid, fct.appStatusHandler, + fct.log, ) if err != nil { return err @@ -231,6 +237,7 @@ func (fct *factory) generateSignatureSubround() error { fct.chainID, fct.currentPid, fct.appStatusHandler, + fct.log, ) if err != nil { return err @@ -267,6 +274,7 @@ func (fct *factory) generateEndRoundSubround() error { fct.chainID, fct.currentPid, fct.appStatusHandler, + fct.log, ) if err != nil { return err diff --git a/consensus/spos/bls/v2/subroundBlock.go b/consensus/spos/bls/v2/subroundBlock.go index 2454ad3643e..7786a6c1501 100644 --- a/consensus/spos/bls/v2/subroundBlock.go +++ b/consensus/spos/bls/v2/subroundBlock.go @@ -79,14 +79,17 @@ func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { } if sr.RoundHandler().Index() <= sr.getRoundInLastCommittedBlock() { + sr.Log.Error("doBlockJob: index") return false } if sr.IsLeaderJobDone(sr.Current()) { + sr.Log.Error("doBlockJob: leader job done") return false } if sr.IsSubroundFinished(sr.Current()) { + sr.Log.Error("doBlockJob: is subround finished") return false } @@ -95,49 +98,51 @@ func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { header, err := sr.createHeader() if err != nil { - printLogMessage(ctx, "doBlockJob.createHeader", err) + sr.printLogMessage(ctx, "doBlockJob.createHeader", err) return false } header, body, err := sr.createBlock(header) if err != nil { - printLogMessage(ctx, "doBlockJob.createBlock", err) + sr.printLogMessage(ctx, "doBlockJob.createBlock", err) return false } // This must be done after createBlock, in order to have the proper epoch set wasProofAdded := sr.addProofOnHeader(header) if !wasProofAdded { + sr.Log.Error("proof was not added") return false } // block proof verification should be done over the header that contains the leader signature leaderSignature, err := sr.signBlockHeader(header) if err != nil { - printLogMessage(ctx, "doBlockJob.signBlockHeader", err) + sr.printLogMessage(ctx, "doBlockJob.signBlockHeader", err) return false } err = header.SetLeaderSignature(leaderSignature) if err != nil { - printLogMessage(ctx, "doBlockJob.SetLeaderSignature", err) + sr.printLogMessage(ctx, "doBlockJob.SetLeaderSignature", err) return false } leader, errGetLeader := sr.GetLeader() if errGetLeader != nil { - log.Debug("doBlockJob.GetLeader", "error", errGetLeader) + sr.Log.Debug("doBlockJob.GetLeader", "error", errGetLeader) return false } sentWithSuccess := sr.sendBlock(header, body, leader) if !sentWithSuccess { + sr.Log.Error("sentWithSuccess failed") return false } err = sr.SetJobDone(leader, sr.Current(), true) if err != nil { - log.Debug("doBlockJob.SetSelfJobDone", "error", err.Error()) + sr.Log.Debug("doBlockJob.SetSelfJobDone", "error", err.Error()) return false } @@ -168,25 +173,25 @@ func (sr *subroundBlock) signBlockHeader(header data.HeaderHandler) ([]byte, err return sr.SigningHandler().CreateSignatureForPublicKey(marshalledHdr, 
[]byte(leader)) } -func printLogMessage(ctx context.Context, baseMessage string, err error) { +func (sr *subroundBlock) printLogMessage(ctx context.Context, baseMessage string, err error) { if common.IsContextDone(ctx) { - log.Debug(baseMessage + " context is closing") + sr.Log.Debug(baseMessage + " context is closing") return } - log.Debug(baseMessage, "error", err.Error()) + sr.Log.Debug(baseMessage, "error", err.Error()) } func (sr *subroundBlock) sendBlock(header data.HeaderHandler, body data.BodyHandler, _ string) bool { marshalledBody, err := sr.Marshalizer().Marshal(body) if err != nil { - log.Debug("sendBlock.Marshal: body", "error", err.Error()) + sr.Log.Debug("sendBlock.Marshal: body", "error", err.Error()) return false } marshalledHeader, err := sr.Marshalizer().Marshal(header) if err != nil { - log.Debug("sendBlock.Marshal: header", "error", err.Error()) + sr.Log.Debug("sendBlock.Marshal: header", "error", err.Error()) return false } @@ -200,7 +205,7 @@ func (sr *subroundBlock) sendBlock(header data.HeaderHandler, body data.BodyHand func (sr *subroundBlock) logBlockSize(marshalledBody []byte, marshalledHeader []byte) { bodyAndHeaderSize := uint32(len(marshalledBody) + len(marshalledHeader)) - log.Debug("logBlockSize", + sr.Log.Debug("logBlockSize", "body size", len(marshalledBody), "header size", len(marshalledHeader), "body and header size", bodyAndHeaderSize, @@ -232,7 +237,7 @@ func (sr *subroundBlock) sendBlockBody( ) bool { leader, errGetLeader := sr.GetLeader() if errGetLeader != nil { - log.Debug("sendBlockBody.GetLeader", "error", errGetLeader) + sr.Log.Debug("sendBlockBody.GetLeader", "error", errGetLeader) return false } @@ -255,11 +260,11 @@ func (sr *subroundBlock) sendBlockBody( err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) if err != nil { - log.Debug("sendBlockBody.BroadcastConsensusMessage", "error", err.Error()) + sr.Log.Debug("sendBlockBody.BroadcastConsensusMessage", "error", err.Error()) return false } - log.Debug("step 1: block body has been sent") + sr.Log.Debug("step 1: block body has been sent") sr.SetBody(bodyHandler) @@ -273,19 +278,19 @@ func (sr *subroundBlock) sendBlockHeader( ) bool { leader, errGetLeader := sr.GetLeader() if errGetLeader != nil { - log.Debug("sendBlockHeader.GetLeader", "error", errGetLeader) + sr.Log.Debug("sendBlockHeader.GetLeader", "error", errGetLeader) return false } err := sr.BroadcastMessenger().BroadcastHeader(headerHandler, []byte(leader)) if err != nil { - log.Warn("sendBlockHeader.BroadcastHeader", "error", err.Error()) + sr.Log.Warn("sendBlockHeader.BroadcastHeader", "error", err.Error()) return false } headerHash := sr.Hasher().Compute(string(marshalledHeader)) - log.Debug("step 1: block header has been sent", + sr.Log.Debug("step 1: block header has been sent", "nonce", headerHandler.GetNonce(), "hash", headerHash) @@ -316,6 +321,7 @@ func (sr *subroundBlock) createHeader() (data.HeaderHandler, error) { if err != nil { return nil, err } + sr.Log.Info("subroundBlock: create header with nonce", "nonce", nonce, "round", round) err = hdr.SetPrevHash(prevHash) if err != nil { @@ -363,6 +369,13 @@ func (sr *subroundBlock) createHeader() (data.HeaderHandler, error) { func (sr *subroundBlock) addProofOnHeader(header data.HeaderHandler) bool { prevBlockProof, err := sr.EquivalentProofsPool().GetProof(sr.ShardCoordinator().SelfId(), header.GetPrevHash()) if err != nil { + sr.Log.Error("failed to get proof for header", "headerHash", sr.GetData()) + + if header.GetNonce() == 1 { + sr.Log.Error("first 
nonce") + return true + } + + // for the first block after activation we won't add the proof // TODO: fix this on verifications as well return common.IsEpochChangeBlockForFlagActivation(header, sr.EnableEpochsHandler(), common.EquivalentMessagesFlag) @@ -378,7 +391,7 @@ func (sr *subroundBlock) addProofOnHeader(header data.HeaderHandler) bool { hash = []byte("") } - log.Debug("addProofOnHeader: no proof found", "header hash", hex.EncodeToString(hash)) + sr.Log.Debug("addProofOnHeader: no proof found", "header hash", hex.EncodeToString(hash)) return false } @@ -392,20 +405,28 @@ func isProofEmpty(proof data.HeaderProofHandler) bool { func (sr *subroundBlock) saveProofForPreviousHeaderIfNeeded(header data.HeaderHandler) { hasProof := sr.EquivalentProofsPool().HasProof(sr.ShardCoordinator().SelfId(), header.GetPrevHash()) if hasProof { - log.Debug("saveProofForPreviousHeaderIfNeeded: no need to set proof since it is already saved") + sr.Log.Debug("saveProofForPreviousHeaderIfNeeded: no need to set proof since it is already saved") return } proof := header.GetPreviousProof() + if proof == nil { + sr.Log.Error("nil previous proof") + return + } + err := sr.EquivalentProofsPool().AddProof(proof) if err != nil { - log.Debug("saveProofForPreviousHeaderIfNeeded: failed to add proof, %w", err) + sr.Log.Debug("saveProofForPreviousHeaderIfNeeded: failed to add proof", "error", err.Error()) return } + + sr.Log.Error("saveProofForPreviousHeaderIfNeeded: added proof on header", "proofHeader", proof.GetHeaderHash()) } // receivedBlockBody method is called when a block body is received through the block body channel func (sr *subroundBlock) receivedBlockBody(ctx context.Context, cnsDta *consensus.Message) bool { + sr.Log.Debug("receivedBlockBody: START") node := string(cnsDta.PubKey) if !sr.IsNodeLeaderInCurrentRound(node) { // is NOT this node leader in current round? 
@@ -432,7 +453,7 @@ func (sr *subroundBlock) receivedBlockBody(ctx context.Context, cnsDta *consensu return false } - log.Debug("step 1: block body has been received") + sr.Log.Debug("step 1: block body has been received") blockProcessedWithSuccess := sr.processReceivedBlock(ctx, cnsDta.RoundIndex, cnsDta.PubKey) @@ -453,6 +474,7 @@ func (sr *subroundBlock) isHeaderForCurrentConsensus(header data.HeaderHandler) return false } if header.GetRound() != uint64(sr.RoundHandler().Index()) { + sr.Log.Error("isHeaderForCurrentConsensus: round") return false } @@ -461,9 +483,11 @@ func (sr *subroundBlock) isHeaderForCurrentConsensus(header data.HeaderHandler) return false } if !bytes.Equal(header.GetPrevHash(), prevHash) { + sr.Log.Error("isHeaderForCurrentConsensus: prevHash") return false } if header.GetNonce() != prevHeader.GetNonce()+1 { + sr.Log.Error("isHeaderForCurrentConsensus: nonce") return false } prevRandSeed := prevHeader.GetRandSeed() @@ -487,29 +511,37 @@ func (sr *subroundBlock) getLeaderForHeader(headerHandler data.HeaderHandler) ([ } func (sr *subroundBlock) receivedBlockHeader(headerHandler data.HeaderHandler) { + sr.Log.Debug("receivedBlockHeader: START") + if check.IfNil(headerHandler) { + sr.Log.Error("receivedBlockHeader: nil header") return } if headerHandler.CheckFieldsForNil() != nil { + sr.Log.Error("receivedBlockHeader: nil header fields") return } if !sr.isHeaderForCurrentConsensus(headerHandler) { + sr.Log.Error("receivedBlockHeader: header not for current consensus") return } isLeader := sr.IsSelfLeader() if sr.ConsensusGroup() == nil || isLeader { + sr.Log.Error("receivedBlockHeader: is leader") return } if sr.IsConsensusDataSet() { + sr.Log.Error("receivedBlockHeader: consensus data is already set") return } headerLeader, err := sr.getLeaderForHeader(headerHandler) if err != nil { + sr.Log.Error("receivedBlockHeader: failed to get leader for header", "error", err) return } @@ -520,10 +552,13 @@ func (sr *subroundBlock) receivedBlockHeader(headerHandler data.HeaderHandler) { spos.LeaderPeerHonestyDecreaseFactor, ) + sr.Log.Error("receivedBlockHeader: node leader not in current round") + return } if sr.IsHeaderAlreadyReceived() { + sr.Log.Error("receivedBlockHeader: header already received") return } @@ -541,7 +576,7 @@ func (sr *subroundBlock) receivedBlockHeader(headerHandler data.HeaderHandler) { sr.saveProofForPreviousHeaderIfNeeded(headerHandler) - log.Debug("step 1: block header has been received", + sr.Log.Debug("step 1: block header has been received", "nonce", sr.GetHeader().GetNonce(), "hash", sr.GetData()) @@ -581,9 +616,11 @@ func (sr *subroundBlock) processReceivedBlock( senderPK []byte, ) bool { if check.IfNil(sr.GetBody()) { + sr.Log.Error("processReceivedBlock: nil body") return false } if check.IfNil(sr.GetHeader()) { + sr.Log.Error("processReceivedBlock: nil header") return false } @@ -595,7 +632,7 @@ func (sr *subroundBlock) processReceivedBlock( shouldNotProcessBlock := sr.GetExtendedCalled() || round < sr.RoundHandler().Index() if shouldNotProcessBlock { - log.Debug("canceled round, extended has been called or round index has been changed", + sr.Log.Debug("canceled round, extended has been called or round index has been changed", "round", sr.RoundHandler().Index(), "subround", sr.Name(), "cnsDta round", round, @@ -628,7 +665,7 @@ func (sr *subroundBlock) processBlock( ) if roundIndex < sr.RoundHandler().Index() { - log.Debug("canceled round, round index has been changed", + sr.Log.Debug("canceled round, round index has been changed", "round", 
sr.RoundHandler().Index(), "subround", sr.Name(), "cnsDta round", roundIndex, @@ -657,11 +694,11 @@ func (sr *subroundBlock) processBlock( func (sr *subroundBlock) printCancelRoundLogMessage(ctx context.Context, err error) { if common.IsContextDone(ctx) { - log.Debug("canceled round as the context is closing") + sr.Log.Debug("canceled round as the context is closing") return } - log.Debug("canceled round", + sr.Log.Debug("canceled round", "round", sr.RoundHandler().Index(), "subround", sr.Name(), "error", err.Error()) @@ -681,6 +718,8 @@ func (sr *subroundBlock) computeSubroundProcessingMetric(startTime time.Time, me // doBlockConsensusCheck method checks if the consensus in the subround Block is achieved func (sr *subroundBlock) doBlockConsensusCheck() bool { if sr.GetRoundCanceled() { + sr.Log.Debug("step 1: subround has NOT been finished: round cancelled", + "subround", sr.Name()) return false } @@ -690,12 +729,15 @@ func (sr *subroundBlock) doBlockConsensusCheck() bool { threshold := sr.Threshold(sr.Current()) if sr.isBlockReceived(threshold) { - log.Debug("step 1: subround has been finished", + sr.Log.Debug("step 1: subround has been finished", "subround", sr.Name()) sr.SetStatus(sr.Current(), spos.SsFinished) return true } + sr.Log.Debug("step 1: subround has NOT been finished", + "subround", sr.Name()) + return false } @@ -707,7 +749,7 @@ func (sr *subroundBlock) isBlockReceived(threshold int) bool { node := sr.ConsensusGroup()[i] isJobDone, err := sr.JobDone(node, sr.Current()) if err != nil { - log.Debug("isBlockReceived.JobDone", + sr.Log.Debug("isBlockReceived.JobDone", "node", node, "subround", sr.Name(), "error", err.Error()) diff --git a/consensus/spos/bls/v2/subroundEndRound.go b/consensus/spos/bls/v2/subroundEndRound.go index b5e6440685f..0efaf13d0b6 100644 --- a/consensus/spos/bls/v2/subroundEndRound.go +++ b/consensus/spos/bls/v2/subroundEndRound.go @@ -115,7 +115,7 @@ func (sr *subroundEndRound) receivedProof(proof consensus.ProofHandler) { } // no need to re-verify the proof since it was already verified when it was added to the proofs pool - log.Debug("step 3: block header final info has been received", + sr.Log.Debug("step 3: block header final info has been received", "PubKeysBitmap", proof.GetPubKeysBitmap(), "AggregateSignature", proof.GetAggregatedSignature(), "HederHash", proof.GetHeaderHash()) @@ -153,11 +153,11 @@ func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta err := sr.verifyInvalidSigners(cnsDta.InvalidSigners) if err != nil { - log.Trace("receivedInvalidSignersInfo.verifyInvalidSigners", "error", err.Error()) + sr.Log.Trace("receivedInvalidSignersInfo.verifyInvalidSigners", "error", err.Error()) return false } - log.Debug("step 3: invalid signers info has been evaluated") + sr.Log.Debug("step 3: invalid signers info has been evaluated") sr.PeerHonestyHandler().ChangeScore( messageSender, @@ -198,7 +198,7 @@ func (sr *subroundEndRound) verifyInvalidSigner(msg p2p.MessageP2P) error { err = sr.SigningHandler().VerifySingleSignature(cnsMsg.PubKey, cnsMsg.BlockHeaderHash, cnsMsg.SignatureShare) if err != nil { - log.Debug("verifyInvalidSigner: confirmed that node provided invalid signature", + sr.Log.Debug("verifyInvalidSigner: confirmed that node provided invalid signature", "pubKey", cnsMsg.PubKey, "blockHeaderHash", cnsMsg.BlockHeaderHash, "error", err.Error(), @@ -230,12 +230,12 @@ func (sr *subroundEndRound) commitBlock() error { err := sr.BlockProcessor().CommitBlock(sr.GetHeader(), sr.GetBody()) elapsedTime := 
time.Since(startTime) if elapsedTime >= common.CommitMaxTime { - log.Warn("doEndRoundJobByNode.CommitBlock", "elapsed time", elapsedTime) + sr.Log.Warn("doEndRoundJobByNode.CommitBlock", "elapsed time", elapsedTime) } else { - log.Debug("elapsed time to commit block", "time [s]", elapsedTime) + sr.Log.Debug("elapsed time to commit block", "time [s]", elapsedTime) } if err != nil { - log.Debug("doEndRoundJobByNode.CommitBlock", "error", err) + sr.Log.Debug("doEndRoundJobByNode.CommitBlock", "error", err) return err } @@ -263,7 +263,7 @@ func (sr *subroundEndRound) doEndRoundJobByNode() bool { if proof != nil { err = sr.EquivalentProofsPool().AddProof(proof) if err != nil { - log.Debug("doEndRoundJobByNode.AddProof", "error", err) + sr.Log.Debug("doEndRoundJobByNode.AddProof", "error", err) return false } } @@ -272,10 +272,10 @@ func (sr *subroundEndRound) doEndRoundJobByNode() bool { sr.worker.DisplayStatistics() - log.Debug("step 3: Body and Header have been committed") + sr.Log.Debug("step 3: Body and Header have been committed") msg := fmt.Sprintf("Added proposed block with nonce %d in blockchain", sr.GetHeader().GetNonce()) - log.Debug(display.Headline(msg, sr.SyncTimer().FormattedCurrentTime(), "+")) + sr.Log.Debug(display.Headline(msg, sr.SyncTimer().FormattedCurrentTime(), "+")) sr.updateMetricsForLeader() @@ -290,14 +290,14 @@ func (sr *subroundEndRound) sendProof() (data.HeaderProofHandler, bool) { bitmap := sr.GenerateBitmap(bls.SrSignature) err := sr.checkSignaturesValidity(bitmap) if err != nil { - log.Debug("sendProof.checkSignaturesValidity", "error", err.Error()) + sr.Log.Debug("sendProof.checkSignaturesValidity", "error", err.Error()) return nil, false } // Aggregate signatures, handle invalid signers and send final info if needed bitmap, sig, err := sr.aggregateSigsAndHandleInvalidSigners(bitmap) if err != nil { - log.Debug("sendProof.aggregateSigsAndHandleInvalidSigners", "error", err.Error()) + sr.Log.Debug("sendProof.aggregateSigsAndHandleInvalidSigners", "error", err.Error()) return nil, false } @@ -309,7 +309,7 @@ func (sr *subroundEndRound) sendProof() (data.HeaderProofHandler, bool) { roundHandler := sr.RoundHandler() if roundHandler.RemainingTime(roundHandler.TimeStamp(), roundHandler.TimeDuration()) < 0 { - log.Debug("sendProof: time is out -> cancel broadcasting final info and header", + sr.Log.Debug("sendProof: time is out -> cancel broadcasting final info and header", "round time stamp", roundHandler.TimeStamp(), "current time", time.Now()) return nil, false @@ -322,7 +322,7 @@ func (sr *subroundEndRound) sendProof() (data.HeaderProofHandler, bool) { func (sr *subroundEndRound) shouldSendProof() bool { if sr.EquivalentProofsPool().HasProof(sr.ShardCoordinator().SelfId(), sr.GetData()) { - log.Debug("shouldSendProof: equivalent message already processed") + sr.Log.Debug("shouldSendProof: equivalent message already processed") return false } @@ -332,21 +332,21 @@ func (sr *subroundEndRound) shouldSendProof() bool { func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) ([]byte, []byte, error) { sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.GetHeader().GetEpoch()) if err != nil { - log.Debug("doEndRoundJobByNode.AggregateSigs", "error", err.Error()) + sr.Log.Debug("doEndRoundJobByNode.AggregateSigs", "error", err.Error()) return sr.handleInvalidSignersOnAggSigFail() } err = sr.SigningHandler().SetAggregatedSig(sig) if err != nil { - log.Debug("doEndRoundJobByNode.SetAggregatedSig", "error", err.Error()) + 
sr.Log.Debug("doEndRoundJobByNode.SetAggregatedSig", "error", err.Error()) return nil, nil, err } // the header (hash) verified here is with leader signature on it err = sr.SigningHandler().Verify(sr.GetData(), bitmap, sr.GetHeader().GetEpoch()) if err != nil { - log.Debug("doEndRoundJobByNode.Verify", "error", err.Error()) + sr.Log.Debug("doEndRoundJobByNode.Verify", "error", err.Error()) return sr.handleInvalidSignersOnAggSigFail() } @@ -374,7 +374,7 @@ func (sr *subroundEndRound) checkGoRoutinesThrottler(ctx context.Context) error func (sr *subroundEndRound) verifySignature(i int, pk string, sigShare []byte) error { err := sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), sr.GetHeader().GetEpoch()) if err != nil { - log.Trace("VerifySignatureShare returned an error: ", err) + sr.Log.Trace("VerifySignatureShare returned an error: ", err) errSetJob := sr.SetJobDone(pk, bls.SrSignature, false) if errSetJob != nil { return errSetJob @@ -390,7 +390,7 @@ func (sr *subroundEndRound) verifySignature(i int, pk string, sigShare []byte) e return err } - log.Trace("verifyNodesOnAggSigVerificationFail: verifying signature share", "public key", pk) + sr.Log.Trace("verifyNodesOnAggSigVerificationFail: verifying signature share", "public key", pk) return nil } @@ -449,7 +449,7 @@ func (sr *subroundEndRound) getFullMessagesForInvalidSigners(invalidPubKeys []st for _, pk := range invalidPubKeys { p2pMsg, ok := sr.GetMessageWithSignature(pk) if !ok { - log.Trace("message not found in state for invalid signer", "pubkey", pk) + sr.Log.Trace("message not found in state for invalid signer", "pubkey", pk) continue } @@ -469,13 +469,13 @@ func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() ([]byte, []byte, invalidPubKeys, err := sr.verifyNodesOnAggSigFail(ctx) cancel() if err != nil { - log.Debug("doEndRoundJobByNode.verifyNodesOnAggSigFail", "error", err.Error()) + sr.Log.Debug("doEndRoundJobByNode.verifyNodesOnAggSigFail", "error", err.Error()) return nil, nil, err } _, err = sr.getFullMessagesForInvalidSigners(invalidPubKeys) if err != nil { - log.Debug("doEndRoundJobByNode.getFullMessagesForInvalidSigners", "error", err.Error()) + sr.Log.Debug("doEndRoundJobByNode.getFullMessagesForInvalidSigners", "error", err.Error()) return nil, nil, err } @@ -486,7 +486,7 @@ func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() ([]byte, []byte, bitmap, sig, err := sr.computeAggSigOnValidNodes() if err != nil { - log.Debug("doEndRoundJobByNode.computeAggSigOnValidNodes", "error", err.Error()) + sr.Log.Debug("doEndRoundJobByNode.computeAggSigOnValidNodes", "error", err.Error()) return nil, nil, err } @@ -540,7 +540,7 @@ func (sr *subroundEndRound) createAndBroadcastProof(signature []byte, bitmap []b return nil, err } - log.Debug("step 3: block header proof has been sent", + sr.Log.Debug("step 3: block header proof has been sent", "PubKeysBitmap", bitmap, "AggregateSignature", signature) @@ -554,7 +554,7 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by sender, err := sr.GetLeader() if err != nil { - log.Debug("createAndBroadcastInvalidSigners.getSender", "error", err) + sr.Log.Debug("createAndBroadcastInvalidSigners.getSender", "error", err) return } @@ -577,11 +577,11 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by err = sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) if err != nil { - log.Debug("doEndRoundJob.BroadcastConsensusMessage", "error", err.Error()) + 
sr.Log.Debug("doEndRoundJob.BroadcastConsensusMessage", "error", err.Error()) return } - log.Debug("step 3: invalid signers info has been sent") + sr.Log.Debug("step 3: invalid signers info has been sent") } func (sr *subroundEndRound) updateMetricsForLeader() { @@ -610,7 +610,7 @@ func (sr *subroundEndRound) IsBitmapInvalid(bitmap []byte, consensusPubKeys []st expectedBitmapSize++ } if len(bitmap) != expectedBitmapSize { - log.Debug("wrong size bitmap", + sr.Log.Debug("wrong size bitmap", "expected number of bytes", expectedBitmapSize, "actual", len(bitmap)) return ErrWrongSizeBitmap @@ -624,7 +624,7 @@ func (sr *subroundEndRound) IsBitmapInvalid(bitmap []byte, consensusPubKeys []st minNumRequiredSignatures := core.GetPBFTThreshold(consensusSize) if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.GetHeader()) { minNumRequiredSignatures = core.GetPBFTFallbackThreshold(consensusSize) - log.Warn("HeaderSigVerifier.verifyConsensusSize: fallback validation has been applied", + sr.Log.Warn("HeaderSigVerifier.verifyConsensusSize: fallback validation has been applied", "minimum number of signatures required", minNumRequiredSignatures, "actual number of signatures in bitmap", numOfOnesInBitmap, ) @@ -634,7 +634,7 @@ func (sr *subroundEndRound) IsBitmapInvalid(bitmap []byte, consensusPubKeys []st return nil } - log.Debug("not enough signatures", + sr.Log.Debug("not enough signatures", "minimum expected", minNumRequiredSignatures, "actual", numOfOnesInBitmap) @@ -667,7 +667,7 @@ func (sr *subroundEndRound) isOutOfTime() bool { startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { - log.Debug("canceled round, time is out", + sr.Log.Debug("canceled round, time is out", "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), "subround", sr.Name()) @@ -715,7 +715,7 @@ func (sr *subroundEndRound) waitForSignalSync() bool { select { case <-timerBetweenStatusChecks.C: if sr.IsSubroundFinished(sr.Current()) { - log.Trace("subround already finished", "subround", sr.Name()) + sr.Log.Trace("subround already finished", "subround", sr.Name()) return true } @@ -724,7 +724,7 @@ func (sr *subroundEndRound) waitForSignalSync() bool { } timerBetweenStatusChecks.Reset(timeBetweenSignaturesChecks) case <-timeout.C: - log.Debug("timeout while waiting for signatures or final info", "subround", sr.Name()) + sr.Log.Debug("timeout while waiting for signatures or final info", "subround", sr.Name()) return false } } @@ -784,7 +784,7 @@ func (sr *subroundEndRound) receivedSignature(_ context.Context, cnsDta *consens index, err := sr.ConsensusGroupIndex(node) if err != nil { - log.Debug("receivedSignature.ConsensusGroupIndex", + sr.Log.Debug("receivedSignature.ConsensusGroupIndex", "node", pkForLogs, "error", err.Error()) return false @@ -792,7 +792,7 @@ func (sr *subroundEndRound) receivedSignature(_ context.Context, cnsDta *consens err = sr.SigningHandler().StoreSignatureShare(uint16(index), cnsDta.SignatureShare) if err != nil { - log.Debug("receivedSignature.StoreSignatureShare", + sr.Log.Debug("receivedSignature.StoreSignatureShare", "node", pkForLogs, "index", index, "error", err.Error()) @@ -801,7 +801,7 @@ func (sr *subroundEndRound) receivedSignature(_ context.Context, cnsDta *consens err = sr.SetJobDone(node, bls.SrSignature, true) if err != nil { - log.Debug("receivedSignature.SetJobDone", + sr.Log.Debug("receivedSignature.SetJobDone", "node", 
pkForLogs, "subround", sr.Name(), "error", err.Error()) @@ -821,7 +821,7 @@ func (sr *subroundEndRound) checkReceivedSignatures() bool { threshold := sr.Threshold(bls.SrSignature) if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.GetHeader()) { threshold = sr.FallbackThreshold(bls.SrSignature) - log.Warn("subroundEndRound.checkReceivedSignatures: fallback validation has been applied", + sr.Log.Warn("subroundEndRound.checkReceivedSignatures: fallback validation has been applied", "minimum number of signatures required", threshold, "actual number of signatures received", sr.getNumOfSignaturesCollected(), ) @@ -836,7 +836,7 @@ func (sr *subroundEndRound) checkReceivedSignatures() bool { shouldStopWaitingSignatures := isSelfJobDone && isSignatureCollectionDone if shouldStopWaitingSignatures { - log.Debug("step 2: signatures collection done", + sr.Log.Debug("step 2: signatures collection done", "subround", sr.Name(), "signatures received", numSigs, "total signatures", len(sr.ConsensusGroup())) @@ -855,7 +855,7 @@ func (sr *subroundEndRound) getNumOfSignaturesCollected() int { isSignJobDone, err := sr.JobDone(node, bls.SrSignature) if err != nil { - log.Debug("getNumOfSignaturesCollected.JobDone", + sr.Log.Debug("getNumOfSignaturesCollected.JobDone", "node", node, "subround", sr.Name(), "error", err.Error()) diff --git a/consensus/spos/bls/v2/subroundSignature.go b/consensus/spos/bls/v2/subroundSignature.go index 3c273437e41..8a6b19c7fc1 100644 --- a/consensus/spos/bls/v2/subroundSignature.go +++ b/consensus/spos/bls/v2/subroundSignature.go @@ -87,7 +87,7 @@ func (sr *subroundSignature) doSignatureJob(ctx context.Context) bool { return false } if check.IfNil(sr.GetHeader()) { - log.Error("doSignatureJob", "error", spos.ErrNilHeader) + sr.Log.Error("doSignatureJob", "error", spos.ErrNilHeader) return false } @@ -98,12 +98,14 @@ func (sr *subroundSignature) doSignatureJob(ctx context.Context) bool { } } + sr.Log.Info("doSignatureJob", "isSelfSingleKeyInConsensusGroup", isSelfSingleKeyInConsensusGroup) + if !sr.doSignatureJobForManagedKeys(ctx) { return false } sr.SetStatus(sr.Current(), spos.SsFinished) - log.Debug("step 2: subround has been finished", + sr.Log.Debug("step 2: subround has been finished", "subround", sr.Name()) return true @@ -129,12 +131,12 @@ func (sr *subroundSignature) createAndSendSignatureMessage(signatureShare []byte err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) if err != nil { - log.Debug("createAndSendSignatureMessage.BroadcastConsensusMessage", + sr.Log.Debug("createAndSendSignatureMessage.BroadcastConsensusMessage", "error", err.Error(), "pk", pkBytes) return false } - log.Debug("step 2: signature has been sent", "pk", pkBytes) + sr.Log.Debug("step 2: signature has been sent", "pk", pkBytes) return true } @@ -142,7 +144,7 @@ func (sr *subroundSignature) createAndSendSignatureMessage(signatureShare []byte func (sr *subroundSignature) completeSignatureSubRound(pk string) bool { err := sr.SetJobDone(pk, sr.Current(), true) if err != nil { - log.Debug("doSignatureJob.SetSelfJobDone", + sr.Log.Debug("doSignatureJob.SetSelfJobDone", "subround", sr.Name(), "error", err.Error(), "pk", []byte(pk), @@ -150,6 +152,11 @@ func (sr *subroundSignature) completeSignatureSubRound(pk string) bool { return false } + sr.Log.Error("doSignatureJob.SetSelfJobDone", + "subround", sr.Name(), + "pk", []byte(pk), + ) + return true } @@ -169,7 +176,7 @@ func (sr *subroundSignature) doSignatureConsensusCheck() bool { isSelfInConsensusGroup := 
sr.IsSelfInConsensusGroup() if !isSelfInConsensusGroup { - log.Debug("step 2: subround has been finished", + sr.Log.Debug("step 2: subround has been finished", "subround", sr.Name()) sr.SetStatus(sr.Current(), spos.SsFinished) @@ -177,7 +184,7 @@ func (sr *subroundSignature) doSignatureConsensusCheck() bool { } if sr.IsSelfJobDone(sr.Current()) { - log.Debug("step 2: subround has been finished", + sr.Log.Debug("step 2: subround has been finished", "subround", sr.Name()) sr.SetStatus(sr.Current(), spos.SsFinished) sr.appStatusHandler.SetStringValue(common.MetricConsensusRoundState, "signed") @@ -228,7 +235,7 @@ func (sr *subroundSignature) doSignatureJobForManagedKeys(ctx context.Context) b wg.Wait() if numMultiKeysSignaturesSent > 0 { - log.Debug("step 2: multi keys signatures have been sent", "num", numMultiKeysSignaturesSent) + sr.Log.Debug("step 2: multi keys signatures have been sent", "num", numMultiKeysSignaturesSent) } return sentSigForAllKeys.IsSet() @@ -244,7 +251,7 @@ func (sr *subroundSignature) sendSignatureForManagedKey(idx int, pk string) bool pkBytes, ) if err != nil { - log.Debug("sendSignatureForManagedKey.CreateSignatureShareForPublicKey", "error", err.Error()) + sr.Log.Debug("sendSignatureForManagedKey.CreateSignatureShareForPublicKey", "error", err.Error()) return false } @@ -274,9 +281,11 @@ func (sr *subroundSignature) checkGoRoutinesThrottler(ctx context.Context) error } func (sr *subroundSignature) doSignatureJobForSingleKey() bool { + sr.Log.Info("doSignatureJobForSingleKey: START") + selfIndex, err := sr.SelfConsensusGroupIndex() if err != nil { - log.Debug("doSignatureJobForSingleKey.SelfConsensusGroupIndex: not in consensus group") + sr.Log.Debug("doSignatureJobForSingleKey.SelfConsensusGroupIndex: not in consensus group") return false } @@ -287,13 +296,16 @@ func (sr *subroundSignature) doSignatureJobForSingleKey() bool { []byte(sr.SelfPubKey()), ) if err != nil { - log.Debug("doSignatureJobForSingleKey.CreateSignatureShareForPublicKey", "error", err.Error()) + sr.Log.Debug("doSignatureJobForSingleKey.CreateSignatureShareForPublicKey", "error", err.Error()) return false } // leader also sends his signature here ok := sr.createAndSendSignatureMessage(signatureShare, []byte(sr.SelfPubKey())) if !ok { + sr.Log.Error("doSignatureJobForSingleKey: failed to send signature share", + "node", []byte(sr.SelfPubKey()), + ) return false } diff --git a/consensus/spos/bls/v2/subroundStartRound.go b/consensus/spos/bls/v2/subroundStartRound.go index 17c4a890ecf..0b2d7b60680 100644 --- a/consensus/spos/bls/v2/subroundStartRound.go +++ b/consensus/spos/bls/v2/subroundStartRound.go @@ -132,7 +132,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { err := sr.generateNextConsensusGroup(sr.RoundHandler().Index()) if err != nil { - log.Debug("initCurrentRound.generateNextConsensusGroup", + sr.Log.Debug("initCurrentRound.generateNextConsensusGroup", "round index", sr.RoundHandler().Index(), "error", err.Error()) @@ -153,7 +153,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { leader, err := sr.GetLeader() if err != nil { - log.Debug("initCurrentRound.GetLeader", "error", err.Error()) + sr.Log.Debug("initCurrentRound.GetLeader", "error", err.Error()) sr.SetRoundCanceled(true) @@ -167,7 +167,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "proposer") } - log.Debug("step 0: preparing the round", + sr.Log.Debug("step 0: preparing the round", "leader", 
core.GetTrimmedPk(hex.EncodeToString([]byte(leader))), "messsage", msg) sr.sentSignatureTracker.StartRound() @@ -175,13 +175,13 @@ func (sr *subroundStartRound) initCurrentRound() bool { pubKeys := sr.ConsensusGroup() numMultiKeysInConsensusGroup := sr.computeNumManagedKeysInConsensusGroup(pubKeys) if numMultiKeysInConsensusGroup > 0 { - log.Debug("in consensus group with multi keys identities", "num", numMultiKeysInConsensusGroup) + sr.Log.Debug("in consensus group with multi keys identities", "num", numMultiKeysInConsensusGroup) } sr.indexRoundIfNeeded(pubKeys) if !sr.IsSelfInConsensusGroup() { - log.Debug("not in consensus group") + sr.Log.Debug("not in consensus group") sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "not in consensus group") } else { if !sr.IsSelfLeader() { @@ -192,7 +192,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { err = sr.SigningHandler().Reset(pubKeys) if err != nil { - log.Debug("initCurrentRound.Reset", "error", err.Error()) + sr.Log.Debug("initCurrentRound.Reset", "error", err.Error()) sr.SetRoundCanceled(true) @@ -202,7 +202,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { - log.Debug("canceled round, time is out", + sr.Log.Debug("canceled round, time is out", "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), "subround", sr.Name()) @@ -225,7 +225,7 @@ func (sr *subroundStartRound) computeNumManagedKeysInConsensusGroup(pubKeys []st pkBytes := []byte(pk) if sr.IsKeyManagedBySelf(pkBytes) { numMultiKeysInConsensusGroup++ - log.Trace("in consensus group with multi key", + sr.Log.Trace("in consensus group with multi key", "pk", core.GetTrimmedPk(hex.EncodeToString(pkBytes))) } sr.IncrementRoundsWithoutReceivedMessages(pkBytes) @@ -251,14 +251,14 @@ func (sr *subroundStartRound) indexRoundIfNeeded(pubKeys []string) { shardId := sr.ShardCoordinator().SelfId() nodesCoordinatorShardID, err := sr.NodesCoordinator().ShardIdForEpoch(epoch) if err != nil { - log.Debug("initCurrentRound.ShardIdForEpoch", + sr.Log.Debug("initCurrentRound.ShardIdForEpoch", "epoch", epoch, "error", err.Error()) return } if shardId != nodesCoordinatorShardID { - log.Debug("initCurrentRound.ShardIdForEpoch", + sr.Log.Debug("initCurrentRound.ShardIdForEpoch", "epoch", epoch, "shardCoordinator.ShardID", shardId, "nodesCoordinator.ShardID", nodesCoordinatorShardID) @@ -267,7 +267,7 @@ func (sr *subroundStartRound) indexRoundIfNeeded(pubKeys []string) { signersIndexes, err := sr.NodesCoordinator().GetValidatorsIndexes(pubKeys, epoch) if err != nil { - log.Error(err.Error()) + sr.Log.Error(err.Error()) return } @@ -299,7 +299,7 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error randomSeed := currentHeader.GetRandSeed() - log.Debug("random source for the next consensus group", + sr.Log.Debug("random source for the next consensus group", "rand", randomSeed) shardId := sr.ShardCoordinator().SelfId() @@ -315,11 +315,11 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error return err } - log.Trace("consensus group is formed by next validators:", + sr.Log.Trace("consensus group is formed by next validators:", "round", roundIndex) for i := 0; i < len(nextConsensusGroup); i++ { - log.Trace(core.GetTrimmedPk(hex.EncodeToString([]byte(nextConsensusGroup[i])))) + 
sr.Log.Trace(core.GetTrimmedPk(hex.EncodeToString([]byte(nextConsensusGroup[i])))) } sr.SetConsensusGroup(nextConsensusGroup) @@ -334,12 +334,12 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error // EpochStartPrepare wis called when an epoch start event is observed, but not yet confirmed/committed. // Some components may need to do initialisation on this event func (sr *subroundStartRound) EpochStartPrepare(metaHdr data.HeaderHandler, _ data.BodyHandler) { - log.Trace(fmt.Sprintf("epoch %d start prepare in consensus", metaHdr.GetEpoch())) + sr.Log.Trace(fmt.Sprintf("epoch %d start prepare in consensus", metaHdr.GetEpoch())) } // EpochStartAction is called upon a start of epoch event. func (sr *subroundStartRound) EpochStartAction(hdr data.HeaderHandler) { - log.Trace(fmt.Sprintf("epoch %d start action in consensus", hdr.GetEpoch())) + sr.Log.Trace(fmt.Sprintf("epoch %d start action in consensus", hdr.GetEpoch())) sr.changeEpoch(hdr.GetEpoch()) } diff --git a/consensus/spos/subround.go b/consensus/spos/subround.go index 00b2c55fe6c..61460515621 100644 --- a/consensus/spos/subround.go +++ b/consensus/spos/subround.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-go/consensus" ) @@ -41,6 +42,8 @@ type Subround struct { Job func(ctx context.Context) bool // method does the Subround Job and send the result to the peers Check func() bool // method checks if the consensus of the Subround is done Extend func(subroundId int) // method is called when round time is out + + Log logger.Logger } // NewSubround creates a new SubroundId object @@ -58,6 +61,7 @@ func NewSubround( chainID []byte, currentPid core.PeerID, appStatusHandler core.AppStatusHandler, + logger logger.Logger, ) (*Subround, error) { err := checkNewSubroundParams( consensusState, @@ -88,6 +92,7 @@ func NewSubround( Extend: nil, appStatusHandler: appStatusHandler, currentPid: currentPid, + Log: logger, } return &sr, nil diff --git a/dataRetriever/dataPool/proofsCache/proofsPool.go b/dataRetriever/dataPool/proofsCache/proofsPool.go index a412794a6db..82ae29ae31a 100644 --- a/dataRetriever/dataPool/proofsCache/proofsPool.go +++ b/dataRetriever/dataPool/proofsCache/proofsPool.go @@ -110,6 +110,8 @@ func (pp *proofsPool) GetProof( return nil, fmt.Errorf("nil header hash") } + // fmt.Println(string(debug.Stack())) + pp.mutCache.RLock() defer pp.mutCache.RUnlock() diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index 877c57a31da..ebc7203d1bc 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ b/dataRetriever/resolvers/headerResolver.go @@ -11,7 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-logger-go" + logger "github.com/multiversx/mx-chain-logger-go" ) var log = logger.GetOrCreate("dataRetriever/resolvers") @@ -227,6 +227,8 @@ func (hdrRes *HeaderResolver) resolveHeaderFromEpoch(key []byte) ([]byte, error) return nil, err } if isUnknownEpoch { + log.Error("HeaderResolver: isUnknownEpoch") + hdrRes.mutEpochHandler.RLock() metaEpoch := hdrRes.epochHandler.MetaEpoch() hdrRes.mutEpochHandler.RUnlock() diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index d1482498819..1dca6c61bcf 100644 --- 
a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -1,6 +1,7 @@ package consensus import ( + "encoding/hex" "fmt" "time" @@ -271,7 +272,12 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } + id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey())[0:8] + + log := logger.GetOrCreate(fmt.Sprintf("consensus/%s", id)) + subroundsHandlerArgs := &proxy.SubroundsHandlerArgs{ + Logger: log, Chronology: cc.chronology, ConsensusCoreHandler: consensusDataContainer, ConsensusState: consensusState, @@ -343,7 +349,12 @@ func (ccf *consensusComponentsFactory) createChronology() (consensus.ChronologyH wd = &watchdog.DisabledWatchdog{} } + id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey())[0:8] + + logger := logger.GetOrCreate(fmt.Sprintf("cns/chr/%s", id)) + chronologyArg := chronology.ArgChronology{ + Logger: logger, GenesisTime: ccf.coreComponents.GenesisTime(), RoundHandler: ccf.processComponents.RoundHandler(), SyncTimer: ccf.coreComponents.SyncTimer(), @@ -471,7 +482,12 @@ func (ccf *consensusComponentsFactory) createShardBootstrapper() (process.Bootst return nil, err } + id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey())[0:8] + + logger := logger.GetOrCreate(fmt.Sprintf("process/sync/%s", id)) + argsBaseBootstrapper := sync.ArgBaseBootstrapper{ + Logger: logger, PoolsHolder: ccf.dataComponents.Datapool(), Store: ccf.dataComponents.StorageService(), ChainHandler: ccf.dataComponents.Blockchain(), @@ -602,7 +618,12 @@ func (ccf *consensusComponentsFactory) createMetaChainBootstrapper() (process.Bo return nil, err } + id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey())[0:8] + + logger := logger.GetOrCreate(fmt.Sprintf("process/sync/%s", id)) + argsBaseBootstrapper := sync.ArgBaseBootstrapper{ + Logger: logger, PoolsHolder: ccf.dataComponents.Datapool(), Store: ccf.dataComponents.StorageService(), ChainHandler: ccf.dataComponents.Blockchain(), diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index afe5757ecd7..3ce8f684116 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -274,7 +274,9 @@ func (ncf *networkComponentsFactory) createNetworkHolder( } func (ncf *networkComponentsFactory) createMainNetworkHolder(peersRatingHandler p2p.PeersRatingHandler) (networkComponentsHolder, error) { - loggerInstance := logger.GetOrCreate("main/p2p") + id := ncf.cryptoComponents.PublicKeyString()[0:8] + loggerInstance := logger.GetOrCreate(fmt.Sprintf("main/p2p/%s", id)) + return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance, peersRatingHandler, p2p.MainNetwork) } diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index dd5075d5dfd..82d4c73f959 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -2,6 +2,7 @@ package processing import ( "context" + "encoding/hex" "errors" "fmt" "math/big" @@ -16,6 +17,7 @@ import ( dataBlock "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/receipt" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" vmcommonBuiltInFunctions "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" @@ -1780,12 +1782,16 @@ func (pcf 
*processComponentsFactory) newForkDetector( headerBlackList process.TimeCacher, blockTracker process.BlockTracker, ) (process.ForkDetector, error) { + id := hex.EncodeToString(pcf.nodesCoordinator.GetOwnPublicKey())[0:8] + + logger := logger.GetOrCreate(fmt.Sprintf("p/sync/%s", id)) + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return sync.NewShardForkDetector(pcf.coreData.RoundHandler(), headerBlackList, blockTracker, pcf.coreData.GenesisNodesSetup().GetStartTime()) + return sync.NewShardForkDetector(logger, pcf.coreData.RoundHandler(), headerBlackList, blockTracker, pcf.coreData.GenesisNodesSetup().GetStartTime()) } if shardCoordinator.SelfId() == core.MetachainShardId { - return sync.NewMetaForkDetector(pcf.coreData.RoundHandler(), headerBlackList, blockTracker, pcf.coreData.GenesisNodesSetup().GetStartTime()) + return sync.NewMetaForkDetector(logger, pcf.coreData.RoundHandler(), headerBlackList, blockTracker, pcf.coreData.GenesisNodesSetup().GetStartTime()) } return nil, errors.New("could not create fork detector") diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 0b15e3e59ca..b8abe1f1ff8 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -210,7 +210,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui NodeShardId: shardID, TxSignPrivKeyShardId: shardID, }) - messenger := integrationTests.CreateMessengerWithNoDiscovery() + messenger := integrationTests.CreateMessengerWithNoDiscovery("") time.Sleep(integrationTests.P2pBootstrapDelay) nodeToJoinLate.MainMessenger = messenger diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index 0fc41378520..dfd15220472 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -1,6 +1,7 @@ package integrationTests import ( + "encoding/hex" "fmt" "math/big" "time" @@ -56,6 +57,7 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" + logger "github.com/multiversx/mx-chain-logger-go" ) const ( @@ -193,7 +195,10 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { pkBytes, _ := tcn.NodeKeys.Pk.ToByteArray() tcn.initNodesCoordinator(args.ConsensusSize, testHasher, epochStartRegistrationHandler, args.EligibleMap, args.WaitingMap, pkBytes, consensusCache) - tcn.MainMessenger = CreateMessengerWithNoDiscovery() + + logID := hex.EncodeToString(tcn.NodesCoordinator.GetOwnPublicKey())[0:8] + + tcn.MainMessenger = CreateMessengerWithNoDiscovery(logID) tcn.FullArchiveMessenger = &p2pmocks.MessengerStub{} tcn.initBlockChain(testHasher) tcn.initBlockProcessor(tcn.ShardCoordinator.SelfId()) @@ -226,7 +231,12 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { } epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsNewMetaEpochStart) + id := hex.EncodeToString(tcn.NodesCoordinator.GetOwnPublicKey())[0:8] + + log := logger.GetOrCreate(fmt.Sprintf("p/sync/%s", id)) + forkDetector, _ := syncFork.NewShardForkDetector( + log, roundHandler, cache.NewTimeCache(time.Second), &mock.BlockTrackerStub{}, diff --git 
a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index caea2235767..0897cc8ab54 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -289,7 +289,7 @@ func NewTestHeartbeatNodeWithCoordinator( shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - messenger := CreateMessengerFromConfig(p2pConfig) + messenger := CreateMessengerFromConfig(p2pConfig, "") pidPk, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pkShardId, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pidShardId, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index c74566ef5ca..88a557a0378 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -193,7 +193,7 @@ func CreateMessengerWithKadDht(initialAddr string) p2p.Messenger { } // CreateMessengerFromConfig creates a new libp2p messenger with provided configuration -func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { +func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig, logID string) p2p.Messenger { arg := p2pFactory.ArgsNetworkMessenger{ Marshaller: TestMarshalizer, P2pConfig: p2pConfig, @@ -204,7 +204,7 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { P2pPrivateKey: mock.NewPrivateKeyMock(), P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, - Logger: logger.GetOrCreate("tests/p2p"), + Logger: logger.GetOrCreate(fmt.Sprintf("tests/p2p/%s", logID)), } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -258,10 +258,10 @@ func CreateP2PConfigWithNoDiscovery() p2pConfig.P2PConfig { } // CreateMessengerWithNoDiscovery creates a new libp2p messenger with no peer discovery -func CreateMessengerWithNoDiscovery() p2p.Messenger { +func CreateMessengerWithNoDiscovery(logID string) p2p.Messenger { p2pCfg := CreateP2PConfigWithNoDiscovery() - return CreateMessengerFromConfig(p2pCfg) + return CreateMessengerFromConfig(p2pCfg, logID) } // CreateMessengerWithNoDiscoveryAndPeersRatingHandler creates a new libp2p messenger with no peer discovery @@ -345,7 +345,7 @@ func createMessengersWithNoDiscovery(numPeers int) []p2p.Messenger { peers := make([]p2p.Messenger, numPeers) for i := 0; i < numPeers; i++ { - peers[i] = CreateMessengerWithNoDiscovery() + peers[i] = CreateMessengerWithNoDiscovery("") } return peers diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 74f14e6dd21..222b439192b 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -31,6 +31,7 @@ import ( ed25519SingleSig "github.com/multiversx/mx-chain-crypto-go/signing/ed25519/singlesig" "github.com/multiversx/mx-chain-crypto-go/signing/mcl" mclsig "github.com/multiversx/mx-chain-crypto-go/signing/mcl/singlesig" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/multiversx/mx-chain-vm-common-go/parsers" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" @@ -2173,10 +2174,14 @@ func (tpn *TestProcessorNode) addMockVm(blockchainHook vmcommon.BlockchainHook) func (tpn *TestProcessorNode) initBlockProcessor() { var err error + id := hex.EncodeToString(tpn.NodesCoordinator.GetOwnPublicKey())[0:8] + + log := 
logger.GetOrCreate(fmt.Sprintf("p/sync/%s", id)) + if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { - tpn.ForkDetector, _ = processSync.NewShardForkDetector(tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, tpn.NodesSetup.GetStartTime()) + tpn.ForkDetector, _ = processSync.NewShardForkDetector(log, tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, tpn.NodesSetup.GetStartTime()) } else { - tpn.ForkDetector, _ = processSync.NewMetaForkDetector(tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, tpn.NodesSetup.GetStartTime()) + tpn.ForkDetector, _ = processSync.NewMetaForkDetector(log, tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, tpn.NodesSetup.GetStartTime()) } accountsDb := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 5d041a7bcbe..f63104e98c4 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -108,7 +108,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { - tpn.ForkDetector, _ = sync.NewMetaForkDetector(tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, 0) + tpn.ForkDetector, _ = sync.NewMetaForkDetector(nil, tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, 0) argumentsBase.ForkDetector = tpn.ForkDetector argumentsBase.TxCoordinator = &mock.TransactionCoordinatorMock{} arguments := block.ArgMetaProcessor{ @@ -129,7 +129,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) } else { - tpn.ForkDetector, _ = sync.NewShardForkDetector(tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, 0) + tpn.ForkDetector, _ = sync.NewShardForkDetector(nil, tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, 0) argumentsBase.ForkDetector = tpn.ForkDetector argumentsBase.BlockChainHook = tpn.BlockchainHook argumentsBase.TxCoordinator = tpn.TxCoordinator diff --git a/node/nodeRunner.go b/node/nodeRunner.go index f6fa53a660e..a539426a7b6 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -917,6 +917,7 @@ func (nr *nodeRunner) CreateManagedConsensusComponents( if err != nil { return nil, err } + return managedConsensusComponents, nil } diff --git a/process/block/interceptedBlocks/interceptedBlockHeader.go b/process/block/interceptedBlocks/interceptedBlockHeader.go index 9aac8ceabc7..9191fd8e4b7 100644 --- a/process/block/interceptedBlocks/interceptedBlockHeader.go +++ b/process/block/interceptedBlocks/interceptedBlockHeader.go @@ -3,6 +3,7 @@ package interceptedBlocks import ( "fmt" + "github.com/google/martian/log" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/hashing" @@ -73,11 +74,13 @@ func (inHdr *InterceptedHeader) CheckValidity() error { err := inHdr.integrityVerifier.Verify(inHdr.hdr) if err != nil { + log.Error("integrityVerifier.Verify", "err", err) return err } err = inHdr.integrity() if err != nil { + log.Error("integrity", "err", err) return err } diff --git a/process/block/interceptedBlocks/interceptedMetaBlockHeader.go b/process/block/interceptedBlocks/interceptedMetaBlockHeader.go index d57732f56f1..5c9df9681b4 100644 --- a/process/block/interceptedBlocks/interceptedMetaBlockHeader.go +++ b/process/block/interceptedBlocks/interceptedMetaBlockHeader.go @@ -92,6 +92,7 @@ func (imh *InterceptedMetaHeader) 
CheckValidity() error {
 	err := imh.integrity()
 	if err != nil {
+		log.Error(err.Error())
 		return err
 	}
 
@@ -113,16 +114,19 @@ func (imh *InterceptedMetaHeader) CheckValidity() error {
 
 	err = imh.validityAttester.CheckBlockAgainstRoundHandler(imh.HeaderHandler())
 	if err != nil {
+		log.Error(err.Error())
 		return err
 	}
 
 	err = imh.sigVerifier.VerifyRandSeedAndLeaderSignature(imh.hdr)
 	if err != nil {
+		log.Error(err.Error())
 		return err
 	}
 
 	err = imh.sigVerifier.VerifySignature(imh.hdr)
 	if err != nil {
+		log.Error(err.Error())
 		return err
 	}
 
diff --git a/process/interceptors/interceptedDataVerifier.go b/process/interceptors/interceptedDataVerifier.go
index 24a851b3a83..828963b7981 100644
--- a/process/interceptors/interceptedDataVerifier.go
+++ b/process/interceptors/interceptedDataVerifier.go
@@ -51,6 +51,7 @@ func (idv *interceptedDataVerifier) Verify(interceptedData process.InterceptedDa
 		return nil
 	}
 
+	log.Error("interceptedDataVerifier:", "dataHash", interceptedData.Hash())
 	return process.ErrInvalidInterceptedData
 }
 
@@ -58,6 +59,7 @@ func (idv *interceptedDataVerifier) Verify(interceptedData process.InterceptedDa
 	if err != nil {
 		log.Debug("Intercepted data is invalid", "hash", interceptedData.Hash(), "err", err)
 		idv.cache.Put(interceptedData.Hash(), invalidInterceptedData, interceptedDataStatusBytesSize)
+		log.Error("interceptedDataVerifier:", "dataHash", interceptedData.Hash(), "error", err)
 		return process.ErrInvalidInterceptedData
 	}
 
diff --git a/process/interceptors/processor/equivalentProofsInterceptorProcessor.go b/process/interceptors/processor/equivalentProofsInterceptorProcessor.go
index ef8beff12af..2b1793702e8 100644
--- a/process/interceptors/processor/equivalentProofsInterceptorProcessor.go
+++ b/process/interceptors/processor/equivalentProofsInterceptorProcessor.go
@@ -56,7 +56,13 @@ func (epip *equivalentProofsInterceptorProcessor) Save(data process.InterceptedD
 		return process.ErrWrongTypeAssertion
 	}
 
-	return epip.equivalentProofsPool.AddProof(interceptedProof.GetProof())
+	// TODO: check if we should exclude here already existing proof case
+	err := epip.equivalentProofsPool.AddProof(interceptedProof.GetProof())
+	if err != nil {
+		log.Error("equivalentProofsInterceptorProcessor: failed to add proof", "error", err)
+	}
+
+	return nil
 }
 
 // RegisterHandler registers a callback function to be notified of incoming equivalent proofs
diff --git a/process/sync/argBootstrapper.go b/process/sync/argBootstrapper.go
index 587ecedd258..3e0fa743218 100644
--- a/process/sync/argBootstrapper.go
+++ b/process/sync/argBootstrapper.go
@@ -16,11 +16,13 @@ import (
 	"github.com/multiversx/mx-chain-go/process"
 	"github.com/multiversx/mx-chain-go/sharding"
 	"github.com/multiversx/mx-chain-go/state"
+	logger "github.com/multiversx/mx-chain-logger-go"
 )
 
 // ArgBaseBootstrapper holds all dependencies required by the bootstrap data factory in order to create
 // new instances
 type ArgBaseBootstrapper struct {
+	Logger           logger.Logger
 	HistoryRepo      dblookupext.HistoryRepository
 	PoolsHolder      dataRetriever.PoolsHolder
 	Store            dataRetriever.StorageService
diff --git a/process/sync/baseForkDetector.go b/process/sync/baseForkDetector.go
index db5a601524a..97b6e27bd1e 100644
--- a/process/sync/baseForkDetector.go
+++ b/process/sync/baseForkDetector.go
@@ -9,6 +9,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/data"
 	"github.com/multiversx/mx-chain-go/consensus"
 	"github.com/multiversx/mx-chain-go/process"
+	logger "github.com/multiversx/mx-chain-logger-go"
 )
 
 type headerInfo struct {
@@ -36,6 +37,7 @@ type forkInfo struct {
 
 //
baseForkDetector defines a struct with necessary data needed for fork detection type baseForkDetector struct { + log logger.Logger roundHandler consensus.RoundHandler headers map[uint64][]*headerInfo @@ -212,7 +214,7 @@ func (bfd *baseForkDetector) computeProbableHighestNonce() uint64 { func (bfd *baseForkDetector) RemoveHeader(nonce uint64, hash []byte) { finalCheckpointNonce := bfd.finalCheckpoint().nonce if nonce <= finalCheckpointNonce { - log.Debug("baseForkDetector.RemoveHeader: given nonce is lower or equal than final checkpoint", + bfd.log.Debug("baseForkDetector.RemoveHeader: given nonce is lower or equal than final checkpoint", "nonce", nonce, "final checkpoint nonce", finalCheckpointNonce) return @@ -246,7 +248,7 @@ func (bfd *baseForkDetector) RemoveHeader(nonce uint64, hash []byte) { probableHighestNonce := bfd.computeProbableHighestNonce() bfd.setProbableHighestNonce(probableHighestNonce) - log.Debug("forkDetector.RemoveHeader", + bfd.log.Debug("forkDetector.RemoveHeader", "nonce", nonce, "hash", hash, "probable highest nonce", probableHighestNonce, @@ -268,7 +270,7 @@ func (bfd *baseForkDetector) removeCheckpointWithNonce(nonce uint64) { bfd.fork.checkpoint = preservedCheckpoint bfd.mutFork.Unlock() - log.Debug("forkDetector.removeCheckpointWithNonce", + bfd.log.Debug("forkDetector.removeCheckpointWithNonce", "nonce", nonce, "last checkpoint nonce", bfd.lastCheckpoint().nonce) } @@ -316,7 +318,7 @@ func (bfd *baseForkDetector) ResetFork() { bfd.ResetProbableHighestNonce() bfd.setLastRoundWithForcedFork(bfd.roundHandler.Index()) - log.Debug("forkDetector.ResetFork", + bfd.log.Debug("forkDetector.ResetFork", "last round with forced fork", bfd.lastRoundWithForcedFork()) } @@ -326,7 +328,7 @@ func (bfd *baseForkDetector) ResetProbableHighestNonce() { probableHighestNonce := bfd.computeProbableHighestNonce() bfd.setProbableHighestNonce(probableHighestNonce) - log.Debug("forkDetector.ResetProbableHighestNonce", + bfd.log.Debug("forkDetector.ResetProbableHighestNonce", "probable highest nonce", bfd.probableHighestNonce()) } @@ -408,7 +410,7 @@ func (bfd *baseForkDetector) setHighestNonceReceived(nonce uint64) { bfd.fork.highestNonceReceived = nonce bfd.mutFork.Unlock() - log.Debug("forkDetector.setHighestNonceReceived", + bfd.log.Debug("forkDetector.setHighestNonceReceived", "highest nonce received", nonce) } @@ -555,12 +557,12 @@ func (bfd *baseForkDetector) shouldSignalFork( if lastForkRound != process.MinForkRound { if headerInfo.epoch > lastForkEpoch { - log.Trace("shouldSignalFork epoch change false") + bfd.log.Trace("shouldSignalFork epoch change false") return false } if headerInfo.epoch < lastForkEpoch { - log.Trace("shouldSignalFork epoch change true") + bfd.log.Trace("shouldSignalFork epoch change true") return true } } @@ -719,7 +721,7 @@ func (bfd *baseForkDetector) processReceivedBlock( probableHighestNonce := bfd.computeProbableHighestNonce() bfd.setProbableHighestNonce(probableHighestNonce) - log.Debug("forkDetector.AddHeader", + bfd.log.Debug("forkDetector.AddHeader", "round", header.GetRound(), "nonce", header.GetNonce(), "hash", headerHash, diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index cf13638912f..19d897cbf90 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -33,8 +33,6 @@ import ( logger "github.com/multiversx/mx-chain-logger-go" ) -var log = logger.GetOrCreate("process/sync") - var _ closing.Closer = (*baseBootstrap)(nil) // sleepTime defines the time in milliseconds between each iteration made in syncBlocks method 
@@ -56,6 +54,7 @@ type notarizedInfo struct { } type baseBootstrap struct { + log logger.Logger historyRepo dblookupext.HistoryRepository headers dataRetriever.HeadersPool proofs dataRetriever.ProofsPool @@ -166,7 +165,7 @@ func (boot *baseBootstrap) processReceivedHeader(headerHandler data.HeaderHandle return } - log.Trace("received header from network", + boot.log.Trace("received header from network", "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), "nonce", headerHandler.GetNonce(), @@ -175,7 +174,7 @@ func (boot *baseBootstrap) processReceivedHeader(headerHandler data.HeaderHandle err := boot.forkDetector.AddHeader(headerHandler, headerHash, process.BHReceived, nil, nil) if err != nil { - log.Debug("forkDetector.AddHeader", "error", err.Error()) + boot.log.Debug("forkDetector.AddHeader", "error", err.Error()) } go boot.requestMiniBlocks(headerHandler) @@ -188,7 +187,7 @@ func (boot *baseBootstrap) confirmHeaderReceivedByNonce(headerHandler data.Heade boot.mutRcvHdrNonce.Lock() n := boot.requestedHeaderNonce() if n != nil && *n == headerHandler.GetNonce() { - log.Debug("received requested header from network", + boot.log.Debug("received requested header from network", "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), "nonce", headerHandler.GetNonce(), @@ -207,7 +206,7 @@ func (boot *baseBootstrap) confirmHeaderReceivedByHash(headerHandler data.Header boot.mutRcvHdrHash.Lock() hash := boot.requestedHeaderHash() if hash != nil && bytes.Equal(hash, hdrHash) { - log.Debug("received requested header from network", + boot.log.Debug("received requested header from network", "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), "nonce", headerHandler.GetNonce(), @@ -302,13 +301,13 @@ func (boot *baseBootstrap) computeNodeState() { currentHeader := boot.chainHandler.GetCurrentBlockHeader() if check.IfNil(currentHeader) { boot.hasLastBlock = boot.forkDetector.ProbableHighestNonce() == genesisNonce - log.Debug("computeNodeState", + boot.log.Debug("computeNodeState", "probableHighestNonce", boot.forkDetector.ProbableHighestNonce(), "currentBlockNonce", nil, "boot.hasLastBlock", boot.hasLastBlock) } else { boot.hasLastBlock = boot.forkDetector.ProbableHighestNonce() <= boot.chainHandler.GetCurrentBlockHeader().GetNonce() - log.Debug("computeNodeState", + boot.log.Debug("computeNodeState", "probableHighestNonce", boot.forkDetector.ProbableHighestNonce(), "currentBlockNonce", boot.chainHandler.GetCurrentBlockHeader().GetNonce(), "boot.hasLastBlock", boot.hasLastBlock) @@ -317,7 +316,7 @@ func (boot *baseBootstrap) computeNodeState() { isNodeConnectedToTheNetwork := boot.networkWatcher.IsConnectedToTheNetwork() isNodeSynchronized := !boot.forkInfo.IsDetected && boot.hasLastBlock && isNodeConnectedToTheNetwork if isNodeSynchronized != boot.isNodeSynchronized { - log.Debug("node has changed its synchronized state", + boot.log.Debug("node has changed its synchronized state", "state", isNodeSynchronized, ) } @@ -333,7 +332,7 @@ func (boot *baseBootstrap) computeNodeState() { } boot.statusHandler.SetUInt64Value(common.MetricIsSyncing, result) - log.Debug("computeNodeState", + boot.log.Debug("computeNodeState", "isNodeStateCalculated", boot.isNodeStateCalculated, "isNodeSynchronized", boot.isNodeSynchronized) @@ -379,7 +378,7 @@ func (boot *baseBootstrap) requestHeadersIfSyncIsStuck() { return } - log.Debug("requestHeadersIfSyncIsStuck", + boot.log.Debug("requestHeadersIfSyncIsStuck", "from nonce", fromNonce, "to nonce", toNonce, "probable 
highest nonce", boot.forkDetector.ProbableHighestNonce()) @@ -390,11 +389,11 @@ func (boot *baseBootstrap) requestHeadersIfSyncIsStuck() { func (boot *baseBootstrap) removeHeaderFromPools(header data.HeaderHandler) []byte { hash, err := core.CalculateHash(boot.marshalizer, boot.hasher, header) if err != nil { - log.Debug("CalculateHash", "error", err.Error()) + boot.log.Debug("CalculateHash", "error", err.Error()) return nil } - log.Debug("removeHeaderFromPools", + boot.log.Debug("removeHeaderFromPools", "shard", header.GetShardID(), "epoch", header.GetEpoch(), "round", header.GetRound(), @@ -408,7 +407,7 @@ func (boot *baseBootstrap) removeHeaderFromPools(header data.HeaderHandler) []by func (boot *baseBootstrap) removeHeadersHigherThanNonceFromPool(nonce uint64) { shardID := boot.shardCoordinator.SelfId() - log.Debug("removeHeadersHigherThanNonceFromPool", + boot.log.Debug("removeHeadersHigherThanNonceFromPool", "shard", shardID, "nonce", nonce) @@ -508,7 +507,7 @@ func (boot *baseBootstrap) requestHeadersFromNonceIfMissing(fromNonce uint64) { return } - log.Debug("requestHeadersFromNonceIfMissing", + boot.log.Debug("requestHeadersFromNonceIfMissing", "from nonce", fromNonce, "to nonce", toNonce, "probable highest nonce", boot.forkDetector.ProbableHighestNonce()) @@ -521,7 +520,7 @@ func (boot *baseBootstrap) syncBlocks(ctx context.Context) { for { select { case <-ctx.Done(): - log.Debug("bootstrap's go routine is stopping...") + boot.log.Debug("bootstrap's go routine is stopping...") return case <-time.After(sleepTime): } @@ -536,11 +535,11 @@ func (boot *baseBootstrap) syncBlocks(ctx context.Context) { err := boot.syncStarter.SyncBlock(ctx) if err != nil { if common.IsContextDone(ctx) { - log.Debug("SyncBlock finished, bootstrap's go routine is stopping...") + boot.log.Debug("SyncBlock finished, bootstrap's go routine is stopping...") return } - log.Debug("SyncBlock", "error", err.Error()) + boot.log.Debug("SyncBlock", "error", err.Error()) } } } @@ -563,7 +562,7 @@ func (boot *baseBootstrap) doJobOnSyncBlockFail(bodyHandler data.BodyHandler, he errNotCritical := boot.rollBack(false) if errNotCritical != nil { - log.Debug("rollBack", "error", errNotCritical.Error()) + boot.log.Debug("rollBack", "error", errNotCritical.Error()) } if isSyncWithErrorsLimitReachedInProperRound { @@ -605,18 +604,18 @@ func (boot *baseBootstrap) syncBlock() error { boot.statusHandler.Increment(common.MetricNumTimesInForkChoice) if boot.isForcedRollBackOneBlock() { - log.Debug("roll back one block has been forced") + boot.log.Debug("roll back one block has been forced") boot.rollBackOneBlockForced() return nil } if boot.isForcedRollBackToNonce() { - log.Debug("roll back to nonce has been forced", "nonce", boot.forkInfo.Nonce) + boot.log.Debug("roll back to nonce has been forced", "nonce", boot.forkInfo.Nonce) boot.rollBackToNonceForced() return nil } - log.Debug("fork detected", + boot.log.Debug("fork detected", "nonce", boot.forkInfo.Nonce, "hash", boot.forkInfo.Hash, ) @@ -662,7 +661,7 @@ func (boot *baseBootstrap) syncBlock() error { startProcessBlockTime := time.Now() err = boot.blockProcessor.ProcessBlock(header, body, haveTime) elapsedTime := time.Since(startProcessBlockTime) - log.Debug("elapsed time to process block", + boot.log.Debug("elapsed time to process block", "time [s]", elapsedTime, ) if err != nil { @@ -672,7 +671,7 @@ func (boot *baseBootstrap) syncBlock() error { startProcessScheduledBlockTime := time.Now() err = boot.blockProcessor.ProcessScheduledBlock(header, body, haveTime) elapsedTime 
= time.Since(startProcessScheduledBlockTime) - log.Debug("elapsed time to process scheduled block", + boot.log.Debug("elapsed time to process scheduled block", "time [s]", elapsedTime, ) if err != nil { @@ -683,9 +682,9 @@ func (boot *baseBootstrap) syncBlock() error { err = boot.blockProcessor.CommitBlock(header, body) elapsedTime = time.Since(startCommitBlockTime) if elapsedTime >= common.CommitMaxTime { - log.Warn("syncBlock.CommitBlock", "elapsed time", elapsedTime) + boot.log.Warn("syncBlock.CommitBlock", "elapsed time", elapsedTime) } else { - log.Debug("elapsed time to commit block", + boot.log.Debug("elapsed time to commit block", "time [s]", elapsedTime, ) } @@ -693,7 +692,7 @@ func (boot *baseBootstrap) syncBlock() error { return err } - log.Debug("block has been synced successfully", + boot.log.Debug("block has been synced successfully", "nonce", header.GetNonce(), ) @@ -718,7 +717,7 @@ func (boot *baseBootstrap) handleEquivalentProof( if !boot.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, prevHeader.GetEpoch()) { // no need to check proof for first block after activation - log.Info("handleEquivalentProof: no need to check equivalent proof for first activation block") + boot.log.Info("handleEquivalentProof: no need to check equivalent proof for first activation block") return nil } @@ -728,7 +727,7 @@ func (boot *baseBootstrap) handleEquivalentProof( return nil } - log.Trace("baseBootstrap.handleEquivalentProof: did not have proof for header, will try again", "headerHash", headerHash) + boot.log.Trace("baseBootstrap.handleEquivalentProof: did not have proof for header, will try again", "headerHash", headerHash) _, _, err = boot.blockBootstrapper.getHeaderWithNonceRequestingIfMissing(header.GetNonce() + 1) if err != nil { @@ -746,12 +745,12 @@ func (boot *baseBootstrap) handleEquivalentProof( func (boot *baseBootstrap) handleTrieSyncError(err error, ctx context.Context) { shouldOutputLog := err != nil && !common.IsContextDone(ctx) if shouldOutputLog { - log.Debug("SyncBlock syncTrie", "error", err) + boot.log.Debug("SyncBlock syncTrie", "error", err) } } func (boot *baseBootstrap) syncUserAccountsState(key []byte) error { - log.Warn("base sync: started syncUserAccountsState") + boot.log.Warn("base sync: started syncUserAccountsState") return boot.accountsDBSyncer.SyncAccounts(key, storageMarker.NewDisabledStorageMarker()) } @@ -777,13 +776,13 @@ func (boot *baseBootstrap) cleanProofsBehindFinal(header data.HeaderHandler) { err := boot.proofs.CleanupProofsBehindNonce(header.GetShardID(), finalNonce) if err != nil { - log.Warn("failed to cleanup notarized proofs behind nonce", + boot.log.Warn("failed to cleanup notarized proofs behind nonce", "nonce", finalNonce, "shardID", header.GetShardID(), "error", err) } - log.Trace("baseBootstrap.cleanProofsBehindFinal cleanup successfully", "finalNonce", finalNonce) + boot.log.Trace("baseBootstrap.cleanProofsBehindFinal cleanup successfully", "finalNonce", finalNonce) } // rollBack decides if rollBackOneBlock must be called @@ -814,7 +813,7 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { } }() - log.Debug("starting roll back") + boot.log.Debug("starting roll back") for { currHeaderHash = boot.chainHandler.GetCurrentBlockHeaderHash() currHeader, err = boot.blockBootstrapper.getCurrHeader() @@ -838,11 +837,11 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { return err } - log.Debug("roll back to block", + boot.log.Debug("roll back to block", "nonce", 
currHeader.GetNonce()-1, "hash", currHeader.GetPrevHash(), ) - log.Debug("highest final block nonce", + boot.log.Debug("highest final block nonce", "nonce", boot.forkDetector.GetHighestFinalBlockNonce(), ) @@ -861,7 +860,7 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { err = boot.bootStorer.SaveLastRound(int64(prevHeader.GetRound())) if err != nil { - log.Debug("save last round in storage", + boot.log.Debug("save last round in storage", "error", err.Error(), "round", prevHeader.GetRound(), ) @@ -869,7 +868,7 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { err = boot.historyRepo.RevertBlock(currHeader, currBody) if err != nil { - log.Debug("boot.historyRepo.RevertBlock", + boot.log.Debug("boot.historyRepo.RevertBlock", "error", err.Error(), ) @@ -893,7 +892,7 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { Header: currHeader, }) if err != nil { - log.Warn("baseBootstrap.outportHandler.RevertIndexedBlock cannot revert indexed block", "error", err) + boot.log.Warn("baseBootstrap.outportHandler.RevertIndexedBlock cannot revert indexed block", "error", err) } shouldAddHeaderToBlackList := revertUsingForkNonce && boot.blockBootstrapper.isForkTriggeredByMeta() @@ -909,7 +908,7 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { break } - log.Debug("ending roll back") + boot.log.Debug("ending roll back") return nil } @@ -925,7 +924,7 @@ func (boot *baseBootstrap) shouldAllowRollback(currHeader data.HeaderHandler, cu allowFinalBlockRollBack := (headerWithScheduledMiniBlocks || headerHashDoesNotMatchWithFinalBlockHash) && isFinalBlockRollBack && canRollbackBlock allowRollBack := !isRollBackBehindFinal || allowFinalBlockRollBack - log.Debug("baseBootstrap.shouldAllowRollback", + boot.log.Debug("baseBootstrap.shouldAllowRollback", "isRollBackBehindFinal", isRollBackBehindFinal, "isFinalBlockRollBack", isFinalBlockRollBack, "headerWithScheduledMiniBlocks", headerWithScheduledMiniBlocks, @@ -983,7 +982,7 @@ func (boot *baseBootstrap) rollBackOneBlock( currBlockBody, errNotCritical := boot.blockBootstrapper.getBlockBody(currHeader) if errNotCritical != nil { - log.Debug("rollBackOneBlock getBlockBody error", "error", errNotCritical) + boot.log.Debug("rollBackOneBlock getBlockBody error", "error", errNotCritical) } err = boot.blockProcessor.RestoreBlockIntoPools(currHeader, currBlockBody) @@ -1040,7 +1039,7 @@ func (boot *baseBootstrap) isForcedRollBackToNonce() bool { func (boot *baseBootstrap) rollBackOneBlockForced() { err := boot.rollBack(false) if err != nil { - log.Debug("rollBackOneBlockForced", "error", err.Error()) + boot.log.Debug("rollBackOneBlockForced", "error", err.Error()) } boot.forkDetector.ResetFork() @@ -1050,7 +1049,7 @@ func (boot *baseBootstrap) rollBackOneBlockForced() { func (boot *baseBootstrap) rollBackToNonceForced() { err := boot.rollBack(true) if err != nil { - log.Debug("rollBackToNonceForced", "error", err.Error()) + boot.log.Debug("rollBackToNonceForced", "error", err.Error()) } boot.forkDetector.ResetProbableHighestNonce() @@ -1062,14 +1061,14 @@ func (boot *baseBootstrap) restoreState( currHeader data.HeaderHandler, currRootHash []byte, ) { - log.Debug("revert state to header", + boot.log.Debug("revert state to header", "nonce", currHeader.GetNonce(), "hash", currHeaderHash, "current root hash", currRootHash) err := boot.chainHandler.SetCurrentBlockHeaderAndRootHash(currHeader, currRootHash) if err != nil { - log.Debug("SetCurrentBlockHeader", "error", err.Error()) + 
boot.log.Debug("SetCurrentBlockHeader", "error", err.Error()) } boot.chainHandler.SetCurrentBlockHeaderHash(currHeaderHash) @@ -1087,7 +1086,7 @@ func (boot *baseBootstrap) restoreState( err = boot.blockProcessor.RevertStateToBlock(currHeader, boot.scheduledTxsExecutionHandler.GetScheduledRootHash()) if err != nil { - log.Debug("RevertState", "error", err.Error()) + boot.log.Debug("RevertState", "error", err.Error()) } } @@ -1123,7 +1122,7 @@ func (boot *baseBootstrap) receivedMiniblock(hash []byte, _ interface{}) { boot.requestedHashes.SetReceivedHash(hash) if boot.requestedHashes.ReceivedAll() { - log.Debug("received all the requested mini blocks from network") + boot.log.Debug("received all the requested mini blocks from network") boot.setRequestedMiniBlocks(nil) boot.mutRcvMiniBlocks.Unlock() boot.chRcvMiniBlocks <- true @@ -1135,7 +1134,7 @@ func (boot *baseBootstrap) receivedMiniblock(hash []byte, _ interface{}) { // requestMiniBlocksByHashes method requests a block body from network when it is not found in the pool func (boot *baseBootstrap) requestMiniBlocksByHashes(hashes [][]byte) { boot.setRequestedMiniBlocks(hashes) - log.Debug("requesting mini blocks from network", + boot.log.Debug("requesting mini blocks from network", "num miniblocks", len(hashes), ) boot.requestHandler.RequestMiniBlocks(boot.shardCoordinator.SelfId(), hashes) @@ -1319,13 +1318,13 @@ func (boot *baseBootstrap) Close() error { func (boot *baseBootstrap) cleanChannels() { nrReads := core.EmptyChannel(boot.chRcvHdrNonce) - log.Debug("close baseSync: emptied channel", "chRcvHdrNonce nrReads", nrReads) + boot.log.Debug("close baseSync: emptied channel", "chRcvHdrNonce nrReads", nrReads) nrReads = core.EmptyChannel(boot.chRcvHdrHash) - log.Debug("close baseSync: emptied channel", "chRcvHdrHash nrReads", nrReads) + boot.log.Debug("close baseSync: emptied channel", "chRcvHdrHash nrReads", nrReads) nrReads = core.EmptyChannel(boot.chRcvMiniBlocks) - log.Debug("close baseSync: emptied channel", "chRcvMiniBlocks nrReads", nrReads) + boot.log.Debug("close baseSync: emptied channel", "chRcvMiniBlocks nrReads", nrReads) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/sync/metaForkDetector.go b/process/sync/metaForkDetector.go index 178e4e96042..4fe7aa2ce61 100644 --- a/process/sync/metaForkDetector.go +++ b/process/sync/metaForkDetector.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/process" + logger "github.com/multiversx/mx-chain-logger-go" ) var _ process.ForkDetector = (*metaForkDetector)(nil) @@ -19,6 +20,7 @@ type metaForkDetector struct { // NewMetaForkDetector method creates a new metaForkDetector object func NewMetaForkDetector( + log logger.Logger, roundHandler consensus.RoundHandler, blackListHandler process.TimeCacher, blockTracker process.BlockTracker, @@ -40,7 +42,12 @@ func NewMetaForkDetector( return nil, err } + if log == nil { + log = logger.GetOrCreate("process/sync") + } + bfd := &baseForkDetector{ + log: log, roundHandler: roundHandler, blackListHandler: blackListHandler, genesisTime: genesisTime, diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 72fc8a8688b..8f052c4e212 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/trie/storageMarker" + logger 
"github.com/multiversx/mx-chain-logger-go" ) // MetaBootstrap implements the bootstrap mechanism @@ -52,7 +53,14 @@ func NewMetaBootstrap(arguments ArgMetaBootstrapper) (*MetaBootstrap, error) { return nil, err } + var log logger.Logger + log = logger.GetOrCreate("process/sync") + if arguments.Logger != nil { + log = arguments.Logger + } + base := &baseBootstrap{ + log: log, chainHandler: arguments.ChainHandler, blockProcessor: arguments.BlockProcessor, store: arguments.Store, @@ -86,7 +94,7 @@ func NewMetaBootstrap(arguments ArgMetaBootstrapper) (*MetaBootstrap, error) { } if base.isInImportMode { - log.Warn("using always-not-synced status because the node is running in import-db") + base.log.Warn("using always-not-synced status because the node is running in import-db") } boot := MetaBootstrap{ @@ -146,7 +154,7 @@ func (boot *MetaBootstrap) StartSyncingBlocks() error { // when a node starts it first tries to bootstrap from storage, if there already exist a database saved errNotCritical := boot.storageBootstrapper.LoadFromStorage() if errNotCritical != nil { - log.Debug("syncFromStorer", "error", errNotCritical.Error()) + boot.log.Debug("syncFromStorer", "error", errNotCritical.Error()) } else { boot.setLastEpochStartRound() } @@ -213,7 +221,7 @@ func (boot *MetaBootstrap) syncAccountsDBs(key []byte, id string) error { } func (boot *MetaBootstrap) syncValidatorAccountsState(key []byte) error { - log.Warn("base sync: started syncValidatorAccountsState") + boot.log.Warn("base sync: started syncValidatorAccountsState") return boot.validatorStatisticsDBSyncer.SyncAccounts(key, storageMarker.NewDisabledStorageMarker()) } @@ -229,7 +237,7 @@ func (boot *MetaBootstrap) Close() error { // requestHeaderWithNonce method requests a block header from network when it is not found in the pool func (boot *MetaBootstrap) requestHeaderWithNonce(nonce uint64) { boot.setRequestedHeaderNonce(&nonce) - log.Debug("requesting meta header from network", + boot.log.Debug("requesting meta header from network", "nonce", nonce, "probable highest nonce", boot.forkDetector.ProbableHighestNonce(), ) @@ -239,7 +247,7 @@ func (boot *MetaBootstrap) requestHeaderWithNonce(nonce uint64) { // requestHeaderWithHash method requests a block header from network when it is not found in the pool func (boot *MetaBootstrap) requestHeaderWithHash(hash []byte) { boot.setRequestedHeaderHash(hash) - log.Debug("requesting meta header from network", + boot.log.Debug("requesting meta header from network", "hash", hash, "probable highest nonce", boot.forkDetector.ProbableHighestNonce(), ) @@ -370,7 +378,7 @@ func (boot *MetaBootstrap) requestMiniBlocksFromHeaderWithNonceIfMissing(headerH header, ok := headerHandler.(*block.MetaBlock) if !ok { - log.Warn("cannot convert headerHandler in block.MetaBlock") + boot.log.Warn("cannot convert headerHandler in block.MetaBlock") return } @@ -381,7 +389,7 @@ func (boot *MetaBootstrap) requestMiniBlocksFromHeaderWithNonceIfMissing(headerH _, missingMiniBlocksHashes := boot.miniBlocksProvider.GetMiniBlocksFromPool(hashes) if len(missingMiniBlocksHashes) > 0 { - log.Trace("requesting in advance mini blocks", + boot.log.Trace("requesting in advance mini blocks", "num miniblocks", len(missingMiniBlocksHashes), "header nonce", header.Nonce, ) diff --git a/process/sync/shardForkDetector.go b/process/sync/shardForkDetector.go index 52715f36163..3535a9b45f5 100644 --- a/process/sync/shardForkDetector.go +++ b/process/sync/shardForkDetector.go @@ -9,6 +9,7 @@ import ( 
"github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/process" + logger "github.com/multiversx/mx-chain-logger-go" ) var _ process.ForkDetector = (*shardForkDetector)(nil) @@ -20,6 +21,7 @@ type shardForkDetector struct { // NewShardForkDetector method creates a new shardForkDetector object func NewShardForkDetector( + log logger.Logger, roundHandler consensus.RoundHandler, blackListHandler process.TimeCacher, blockTracker process.BlockTracker, @@ -41,7 +43,12 @@ func NewShardForkDetector( return nil, err } + if log == nil { + log = logger.GetOrCreate("process/sync") + } + bfd := &baseForkDetector{ + log: log, roundHandler: roundHandler, blackListHandler: blackListHandler, genesisTime: genesisTime, @@ -143,7 +150,7 @@ func (sfd *shardForkDetector) appendSelfNotarizedHeaders( state: process.BHNotarized, }) if appended { - log.Debug("added self notarized header in fork detector", + sfd.log.Debug("added self notarized header in fork detector", "notarized by shard", shardID, "round", selfNotarizedHeaders[i].GetRound(), "nonce", selfNotarizedHeaders[i].GetNonce(), diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 10a3492d024..f45e58b4ce3 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/storage" + logger "github.com/multiversx/mx-chain-logger-go" ) // ShardBootstrap implements the bootstrap mechanism @@ -39,7 +40,14 @@ func NewShardBootstrap(arguments ArgShardBootstrapper) (*ShardBootstrap, error) return nil, err } + var log logger.Logger + log = logger.GetOrCreate("process/sync") + if arguments.Logger != nil { + log = arguments.Logger + } + base := &baseBootstrap{ + log: log, chainHandler: arguments.ChainHandler, blockProcessor: arguments.BlockProcessor, store: arguments.Store, @@ -74,7 +82,7 @@ func NewShardBootstrap(arguments ArgShardBootstrapper) (*ShardBootstrap, error) } if base.isInImportMode { - log.Warn("using always-not-synced status because the node is running in import-db") + base.log.Warn("using always-not-synced status because the node is running in import-db") } boot := ShardBootstrap{ @@ -131,7 +139,7 @@ func (boot *ShardBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data func (boot *ShardBootstrap) StartSyncingBlocks() error { errNotCritical := boot.storageBootstrapper.LoadFromStorage() if errNotCritical != nil { - log.Debug("boot.syncFromStorer", + boot.log.Debug("boot.syncFromStorer", "error", errNotCritical.Error(), ) } @@ -182,7 +190,7 @@ func (boot *ShardBootstrap) Close() error { // requestHeaderWithNonce method requests a block header from network when it is not found in the pool func (boot *ShardBootstrap) requestHeaderWithNonce(nonce uint64) { boot.setRequestedHeaderNonce(&nonce) - log.Debug("requesting shard header from network", + boot.log.Debug("requesting shard header from network", "nonce", nonce, "probable highest nonce", boot.forkDetector.ProbableHighestNonce(), ) @@ -192,7 +200,7 @@ func (boot *ShardBootstrap) requestHeaderWithNonce(nonce uint64) { // requestHeaderWithHash method requests a block header from network when it is not found in the pool func (boot *ShardBootstrap) requestHeaderWithHash(hash []byte) { boot.setRequestedHeaderHash(hash) - log.Debug("requesting shard header from network", + boot.log.Debug("requesting shard header from network", "hash", hash, "probable 
highest nonce", boot.forkDetector.ProbableHighestNonce(), ) @@ -302,7 +310,7 @@ func (boot *ShardBootstrap) requestMiniBlocksFromHeaderWithNonceIfMissing(header header, ok := headerHandler.(data.ShardHeaderHandler) if !ok { - log.Warn("cannot convert headerHandler in block.Header") + boot.log.Warn("cannot convert headerHandler in block.Header") return } @@ -313,7 +321,7 @@ func (boot *ShardBootstrap) requestMiniBlocksFromHeaderWithNonceIfMissing(header _, missingMiniBlocksHashes := boot.miniBlocksProvider.GetMiniBlocksFromPool(hashes) if len(missingMiniBlocksHashes) > 0 { - log.Trace("requesting in advance mini blocks", + boot.log.Trace("requesting in advance mini blocks", "num miniblocks", len(missingMiniBlocksHashes), "header nonce", header.GetNonce(), ) diff --git a/process/sync/testMetaBootstrap.go b/process/sync/testMetaBootstrap.go index 62530e54a71..c14341f2159 100644 --- a/process/sync/testMetaBootstrap.go +++ b/process/sync/testMetaBootstrap.go @@ -16,7 +16,7 @@ func (tmb *TestMetaBootstrap) RollBack(revertUsingForkNonce bool) error { func (tmb *TestMetaBootstrap) SetProbableHighestNonce(nonce uint64) { forkDetector, ok := tmb.forkDetector.(*metaForkDetector) if !ok { - log.Error("inner forkdetector impl is not of type metaForkDetector") + tmb.log.Error("inner forkdetector impl is not of type metaForkDetector") return } diff --git a/process/sync/testShardBootstrap.go b/process/sync/testShardBootstrap.go index 280a1be9a87..fdbc8648f7e 100644 --- a/process/sync/testShardBootstrap.go +++ b/process/sync/testShardBootstrap.go @@ -16,7 +16,7 @@ func (tsb *TestShardBootstrap) RollBack(revertUsingForkNonce bool) error { func (tsb *TestShardBootstrap) SetProbableHighestNonce(nonce uint64) { forkDetector, ok := tsb.forkDetector.(*shardForkDetector) if !ok { - log.Error("inner forkdetector impl is not of type shardForkDetector") + tsb.log.Error("inner forkdetector impl is not of type shardForkDetector") return } From 44d5ab0c22584083db66e280eb6729bf723e47d6 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 8 Jan 2025 23:02:13 +0200 Subject: [PATCH 02/10] extend custom logging in block tracker --- .../sync/basicSync/basicSync_test.go | 1 + integrationTests/testProcessorNode.go | 8 +++ integrationTests/testSyncNode.go | 18 +++++++ .../interceptedBlockHeader.go | 1 - process/track/argBlockTrack.go | 2 + process/track/baseBlockTrack.go | 54 +++++++++++-------- process/track/blockNotarizer.go | 8 ++- 7 files changed, 66 insertions(+), 26 deletions(-) diff --git a/integrationTests/sync/basicSync/basicSync_test.go b/integrationTests/sync/basicSync/basicSync_test.go index 727f26c57dd..73b8167d64f 100644 --- a/integrationTests/sync/basicSync/basicSync_test.go +++ b/integrationTests/sync/basicSync/basicSync_test.go @@ -204,6 +204,7 @@ func TestSyncWorksInShard_EmptyBlocksNoForks_With_EquivalentProofs(t *testing.T) } _ = logger.SetLogLevel("*:DEBUG,process:TRACE,consensus:TRACE") + logger.ToggleLoggerName(true) // 3 shard nodes and 1 metachain node maxShards := uint32(1) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 222b439192b..fe9d3acc0dc 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -3105,7 +3105,15 @@ func (tpn *TestProcessorNode) initRequestedItemsHandler() { } func (tpn *TestProcessorNode) initBlockTracker() { + id := hex.EncodeToString(tpn.NodesCoordinator.GetOwnPublicKey()) + if len(id) > 8 { + id = id[0:8] + } + + log := logger.GetOrCreate(fmt.Sprintf("p/track/%s", id)) + argBaseTracker := 
track.ArgBaseTracker{ + Logger: log, Hasher: TestHasher, HeaderValidator: tpn.HeaderValidator, Marshalizer: TestMarshalizer, diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index f63104e98c4..e81b1425a4e 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -1,10 +1,12 @@ package integrationTests import ( + "encoding/hex" "fmt" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-go/common/enablers" "github.com/multiversx/mx-chain-go/common/forking" @@ -147,7 +149,15 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { } func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error) { + id := hex.EncodeToString(tpn.NodesCoordinator.GetOwnPublicKey()) + if len(id) > 8 { + id = id[0:8] + } + + logger := logger.GetOrCreate(fmt.Sprintf("p/sync/%s", id)) + argsBaseBootstrapper := sync.ArgBaseBootstrapper{ + Logger: logger, PoolsHolder: tpn.DataPool, Store: tpn.Storage, ChainHandler: tpn.BlockChain, @@ -194,7 +204,15 @@ func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error } func (tpn *TestProcessorNode) createMetaChainBootstrapper() (TestBootstrapper, error) { + id := hex.EncodeToString(tpn.NodesCoordinator.GetOwnPublicKey()) + if len(id) > 8 { + id = id[0:8] + } + + logger := logger.GetOrCreate(fmt.Sprintf("p/sync/%s", id)) + argsBaseBootstrapper := sync.ArgBaseBootstrapper{ + Logger: logger, PoolsHolder: tpn.DataPool, Store: tpn.Storage, ChainHandler: tpn.BlockChain, diff --git a/process/block/interceptedBlocks/interceptedBlockHeader.go b/process/block/interceptedBlocks/interceptedBlockHeader.go index 9191fd8e4b7..7201d5eb4eb 100644 --- a/process/block/interceptedBlocks/interceptedBlockHeader.go +++ b/process/block/interceptedBlocks/interceptedBlockHeader.go @@ -3,7 +3,6 @@ package interceptedBlocks import ( "fmt" - "github.com/google/martian/log" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/hashing" diff --git a/process/track/argBlockTrack.go b/process/track/argBlockTrack.go index c44bb6254b7..021985acf33 100644 --- a/process/track/argBlockTrack.go +++ b/process/track/argBlockTrack.go @@ -8,11 +8,13 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + logger "github.com/multiversx/mx-chain-logger-go" ) // ArgBaseTracker holds all dependencies required by the process data factory in order to create // new instances of shard/meta block tracker type ArgBaseTracker struct { + Logger logger.Logger Hasher hashing.Hasher HeaderValidator process.HeaderConstructionValidator Marshalizer marshal.Marshalizer diff --git a/process/track/baseBlockTrack.go b/process/track/baseBlockTrack.go index 22eb1c86cc1..8ccb5cb8e42 100644 --- a/process/track/baseBlockTrack.go +++ b/process/track/baseBlockTrack.go @@ -15,7 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-logger-go" + logger "github.com/multiversx/mx-chain-logger-go" ) var _ process.ValidityAttester = (*baseBlockTrack)(nil) @@ -29,6 +29,7 @@ type HeaderInfo struct { } type baseBlockTrack struct { + log logger.Logger hasher hashing.Hasher headerValidator process.HeaderConstructionValidator 
marshalizer marshal.Marshalizer @@ -61,12 +62,12 @@ func createBaseBlockTrack(arguments ArgBaseTracker) (*baseBlockTrack, error) { maxNumHeadersToKeepPerShard := arguments.PoolsHolder.Headers().MaxSize() - crossNotarizer, err := NewBlockNotarizer(arguments.Hasher, arguments.Marshalizer, arguments.ShardCoordinator) + crossNotarizer, err := NewBlockNotarizer(arguments.Logger, arguments.Hasher, arguments.Marshalizer, arguments.ShardCoordinator) if err != nil { return nil, err } - selfNotarizer, err := NewBlockNotarizer(arguments.Hasher, arguments.Marshalizer, arguments.ShardCoordinator) + selfNotarizer, err := NewBlockNotarizer(arguments.Logger, arguments.Hasher, arguments.Marshalizer, arguments.ShardCoordinator) if err != nil { return nil, err } @@ -96,7 +97,14 @@ func createBaseBlockTrack(arguments ArgBaseTracker) (*baseBlockTrack, error) { return nil, err } + var log logger.Logger + log = logger.GetOrCreate("process/track") + if arguments.Logger != nil { + log = arguments.Logger + } + bbt := &baseBlockTrack{ + log: log, hasher: arguments.Hasher, headerValidator: arguments.HeaderValidator, marshalizer: arguments.Marshalizer, @@ -131,11 +139,11 @@ func (bbt *baseBlockTrack) receivedHeader(headerHandler data.HeaderHandler, head func (bbt *baseBlockTrack) receivedShardHeader(headerHandler data.HeaderHandler, shardHeaderHash []byte) { shardHeader, ok := headerHandler.(data.ShardHeaderHandler) if !ok { - log.Warn("cannot convert data.HeaderHandler in data.ShardHeaderHandler") + bbt.log.Warn("cannot convert data.HeaderHandler in data.ShardHeaderHandler") return } - log.Debug("received shard header from network in block tracker", + bbt.log.Debug("received shard header from network in block tracker", "shard", shardHeader.GetShardID(), "epoch", shardHeader.GetEpoch(), "round", shardHeader.GetRound(), @@ -144,12 +152,12 @@ func (bbt *baseBlockTrack) receivedShardHeader(headerHandler data.HeaderHandler, ) if !bbt.ShouldAddHeader(headerHandler) { - log.Trace("received shard header is out of range", "nonce", headerHandler.GetNonce()) + bbt.log.Trace("received shard header is out of range", "nonce", headerHandler.GetNonce()) return } if !bbt.addHeader(shardHeader, shardHeaderHash) { - log.Trace("received shard header was not added", "nonce", headerHandler.GetNonce()) + bbt.log.Trace("received shard header was not added", "nonce", headerHandler.GetNonce()) return } @@ -160,11 +168,11 @@ func (bbt *baseBlockTrack) receivedShardHeader(headerHandler data.HeaderHandler, func (bbt *baseBlockTrack) receivedMetaBlock(headerHandler data.HeaderHandler, metaBlockHash []byte) { metaBlock, ok := headerHandler.(*block.MetaBlock) if !ok { - log.Warn("cannot convert data.HeaderHandler in *block.Metablock") + bbt.log.Warn("cannot convert data.HeaderHandler in *block.Metablock") return } - log.Debug("received meta block from network in block tracker", + bbt.log.Debug("received meta block from network in block tracker", "shard", metaBlock.GetShardID(), "epoch", metaBlock.GetEpoch(), "round", metaBlock.GetRound(), @@ -173,12 +181,12 @@ func (bbt *baseBlockTrack) receivedMetaBlock(headerHandler data.HeaderHandler, m ) if !bbt.ShouldAddHeader(headerHandler) { - log.Trace("received meta block is out of range", "nonce", headerHandler.GetNonce()) + bbt.log.Trace("received meta block is out of range", "nonce", headerHandler.GetNonce()) return } if !bbt.addHeader(metaBlock, metaBlockHash) { - log.Trace("received meta block was not added", "nonce", headerHandler.GetNonce()) + bbt.log.Trace("received meta block was not added", 
"nonce", headerHandler.GetNonce()) return } @@ -203,7 +211,7 @@ func (bbt *baseBlockTrack) shouldAddHeaderForShard( ) bool { lastNotarizedHeader, _, err := blockNotarizer.GetLastNotarizedHeader(headerHandler.GetShardID()) if err != nil { - log.Debug("shouldAddHeaderForShard.GetLastNotarizedHeader", + bbt.log.Debug("shouldAddHeaderForShard.GetLastNotarizedHeader", "shard", headerHandler.GetShardID(), "error", err.Error()) return false @@ -386,12 +394,12 @@ func (bbt *baseBlockTrack) displayTrackedHeadersForShard(shardID uint32, message return } - log.Debug(message, + bbt.log.Debug(message, "shard", shardID, "nb", len(headers)) for index, header := range headers { - log.Trace("tracked header info", + bbt.log.Trace("tracked header info", "round", header.GetRound(), "nonce", header.GetNonce(), "hash", hashes[index]) @@ -572,7 +580,7 @@ func (bbt *baseBlockTrack) SortHeadersFromNonce(shardID uint32, nonce uint64) ([ func (bbt *baseBlockTrack) AddHeaderFromPool(shardID uint32, nonce uint64) { headers, hashes, err := bbt.headersPool.GetHeadersByNonceAndShardId(nonce, shardID) if err != nil { - log.Trace("baseBlockTrack.AddHeaderFromPool", "error", err.Error()) + bbt.log.Trace("baseBlockTrack.AddHeaderFromPool", "error", err.Error()) return } @@ -680,7 +688,7 @@ func (bbt *baseBlockTrack) computeMetaBlocksDifferenceForShard(shardID uint32) i func (bbt *baseBlockTrack) computeMetaBlocksBehind() int64 { selfHdrNotarizedByItself, _, err := bbt.GetLastSelfNotarizedHeader(bbt.shardCoordinator.SelfId()) if err != nil { - log.Debug("isMetaStuck.GetLastSelfNotarizedHeader", + bbt.log.Debug("isMetaStuck.GetLastSelfNotarizedHeader", "shard", bbt.shardCoordinator.SelfId(), "error", err.Error()) return 0 @@ -688,7 +696,7 @@ func (bbt *baseBlockTrack) computeMetaBlocksBehind() int64 { selfHdrNotarizedByMeta, _, err := bbt.GetLastSelfNotarizedHeader(core.MetachainShardId) if err != nil { - log.Debug("isMetaStuck.GetLastSelfNotarizedHeader", + bbt.log.Debug("isMetaStuck.GetLastSelfNotarizedHeader", "shard", core.MetachainShardId, "error", err.Error()) return 0 @@ -829,7 +837,7 @@ func (bbt *baseBlockTrack) doWhitelistWithMetaBlockIfNeeded(metablock data.MetaH miniBlockHdrs := metablock.GetMiniBlockHeaderHandlers() keys := make([][]byte, 0) - crossMbKeysMeta := getCrossShardMiniblockKeys(miniBlockHdrs, selfShardID, core.MetachainShardId) + crossMbKeysMeta := bbt.getCrossShardMiniblockKeys(miniBlockHdrs, selfShardID, core.MetachainShardId) if len(crossMbKeysMeta) > 0 { keys = append(keys, crossMbKeysMeta...) } @@ -839,7 +847,7 @@ func (bbt *baseBlockTrack) doWhitelistWithMetaBlockIfNeeded(metablock data.MetaH continue } - crossMbKeysShard := getCrossShardMiniblockKeys(shardData.GetShardMiniBlockHeaderHandlers(), selfShardID, shardData.GetShardID()) + crossMbKeysShard := bbt.getCrossShardMiniblockKeys(shardData.GetShardMiniBlockHeaderHandlers(), selfShardID, shardData.GetShardID()) if len(crossMbKeysShard) > 0 { keys = append(keys, crossMbKeysShard...) } @@ -863,7 +871,7 @@ func (bbt *baseBlockTrack) doWhitelistWithShardHeaderIfNeeded(shardHeader data.H miniBlockHdrs := shardHeader.GetMiniBlockHeaderHandlers() keys := make([][]byte, 0) - crossMbKeysShard := getCrossShardMiniblockKeys(miniBlockHdrs, selfShardID, shardHeader.GetShardID()) + crossMbKeysShard := bbt.getCrossShardMiniblockKeys(miniBlockHdrs, selfShardID, shardHeader.GetShardID()) if len(crossMbKeysShard) > 0 { keys = append(keys, crossMbKeysShard...) 
} @@ -871,7 +879,7 @@ func (bbt *baseBlockTrack) doWhitelistWithShardHeaderIfNeeded(shardHeader data.H bbt.whitelistHandler.Add(keys) } -func getCrossShardMiniblockKeys(miniBlockHdrs []data.MiniBlockHeaderHandler, selfShardID uint32, processingShard uint32) [][]byte { +func (bbt *baseBlockTrack) getCrossShardMiniblockKeys(miniBlockHdrs []data.MiniBlockHeaderHandler, selfShardID uint32, processingShard uint32) [][]byte { keys := make([][]byte, 0) for _, miniBlockHdr := range miniBlockHdrs { receiverShard := miniBlockHdr.GetReceiverShardID() @@ -879,7 +887,7 @@ func getCrossShardMiniblockKeys(miniBlockHdrs []data.MiniBlockHeaderHandler, sel senderIsCrossShard := miniBlockHdr.GetSenderShardID() != selfShardID if receiverIsSelfShard && senderIsCrossShard { keys = append(keys, miniBlockHdr.GetHash()) - log.Trace( + bbt.log.Trace( "getCrossShardMiniblockKeys", "type", miniBlockHdr.GetTypeInt32(), "sender", miniBlockHdr.GetSenderShardID(), @@ -894,7 +902,7 @@ func getCrossShardMiniblockKeys(miniBlockHdrs []data.MiniBlockHeaderHandler, sel func (bbt *baseBlockTrack) isHeaderOutOfRange(headerHandler data.HeaderHandler) bool { lastCrossNotarizedHeader, _, err := bbt.GetLastCrossNotarizedHeader(headerHandler.GetShardID()) if err != nil { - log.Debug("isHeaderOutOfRange.GetLastCrossNotarizedHeader", + bbt.log.Debug("isHeaderOutOfRange.GetLastCrossNotarizedHeader", "shard", headerHandler.GetShardID(), "error", err.Error()) return true diff --git a/process/track/blockNotarizer.go b/process/track/blockNotarizer.go index 0d6b581fd6e..e6122de45a4 100644 --- a/process/track/blockNotarizer.go +++ b/process/track/blockNotarizer.go @@ -11,9 +11,11 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + logger "github.com/multiversx/mx-chain-logger-go" ) type blockNotarizer struct { + log logger.Logger hasher hashing.Hasher marshalizer marshal.Marshalizer shardCoordinator sharding.Coordinator @@ -24,6 +26,7 @@ type blockNotarizer struct { // NewBlockNotarizer creates a block notarizer object which implements blockNotarizerHandler interface func NewBlockNotarizer( + logger logger.Logger, hasher hashing.Hasher, marshalizer marshal.Marshalizer, shardCoordinator sharding.Coordinator, @@ -39,6 +42,7 @@ func NewBlockNotarizer( } bn := blockNotarizer{ + log: logger, hasher: hasher, marshalizer: marshalizer, shardCoordinator: shardCoordinator, @@ -124,12 +128,12 @@ func (bn *blockNotarizer) DisplayNotarizedHeaders(shardID uint32, message string return } - log.Debug(message, + bn.log.Debug(message, "shard", shardID, "nb", len(notarizedHeaders)) for _, hdrInfo := range notarizedHeaders { - log.Trace("notarized header info", + bn.log.Trace("notarized header info", "round", hdrInfo.Header.GetRound(), "nonce", hdrInfo.Header.GetNonce(), "hash", hdrInfo.Hash) From ec5be33b16e4a27b191ddd624188806c716173b3 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 10 Jan 2025 01:04:56 +0200 Subject: [PATCH 03/10] add custom log for process block --- process/block/argProcessor.go | 2 + process/block/baseProcess.go | 123 +++++++++---------- process/block/displayBlock.go | 18 ++- process/block/displayMetaBlock.go | 14 ++- process/block/headerValidator.go | 18 ++- process/block/metablock.go | 181 +++++++++++++++------------- process/block/shardblock.go | 192 ++++++++++++++++-------------- 7 files changed, 296 insertions(+), 252 deletions(-) diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index df929214829..397981573ab 
100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" ) type coreComponentsHolder interface { @@ -94,6 +95,7 @@ type ArgBaseProcessor struct { BlockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler ManagedPeersHolder common.ManagedPeersHolder SentSignaturesTracker process.SentSignaturesTracker + Logger logger.Logger } // ArgShardProcessor holds all dependencies required by the process data factory in order to create diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 4f2a3661ece..a4ea9d08d14 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -64,6 +64,7 @@ type hdrInfo struct { } type baseProcessor struct { + log logger.Logger shardCoordinator sharding.Coordinator nodesCoordinator nodesCoordinator.NodesCoordinator accountsDB map[state.AccountsDbIdentifier]state.AccountsAdapter @@ -171,14 +172,14 @@ func (bp *baseProcessor) checkBlockValidity( return nil } - log.Debug("hash does not match", + bp.log.Debug("hash does not match", "local block hash", bp.blockChain.GetGenesisHeaderHash(), "received previous hash", headerHandler.GetPrevHash()) return process.ErrBlockHashDoesNotMatch } - log.Debug("nonce does not match", + bp.log.Debug("nonce does not match", "local block nonce", 0, "received nonce", headerHandler.GetNonce()) @@ -186,7 +187,7 @@ func (bp *baseProcessor) checkBlockValidity( } if headerHandler.GetRound() <= currentBlockHeader.GetRound() { - log.Debug("round does not match", + bp.log.Debug("round does not match", "local block round", currentBlockHeader.GetRound(), "received block round", headerHandler.GetRound()) @@ -194,7 +195,7 @@ func (bp *baseProcessor) checkBlockValidity( } if headerHandler.GetNonce() != currentBlockHeader.GetNonce()+1 { - log.Debug("nonce does not match", + bp.log.Debug("nonce does not match", "local block nonce", currentBlockHeader.GetNonce(), "received nonce", headerHandler.GetNonce()) @@ -202,7 +203,7 @@ func (bp *baseProcessor) checkBlockValidity( } if !bytes.Equal(headerHandler.GetPrevHash(), bp.blockChain.GetCurrentBlockHeaderHash()) { - log.Debug("hash does not match", + bp.log.Debug("hash does not match", "local block hash", bp.blockChain.GetCurrentBlockHeaderHash(), "received previous hash", headerHandler.GetPrevHash()) @@ -210,7 +211,7 @@ func (bp *baseProcessor) checkBlockValidity( } if !bytes.Equal(headerHandler.GetPrevRandSeed(), currentBlockHeader.GetRandSeed()) { - log.Debug("random seed does not match", + bp.log.Debug("random seed does not match", "local random seed", currentBlockHeader.GetRandSeed(), "received previous random seed", headerHandler.GetPrevRandSeed()) @@ -241,7 +242,7 @@ func (bp *baseProcessor) checkScheduledRootHash(headerHandler data.HeaderHandler } if !bytes.Equal(additionalData.GetScheduledRootHash(), bp.getRootHash()) { - log.Debug("scheduled root hash does not match", + bp.log.Debug("scheduled root hash does not match", "current root hash", bp.getRootHash(), "header scheduled root hash", additionalData.GetScheduledRootHash()) return process.ErrScheduledRootHashDoesNotMatch @@ -255,7 +256,7 @@ func (bp *baseProcessor) checkScheduledRootHash(headerHandler data.HeaderHandler func (bp *baseProcessor) verifyStateRoot(rootHash []byte) bool { trieRootHash, err := bp.accountsDB[state.UserAccountsState].RootHash() if 
err != nil { - log.Debug("verify account.RootHash", "error", err.Error()) + bp.log.Debug("verify account.RootHash", "error", err.Error()) } return bytes.Equal(trieRootHash, rootHash) @@ -265,7 +266,7 @@ func (bp *baseProcessor) verifyStateRoot(rootHash []byte) bool { func (bp *baseProcessor) getRootHash() []byte { rootHash, err := bp.accountsDB[state.UserAccountsState].RootHash() if err != nil { - log.Trace("get account.RootHash", "error", err.Error()) + bp.log.Trace("get account.RootHash", "error", err.Error()) } return rootHash @@ -906,7 +907,7 @@ func (bp *baseProcessor) checkScheduledMiniBlocksValidity(headerHandler data.Hea scheduledMiniBlocks := bp.scheduledTxsExecutionHandler.GetScheduledMiniBlocks() if len(scheduledMiniBlocks) > len(headerHandler.GetMiniBlockHeadersHashes()) { - log.Debug("baseProcessor.checkScheduledMiniBlocksValidity", "num mbs scheduled", len(scheduledMiniBlocks), "num mbs received", len(headerHandler.GetMiniBlockHeadersHashes())) + bp.log.Debug("baseProcessor.checkScheduledMiniBlocksValidity", "num mbs scheduled", len(scheduledMiniBlocks), "num mbs received", len(headerHandler.GetMiniBlockHeadersHashes())) return process.ErrScheduledMiniBlocksMismatch } @@ -917,7 +918,7 @@ func (bp *baseProcessor) checkScheduledMiniBlocksValidity(headerHandler data.Hea } if !bytes.Equal(scheduledMiniBlockHash, headerHandler.GetMiniBlockHeadersHashes()[index]) { - log.Debug("baseProcessor.checkScheduledMiniBlocksValidity", "index", index, "scheduled mb hash", scheduledMiniBlockHash, "received mb hash", headerHandler.GetMiniBlockHeadersHashes()[index]) + bp.log.Debug("baseProcessor.checkScheduledMiniBlocksValidity", "index", index, "scheduled mb hash", scheduledMiniBlockHash, "received mb hash", headerHandler.GetMiniBlockHeadersHashes()[index]) return process.ErrScheduledMiniBlocksMismatch } } @@ -961,7 +962,7 @@ func (bp *baseProcessor) requestMissingFinalityAttestingHeaders( } if requestedHeaders > 0 { - log.Debug("requested missing finality attesting headers", + bp.log.Debug("requested missing finality attesting headers", "num headers", requestedHeaders, "shard", shardID) } @@ -995,7 +996,7 @@ func (bp *baseProcessor) cleanupPools(headerHandler data.HeaderHandler) { if bp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, headerHandler.GetEpoch()) { err := bp.dataPool.Proofs().CleanupProofsBehindNonce(bp.shardCoordinator.SelfId(), highestPrevFinalBlockNonce) if err != nil { - log.Warn("failed to cleanup notarized proofs behind nonce", + bp.log.Warn("failed to cleanup notarized proofs behind nonce", "nonce", noncesToPrevFinal, "shardID", bp.shardCoordinator.SelfId(), "error", err) @@ -1018,7 +1019,7 @@ func (bp *baseProcessor) cleanupPoolsForCrossShard( ) { crossNotarizedHeader, _, err := bp.blockTracker.GetCrossNotarizedHeader(shardID, noncesToPrevFinal) if err != nil { - displayCleanupErrorMessage("cleanupPoolsForCrossShard", + bp.displayCleanupErrorMessage("cleanupPoolsForCrossShard", shardID, noncesToPrevFinal, err) @@ -1034,7 +1035,7 @@ func (bp *baseProcessor) cleanupPoolsForCrossShard( if bp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, crossNotarizedHeader.GetEpoch()) { err = bp.dataPool.Proofs().CleanupProofsBehindNonce(shardID, noncesToPrevFinal) if err != nil { - log.Warn("failed to cleanup notarized proofs behind nonce", + bp.log.Warn("failed to cleanup notarized proofs behind nonce", "nonce", noncesToPrevFinal, "shardID", shardID, "error", err) @@ -1076,7 +1077,7 @@ func (bp *baseProcessor) removeBlocksBody(nonce 
uint64, shardId uint32) { for _, header := range headers { errNotCritical := bp.removeBlockBodyOfHeader(header) if errNotCritical != nil { - log.Debug("RemoveBlockDataFromPool", "error", errNotCritical.Error()) + bp.log.Debug("RemoveBlockDataFromPool", "error", errNotCritical.Error()) } } } @@ -1126,14 +1127,14 @@ func (bp *baseProcessor) getFinalMiniBlocks(header data.HeaderHandler, body *blo var miniBlocks block.MiniBlockSlice if len(body.MiniBlocks) != len(header.GetMiniBlockHeaderHandlers()) { - log.Warn("baseProcessor.getFinalMiniBlocks: num of mini blocks and mini blocks headers does not match", "num of mb", len(body.MiniBlocks), "num of mbh", len(header.GetMiniBlockHeaderHandlers())) + bp.log.Warn("baseProcessor.getFinalMiniBlocks: num of mini blocks and mini blocks headers does not match", "num of mb", len(body.MiniBlocks), "num of mbh", len(header.GetMiniBlockHeaderHandlers())) return nil, process.ErrNumOfMiniBlocksAndMiniBlocksHeadersMismatch } for index, miniBlock := range body.MiniBlocks { miniBlockHeader := header.GetMiniBlockHeaderHandlers()[index] if !miniBlockHeader.IsFinal() { - log.Debug("shardProcessor.getFinalMiniBlocks: do not remove from pool / broadcast mini block which is not final", "mb hash", miniBlockHeader.GetHash()) + bp.log.Debug("shardProcessor.getFinalMiniBlocks: do not remove from pool / broadcast mini block which is not final", "mb hash", miniBlockHeader.GetHash()) continue } @@ -1158,7 +1159,7 @@ func (bp *baseProcessor) cleanupBlockTrackerPools(noncesToPrevFinal uint64) { func (bp *baseProcessor) cleanupBlockTrackerPoolsForShard(shardID uint32, noncesToPrevFinal uint64) { selfNotarizedHeader, _, errSelfNotarized := bp.blockTracker.GetSelfNotarizedHeader(shardID, noncesToPrevFinal) if errSelfNotarized != nil { - displayCleanupErrorMessage("cleanupBlockTrackerPoolsForShard.GetSelfNotarizedHeader", + bp.displayCleanupErrorMessage("cleanupBlockTrackerPoolsForShard.GetSelfNotarizedHeader", shardID, noncesToPrevFinal, errSelfNotarized) @@ -1171,7 +1172,7 @@ func (bp *baseProcessor) cleanupBlockTrackerPoolsForShard(shardID uint32, nonces if shardID != bp.shardCoordinator.SelfId() { crossNotarizedHeader, _, errCrossNotarized := bp.blockTracker.GetCrossNotarizedHeader(shardID, noncesToPrevFinal) if errCrossNotarized != nil { - displayCleanupErrorMessage("cleanupBlockTrackerPoolsForShard.GetCrossNotarizedHeader", + bp.displayCleanupErrorMessage("cleanupBlockTrackerPoolsForShard.GetCrossNotarizedHeader", shardID, noncesToPrevFinal, errCrossNotarized) @@ -1187,7 +1188,7 @@ func (bp *baseProcessor) cleanupBlockTrackerPoolsForShard(shardID uint32, nonces crossNotarizedNonce, ) - log.Trace("cleanupBlockTrackerPoolsForShard.CleanupHeadersBehindNonce", + bp.log.Trace("cleanupBlockTrackerPoolsForShard.CleanupHeadersBehindNonce", "shard", shardID, "self notarized nonce", selfNotarizedNonce, "cross notarized nonce", crossNotarizedNonce, @@ -1219,7 +1220,7 @@ func (bp *baseProcessor) prepareDataForBootStorer(args bootStorerDataArgs) { elapsedTime := time.Since(startTime) if elapsedTime >= common.PutInStorerMaxTime { - log.Warn("saveDataForBootStorer", "elapsed time", elapsedTime) + bp.log.Warn("saveDataForBootStorer", "elapsed time", elapsedTime) } } @@ -1248,7 +1249,7 @@ func (bp *baseProcessor) getLastCrossNotarizedHeaders() []bootstrapStorage.Boots func (bp *baseProcessor) getLastCrossNotarizedHeadersForShard(shardID uint32) *bootstrapStorage.BootstrapHeaderInfo { lastCrossNotarizedHeader, lastCrossNotarizedHeaderHash, err := 
bp.blockTracker.GetLastCrossNotarizedHeader(shardID) if err != nil { - log.Warn("getLastCrossNotarizedHeadersForShard", + bp.log.Warn("getLastCrossNotarizedHeadersForShard", "shard", shardID, "error", err.Error()) return nil @@ -1292,7 +1293,7 @@ func (bp *baseProcessor) getLastSelfNotarizedHeaders() []bootstrapStorage.Bootst func (bp *baseProcessor) getLastSelfNotarizedHeadersForShard(shardID uint32) *bootstrapStorage.BootstrapHeaderInfo { lastSelfNotarizedHeader, lastSelfNotarizedHeaderHash, err := bp.blockTracker.GetLastSelfNotarizedHeader(shardID) if err != nil { - log.Warn("getLastSelfNotarizedHeadersForShard", + bp.log.Warn("getLastSelfNotarizedHeadersForShard", "shard", shardID, "error", err.Error()) return nil @@ -1350,7 +1351,7 @@ func (bp *baseProcessor) DecodeBlockBody(dta []byte) data.BodyHandler { err := bp.marshalizer.Unmarshal(body, dta) if err != nil { - log.Debug("DecodeBlockBody.Unmarshal", "error", err.Error()) + bp.log.Debug("DecodeBlockBody.Unmarshal", "error", err.Error()) return nil } @@ -1361,14 +1362,14 @@ func (bp *baseProcessor) saveBody(body *block.Body, header data.HeaderHandler, h startTime := time.Now() bp.txCoordinator.SaveTxsToStorage(body) - log.Trace("saveBody.SaveTxsToStorage", "time", time.Since(startTime)) + bp.log.Trace("saveBody.SaveTxsToStorage", "time", time.Since(startTime)) var errNotCritical error var marshalizedMiniBlock []byte for i := 0; i < len(body.MiniBlocks); i++ { marshalizedMiniBlock, errNotCritical = bp.marshalizer.Marshal(body.MiniBlocks[i]) if errNotCritical != nil { - log.Warn("saveBody.Marshal", "error", errNotCritical.Error()) + bp.log.Warn("saveBody.Marshal", "error", errNotCritical.Error()) continue } @@ -1379,7 +1380,7 @@ func (bp *baseProcessor) saveBody(body *block.Body, header data.HeaderHandler, h "saveBody.Put -> MiniBlockUnit", "err", errNotCritical) } - log.Trace("saveBody.Put -> MiniBlockUnit", "time", time.Since(startTime), "hash", miniBlockHash) + bp.log.Trace("saveBody.Put -> MiniBlockUnit", "time", time.Since(startTime), "hash", miniBlockHash) } receiptsHolder := holders.NewReceiptsHolder(bp.txCoordinator.GetCreatedInShardMiniBlocks()) @@ -1394,7 +1395,7 @@ func (bp *baseProcessor) saveBody(body *block.Body, header data.HeaderHandler, h elapsedTime := time.Since(startTime) if elapsedTime >= common.PutInStorerMaxTime { - log.Warn("saveBody", "elapsed time", elapsedTime) + bp.log.Warn("saveBody", "elapsed time", elapsedTime) } } @@ -1420,7 +1421,7 @@ func (bp *baseProcessor) saveShardHeader(header data.HeaderHandler, headerHash [ elapsedTime := time.Since(startTime) if elapsedTime >= common.PutInStorerMaxTime { - log.Warn("saveShardHeader", "elapsed time", elapsedTime) + bp.log.Warn("saveShardHeader", "elapsed time", elapsedTime) } } @@ -1445,7 +1446,7 @@ func (bp *baseProcessor) saveMetaHeader(header data.HeaderHandler, headerHash [] elapsedTime := time.Since(startTime) if elapsedTime >= common.PutInStorerMaxTime { - log.Warn("saveMetaHeader", "elapsed time", elapsedTime) + bp.log.Warn("saveMetaHeader", "elapsed time", elapsedTime) } } @@ -1461,7 +1462,7 @@ func getLastSelfNotarizedHeaderByItself(chainHandler data.ChainHandler) (data.He } func (bp *baseProcessor) setFinalizedHeaderHashInIndexer(hdrHash []byte) { - log.Debug("baseProcessor.setFinalizedHeaderHashInIndexer", "finalized header hash", hdrHash) + bp.log.Debug("baseProcessor.setFinalizedHeaderHashInIndexer", "finalized header hash", hdrHash) bp.outportHandler.FinalizedBlock(&outportcore.FinalizedBlock{ShardID: bp.shardCoordinator.SelfId(), HeaderHash: 
hdrHash}) } @@ -1494,7 +1495,7 @@ func (bp *baseProcessor) revertAccountState() { for key := range bp.accountsDB { err := bp.accountsDB[key].RevertToSnapshot(0) if err != nil { - log.Debug("RevertToSnapshot", "error", err.Error()) + bp.log.Debug("RevertToSnapshot", "error", err.Error()) } } } @@ -1503,7 +1504,7 @@ func (bp *baseProcessor) revertScheduledInfo() { header, headerHash := bp.getLastCommittedHeaderAndHash() err := bp.scheduledTxsExecutionHandler.RollBackToBlock(headerHash) if err != nil { - log.Trace("baseProcessor.revertScheduledInfo", "error", err.Error()) + bp.log.Trace("baseProcessor.revertScheduledInfo", "error", err.Error()) scheduledInfo := &process.ScheduledInfo{ RootHash: header.GetRootHash(), IntermediateTxs: make(map[block.Type][]data.TransactionHandler), @@ -1540,7 +1541,7 @@ func (bp *baseProcessor) RevertAccountsDBToSnapshot(accountsSnapshot map[state.A for key := range bp.accountsDB { err := bp.accountsDB[key].RevertToSnapshot(accountsSnapshot[key]) if err != nil { - log.Debug("RevertAccountsDBToSnapshot", "error", err.Error()) + bp.log.Debug("RevertAccountsDBToSnapshot", "error", err.Error()) } } } @@ -1623,7 +1624,7 @@ func (bp *baseProcessor) PruneStateOnRollback(currHeader data.HeaderHandler, cur func (bp *baseProcessor) getPruningHandler(finalHeaderNonce uint64) state.PruningHandler { if finalHeaderNonce-bp.lastRestartNonce <= uint64(bp.pruningDelay) { - log.Debug("will skip pruning", + bp.log.Debug("will skip pruning", "finalHeaderNonce", finalHeaderNonce, "last restart nonce", bp.lastRestartNonce, "num blocks for pruning delay", bp.pruningDelay, @@ -1659,17 +1660,17 @@ func (bp *baseProcessor) displayMiniBlocksPool() { for _, hash := range miniBlocksPool.Keys() { value, ok := miniBlocksPool.Get(hash) if !ok { - log.Debug("displayMiniBlocksPool: mini block not found", "hash", logger.DisplayByteSlice(hash)) + bp.log.Debug("displayMiniBlocksPool: mini block not found", "hash", logger.DisplayByteSlice(hash)) continue } miniBlock, ok := value.(*block.MiniBlock) if !ok { - log.Debug("displayMiniBlocksPool: wrong type assertion", "hash", logger.DisplayByteSlice(hash)) + bp.log.Debug("displayMiniBlocksPool: wrong type assertion", "hash", logger.DisplayByteSlice(hash)) continue } - log.Trace("mini block in pool", + bp.log.Trace("mini block in pool", "hash", logger.DisplayByteSlice(hash), "type", miniBlock.Type, "sender", miniBlock.SenderShardID, @@ -1690,19 +1691,19 @@ func trimSliceBootstrapHeaderInfo(in []bootstrapStorage.BootstrapHeaderInfo) []b func (bp *baseProcessor) restoreBlockBody(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) { if check.IfNil(bodyHandler) { - log.Debug("restoreMiniblocks nil bodyHandler") + bp.log.Debug("restoreMiniblocks nil bodyHandler") return } body, ok := bodyHandler.(*block.Body) if !ok { - log.Debug("restoreMiniblocks wrong type assertion for bodyHandler") + bp.log.Debug("restoreMiniblocks wrong type assertion for bodyHandler") return } _, errNotCritical := bp.txCoordinator.RestoreBlockDataFromStorage(body) if errNotCritical != nil { - log.Debug("restoreBlockBody RestoreBlockDataFromStorage", "error", errNotCritical.Error()) + bp.log.Debug("restoreBlockBody RestoreBlockDataFromStorage", "error", errNotCritical.Error()) } go bp.txCounter.headerReverted(headerHandler) @@ -1730,7 +1731,7 @@ func (bp *baseProcessor) RestoreBlockBodyIntoPools(bodyHandler data.BodyHandler) func (bp *baseProcessor) requestMiniBlocksIfNeeded(headerHandler data.HeaderHandler) { lastCrossNotarizedHeader, _, err := 
bp.blockTracker.GetLastCrossNotarizedHeader(headerHandler.GetShardID()) if err != nil { - log.Debug("requestMiniBlocksIfNeeded.GetLastCrossNotarizedHeader", + bp.log.Debug("requestMiniBlocksIfNeeded.GetLastCrossNotarizedHeader", "shard", headerHandler.GetShardID(), "error", err.Error()) return @@ -1765,7 +1766,7 @@ func (bp *baseProcessor) recordBlockInHistory(blockHeaderHash []byte, blockHeade if core.IsClosingError(err) { logLevel = logger.LogDebug } - log.Log(logLevel, "historyRepo.RecordBlock()", "blockHeaderHash", blockHeaderHash, "error", err.Error()) + bp.log.Log(logLevel, "historyRepo.RecordBlock()", "blockHeaderHash", blockHeaderHash, "error", err.Error()) } } @@ -1773,7 +1774,7 @@ func (bp *baseProcessor) addHeaderIntoTrackerPool(nonce uint64, shardID uint32) headersPool := bp.dataPool.Headers() headers, hashes, err := headersPool.GetHeadersByNonceAndShardId(nonce, shardID) if err != nil { - log.Trace("baseProcessor.addHeaderIntoTrackerPool", "error", err.Error()) + bp.log.Trace("baseProcessor.addHeaderIntoTrackerPool", "error", err.Error()) return } @@ -1841,7 +1842,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl if errUnmarshal != nil { numCodeLeaves++ totalSizeCodeLeaves += len(leaf.Value()) - log.Trace("cannot unmarshal user account. it may be a code leaf", "error", errUnmarshal) + bp.log.Trace("cannot unmarshal user account. it may be a code leaf", "error", errUnmarshal) continue } @@ -1902,7 +1903,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl "from which totalSizeAccountsDataTries", totalSizeAccountsDataTries}...) } - log.Debug("sum of addresses in shard at epoch start", stats...) + bp.log.Debug("sum of addresses in shard at epoch start", stats...) return nil } @@ -1958,7 +1959,7 @@ func (bp *baseProcessor) ProcessScheduledBlock(headerHandler data.HeaderHandler, bp.processStatusHandler.SetIdle() }() - scheduledMiniBlocksFromMe, err := getScheduledMiniBlocksFromMe(headerHandler, bodyHandler) + scheduledMiniBlocksFromMe, err := bp.getScheduledMiniBlocksFromMe(headerHandler, bodyHandler) if err != nil { return err } @@ -1970,7 +1971,7 @@ func (bp *baseProcessor) ProcessScheduledBlock(headerHandler data.HeaderHandler, startTime := time.Now() err = bp.scheduledTxsExecutionHandler.ExecuteAll(haveTime) elapsedTime := time.Since(startTime) - log.Debug("elapsed time to execute all scheduled transactions", + bp.log.Debug("elapsed time to execute all scheduled transactions", "time [s]", elapsedTime, ) if err != nil { @@ -1986,21 +1987,21 @@ func (bp *baseProcessor) ProcessScheduledBlock(headerHandler data.HeaderHandler, finalProcessingGasAndFees := bp.getGasAndFeesWithScheduled() - scheduledProcessingGasAndFees := gasAndFeesDelta(normalProcessingGasAndFees, finalProcessingGasAndFees) + scheduledProcessingGasAndFees := bp.gasAndFeesDelta(normalProcessingGasAndFees, finalProcessingGasAndFees) bp.scheduledTxsExecutionHandler.SetScheduledRootHash(rootHash) bp.scheduledTxsExecutionHandler.SetScheduledGasAndFees(scheduledProcessingGasAndFees) return nil } -func getScheduledMiniBlocksFromMe(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) (block.MiniBlockSlice, error) { +func (bp *baseProcessor) getScheduledMiniBlocksFromMe(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) (block.MiniBlockSlice, error) { body, ok := bodyHandler.(*block.Body) if !ok { return nil, process.ErrWrongTypeAssertion } if len(body.MiniBlocks) != len(headerHandler.GetMiniBlockHeaderHandlers()) { - 
log.Warn("getScheduledMiniBlocksFromMe: num of mini blocks and mini blocks headers does not match", "num of mb", len(body.MiniBlocks), "num of mbh", len(headerHandler.GetMiniBlockHeaderHandlers())) + bp.log.Warn("getScheduledMiniBlocksFromMe: num of mini blocks and mini blocks headers does not match", "num of mb", len(body.MiniBlocks), "num of mbh", len(headerHandler.GetMiniBlockHeaderHandlers())) return nil, process.ErrNumOfMiniBlocksAndMiniBlocksHeadersMismatch } @@ -2033,13 +2034,13 @@ func (bp *baseProcessor) getGasAndFeesWithScheduled() scheduled.GasAndFees { return gasAndFees } -func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) scheduled.GasAndFees { +func (bp *baseProcessor) gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) scheduled.GasAndFees { zero := big.NewInt(0) result := process.GetZeroGasAndFees() deltaAccumulatedFees := big.NewInt(0).Sub(finalGasAndFees.AccumulatedFees, initialGasAndFees.AccumulatedFees) if deltaAccumulatedFees.Cmp(zero) < 0 { - log.Error("gasAndFeesDelta", + bp.log.Error("gasAndFeesDelta", "initial accumulatedFees", initialGasAndFees.AccumulatedFees.String(), "final accumulatedFees", finalGasAndFees.AccumulatedFees.String(), "error", process.ErrNegativeValue) @@ -2048,7 +2049,7 @@ func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) sc deltaDevFees := big.NewInt(0).Sub(finalGasAndFees.DeveloperFees, initialGasAndFees.DeveloperFees) if deltaDevFees.Cmp(zero) < 0 { - log.Error("gasAndFeesDelta", + bp.log.Error("gasAndFeesDelta", "initial devFees", initialGasAndFees.DeveloperFees.String(), "final devFees", finalGasAndFees.DeveloperFees.String(), "error", process.ErrNegativeValue) @@ -2057,7 +2058,7 @@ func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) sc deltaGasProvided := int64(finalGasAndFees.GasProvided) - int64(initialGasAndFees.GasProvided) if deltaGasProvided < 0 { - log.Error("gasAndFeesDelta", + bp.log.Error("gasAndFeesDelta", "initial gasProvided", initialGasAndFees.GasProvided, "final gasProvided", finalGasAndFees.GasProvided, "error", process.ErrNegativeValue) @@ -2066,7 +2067,7 @@ func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) sc deltaGasPenalized := int64(finalGasAndFees.GasPenalized) - int64(initialGasAndFees.GasPenalized) if deltaGasPenalized < 0 { - log.Error("gasAndFeesDelta", + bp.log.Error("gasAndFeesDelta", "initial gasPenalized", initialGasAndFees.GasPenalized, "final gasPenalized", finalGasAndFees.GasPenalized, "error", process.ErrNegativeValue) @@ -2075,7 +2076,7 @@ func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) sc deltaGasRefunded := int64(finalGasAndFees.GasRefunded) - int64(initialGasAndFees.GasRefunded) if deltaGasRefunded < 0 { - log.Error("gasAndFeesDelta", + bp.log.Error("gasAndFeesDelta", "initial gasRefunded", initialGasAndFees.GasRefunded, "final gasRefunded", finalGasAndFees.GasRefunded, "error", process.ErrNegativeValue) @@ -2098,7 +2099,7 @@ func (bp *baseProcessor) getIndexOfFirstMiniBlockToBeExecuted(header data.Header for index, miniBlockHeaderHandler := range header.GetMiniBlockHeaderHandlers() { if miniBlockHeaderHandler.GetProcessingType() == int32(block.Processed) { - log.Debug("baseProcessor.getIndexOfFirstMiniBlockToBeExecuted: mini block is already executed", + bp.log.Debug("baseProcessor.getIndexOfFirstMiniBlockToBeExecuted: mini block is already executed", "mb hash", miniBlockHeaderHandler.GetHash(), "mb index", index) continue @@ -2110,7 +2111,7 @@ 
func (bp *baseProcessor) getIndexOfFirstMiniBlockToBeExecuted(header data.Header return len(header.GetMiniBlockHeaderHandlers()) } -func displayCleanupErrorMessage(message string, shardID uint32, noncesToPrevFinal uint64, err error) { +func (bp *baseProcessor) displayCleanupErrorMessage(message string, shardID uint32, noncesToPrevFinal uint64, err error) { // 2 blocks on shard + 2 blocks on meta + 1 block to previous final maxNoncesToPrevFinalWithoutWarn := uint64(process.BlockFinality+1)*2 + 1 level := logger.LogWarning @@ -2118,7 +2119,7 @@ func displayCleanupErrorMessage(message string, shardID uint32, noncesToPrevFina level = logger.LogDebug } - log.Log(level, message, + bp.log.Log(level, message, "shard", shardID, "nonces to previous final", noncesToPrevFinal, "error", err.Error()) diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index 3b1ab7410cc..54664482c47 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -20,6 +20,7 @@ import ( ) type transactionCounter struct { + log logger.Logger mutex sync.RWMutex currentBlockTxs uint64 totalTxs uint64 @@ -31,6 +32,7 @@ type transactionCounter struct { // ArgsTransactionCounter represents the arguments needed to create a new transaction counter type ArgsTransactionCounter struct { + Logger logger.Logger AppStatusHandler core.AppStatusHandler Hasher hashing.Hasher Marshalizer marshal.Marshalizer @@ -50,7 +52,14 @@ func NewTransactionCounter(args ArgsTransactionCounter) (*transactionCounter, er return nil, process.ErrNilMarshalizer } + var log logger.Logger + log = logger.GetOrCreate("process/block") + if args.Logger != nil { + log = args.Logger + } + return &transactionCounter{ + log: log, mutex: sync.RWMutex{}, appStatusHandler: args.AppStatusHandler, currentBlockTxs: 0, @@ -71,7 +80,7 @@ func (txc *transactionCounter) getPoolCounts(poolsHolder dataRetriever.PoolsHold // headerReverted updates the total processed txs in case of restore. It also sets the current block txs to 0 func (txc *transactionCounter) headerReverted(hdr data.HeaderHandler) { if check.IfNil(hdr) { - log.Warn("programming error: nil header in transactionCounter.headerReverted function") + txc.log.Warn("programming error: nil header in transactionCounter.headerReverted function") return } @@ -95,7 +104,7 @@ func (txc *transactionCounter) safeSubtractTotalTxs(delta uint64) { func (txc *transactionCounter) headerExecuted(hdr data.HeaderHandler) { if check.IfNil(hdr) { - log.Warn("programming error: nil header in transactionCounter.headerExecuted function") + txc.log.Warn("programming error: nil header in transactionCounter.headerExecuted function") return } @@ -141,7 +150,7 @@ func (txc *transactionCounter) displayLogInfo( tblString, err := display.CreateTableString(dispHeader, dispLines) if err != nil { - log.Debug("CreateTableString", "error", err.Error()) + txc.log.Debug("CreateTableString", "error", err.Error()) return } @@ -154,7 +163,7 @@ func (txc *transactionCounter) displayLogInfo( "shard", selfId, } txc.mutex.RUnlock() - log.Debug(message, arguments...) + txc.log.Debug(message, arguments...) 
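// Illustrative sketch (hypothetical names, not part of the patch): package-level
// helpers such as getCrossShardMiniblockKeys, displayCleanupErrorMessage,
// gasAndFeesDelta and getScheduledMiniBlocksFromMe are promoted to methods on
// their owning struct only so they can log through the injected per-instance
// logger instead of the shared package-level one. The refactor looks roughly
// like this:
package example

import (
	logger "github.com/multiversx/mx-chain-logger-go"
)

type holder struct {
	log logger.Logger
}

// before: a free function logging via the package logger
// after: a method, so h.log points at whatever logger was injected into the holder
func (h *holder) filterNonEmptyKeys(keys [][]byte) [][]byte {
	filtered := make([][]byte, 0, len(keys))
	for _, key := range keys {
		if len(key) == 0 {
			h.log.Trace("filterNonEmptyKeys: skipping empty key")
			continue
		}
		filtered = append(filtered, key)
	}

	return filtered
}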
blockTracker.DisplayTrackedHeaders() } @@ -330,6 +339,7 @@ func getConstructionStateAsString(miniBlockHeader data.MiniBlockHeaderHandler) s // DisplayLastNotarized will display information about last notarized block func DisplayLastNotarized( + log logger.Logger, marshalizer marshal.Marshalizer, hasher hashing.Hasher, lastNotarizedHdrForShard data.HeaderHandler, diff --git a/process/block/displayMetaBlock.go b/process/block/displayMetaBlock.go index 2018b819925..2c03af8ce36 100644 --- a/process/block/displayMetaBlock.go +++ b/process/block/displayMetaBlock.go @@ -9,7 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/display" "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-logger-go" + logger "github.com/multiversx/mx-chain-logger-go" ) type transactionCountersProvider interface { @@ -19,6 +19,7 @@ type transactionCountersProvider interface { } type headersCounter struct { + log logger.Logger shardMBHeaderCounterMutex sync.RWMutex shardMBHeadersCurrentBlockProcessed uint64 shardMBHeadersTotalProcessed uint64 @@ -26,8 +27,9 @@ type headersCounter struct { // NewHeaderCounter returns a new object that keeps track of how many headers // were processed in total, and in the current block -func NewHeaderCounter() *headersCounter { +func NewHeaderCounter(log logger.Logger) *headersCounter { return &headersCounter{ + log: log, shardMBHeaderCounterMutex: sync.RWMutex{}, shardMBHeadersCurrentBlockProcessed: 0, shardMBHeadersTotalProcessed: 0, @@ -72,7 +74,7 @@ func (hc *headersCounter) displayLogInfo( blockTracker process.BlockTracker, ) { if check.IfNil(countersProvider) { - log.Warn("programming error in headersCounter.displayLogInfo - nil countersProvider") + hc.log.Warn("programming error in headersCounter.displayLogInfo - nil countersProvider") return } @@ -83,7 +85,7 @@ func (hc *headersCounter) displayLogInfo( tblString, err := display.CreateTableString(dispHeader, dispLines) if err != nil { - log.Debug("CreateTableString", "error", err.Error()) + hc.log.Debug("CreateTableString", "error", err.Error()) return } @@ -96,9 +98,9 @@ func (hc *headersCounter) displayLogInfo( } hc.shardMBHeaderCounterMutex.RUnlock() - log.Debug(message, arguments...) + hc.log.Debug(message, arguments...) 
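// Illustrative sketch (hypothetical names, not part of the patch): the touched
// constructors (NewTransactionCounter, NewHeaderValidator and, further below,
// NewMetaProcessor/NewShardProcessor) share one shape: an optional logger.Logger
// in the args struct, falling back to the package logger from GetOrCreate when
// none is supplied, so existing callers keep compiling. The "process/block" id
// mirrors the fallback used in displayBlock.go and headerValidator.go.
package example

import (
	logger "github.com/multiversx/mx-chain-logger-go"
)

// ArgsComponent stands in for args structs such as ArgsTransactionCounter.
type ArgsComponent struct {
	Logger logger.Logger
}

type component struct {
	log logger.Logger
}

// NewComponent treats a nil Logger as "keep the old shared package logger".
func NewComponent(args ArgsComponent) *component {
	var log logger.Logger
	log = logger.GetOrCreate("process/block")
	if args.Logger != nil {
		log = args.Logger
	}

	return &component{log: log}
}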
- log.Debug("metablock metrics info", + hc.log.Debug("metablock metrics info", "total txs processed", countersProvider.TotalTxs(), "block txs processed", countersProvider.CurrentBlockTxs(), "hash", headerHash, diff --git a/process/block/headerValidator.go b/process/block/headerValidator.go index 9459280c847..2d90892272b 100644 --- a/process/block/headerValidator.go +++ b/process/block/headerValidator.go @@ -9,17 +9,20 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/process" + logger "github.com/multiversx/mx-chain-logger-go" ) var _ process.HeaderConstructionValidator = (*headerValidator)(nil) // ArgsHeaderValidator are the arguments needed to create a new header validator type ArgsHeaderValidator struct { + Logger logger.Logger Hasher hashing.Hasher Marshalizer marshal.Marshalizer } type headerValidator struct { + log logger.Logger hasher hashing.Hasher marshalizer marshal.Marshalizer } @@ -33,7 +36,14 @@ func NewHeaderValidator(args ArgsHeaderValidator) (*headerValidator, error) { return nil, process.ErrNilMarshalizer } + var log logger.Logger + log = logger.GetOrCreate("process/block") + if args.Logger != nil { + log = args.Logger + } + return &headerValidator{ + log: log, hasher: args.Hasher, marshalizer: args.Marshalizer, }, nil @@ -49,7 +59,7 @@ func (h *headerValidator) IsHeaderConstructionValid(currHeader, prevHeader data. } if prevHeader.GetRound() >= currHeader.GetRound() { - log.Trace("round does not match", + h.log.Trace("round does not match", "shard", currHeader.GetShardID(), "local header round", prevHeader.GetRound(), "received round", currHeader.GetRound()) @@ -57,7 +67,7 @@ func (h *headerValidator) IsHeaderConstructionValid(currHeader, prevHeader data. } if currHeader.GetNonce() != prevHeader.GetNonce()+1 { - log.Trace("nonce does not match", + h.log.Trace("nonce does not match", "shard", currHeader.GetShardID(), "local header nonce", prevHeader.GetNonce(), "received nonce", currHeader.GetNonce()) @@ -70,7 +80,7 @@ func (h *headerValidator) IsHeaderConstructionValid(currHeader, prevHeader data. } if !bytes.Equal(currHeader.GetPrevHash(), prevHeaderHash) { - log.Trace("header hash does not match", + h.log.Trace("header hash does not match", "shard", currHeader.GetShardID(), "local header hash", prevHeaderHash, "received header with prev hash", currHeader.GetPrevHash(), @@ -79,7 +89,7 @@ func (h *headerValidator) IsHeaderConstructionValid(currHeader, prevHeader data. 
} if !bytes.Equal(currHeader.GetPrevRandSeed(), prevHeader.GetRandSeed()) { - log.Trace("header random seed does not match", + h.log.Trace("header random seed does not match", "shard", currHeader.GetShardID(), "local header random seed", prevHeader.GetRandSeed(), "received header with prev random seed", currHeader.GetPrevRandSeed(), diff --git a/process/block/metablock.go b/process/block/metablock.go index 04220d9936d..37f2c94cf7d 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -94,7 +94,15 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { } genesisHdr := arguments.DataComponents.Blockchain().GetGenesisHeader() + + var log logger.Logger + log = logger.GetOrCreate("process/sync") + if arguments.Logger != nil { + log = arguments.Logger + } + base := &baseProcessor{ + log: log, accountsDB: arguments.AccountsDB, blockSizeThrottler: arguments.BlockSizeThrottler, forkDetector: arguments.ForkDetector, @@ -146,7 +154,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { mp := metaProcessor{ baseProcessor: base, - headersCounter: NewHeaderCounter(), + headersCounter: NewHeaderCounter(log), scToProtocol: arguments.SCToProtocol, pendingMiniBlocksHandler: arguments.PendingMiniBlocksHandler, epochStartDataCreator: arguments.EpochStartDataCreator, @@ -158,6 +166,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { } argsTransactionCounter := ArgsTransactionCounter{ + Logger: log, AppStatusHandler: mp.appStatusHandler, Hasher: mp.hasher, Marshalizer: mp.marshalizer, @@ -205,7 +214,7 @@ func (mp *metaProcessor) ProcessBlock( err := mp.checkBlockValidity(headerHandler, bodyHandler) if err != nil { if errors.Is(err, process.ErrBlockHashDoesNotMatch) { - log.Debug("requested missing meta header", + mp.log.Debug("requested missing meta header", "hash", headerHandler.GetPrevHash(), "for shard", headerHandler.GetShardID(), ) @@ -220,7 +229,7 @@ func (mp *metaProcessor) ProcessBlock( mp.epochNotifier.CheckEpoch(headerHandler) mp.requestHandler.SetEpoch(headerHandler.GetEpoch()) - log.Debug("started processing block", + mp.log.Debug("started processing block", "epoch", headerHandler.GetEpoch(), "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), @@ -253,9 +262,9 @@ func (mp *metaProcessor) ProcessBlock( } txCounts, rewardCounts, unsignedCounts := mp.txCounter.getPoolCounts(mp.dataPool) - log.Debug("total txs in pool", "counts", txCounts.String()) - log.Debug("total txs in rewards pool", "counts", rewardCounts.String()) - log.Debug("total txs in unsigned pool", "counts", unsignedCounts.String()) + mp.log.Debug("total txs in pool", "counts", txCounts.String()) + mp.log.Debug("total txs in rewards pool", "counts", rewardCounts.String()) + mp.log.Debug("total txs in unsigned pool", "counts", unsignedCounts.String()) go getMetricsFromMetaHeader( header, @@ -285,7 +294,7 @@ func (mp *metaProcessor) ProcessBlock( } if mp.accountsDB[state.UserAccountsState].JournalLen() != 0 { - log.Error("metaProcessor.ProcessBlock first entry", "stack", string(mp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) + mp.log.Error("metaProcessor.ProcessBlock first entry", "stack", string(mp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) return process.ErrAccountStateDirty } @@ -314,12 +323,12 @@ func (mp *metaProcessor) ProcessBlock( haveMissingShardHeaders := requestedShardHdrs > 0 || requestedFinalityAttestingShardHdrs > 0 if haveMissingShardHeaders { if requestedShardHdrs > 0 
{ - log.Debug("requested missing shard headers", + mp.log.Debug("requested missing shard headers", "num headers", requestedShardHdrs, ) } if requestedFinalityAttestingShardHdrs > 0 { - log.Debug("requested missing finality attesting shard headers", + mp.log.Debug("requested missing finality attesting shard headers", "num finality shard headers", requestedFinalityAttestingShardHdrs, ) } @@ -333,7 +342,7 @@ func (mp *metaProcessor) ProcessBlock( mp.hdrsForCurrBlock.resetMissingHdrs() if requestedShardHdrs > 0 { - log.Debug("received missing shard headers", + mp.log.Debug("received missing shard headers", "num headers", requestedShardHdrs-missingShardHdrs, ) } @@ -378,7 +387,7 @@ func (mp *metaProcessor) ProcessBlock( startTime := time.Now() err = mp.txCoordinator.ProcessBlockTransaction(header, &block.Body{MiniBlocks: miniBlocks}, haveTime) elapsedTime := time.Since(startTime) - log.Debug("elapsed time to process block transaction", + mp.log.Debug("elapsed time to process block transaction", "time [s]", elapsedTime, ) if err != nil { @@ -428,7 +437,7 @@ func (mp *metaProcessor) checkProofsForShardData(header *block.MetaBlock) error // compare the one from proofsPool with what shardData.CurrentSignature and shardData.CurrentPubKeysBitmap hold // if they are different, verify the proof received on header if !mp.proofsPool.HasProof(shardData.ShardID, shardData.HeaderHash) { - return fmt.Errorf("%w for header hash %s", process.ErrMissingHeaderProof, hex.EncodeToString(shardData.HeaderHash)) + return fmt.Errorf("%w for meta header hash %s", process.ErrMissingHeaderProof, hex.EncodeToString(shardData.HeaderHash)) } } @@ -542,7 +551,7 @@ func (mp *metaProcessor) checkEpochCorrectness( isEpochIncorrect := headerHandler.GetEpoch() != currentBlockHeader.GetEpoch() && mp.epochStartTrigger.Epoch() == currentBlockHeader.GetEpoch() if isEpochIncorrect { - log.Warn("epoch does not match", "currentHeaderEpoch", currentBlockHeader.GetEpoch(), "receivedHeaderEpoch", headerHandler.GetEpoch(), "epochStartTrigger", mp.epochStartTrigger.Epoch()) + mp.log.Warn("epoch does not match", "currentHeaderEpoch", currentBlockHeader.GetEpoch(), "receivedHeaderEpoch", headerHandler.GetEpoch(), "epochStartTrigger", mp.epochStartTrigger.Epoch()) return process.ErrEpochDoesNotMatch } @@ -550,7 +559,7 @@ func (mp *metaProcessor) checkEpochCorrectness( mp.epochStartTrigger.EpochStartRound() <= headerHandler.GetRound() && headerHandler.GetEpoch() != currentBlockHeader.GetEpoch()+1 if isEpochIncorrect { - log.Warn("is epoch start and epoch does not match", "currentHeaderEpoch", currentBlockHeader.GetEpoch(), "receivedHeaderEpoch", headerHandler.GetEpoch(), "epochStartTrigger", mp.epochStartTrigger.Epoch()) + mp.log.Warn("is epoch start and epoch does not match", "currentHeaderEpoch", currentBlockHeader.GetEpoch(), "receivedHeaderEpoch", headerHandler.GetEpoch(), "epochStartTrigger", mp.epochStartTrigger.Epoch()) return process.ErrEpochDoesNotMatch } @@ -630,7 +639,7 @@ func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing() { for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { err := mp.requestHeadersIfMissing(orderedHdrsPerShard[i], i) if err != nil { - log.Debug("checkAndRequestIfShardHeadersMissing", "error", err.Error()) + mp.log.Debug("checkAndRequestIfShardHeadersMissing", "error", err.Error()) continue } } @@ -648,7 +657,7 @@ func (mp *metaProcessor) indexBlock( return } - log.Debug("preparing to index block", "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) + 
mp.log.Debug("preparing to index block", "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) argSaveBlock, err := mp.outportDataProvider.PrepareOutportSaveBlockData(processOutport.ArgPrepareOutportSaveBlockData{ HeaderHash: headerHash, Header: metaBlock, @@ -660,18 +669,18 @@ func (mp *metaProcessor) indexBlock( HighestFinalBlockHash: mp.forkDetector.GetHighestFinalBlockHash(), }) if err != nil { - log.Error("metaProcessor.indexBlock cannot prepare argSaveBlock", "error", err.Error(), + mp.log.Error("metaProcessor.indexBlock cannot prepare argSaveBlock", "error", err.Error(), "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) return } err = mp.outportHandler.SaveBlock(argSaveBlock) if err != nil { - log.Error("metaProcessor.outportHandler.SaveBlock cannot save block", "error", err, + mp.log.Error("metaProcessor.outportHandler.SaveBlock cannot save block", "error", err, "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) return } - log.Debug("indexed block", "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) + mp.log.Debug("indexed block", "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) indexRoundInfo(mp.outportHandler, mp.nodesCoordinator, core.MetachainShardId, metaBlock, lastMetaBlock, argSaveBlock.SignersIndexes) @@ -708,7 +717,7 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, for _, hdrHash := range hdrHashes { shardHeader, errNotCritical := process.GetShardHeaderFromStorage(hdrHash, mp.marshalizer, mp.store) if errNotCritical != nil { - log.Debug("shard header not found in BlockHeaderUnit", + mp.log.Debug("shard header not found in BlockHeaderUnit", "hash", hdrHash, ) continue @@ -719,14 +728,14 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardHeader.GetShardID()) storer, errNotCritical := mp.store.GetStorer(hdrNonceHashDataUnit) if errNotCritical != nil { - log.Debug("storage unit not found", "unit", hdrNonceHashDataUnit, "error", errNotCritical.Error()) + mp.log.Debug("storage unit not found", "unit", hdrNonceHashDataUnit, "error", errNotCritical.Error()) continue } nonceToByteSlice := mp.uint64Converter.ToByteSlice(shardHeader.GetNonce()) errNotCritical = storer.Remove(nonceToByteSlice) if errNotCritical != nil { - log.Debug("ShardHdrNonceHashDataUnit.Remove", "error", errNotCritical.Error()) + mp.log.Debug("ShardHdrNonceHashDataUnit.Remove", "error", errNotCritical.Error()) } mp.headersCounter.subtractRestoredMBHeaders(len(shardHeader.GetMiniBlockHeaderHandlers())) @@ -763,7 +772,7 @@ func (mp *metaProcessor) CreateBlock( var body data.BodyHandler if mp.accountsDB[state.UserAccountsState].JournalLen() != 0 { - log.Error("metaProcessor.CreateBlock first entry", "stack", string(mp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) + mp.log.Error("metaProcessor.CreateBlock first entry", "stack", string(mp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) return nil, nil, process.ErrAccountStateDirty } @@ -832,7 +841,7 @@ func (mp *metaProcessor) updateEpochStartHeader(metaHdr *block.MetaBlock) error sw.Start("createEpochStartForMetablock") defer func() { sw.Stop("createEpochStartForMetablock") - log.Debug("epochStartHeaderDataCreation", sw.GetMeasurements()...) + mp.log.Debug("epochStartHeaderDataCreation", sw.GetMeasurements()...) 
}() epochStart, err := mp.epochStartDataCreator.CreateEpochStartData() @@ -874,7 +883,7 @@ func (mp *metaProcessor) createEpochStartBody(metaBlock *block.MetaBlock) (data. return nil, err } - log.Debug("started creating epoch start block body", + mp.log.Debug("started creating epoch start block body", "epoch", metaBlock.GetEpoch(), "round", metaBlock.GetRound(), "nonce", metaBlock.GetNonce(), @@ -951,7 +960,7 @@ func (mp *metaProcessor) createBlockBody(metaBlock data.HeaderHandler, haveTime mp.blockSizeThrottler.ComputeCurrentMaxSize() - log.Debug("started creating meta block body", + mp.log.Debug("started creating meta block body", "epoch", metaBlock.GetEpoch(), "round", metaBlock.GetRound(), "nonce", metaBlock.GetNonce(), @@ -984,26 +993,26 @@ func (mp *metaProcessor) createMiniBlocks( } if !haveTime() { - log.Debug("metaProcessor.createMiniBlocks", "error", process.ErrTimeIsOut) + mp.log.Debug("metaProcessor.createMiniBlocks", "error", process.ErrTimeIsOut) interMBs := mp.txCoordinator.CreatePostProcessMiniBlocks() if len(interMBs) > 0 { miniBlocks = append(miniBlocks, interMBs...) } - log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) + mp.log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) return &block.Body{MiniBlocks: miniBlocks}, nil } mbsToMe, numTxs, numShardHeaders, err := mp.createAndProcessCrossMiniBlocksDstMe(haveTime) if err != nil { - log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) + mp.log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) } if len(mbsToMe) > 0 { miniBlocks = append(miniBlocks, mbsToMe...) - log.Debug("processed miniblocks and txs with destination in self shard", + mp.log.Debug("processed miniblocks and txs with destination in self shard", "num miniblocks", len(mbsToMe), "num txs", numTxs, "num shard headers", numShardHeaders, @@ -1019,13 +1028,13 @@ func (mp *metaProcessor) createMiniBlocks( numTxs += uint32(len(mb.TxHashes)) } - log.Debug("processed miniblocks and txs from self shard", + mp.log.Debug("processed miniblocks and txs from self shard", "num miniblocks", len(mbsFromMe), "num txs", numTxs, ) } - log.Debug("creating mini blocks has been finished", + mp.log.Debug("creating mini blocks has been finished", "miniblocks created", len(miniBlocks), ) @@ -1049,12 +1058,12 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( sw.Start("ComputeLongestShardsChainsFromLastNotarized") orderedHdrs, orderedHdrsHashes, _, err := mp.blockTracker.ComputeLongestShardsChainsFromLastNotarized() sw.Stop("ComputeLongestShardsChainsFromLastNotarized") - log.Debug("measurements ComputeLongestShardsChainsFromLastNotarized", sw.GetMeasurements()...) + mp.log.Debug("measurements ComputeLongestShardsChainsFromLastNotarized", sw.GetMeasurements()...) 
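// Illustrative sketch (hypothetical sentinel, not part of the patch): the proof
// checks around here wrap sentinels such as process.ErrMissingHeaderProof with
// fmt.Errorf("%w ...") so the hex-encoded header hash ends up in the message while
// callers can still match the error with errors.Is, as ProcessBlock does for
// process.ErrBlockHashDoesNotMatch. The wrap-and-match flow in isolation:
package example

import (
	"encoding/hex"
	"errors"
	"fmt"
)

// errMissingProof stands in for process.ErrMissingHeaderProof.
var errMissingProof = errors.New("missing header proof")

// checkProof wraps the sentinel so the caller keeps both the context and the identity.
func checkProof(hasProof bool, headerHash []byte) error {
	if !hasProof {
		return fmt.Errorf("%w for header hash %s", errMissingProof, hex.EncodeToString(headerHash))
	}

	return nil
}

// isMissingProof matches the wrapped error without parsing its message.
func isMissingProof(err error) bool {
	return errors.Is(err, errMissingProof)
}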
if err != nil { return nil, 0, 0, err } - log.Debug("shard headers ordered", + mp.log.Debug("shard headers ordered", "num shard headers", len(orderedHdrs), ) @@ -1076,14 +1085,14 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(orderedHdrs); i++ { if !haveTime() { - log.Debug("time is up after putting cross txs with destination to current shard", + mp.log.Debug("time is up after putting cross txs with destination to current shard", "num txs", txsAdded, ) break } if hdrsAdded >= maxShardHeadersAllowedInOneMetaBlock { - log.Debug("maximum shard headers allowed to be included in one meta block has been reached", + mp.log.Debug("maximum shard headers allowed to be included in one meta block has been reached", "shard headers added", hdrsAdded, ) break @@ -1091,7 +1100,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( currShardHdr := orderedHdrs[i] if currShardHdr.GetNonce() > lastShardHdr[currShardHdr.GetShardID()].GetNonce()+1 { - log.Trace("skip searching", + mp.log.Trace("skip searching", "shard", currShardHdr.GetShardID(), "last shard hdr nonce", lastShardHdr[currShardHdr.GetShardID()].GetNonce(), "curr shard hdr nonce", currShardHdr.GetNonce()) @@ -1099,7 +1108,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( } if hdrsAddedForShard[currShardHdr.GetShardID()] >= maxShardHeadersFromSameShard { - log.Trace("maximum shard headers from same shard allowed to be included in one meta block has been reached", + mp.log.Trace("maximum shard headers from same shard allowed to be included in one meta block has been reached", "shard", currShardHdr.GetShardID(), "shard headers added", hdrsAddedForShard[currShardHdr.GetShardID()], ) @@ -1110,7 +1119,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( if shouldCheckProof { hasProofForHdr := mp.proofsPool.HasProof(currShardHdr.GetShardID(), orderedHdrsHashes[i]) if !hasProofForHdr { - log.Trace("no proof for shard header", + mp.log.Trace("no proof for shard header", "shard", currShardHdr.GetShardID(), "hash", logger.DisplayByteSlice(orderedHdrsHashes[i]), ) @@ -1143,7 +1152,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( } if !hdrProcessFinished { - log.Debug("shard header cannot be fully processed", + mp.log.Debug("shard header cannot be fully processed", "round", currShardHdr.GetRound(), "nonce", currShardHdr.GetNonce(), "hash", orderedHdrsHashes[i]) @@ -1152,7 +1161,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( errAccountState := mp.accountsDB[state.UserAccountsState].RevertToSnapshot(snapshot) if errAccountState != nil { // TODO: evaluate if reloading the trie from disk will might solve the problem - log.Warn("accounts.RevertToSnapshot", "error", errAccountState.Error()) + mp.log.Warn("accounts.RevertToSnapshot", "error", errAccountState.Error()) } continue } @@ -1182,7 +1191,7 @@ func (mp *metaProcessor) requestShardHeadersIfNeeded( lastShardHdr map[uint32]data.HeaderHandler, ) { for shardID := uint32(0); shardID < mp.shardCoordinator.NumberOfShards(); shardID++ { - log.Debug("shard headers added", + mp.log.Debug("shard headers added", "shard", shardID, "num", hdrsAddedForShard[shardID], "highest nonce", lastShardHdr[shardID].GetNonce()) @@ -1219,7 +1228,7 @@ func (mp *metaProcessor) CommitBlock( return err } - log.Debug("started committing block", + mp.log.Debug("started committing block", "epoch", headerHandler.GetEpoch(), "shard", headerHandler.GetShardID(), "round", 
headerHandler.GetRound(), @@ -1275,7 +1284,7 @@ func (mp *metaProcessor) CommitBlock( return err } - log.Info("meta block has been committed successfully", + mp.log.Info("meta block has been committed successfully", "epoch", headerHandler.GetEpoch(), "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), @@ -1286,17 +1295,17 @@ func (mp *metaProcessor) CommitBlock( errNotCritical := mp.checkSentSignaturesAtCommitTime(headerHandler) if errNotCritical != nil { - log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) + mp.log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) } notarizedHeadersHashes, errNotCritical := mp.updateCrossShardInfo(header) if errNotCritical != nil { - log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) + mp.log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) } errNotCritical = mp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, nil, nil) if errNotCritical != nil { - log.Debug("forkDetector.AddHeader", "error", errNotCritical.Error()) + mp.log.Debug("forkDetector.AddHeader", "error", errNotCritical.Error()) } currentHeader, currentHeaderHash := getLastSelfNotarizedHeaderByItself(mp.blockChain) @@ -1309,7 +1318,7 @@ func (mp *metaProcessor) CommitBlock( go mp.historyRepo.OnNotarizedBlocks(mp.shardCoordinator.SelfId(), []data.HeaderHandler{currentHeader}, [][]byte{currentHeaderHash}) - log.Debug("highest final meta block", + mp.log.Debug("highest final meta block", "nonce", mp.forkDetector.GetHighestFinalBlockNonce(), ) @@ -1317,9 +1326,9 @@ func (mp *metaProcessor) CommitBlock( lastMetaBlock, ok := lastHeader.(data.MetaHeaderHandler) if !ok { if headerHandler.GetNonce() == firstHeaderNonce { - log.Debug("metaBlock.CommitBlock - nil current block header, this is expected at genesis time") + mp.log.Debug("metaBlock.CommitBlock - nil current block header, this is expected at genesis time") } else { - log.Error("metaBlock.CommitBlock - nil current block header, last current header should have not been nil") + mp.log.Error("metaBlock.CommitBlock - nil current block header, last current header should have not been nil") } } lastMetaBlockHash := mp.blockChain.GetCurrentBlockHeaderHash() @@ -1411,7 +1420,7 @@ func (mp *metaProcessor) CommitBlock( errNotCritical = mp.removeTxsFromPools(header, body) if errNotCritical != nil { - log.Debug("removeTxsFromPools", "error", errNotCritical.Error()) + mp.log.Debug("removeTxsFromPools", "error", errNotCritical.Error()) } mp.cleanupPools(headerHandler) @@ -1461,12 +1470,12 @@ func (mp *metaProcessor) displayPoolsInfo() { miniBlocksPool := mp.dataPool.MiniBlocks() for shardID := uint32(0); shardID < mp.shardCoordinator.NumberOfShards(); shardID++ { - log.Trace("pools info", + mp.log.Trace("pools info", "shard", shardID, "num headers", headersPool.GetNumHeaders(shardID)) } - log.Trace("pools info", + mp.log.Trace("pools info", "shard", core.MetachainShardId, "num headers", headersPool.GetNumHeaders(core.MetachainShardId)) @@ -1474,7 +1483,7 @@ func (mp *metaProcessor) displayPoolsInfo() { // (in this case this number is equal with: number of shards + metachain (self shard)) numShardsToKeepHeaders := int(mp.shardCoordinator.NumberOfShards()) + 1 capacity := headersPool.MaxSize() * numShardsToKeepHeaders - log.Debug("pools info", + mp.log.Debug("pools info", "total headers", headersPool.Len(), "headers pool capacity", capacity, "total miniblocks", miniBlocksPool.Len(), @@ -1486,7 +1495,7 @@ func (mp *metaProcessor) 
displayPoolsInfo() { func (mp *metaProcessor) updateState(lastMetaBlock data.MetaHeaderHandler, lastMetaBlockHash []byte) { if check.IfNil(lastMetaBlock) { - log.Debug("updateState nil header") + mp.log.Debug("updateState nil header") return } @@ -1500,12 +1509,12 @@ func (mp *metaProcessor) updateState(lastMetaBlock data.MetaHeaderHandler, lastM mp.store, ) if errNotCritical != nil { - log.Debug("could not get meta header from storage") + mp.log.Debug("could not get meta header from storage") return } if lastMetaBlock.IsStartOfEpochBlock() { - log.Debug("trie snapshot", + mp.log.Debug("trie snapshot", "rootHash", lastMetaBlock.GetRootHash(), "prevRootHash", prevMetaBlock.GetRootHash(), "validatorStatsRootHash", lastMetaBlock.GetValidatorStatsRootHash()) @@ -1514,12 +1523,12 @@ func (mp *metaProcessor) updateState(lastMetaBlock data.MetaHeaderHandler, lastM go func() { metaBlock, ok := lastMetaBlock.(*block.MetaBlock) if !ok { - log.Warn("cannot commit Trie Epoch Root Hash: lastMetaBlock is not *block.MetaBlock") + mp.log.Warn("cannot commit Trie Epoch Root Hash: lastMetaBlock is not *block.MetaBlock") return } err := mp.commitTrieEpochRootHashIfNeeded(metaBlock, lastMetaBlock.GetRootHash()) if err != nil { - log.Warn("couldn't commit trie checkpoint", "epoch", metaBlock.Epoch, "error", err) + mp.log.Warn("couldn't commit trie checkpoint", "epoch", metaBlock.Epoch, "error", err) } }() } @@ -1549,7 +1558,7 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( lastNotarizedMetaHeader, lastNotarizedMetaHeaderHash, err := mp.blockTracker.GetLastSelfNotarizedHeader(shardID) if err != nil { - log.Warn("getLastSelfNotarizedHeaderByShard.GetLastSelfNotarizedHeader", + mp.log.Warn("getLastSelfNotarizedHeaderByShard.GetLastSelfNotarizedHeader", "shard", shardID, "error", err.Error()) return nil, nil @@ -1565,7 +1574,7 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( headerInfo, ok := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardData.HeaderHash)] mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() if !ok { - log.Debug("getLastSelfNotarizedHeaderByShard", + mp.log.Debug("getLastSelfNotarizedHeaderByShard", "error", process.ErrMissingHeader, "hash", shardData.HeaderHash) continue @@ -1573,7 +1582,7 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( shardHeader, ok := headerInfo.hdr.(data.ShardHeaderHandler) if !ok { - log.Debug("getLastSelfNotarizedHeaderByShard", + mp.log.Debug("getLastSelfNotarizedHeaderByShard", "error", process.ErrWrongTypeAssertion, "hash", shardData.HeaderHash) continue @@ -1587,7 +1596,7 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( mp.store, ) if errGet != nil { - log.Trace("getLastSelfNotarizedHeaderByShard.GetMetaHeader", "error", errGet.Error()) + mp.log.Trace("getLastSelfNotarizedHeaderByShard.GetMetaHeader", "error", errGet.Error()) continue } @@ -1600,7 +1609,7 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( } if lastNotarizedMetaHeader != nil { - log.Debug("last notarized meta header in shard", + mp.log.Debug("last notarized meta header in shard", "shard", shardID, "epoch", lastNotarizedMetaHeader.GetEpoch(), "round", lastNotarizedMetaHeader.GetRound(), @@ -1644,7 +1653,7 @@ func (mp *metaProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash rootHashHolder := holders.NewDefaultRootHashesHolder(rootHash) err := mp.accountsDB[state.UserAccountsState].RecreateTrie(rootHashHolder) if err != nil { - log.Debug("recreate trie with error for header", + mp.log.Debug("recreate trie with error for header", 
"nonce", header.GetNonce(), "header root hash", header.GetRootHash(), "given root hash", rootHash, @@ -1661,7 +1670,7 @@ func (mp *metaProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash err = mp.validatorStatisticsProcessor.RevertPeerState(metaHeader) if err != nil { - log.Debug("revert peer state with error for header", + mp.log.Debug("revert peer state with error for header", "nonce", metaHeader.GetNonce(), "validators root hash", metaHeader.GetValidatorStatsRootHash(), "error", err.Error(), @@ -1672,7 +1681,7 @@ func (mp *metaProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash err = mp.epochStartTrigger.RevertStateToBlock(metaHeader) if err != nil { - log.Debug("revert epoch start trigger for header", + mp.log.Debug("revert epoch start trigger for header", "nonce", metaHeader.GetNonce(), "error", err, ) @@ -1759,7 +1768,7 @@ func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error hdr := lastCrossNotarizedHeaderForShard[shardID].hdr hash := lastCrossNotarizedHeaderForShard[shardID].hash mp.blockTracker.AddCrossNotarizedHeader(shardID, hdr, hash) - DisplayLastNotarized(mp.marshalizer, mp.hasher, hdr, shardID) + DisplayLastNotarized(mp.log, mp.marshalizer, mp.hasher, hdr, shardID) } return nil @@ -1776,7 +1785,7 @@ func (mp *metaProcessor) getLastCrossNotarizedShardHdrs() (map[uint32]data.Heade return nil, err } - log.Debug("lastCrossNotarizedHeader for shard", "shardID", shardID, "hash", hash) + mp.log.Debug("lastCrossNotarizedHeader for shard", "shardID", shardID, "hash", hash) lastCrossNotarizedHeader[shardID] = lastCrossNotarizedHeaderForShard usedInBlock := mp.isGenesisShardBlockAndFirstMeta(lastCrossNotarizedHeaderForShard.GetNonce()) mp.hdrsForCurrBlock.hdrHashAndInfo[string(hash)] = &hdrInfo{ @@ -1893,7 +1902,7 @@ func (mp *metaProcessor) getFinalMiniBlockHeaders(miniBlockHeaderHandlers []data miniBlockHeaders := make([]data.MiniBlockHeaderHandler, 0) for _, miniBlockHeader := range miniBlockHeaderHandlers { if !miniBlockHeader.IsFinal() { - log.Debug("metaProcessor.getFinalMiniBlockHeaders: do not check validity for mini block which is not final", "mb hash", miniBlockHeader.GetHash()) + mp.log.Debug("metaProcessor.getFinalMiniBlockHeaders: do not check validity for mini block which is not final", "mb hash", miniBlockHeader.GetHash()) continue } @@ -1950,7 +1959,7 @@ func (mp *metaProcessor) checkShardHeadersFinality( if shardHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { err := mp.headerValidator.IsHeaderConstructionValid(shardHdr, lastVerifiedHdr) if err != nil { - log.Debug("checkShardHeadersFinality -> isHdrConstructionValid", + mp.log.Debug("checkShardHeadersFinality -> isHdrConstructionValid", "error", err.Error()) continue } @@ -1996,7 +2005,7 @@ func (mp *metaProcessor) receivedShardHeader(headerHandler data.HeaderHandler, s return } - log.Trace("received shard header from network", + mp.log.Trace("received shard header from network", "shard", shardHeader.GetShardID(), "round", shardHeader.GetRound(), "nonce", shardHeader.GetNonce(), @@ -2023,7 +2032,7 @@ func (mp *metaProcessor) receivedShardHeader(headerHandler data.HeaderHandler, s if mp.hdrsForCurrBlock.missingHdrs == 0 && !shouldConsiderProofsForNotarization { mp.hdrsForCurrBlock.missingFinalityAttestingHdrs = mp.requestMissingFinalityAttestingShardHeaders() if mp.hdrsForCurrBlock.missingFinalityAttestingHdrs == 0 { - log.Debug("received all missing finality attesting shard headers") + mp.log.Debug("received all missing finality attesting shard headers") } } @@ 
-2080,11 +2089,11 @@ func (mp *metaProcessor) computeExistingAndRequestMissingShardHeaders(metaBlock if shardData.Nonce == mp.genesisNonce { lastCrossNotarizedHeaderForShard, hash, err := mp.blockTracker.GetLastCrossNotarizedHeader(shardData.ShardID) if err != nil { - log.Warn("computeExistingAndRequestMissingShardHeaders.GetLastCrossNotarizedHeader", "error", err.Error()) + mp.log.Warn("computeExistingAndRequestMissingShardHeaders.GetLastCrossNotarizedHeader", "error", err.Error()) continue } if !bytes.Equal(hash, shardData.HeaderHash) { - log.Warn("genesis hash missmatch", + mp.log.Warn("genesis hash missmatch", "last notarized nonce", lastCrossNotarizedHeaderForShard.GetNonce(), "last notarized hash", hash, "genesis nonce", mp.genesisNonce, @@ -2185,7 +2194,7 @@ func (mp *metaProcessor) createShardInfo() ([]data.ShardDataHandler, error) { if mp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlockHeader := shardHdr.GetMiniBlockHeaderHandlers()[i] if !miniBlockHeader.IsFinal() { - log.Debug("metaProcessor.createShardInfo: do not create shard data with mini block which is not final", "mb hash", miniBlockHeader.GetHash()) + mp.log.Debug("metaProcessor.createShardInfo: do not create shard data with mini block which is not final", "mb hash", miniBlockHeader.GetHash()) continue } } @@ -2203,7 +2212,7 @@ func (mp *metaProcessor) createShardInfo() ([]data.ShardDataHandler, error) { shardInfo = append(shardInfo, &shardData) } - log.Debug("created shard data", + mp.log.Debug("created shard data", "size", len(shardInfo), ) return shardInfo, nil @@ -2245,14 +2254,14 @@ func (mp *metaProcessor) computeAccumulatedFeesInEpoch(metaHdr data.MetaHeaderHa currentlyAccumulatedFeesInEpoch.Add(currentlyAccumulatedFeesInEpoch, metaHdr.GetAccumulatedFees()) currentDevFeesInEpoch.Add(currentDevFeesInEpoch, metaHdr.GetDeveloperFees()) - log.Debug("computeAccumulatedFeesInEpoch - meta block fees", + mp.log.Debug("computeAccumulatedFeesInEpoch - meta block fees", "meta nonce", metaHdr.GetNonce(), "accumulatedFees", metaHdr.GetAccumulatedFees().String(), "devFees", metaHdr.GetDeveloperFees().String(), "meta leader fees", core.GetIntTrimmedPercentageOfValue(big.NewInt(0).Sub(metaHdr.GetAccumulatedFees(), metaHdr.GetDeveloperFees()), mp.economicsData.LeaderPercentage()).String()) for _, shardData := range metaHdr.GetShardInfoHandlers() { - log.Debug("computeAccumulatedFeesInEpoch - adding shard data fees", + mp.log.Debug("computeAccumulatedFeesInEpoch - adding shard data fees", "shardHeader hash", shardData.GetHeaderHash(), "shardHeader nonce", shardData.GetNonce(), "shardHeader accumulated fees", shardData.GetAccumulatedFees().String(), @@ -2264,7 +2273,7 @@ func (mp *metaProcessor) computeAccumulatedFeesInEpoch(metaHdr data.MetaHeaderHa currentDevFeesInEpoch.Add(currentDevFeesInEpoch, shardData.GetDeveloperFees()) } - log.Debug("computeAccumulatedFeesInEpoch - fees in epoch", + mp.log.Debug("computeAccumulatedFeesInEpoch - fees in epoch", "accumulatedFeesInEpoch", currentlyAccumulatedFeesInEpoch.String(), "devFeesInEpoch", currentDevFeesInEpoch.String()) @@ -2278,7 +2287,7 @@ func (mp *metaProcessor) applyBodyToHeader(metaHdr data.MetaHeaderHandler, bodyH defer func() { sw.Stop("applyBodyToHeader") - log.Debug("measurements", sw.GetMeasurements()...) + mp.log.Debug("measurements", sw.GetMeasurements()...) 
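// Illustrative sketch (hypothetical names, not part of the patch): NewShardProcessor
// below, like NewMetaProcessor above, resolves one logger and hands the same
// instance to the embedded baseProcessor, to NewHeaderCounter and to
// ArgsTransactionCounter, so every sub-component involved in block processing
// writes through the same injected logger. The wiring, reduced to its shape:
package example

import (
	logger "github.com/multiversx/mx-chain-logger-go"
)

type counters struct {
	log logger.Logger
}

type blockProcessor struct {
	log      logger.Logger
	counters *counters
}

// newBlockProcessor shares a single logger instance with everything it owns.
func newBlockProcessor(log logger.Logger) *blockProcessor {
	return &blockProcessor{
		log:      log,
		counters: &counters{log: log},
	}
}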
}() if check.IfNil(bodyHandler) { @@ -2424,7 +2433,7 @@ func (mp *metaProcessor) verifyValidatorStatisticsRootHash(header *block.MetaBlo } if !bytes.Equal(validatorStatsRH, header.GetValidatorStatsRootHash()) { - log.Debug("validator stats root hash mismatch", + mp.log.Debug("validator stats root hash mismatch", "computed", validatorStatsRH, "received", header.GetValidatorStatsRootHash(), ) @@ -2560,7 +2569,7 @@ func (mp *metaProcessor) MarshalizedDataToBroadcast( for shardId, subsetBlockBody := range bodies { buff, err := mp.marshalizer.Marshal(&block.Body{MiniBlocks: subsetBlockBody}) if err != nil { - log.Error("metaProcessor.MarshalizedDataToBroadcast.Marshal", "error", err.Error()) + mp.log.Error("metaProcessor.MarshalizedDataToBroadcast.Marshal", "error", err.Error()) continue } mrsData[shardId] = buff @@ -2577,12 +2586,12 @@ func (mp *metaProcessor) getAllMarshalledTxs(body *block.Body) map[string][][]by for topic, marshalledTxs := range marshalledRewardsTxs { allMarshalledTxs[topic] = append(allMarshalledTxs[topic], marshalledTxs...) - log.Trace("metaProcessor.getAllMarshalledTxs", "topic", topic, "num rewards txs", len(marshalledTxs)) + mp.log.Trace("metaProcessor.getAllMarshalledTxs", "topic", topic, "num rewards txs", len(marshalledTxs)) } for topic, marshalledTxs := range marshalledValidatorInfoTxs { allMarshalledTxs[topic] = append(allMarshalledTxs[topic], marshalledTxs...) - log.Trace("metaProcessor.getAllMarshalledTxs", "topic", topic, "num validator info txs", len(marshalledTxs)) + mp.log.Trace("metaProcessor.getAllMarshalledTxs", "topic", topic, "num validator info txs", len(marshalledTxs)) } return allMarshalledTxs @@ -2684,7 +2693,7 @@ func (mp *metaProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { metaBlock := &block.MetaBlock{} err := mp.marshalizer.Unmarshal(metaBlock, dta) if err != nil { - log.Debug("DecodeBlockHeader.Unmarshal", "error", err.Error()) + mp.log.Debug("DecodeBlockHeader.Unmarshal", "error", err.Error()) return nil } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index d35ed73aa6b..6e317d23a34 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -80,7 +80,14 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { return nil, err } + var log logger.Logger + log = logger.GetOrCreate("process/sync") + if arguments.Logger != nil { + log = arguments.Logger + } + base := &baseProcessor{ + log: log, accountsDB: arguments.AccountsDB, blockSizeThrottler: arguments.BlockSizeThrottler, forkDetector: arguments.ForkDetector, @@ -135,6 +142,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { } argsTransactionCounter := ArgsTransactionCounter{ + Logger: log, AppStatusHandler: sp.appStatusHandler, Hasher: sp.hasher, Marshalizer: sp.marshalizer, @@ -176,7 +184,7 @@ func (sp *shardProcessor) ProcessBlock( err := sp.checkBlockValidity(headerHandler, bodyHandler) if err != nil { if errors.Is(err, process.ErrBlockHashDoesNotMatch) { - log.Debug("requested missing shard header", + sp.log.Debug("requested missing shard header", "hash", headerHandler.GetPrevHash(), "for shard", headerHandler.GetShardID(), ) @@ -196,7 +204,7 @@ func (sp *shardProcessor) ProcessBlock( return err } - log.Debug("started processing block", + sp.log.Debug("started processing block", "epoch", headerHandler.GetEpoch(), "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), @@ -226,9 +234,9 @@ func (sp *shardProcessor) ProcessBlock( } txCounts, rewardCounts, 
unsignedCounts := sp.txCounter.getPoolCounts(sp.dataPool) - log.Debug("total txs in pool", "counts", txCounts.String()) - log.Debug("total txs in rewards pool", "counts", rewardCounts.String()) - log.Debug("total txs in unsigned pool", "counts", unsignedCounts.String()) + sp.log.Debug("total txs in pool", "counts", txCounts.String()) + sp.log.Debug("total txs in rewards pool", "counts", rewardCounts.String()) + sp.log.Debug("total txs in unsigned pool", "counts", unsignedCounts.String()) go getMetricsFromHeader(header, uint64(txCounts.GetTotal()), sp.marshalizer, sp.appStatusHandler) @@ -254,12 +262,12 @@ func (sp *shardProcessor) ProcessBlock( haveMissingMetaHeaders := requestedMetaHdrs > 0 || requestedFinalityAttestingMetaHdrs > 0 if haveMissingMetaHeaders { if requestedMetaHdrs > 0 { - log.Debug("requested missing meta headers", + sp.log.Debug("requested missing meta headers", "num headers", requestedMetaHdrs, ) } if requestedFinalityAttestingMetaHdrs > 0 { - log.Debug("requested missing finality attesting meta headers", + sp.log.Debug("requested missing finality attesting meta headers", "num finality meta headers", requestedFinalityAttestingMetaHdrs, ) } @@ -273,7 +281,7 @@ func (sp *shardProcessor) ProcessBlock( sp.hdrsForCurrBlock.resetMissingHdrs() if requestedMetaHdrs > 0 { - log.Debug("received missing meta headers", + sp.log.Debug("received missing meta headers", "num headers", requestedMetaHdrs-missingMetaHdrs, ) } @@ -289,7 +297,7 @@ func (sp *shardProcessor) ProcessBlock( } if sp.accountsDB[state.UserAccountsState].JournalLen() != 0 { - log.Error("shardProcessor.ProcessBlock first entry", "stack", string(sp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) + sp.log.Error("shardProcessor.ProcessBlock first entry", "stack", string(sp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) return process.ErrAccountStateDirty } @@ -299,6 +307,8 @@ func (sp *shardProcessor) ProcessBlock( if !sp.proofsPool.HasProof(core.MetachainShardId, metaBlockHash) { return fmt.Errorf("%w for header hash %s", process.ErrMissingHeaderProof, hex.EncodeToString(metaBlockHash)) } + + sp.log.Debug("checking cross notarized metablocks: has proof for meta header", "headerHash", metaBlockHash) } } @@ -338,7 +348,7 @@ func (sp *shardProcessor) ProcessBlock( startTime := time.Now() err = sp.txCoordinator.ProcessBlockTransaction(header, &block.Body{MiniBlocks: miniBlocks}, haveTime) elapsedTime := time.Since(startTime) - log.Debug("elapsed time to process block transaction", + sp.log.Debug("elapsed time to process block transaction", "time [s]", elapsedTime, ) if err != nil { @@ -420,7 +430,7 @@ func (sp *shardProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash rootHashHolder := holders.NewDefaultRootHashesHolder(rootHash) err := sp.accountsDB[state.UserAccountsState].RecreateTrie(rootHashHolder) if err != nil { - log.Debug("recreate trie with error for header", + sp.log.Debug("recreate trie with error for header", "nonce", header.GetNonce(), "header root hash", header.GetRootHash(), "given root hash", rootHash, @@ -432,7 +442,7 @@ func (sp *shardProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash err = sp.epochStartTrigger.RevertStateToBlock(header) if err != nil { - log.Debug("revert epoch start trigger for header", + sp.log.Debug("revert epoch start trigger for header", "nonce", header.GetNonce(), "error", err, ) @@ -493,7 +503,7 @@ func (sp *shardProcessor) checkEpochCorrectness( header.GetEpoch() == sp.epochStartTrigger.MetaEpoch() if 
isEpochStartMetaHashIncorrect { go sp.requestHandler.RequestMetaHeader(header.GetEpochStartMetaHash()) - log.Warn("epoch start meta hash mismatch", "proposed", header.GetEpochStartMetaHash(), "calculated", sp.epochStartTrigger.EpochStartMetaHdrHash()) + sp.log.Warn("epoch start meta hash mismatch", "proposed", header.GetEpochStartMetaHash(), "calculated", sp.epochStartTrigger.EpochStartMetaHdrHash()) return fmt.Errorf("%w proposed header with epoch %d has invalid epochStartMetaHash", process.ErrEpochDoesNotMatch, header.GetEpoch()) } @@ -536,7 +546,7 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { return err } - log.Trace("checkMetaHeadersValidityAndFinality", "lastCrossNotarizedHeader nonce", lastCrossNotarizedHeader.GetNonce()) + sp.log.Trace("checkMetaHeadersValidityAndFinality", "lastCrossNotarizedHeader nonce", lastCrossNotarizedHeader.GetNonce()) usedMetaHdrs, err := sp.sortHeadersForCurrentBlockByNonce(true) if err != nil { return err @@ -546,7 +556,7 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { } for _, metaHdr := range usedMetaHdrs[core.MetachainShardId] { - log.Trace("checkMetaHeadersValidityAndFinality", "metaHeader nonce", metaHdr.GetNonce()) + sp.log.Trace("checkMetaHeadersValidityAndFinality", "metaHeader nonce", metaHdr.GetNonce()) err = sp.headerValidator.IsHeaderConstructionValid(metaHdr, lastCrossNotarizedHeader) if err != nil { return fmt.Errorf("%w : checkMetaHeadersValidityAndFinality -> isHdrConstructionValid", err) @@ -600,7 +610,7 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error if metaHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { err := sp.headerValidator.IsHeaderConstructionValid(metaHdr, lastVerifiedHdr) if err != nil { - log.Debug("checkMetaHdrFinality -> isHdrConstructionValid", + sp.log.Debug("checkMetaHdrFinality -> isHdrConstructionValid", "error", err.Error()) continue } @@ -624,7 +634,7 @@ func (sp *shardProcessor) checkAndRequestIfMetaHeadersMissing() { err := sp.requestHeadersIfMissing(orderedMetaBlocks, core.MetachainShardId) if err != nil { - log.Debug("checkAndRequestIfMetaHeadersMissing", "error", err.Error()) + sp.log.Debug("checkAndRequestIfMetaHeadersMissing", "error", err.Error()) } } @@ -638,7 +648,7 @@ func (sp *shardProcessor) indexBlockIfNeeded( return } - log.Debug("preparing to index block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) + sp.log.Debug("preparing to index block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) argSaveBlock, err := sp.outportDataProvider.PrepareOutportSaveBlockData(processOutport.ArgPrepareOutportSaveBlockData{ HeaderHash: headerHash, Header: header, @@ -648,18 +658,18 @@ func (sp *shardProcessor) indexBlockIfNeeded( HighestFinalBlockHash: sp.forkDetector.GetHighestFinalBlockHash(), }) if err != nil { - log.Error("shardProcessor.indexBlockIfNeeded cannot prepare argSaveBlock", "error", err.Error(), + sp.log.Error("shardProcessor.indexBlockIfNeeded cannot prepare argSaveBlock", "error", err.Error(), "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) return } err = sp.outportHandler.SaveBlock(argSaveBlock) if err != nil { - log.Error("shardProcessor.outportHandler.SaveBlock cannot save block", "error", err, + sp.log.Error("shardProcessor.outportHandler.SaveBlock cannot save block", "error", err, "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) return } - log.Debug("indexed block", "hash", headerHash, "nonce", 
header.GetNonce(), "round", header.GetRound()) + sp.log.Debug("indexed block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) shardID := sp.shardCoordinator.SelfId() indexRoundInfo(sp.outportHandler, sp.nodesCoordinator, shardID, header, lastBlockHeader, argSaveBlock.SignersIndexes) @@ -702,7 +712,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool( for _, metaBlockHash := range metaBlockHashes { metaBlock, errNotCritical := process.GetMetaHeaderFromStorage(metaBlockHash, sp.marshalizer, sp.store) if errNotCritical != nil { - log.Debug("meta block is not fully processed yet and not committed in MetaBlockUnit", + sp.log.Debug("meta block is not fully processed yet and not committed in MetaBlockUnit", "hash", metaBlockHash) continue } @@ -717,14 +727,14 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool( metablockStorer, err := sp.store.GetStorer(dataRetriever.MetaBlockUnit) if err != nil { - log.Debug("unable to get storage unit", + sp.log.Debug("unable to get storage unit", "unit", dataRetriever.MetaBlockUnit.String()) return err } err = metablockStorer.Remove(metaBlockHash) if err != nil { - log.Debug("unable to remove hash from MetaBlockUnit", + sp.log.Debug("unable to remove hash from MetaBlockUnit", "hash", metaBlockHash) return err } @@ -733,18 +743,18 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool( metaHdrNonceHashStorer, err := sp.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) if err != nil { - log.Debug("unable to get storage unit", + sp.log.Debug("unable to get storage unit", "unit", dataRetriever.MetaHdrNonceHashDataUnit.String()) return err } errNotCritical = metaHdrNonceHashStorer.Remove(nonceToByteSlice) if errNotCritical != nil { - log.Debug("error not critical", + sp.log.Debug("error not critical", "error", errNotCritical.Error()) } - log.Trace("meta block has been restored successfully", + sp.log.Trace("meta block has been restored successfully", "round", metaBlock.Round, "nonce", metaBlock.Nonce, "hash", metaBlockHash) @@ -797,7 +807,7 @@ func (sp *shardProcessor) rollBackProcessedMiniBlocksInfo(headerHandler data.Hea for miniBlockHash := range mapMiniBlockHashes { miniBlockHeader := process.GetMiniBlockHeaderWithHash(headerHandler, []byte(miniBlockHash)) if miniBlockHeader == nil { - log.Warn("shardProcessor.rollBackProcessedMiniBlocksInfo: GetMiniBlockHeaderWithHash", + sp.log.Warn("shardProcessor.rollBackProcessedMiniBlocksInfo: GetMiniBlockHeaderWithHash", "mb hash", miniBlockHash, "error", process.ErrMissingMiniBlockHeader) continue @@ -820,7 +830,7 @@ func (sp *shardProcessor) rollBackProcessedMiniBlockInfo(miniBlockHeader data.Mi _, metaBlockHash := sp.processedMiniBlocksTracker.GetProcessedMiniBlockInfo(miniBlockHash) if metaBlockHash == nil { - log.Warn("shardProcessor.rollBackProcessedMiniBlockInfo: mini block was not found in ProcessedMiniBlockTracker component", + sp.log.Warn("shardProcessor.rollBackProcessedMiniBlockInfo: mini block was not found in ProcessedMiniBlockTracker component", "sender shard", miniBlockHeader.GetSenderShardID(), "receiver shard", miniBlockHeader.GetReceiverShardID(), "tx count", miniBlockHeader.GetTxCount(), @@ -860,7 +870,7 @@ func (sp *shardProcessor) CreateBlock( // placeholder for shardProcessor.CreateBlock script 2 if sp.epochStartTrigger.IsEpochStart() { - log.Debug("CreateBlock", "IsEpochStart", sp.epochStartTrigger.IsEpochStart(), + sp.log.Debug("CreateBlock", "IsEpochStart", sp.epochStartTrigger.IsEpochStart(), "epoch start meta header hash", 
sp.epochStartTrigger.EpochStartMetaHdrHash()) err = shardHdr.SetEpochStartMetaHash(sp.epochStartTrigger.EpochStartMetaHdrHash()) if err != nil { @@ -869,7 +879,7 @@ func (sp *shardProcessor) CreateBlock( epoch := sp.epochStartTrigger.MetaEpoch() if initialHdr.GetEpoch() != epoch { - log.Debug("shardProcessor.CreateBlock: epoch from header is not the same as epoch from epoch start trigger, overwriting", + sp.log.Debug("shardProcessor.CreateBlock: epoch from header is not the same as epoch from epoch start trigger, overwriting", "epoch from header", initialHdr.GetEpoch(), "epoch from epoch start trigger", epoch) err = shardHdr.SetEpoch(epoch) if err != nil { @@ -891,7 +901,7 @@ func (sp *shardProcessor) CreateBlock( } for _, miniBlock := range finalBody.MiniBlocks { - log.Trace("CreateBlock: miniblock", + sp.log.Trace("CreateBlock: miniblock", "sender shard", miniBlock.SenderShardID, "receiver shard", miniBlock.ReceiverShardID, "type", miniBlock.Type, @@ -906,7 +916,7 @@ func (sp *shardProcessor) CreateBlock( func (sp *shardProcessor) createBlockBody(shardHdr data.HeaderHandler, haveTime func() bool) (*block.Body, map[string]*processedMb.ProcessedMiniBlockInfo, error) { sp.blockSizeThrottler.ComputeCurrentMaxSize() - log.Debug("started creating block body", + sp.log.Debug("started creating block body", "epoch", shardHdr.GetEpoch(), "round", shardHdr.GetRound(), "nonce", shardHdr.GetNonce(), @@ -944,7 +954,7 @@ func (sp *shardProcessor) CommitBlock( sp.store.SetEpochForPutOperation(headerHandler.GetEpoch()) - log.Debug("started committing block", + sp.log.Debug("started committing block", "epoch", headerHandler.GetEpoch(), "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), @@ -1013,7 +1023,7 @@ func (sp *shardProcessor) CommitBlock( return err } - log.Info("shard block has been committed successfully", + sp.log.Info("shard block has been committed successfully", "epoch", header.GetEpoch(), "shard", header.GetShardID(), "round", header.GetRound(), @@ -1026,17 +1036,17 @@ func (sp *shardProcessor) CommitBlock( errNotCritical := sp.checkSentSignaturesAtCommitTime(headerHandler) if errNotCritical != nil { - log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) + sp.log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) } errNotCritical = sp.updateCrossShardInfo(processedMetaHdrs) if errNotCritical != nil { - log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) + sp.log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) } errNotCritical = sp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, selfNotarizedHeaders, selfNotarizedHeadersHashes) if errNotCritical != nil { - log.Debug("forkDetector.AddHeader", "error", errNotCritical.Error()) + sp.log.Debug("forkDetector.AddHeader", "error", errNotCritical.Error()) } currentHeader, currentHeaderHash := getLastSelfNotarizedHeaderByItself(sp.blockChain) @@ -1054,7 +1064,7 @@ func (sp *shardProcessor) CommitBlock( sp.updateState(selfNotarizedHeaders, header) highestFinalBlockNonce := sp.forkDetector.GetHighestFinalBlockNonce() - log.Debug("highest final shard block", + sp.log.Debug("highest final shard block", "shard", sp.shardCoordinator.SelfId(), "nonce", highestFinalBlockNonce, ) @@ -1132,7 +1142,7 @@ func (sp *shardProcessor) CommitBlock( errNotCritical = sp.removeTxsFromPools(header, body) if errNotCritical != nil { - log.Debug("removeTxsFromPools", "error", errNotCritical.Error()) + sp.log.Debug("removeTxsFromPools", "error", 
errNotCritical.Error()) } sp.cleanupPools(headerHandler) @@ -1149,7 +1159,7 @@ func (sp *shardProcessor) notifyFinalMetaHdrs(processedMetaHeaders []data.Header for _, metaHeader := range processedMetaHeaders { metaHeaderHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, metaHeader) if err != nil { - log.Debug("shardProcessor.notifyFinalMetaHdrs", "error", err.Error()) + sp.log.Debug("shardProcessor.notifyFinalMetaHdrs", "error", err.Error()) continue } @@ -1166,11 +1176,11 @@ func (sp *shardProcessor) displayPoolsInfo() { headersPool := sp.dataPool.Headers() miniBlocksPool := sp.dataPool.MiniBlocks() - log.Trace("pools info", + sp.log.Trace("pools info", "shard", sp.shardCoordinator.SelfId(), "num headers", headersPool.GetNumHeaders(sp.shardCoordinator.SelfId())) - log.Trace("pools info", + sp.log.Trace("pools info", "shard", core.MetachainShardId, "num headers", headersPool.GetNumHeaders(core.MetachainShardId)) @@ -1178,7 +1188,7 @@ func (sp *shardProcessor) displayPoolsInfo() { // (in this case this number is equal with: self shard + metachain) numShardsToKeepHeaders := 2 capacity := headersPool.MaxSize() * numShardsToKeepHeaders - log.Debug("pools info", + sp.log.Debug("pools info", "total headers", headersPool.Len(), "headers pool capacity", capacity, "total miniblocks", miniBlocksPool.Len(), @@ -1204,7 +1214,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade sp.store, ) if errNotCritical != nil { - log.Debug("could not get shard header from storage") + sp.log.Debug("could not get shard header from storage") return } if header.IsStartOfEpochBlock() { @@ -1213,7 +1223,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade headerHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, header) if err != nil { - log.Debug("updateState.CalculateHash", "error", err.Error()) + sp.log.Debug("updateState.CalculateHash", "error", err.Error()) return } @@ -1232,7 +1242,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade prevHeaderRootHashForPruning = prevHeaderAdditionalData.GetScheduledRootHash() } - log.Trace("updateState: prevHeader", + sp.log.Trace("updateState: prevHeader", "shard", prevHeader.GetShardID(), "epoch", prevHeader.GetEpoch(), "round", prevHeader.GetRound(), @@ -1242,7 +1252,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade "scheduled root hash after processing", scheduledPrevHeaderRootHash, ) - log.Trace("updateState: currHeader", + sp.log.Trace("updateState: currHeader", "shard", header.GetShardID(), "epoch", header.GetEpoch(), "round", header.GetRound(), @@ -1297,18 +1307,18 @@ func (sp *shardProcessor) snapShotEpochStartFromMeta(header data.ShardHeaderHand rootHash := epochStartShData.RootHash schRootHash := epochStartShData.GetScheduledRootHash() if schRootHash != nil { - log.Debug("using scheduled root hash for snapshotting", "schRootHash", schRootHash) + sp.log.Debug("using scheduled root hash for snapshotting", "schRootHash", schRootHash) rootHash = schRootHash } epoch := sp.epochStartTrigger.MetaEpoch() - log.Debug("shard trie snapshot from epoch start shard data", "rootHash", rootHash, "epoch", epoch) + sp.log.Debug("shard trie snapshot from epoch start shard data", "rootHash", rootHash, "epoch", epoch) accounts.SnapshotState(rootHash, epoch) sp.markSnapshotDoneInPeerAccounts() saveEpochStartEconomicsMetrics(sp.appStatusHandler, metaHdr) go func() { err := sp.commitTrieEpochRootHashIfNeeded(metaHdr, rootHash) if err != nil { - 
log.Warn("couldn't commit trie checkpoint", "epoch", header.GetEpoch(), "error", err) + sp.log.Warn("couldn't commit trie checkpoint", "epoch", header.GetEpoch(), "error", err) } }() } @@ -1318,18 +1328,18 @@ func (sp *shardProcessor) snapShotEpochStartFromMeta(header data.ShardHeaderHand func (sp *shardProcessor) markSnapshotDoneInPeerAccounts() { peerAccounts := sp.accountsDB[state.PeerAccountsState] if check.IfNil(peerAccounts) { - log.Warn("programming error: peerAccounts is nil while trying to take a snapshot on a shard node: this can cause OOM exceptions") + sp.log.Warn("programming error: peerAccounts is nil while trying to take a snapshot on a shard node: this can cause OOM exceptions") return } peerAccountsHandler, ok := peerAccounts.(peerAccountsDBHandler) if !ok { - log.Warn("programming error: peerAccounts is not of type peerAccountsDBHandler: this can cause OOM exceptions") + sp.log.Warn("programming error: peerAccounts is not of type peerAccountsDBHandler: this can cause OOM exceptions") return } peerAccountsHandler.MarkSnapshotDone() - log.Debug("shardProcessor.markSnapshotDoneInPeerAccounts completed") + sp.log.Debug("shardProcessor.markSnapshotDoneInPeerAccounts completed") } func (sp *shardProcessor) checkEpochCorrectnessCrossChain() error { @@ -1372,7 +1382,7 @@ func (sp *shardProcessor) checkEpochCorrectnessCrossChain() error { } if shouldRevertChain { - log.Debug("blockchain is wrongly constructed", + sp.log.Debug("blockchain is wrongly constructed", "reverted to nonce", nonce) sp.forkDetector.SetRollBackNonce(nonce) @@ -1390,7 +1400,7 @@ func (sp *shardProcessor) getLastSelfNotarizedHeaderByMetachain() (data.HeaderHa hash := sp.forkDetector.GetHighestFinalBlockHash() header, err := process.GetShardHeader(hash, sp.dataPool.Headers(), sp.marshalizer, sp.store) if err != nil { - log.Warn("getLastSelfNotarizedHeaderByMetachain.GetShardHeader", "error", err.Error(), "hash", hash, "nonce", sp.forkDetector.GetHighestFinalBlockNonce()) + sp.log.Warn("getLastSelfNotarizedHeaderByMetachain.GetShardHeader", "error", err.Error(), "hash", hash, "nonce", sp.forkDetector.GetHighestFinalBlockNonce()) return nil, nil } @@ -1415,7 +1425,7 @@ func (sp *shardProcessor) saveLastNotarizedHeader(shardId uint32, processedHdrs } sp.blockTracker.AddCrossNotarizedHeader(shardId, lastCrossNotarizedHeader, lastCrossNotarizedHeaderHash) - DisplayLastNotarized(sp.marshalizer, sp.hasher, lastCrossNotarizedHeader, shardId) + DisplayLastNotarized(sp.log, sp.marshalizer, sp.hasher, lastCrossNotarizedHeader, shardId) return nil } @@ -1523,7 +1533,7 @@ func (sp *shardProcessor) getHighestHdrForShardFromMetachain(shardId uint32, hdr if err != nil { go sp.requestHandler.RequestShardHeader(shardInfo.ShardID, shardInfo.HeaderHash) - log.Debug("requested missing shard header", + sp.log.Debug("requested missing shard header", "hash", shardInfo.HeaderHash, "shard", shardInfo.ShardID, ) @@ -1548,7 +1558,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromHeader(header data.He miniBlockHashes[i] = miniBlockHeaders[i].GetHash() } - log.Trace("cross mini blocks in body", + sp.log.Trace("cross mini blocks in body", "num miniblocks", len(miniBlockHashes), ) @@ -1598,7 +1608,7 @@ func (sp *shardProcessor) addProcessedCrossMiniBlocksFromHeader(headerHandler da miniBlockHeader := process.GetMiniBlockHeaderWithHash(headerHandler, miniBlockHash) if miniBlockHeader == nil { - log.Warn("shardProcessor.addProcessedCrossMiniBlocksFromHeader: GetMiniBlockHeaderWithHash", "mb hash", miniBlockHash, "error", 
process.ErrMissingMiniBlockHeader) + sp.log.Warn("shardProcessor.addProcessedCrossMiniBlocksFromHeader: GetMiniBlockHeaderWithHash", "mb hash", miniBlockHash, "error", process.ErrMissingMiniBlockHeader) continue } @@ -1635,7 +1645,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( return nil, process.ErrWrongTypeAssertion } - log.Trace("meta header", + sp.log.Trace("meta header", "nonce", metaBlock.Nonce, ) @@ -1655,7 +1665,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( delete(miniBlockHashes, key) } - log.Trace("cross mini blocks in meta header", + sp.log.Trace("cross mini blocks in meta header", "num miniblocks", len(crossMiniBlockHashes), ) @@ -1696,7 +1706,7 @@ func (sp *shardProcessor) updateCrossShardInfo(processedMetaHdrs []data.HeaderHa // metablock was processed and finalized marshalizedHeader, errMarshal := sp.marshalizer.Marshal(hdr) if errMarshal != nil { - log.Debug("updateCrossShardInfo.Marshal", "error", errMarshal.Error()) + sp.log.Debug("updateCrossShardInfo.Marshal", "error", errMarshal.Error()) continue } @@ -1719,7 +1729,7 @@ func (sp *shardProcessor) receivedMetaBlock(headerHandler data.HeaderHandler, me return } - log.Trace("received meta block from network", + sp.log.Trace("received meta block from network", "round", metaBlock.Round, "nonce", metaBlock.Nonce, "hash", metaBlockHash, @@ -1747,7 +1757,7 @@ func (sp *shardProcessor) receivedMetaBlock(headerHandler data.HeaderHandler, me hasProofForMetablock = sp.hasProofForMetablock(metaBlockHash, metaBlock) if sp.hdrsForCurrBlock.missingFinalityAttestingHdrs == 0 { - log.Debug("received all missing finality attesting meta headers") + sp.log.Debug("received all missing finality attesting meta headers") } } @@ -1893,18 +1903,18 @@ func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(header data.ShardHeaderHa // full verification through metachain header func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) (*createAndProcessMiniBlocksDestMeInfo, error) { - log.Debug("createAndProcessMiniBlocksDstMe has been started") + sp.log.Debug("createAndProcessMiniBlocksDstMe has been started") sw := core.NewStopWatch() sw.Start("ComputeLongestMetaChainFromLastNotarized") orderedMetaBlocks, orderedMetaBlocksHashes, err := sp.blockTracker.ComputeLongestMetaChainFromLastNotarized() sw.Stop("ComputeLongestMetaChainFromLastNotarized") - log.Debug("measurements", sw.GetMeasurements()...) + sp.log.Debug("measurements", sw.GetMeasurements()...) 
if err != nil { return nil, err } - log.Debug("metablocks ordered", + sp.log.Debug("metablocks ordered", "num metablocks", len(orderedMetaBlocks), ) @@ -1931,7 +1941,7 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(orderedMetaBlocks); i++ { if !createAndProcessInfo.haveTime() && !createAndProcessInfo.haveAdditionalTime() { - log.Debug("time is up after putting cross txs with destination to current shard", + sp.log.Debug("time is up after putting cross txs with destination to current shard", "scheduled mode", createAndProcessInfo.scheduledMode, "num txs added", createAndProcessInfo.numTxsAdded, ) @@ -1939,7 +1949,7 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) } if createAndProcessInfo.numHdrsAdded >= process.MaxMetaHeadersAllowedInOneShardBlock { - log.Debug("maximum meta headers allowed to be included in one shard block has been reached", + sp.log.Debug("maximum meta headers allowed to be included in one shard block has been reached", "scheduled mode", createAndProcessInfo.scheduledMode, "meta headers added", createAndProcessInfo.numHdrsAdded, ) @@ -1948,7 +1958,7 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) createAndProcessInfo.currMetaHdr = orderedMetaBlocks[i] if createAndProcessInfo.currMetaHdr.GetNonce() > lastMetaHdr.GetNonce()+1 { - log.Debug("skip searching", + sp.log.Debug("skip searching", "scheduled mode", createAndProcessInfo.scheduledMode, "last meta hdr nonce", lastMetaHdr.GetNonce(), "curr meta hdr nonce", createAndProcessInfo.currMetaHdr.GetNonce()) @@ -1958,7 +1968,7 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) hasProofForHdr := sp.proofsPool.HasProof(core.MetachainShardId, orderedMetaBlocksHashes[i]) shouldConsiderProofsForNotarization := sp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, orderedMetaBlocks[i].GetEpoch()) if !hasProofForHdr && shouldConsiderProofsForNotarization { - log.Trace("no proof for meta header", + sp.log.Trace("no proof for meta header", "hash", logger.DisplayByteSlice(orderedMetaBlocksHashes[i]), ) break @@ -1994,14 +2004,14 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) go sp.requestMetaHeadersIfNeeded(createAndProcessInfo.numHdrsAdded, lastMetaHdr) for _, miniBlock := range createAndProcessInfo.miniBlocks { - log.Debug("mini block info", + sp.log.Debug("mini block info", "type", miniBlock.Type, "sender shard", miniBlock.SenderShardID, "receiver shard", miniBlock.ReceiverShardID, "txs added", len(miniBlock.TxHashes)) } - log.Debug("createAndProcessMiniBlocksDstMe has been finished", + sp.log.Debug("createAndProcessMiniBlocksDstMe has been finished", "num txs added", createAndProcessInfo.numTxsAdded, "num hdrs added", createAndProcessInfo.numHdrsAdded) @@ -2042,7 +2052,7 @@ func (sp *shardProcessor) createMbsAndProcessCrossShardTransactionsDstMe( } if !hdrProcessFinished { - log.Debug("meta block cannot be fully processed", + sp.log.Debug("meta block cannot be fully processed", "scheduled mode", createAndProcessInfo.scheduledMode, "round", createAndProcessInfo.currMetaHdr.GetRound(), "nonce", createAndProcessInfo.currMetaHdr.GetNonce(), @@ -2063,7 +2073,7 @@ func (sp *shardProcessor) createMbsAndProcessCrossShardTransactionsDstMe( } func (sp *shardProcessor) requestMetaHeadersIfNeeded(hdrsAdded uint32, lastMetaHdr data.HeaderHandler) { - log.Debug("meta headers added", + 
sp.log.Debug("meta headers added", "num", hdrsAdded, "highest nonce", lastMetaHdr.GetNonce(), ) @@ -2095,7 +2105,7 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by // placeholder for shardProcessor.createMiniBlocks script if sp.accountsDB[state.UserAccountsState].JournalLen() != 0 { - log.Error("shardProcessor.createMiniBlocks", + sp.log.Error("shardProcessor.createMiniBlocks", "error", process.ErrAccountStateDirty, "stack", string(sp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) @@ -2104,35 +2114,35 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by miniBlocks = append(miniBlocks, interMBs...) } - log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) + sp.log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } if !haveTime() { - log.Debug("shardProcessor.createMiniBlocks", "error", process.ErrTimeIsOut) + sp.log.Debug("shardProcessor.createMiniBlocks", "error", process.ErrTimeIsOut) interMBs := sp.txCoordinator.CreatePostProcessMiniBlocks() if len(interMBs) > 0 { miniBlocks = append(miniBlocks, interMBs...) } - log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) + sp.log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } startTime := time.Now() createAndProcessMBsDestMeInfo, err := sp.createAndProcessMiniBlocksDstMe(haveTime) elapsedTime := time.Since(startTime) - log.Debug("elapsed time to create mbs to me", "time", elapsedTime) + sp.log.Debug("elapsed time to create mbs to me", "time", elapsedTime) if err != nil { - log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) + sp.log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) } if createAndProcessMBsDestMeInfo != nil { processedMiniBlocksDestMeInfo = createAndProcessMBsDestMeInfo.allProcessedMiniBlocksInfo if len(createAndProcessMBsDestMeInfo.miniBlocks) > 0 { miniBlocks = append(miniBlocks, createAndProcessMBsDestMeInfo.miniBlocks...) - log.Debug("processed miniblocks and txs with destination in self shard", + sp.log.Debug("processed miniblocks and txs with destination in self shard", "num miniblocks", len(createAndProcessMBsDestMeInfo.miniBlocks), "num txs", createAndProcessMBsDestMeInfo.numTxsAdded, "num meta headers", createAndProcessMBsDestMeInfo.numHdrsAdded) @@ -2140,7 +2150,7 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by } if sp.blockTracker.IsShardStuck(core.MetachainShardId) { - log.Warn("shardProcessor.createMiniBlocks", + sp.log.Warn("shardProcessor.createMiniBlocks", "error", process.ErrShardIsStuck, "shard", core.MetachainShardId) @@ -2149,14 +2159,14 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by miniBlocks = append(miniBlocks, interMBs...) 
} - log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) + sp.log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } startTime = time.Now() mbsFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe(haveTime, randomness) elapsedTime = time.Since(startTime) - log.Debug("elapsed time to create mbs from me", "time", elapsedTime) + sp.log.Debug("elapsed time to create mbs from me", "time", elapsedTime) if len(mbsFromMe) > 0 { miniBlocks = append(miniBlocks, mbsFromMe...) @@ -2166,12 +2176,12 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by numTxs += len(mb.TxHashes) } - log.Debug("processed miniblocks and txs from self shard", + sp.log.Debug("processed miniblocks and txs from self shard", "num miniblocks", len(mbsFromMe), "num txs", numTxs) } - log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) + sp.log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } @@ -2185,7 +2195,7 @@ func (sp *shardProcessor) applyBodyToHeader( sw.Start("applyBodyToHeader") defer func() { sw.Stop("applyBodyToHeader") - log.Debug("measurements", sw.GetMeasurements()...) + sp.log.Debug("measurements", sw.GetMeasurements()...) }() var err error @@ -2324,7 +2334,7 @@ func (sp *shardProcessor) MarshalizedDataToBroadcast( bodyForShard := block.Body{MiniBlocks: subsetBlockBody} buff, errMarshal := sp.marshalizer.Marshal(&bodyForShard) if errMarshal != nil { - log.Error("shardProcessor.MarshalizedDataToBroadcast.Marshal", "error", errMarshal.Error()) + sp.log.Error("shardProcessor.MarshalizedDataToBroadcast.Marshal", "error", errMarshal.Error()) continue } mrsData[shardId] = buff @@ -2431,7 +2441,7 @@ func (sp *shardProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { header, err := process.UnmarshalShardHeader(sp.marshalizer, dta) if err != nil { - log.Debug("DecodeBlockHeader.UnmarshalShardHeader", "error", err.Error()) + sp.log.Debug("DecodeBlockHeader.UnmarshalShardHeader", "error", err.Error()) return nil } From 95df853339a02a7a3dcaf3bb3c86cd18b923140f Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 10 Jan 2025 01:05:41 +0200 Subject: [PATCH 04/10] add custom log ids for integration tests sync --- integrationTests/testProcessorNode.go | 15 +++++++++++++-- integrationTests/testSyncNode.go | 17 +++++++++++++---- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index fe9d3acc0dc..5426019d7e6 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -2174,7 +2174,10 @@ func (tpn *TestProcessorNode) addMockVm(blockchainHook vmcommon.BlockchainHook) func (tpn *TestProcessorNode) initBlockProcessor() { var err error - id := hex.EncodeToString(tpn.NodesCoordinator.GetOwnPublicKey())[0:8] + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } log := logger.GetOrCreate(fmt.Sprintf("p/sync/%s", id)) @@ -2216,6 +2219,13 @@ func (tpn *TestProcessorNode) initBlockProcessor() { AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, } + id = hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } + + logger := logger.GetOrCreate(fmt.Sprintf("p/b/%s", id)) + argumentsBase := 
block.ArgBaseProcessor{ CoreComponents: coreComponents, DataComponents: dataComponents, @@ -2246,6 +2256,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, + Logger: logger, } if check.IfNil(tpn.EpochStartNotifier) { @@ -3105,7 +3116,7 @@ func (tpn *TestProcessorNode) initRequestedItemsHandler() { } func (tpn *TestProcessorNode) initBlockTracker() { - id := hex.EncodeToString(tpn.NodesCoordinator.GetOwnPublicKey()) + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) if len(id) > 8 { id = id[0:8] } diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index e81b1425a4e..35fa9b4302c 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -76,6 +76,14 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, } + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } + + log := logger.GetOrCreate(fmt.Sprintf("p/sync/%s", id)) + blockProcessorLogger := logger.GetOrCreate(fmt.Sprintf("p/b/%s", id)) + argumentsBase := block.ArgBaseProcessor{ CoreComponents: coreComponents, DataComponents: dataComponents, @@ -107,10 +115,11 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, + Logger: blockProcessorLogger, } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { - tpn.ForkDetector, _ = sync.NewMetaForkDetector(nil, tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, 0) + tpn.ForkDetector, _ = sync.NewMetaForkDetector(log, tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, 0) argumentsBase.ForkDetector = tpn.ForkDetector argumentsBase.TxCoordinator = &mock.TransactionCoordinatorMock{} arguments := block.ArgMetaProcessor{ @@ -131,7 +140,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) } else { - tpn.ForkDetector, _ = sync.NewShardForkDetector(nil, tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, 0) + tpn.ForkDetector, _ = sync.NewShardForkDetector(log, tpn.RoundHandler, tpn.BlockBlackListHandler, tpn.BlockTracker, 0) argumentsBase.ForkDetector = tpn.ForkDetector argumentsBase.BlockChainHook = tpn.BlockchainHook argumentsBase.TxCoordinator = tpn.TxCoordinator @@ -149,7 +158,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { } func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error) { - id := hex.EncodeToString(tpn.NodesCoordinator.GetOwnPublicKey()) + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) if len(id) > 8 { id = id[0:8] } @@ -204,7 +213,7 @@ func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error } func (tpn *TestProcessorNode) createMetaChainBootstrapper() (TestBootstrapper, error) { - id := hex.EncodeToString(tpn.NodesCoordinator.GetOwnPublicKey()) + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) if len(id) > 8 { id = id[0:8] } From 6bef0746db7eb613ac9c5e068d9deb70da58e846 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 13 Jan 2025 13:28:54 +0200 Subject: [PATCH 05/10] headers pool custom logging --- 
.../dataPool/headersCache/headersCache.go | 13 +- .../dataPool/headersCache/headersPool.go | 24 ++-- .../dataPool/headersCache/headersPool_test.go | 22 +++- dataRetriever/factory/dataPoolFactory.go | 2 +- .../requestHandlers/requestHandler.go | 119 +++++++++--------- .../requestHandlers/requestHandler_test.go | 75 +++++++++++ epochStart/bootstrap/process.go | 1 + epochStart/bootstrap/storageProcess.go | 1 + factory/processing/processComponents.go | 1 + .../sync/basicSync/basicSync_test.go | 2 +- integrationTests/testHeartbeatNode.go | 1 + integrationTests/testProcessorNode.go | 18 ++- testscommon/dataRetriever/poolFactory.go | 30 ++++- testscommon/dataRetriever/poolsHolderMock.go | 12 +- update/sync/syncHeaders_test.go | 4 +- 15 files changed, 246 insertions(+), 79 deletions(-) diff --git a/dataRetriever/dataPool/headersCache/headersCache.go b/dataRetriever/dataPool/headersCache/headersCache.go index 4b1ef31d8d9..f238ff27fc3 100644 --- a/dataRetriever/dataPool/headersCache/headersCache.go +++ b/dataRetriever/dataPool/headersCache/headersCache.go @@ -7,9 +7,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + logger "github.com/multiversx/mx-chain-logger-go" ) type headersCache struct { + log logger.Logger headersNonceCache map[uint32]listOfHeadersByNonces headersByHash headersByHashMap @@ -19,8 +21,13 @@ type headersCache struct { maxHeadersPerShard int } -func newHeadersCache(numMaxHeaderPerShard int, numHeadersToRemove int) *headersCache { +func newHeadersCache( + log logger.Logger, + numMaxHeaderPerShard int, + numHeadersToRemove int, +) *headersCache { return &headersCache{ + log: log, headersNonceCache: make(map[uint32]listOfHeadersByNonces), headersCounter: make(numHeadersByShard), headersByHash: make(headersByHashMap), @@ -105,7 +112,7 @@ func (cache *headersCache) removeHeaderByNonceAndShardId(headerNonce uint64, sha headersHashes := headers.getHashes() for _, hash := range headersHashes { - log.Trace("removeHeaderByNonceAndShardId", + cache.log.Trace("removeHeaderByNonceAndShardId", "shard", shardId, "nonce", headerNonce, "hash", hash, @@ -132,7 +139,7 @@ func (cache *headersCache) removeHeaderByHash(hash []byte) { return } - log.Trace("removeHeaderByHash", + cache.log.Trace("removeHeaderByHash", "shard", info.headerShardId, "nonce", info.headerNonce, "hash", hash, diff --git a/dataRetriever/dataPool/headersCache/headersPool.go b/dataRetriever/dataPool/headersCache/headersPool.go index 8b2e044b432..e09c1b4fd9c 100644 --- a/dataRetriever/dataPool/headersCache/headersPool.go +++ b/dataRetriever/dataPool/headersCache/headersPool.go @@ -2,20 +2,20 @@ package headersCache import ( "fmt" + "runtime/debug" "sync" "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-logger-go" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" ) -var log = logger.GetOrCreate("dataRetriever/headersCache") - var _ dataRetriever.HeadersPool = (*headersPool)(nil) type headersPool struct { + log logger.Logger cache *headersCache mutAddedDataHandlers sync.RWMutex mutHeadersPool sync.RWMutex @@ -23,15 +23,23 @@ type headersPool struct { } // NewHeadersPool will create a new items cacher -func NewHeadersPool(hdrsPoolConfig config.HeadersPoolConfig) (*headersPool, error) { +func NewHeadersPool( + log logger.Logger, + hdrsPoolConfig config.HeadersPoolConfig, +) (*headersPool, error) { err 
:= checkHeadersPoolConfig(hdrsPoolConfig) if err != nil { return nil, err } - headersCacheObject := newHeadersCache(hdrsPoolConfig.MaxHeadersPerShard, hdrsPoolConfig.NumElementsToRemoveOnEviction) + if log == nil { + log = logger.GetOrCreate("dataRetriever/headersCache") + } + + headersCacheObject := newHeadersCache(log, hdrsPoolConfig.MaxHeadersPerShard, hdrsPoolConfig.NumElementsToRemoveOnEviction) return &headersPool{ + log: log, cache: headersCacheObject, mutAddedDataHandlers: sync.RWMutex{}, mutHeadersPool: sync.RWMutex{}, @@ -65,8 +73,10 @@ func (pool *headersPool) AddHeader(headerHash []byte, header data.HeaderHandler) added := pool.cache.addHeader(headerHash, header) if added { - log.Debug("TOREMOVE - added header to pool", "cache ptr", fmt.Sprintf("%p", pool.cache), "header shard", header.GetShardID(), "header nonce", header.GetNonce()) + pool.log.Debug("TOREMOVE - added header to pool", "cache ptr", fmt.Sprintf("%p", pool.cache), "header shard", header.GetShardID(), "header nonce", header.GetNonce()) pool.callAddedDataHandlers(header, headerHash) + + debug.PrintStack() } } @@ -132,7 +142,7 @@ func (pool *headersPool) Clear() { // RegisterHandler registers a new handler to be called when a new data is added func (pool *headersPool) RegisterHandler(handler func(headerHandler data.HeaderHandler, headerHash []byte)) { if handler == nil { - log.Error("attempt to register a nil handler to a cacher object") + pool.log.Error("attempt to register a nil handler to a cacher object") return } diff --git a/dataRetriever/dataPool/headersCache/headersPool_test.go b/dataRetriever/dataPool/headersCache/headersPool_test.go index 2b2fb4cf3c6..d2df4a78884 100644 --- a/dataRetriever/dataPool/headersCache/headersPool_test.go +++ b/dataRetriever/dataPool/headersCache/headersPool_test.go @@ -37,7 +37,7 @@ func TestNewHeadersCacher(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - headersCacher, err := headersCache.NewHeadersPool(config.HeadersPoolConfig{ + headersCacher, err := headersCache.NewHeadersPool(nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 2, NumElementsToRemoveOnEviction: 1, }) @@ -50,7 +50,7 @@ func testNewHeadersCacher(cfg config.HeadersPoolConfig) func(t *testing.T) { return func(t *testing.T) { t.Parallel() - headersCacher, err := headersCache.NewHeadersPool(cfg) + headersCacher, err := headersCache.NewHeadersPool(nil, cfg) require.True(t, errors.Is(err, headersCache.ErrInvalidHeadersCacheParameter)) require.Nil(t, headersCacher) } @@ -60,6 +60,7 @@ func TestNewHeadersCacher_AddHeadersInCache(t *testing.T) { t.Parallel() headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}, @@ -99,6 +100,7 @@ func Test_RemoveHeaderByHash(t *testing.T) { t.Parallel() headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}, @@ -132,6 +134,7 @@ func TestHeadersCacher_AddHeadersInCacheAndRemoveByNonceAndShardId(t *testing.T) t.Parallel() headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}, @@ -166,6 +169,7 @@ func TestHeadersCacher_Eviction(t *testing.T) { numHeadersToGenerate := 1001 headers, headersHashes := createASliceOfHeaders(numHeadersToGenerate, 0) headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 900, NumElementsToRemoveOnEviction: 100}, @@ -192,6 +196,7 
@@ func TestHeadersCacher_ConcurrentRequests_NoEviction(t *testing.T) { headers, headersHashes := createASliceOfHeaders(numHeadersToGenerate, 0) headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: numHeadersToGenerate + 1, NumElementsToRemoveOnEviction: 10}, @@ -219,6 +224,7 @@ func TestHeadersCacher_ConcurrentRequests_WithEviction(t *testing.T) { headers, headersHashes := createASliceOfHeaders(numHeadersToGenerate, shardId) headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: cacheSize, NumElementsToRemoveOnEviction: 1}, @@ -263,6 +269,7 @@ func TestHeadersCacher_AddHeadersWithSameNonceShouldBeRemovedAtEviction(t *testi header1, header2, header3 := &block.Header{Nonce: 0}, &block.Header{Nonce: 0}, &block.Header{Nonce: 1} headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: cacheSize, NumElementsToRemoveOnEviction: 1}, @@ -287,6 +294,7 @@ func TestHeadersCacher_AddALotOfHeadersAndCheckEviction(t *testing.T) { headers, headersHash := createASliceOfHeaders(numHeaders, shardId) headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: cacheSize, NumElementsToRemoveOnEviction: 50}, @@ -314,6 +322,7 @@ func TestHeadersCacher_BigCacheALotOfHeaders(t *testing.T) { headers, headersHash := createASliceOfHeaders(numHeadersToGenerate, shardId) headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: cacheSize, NumElementsToRemoveOnEviction: 50}, @@ -368,6 +377,7 @@ func TestHeadersCacher_AddHeadersWithDifferentShardIdOnMultipleGoroutines(t *tes headersShard2, hashesShad2 := createASliceOfHeaders(numHdrsToGenerate, 2) numElemsToRemove := 25 headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: cacheSize, NumElementsToRemoveOnEviction: numElemsToRemove}, @@ -429,6 +439,7 @@ func TestHeadersCacher_TestEvictionRemoveCorrectHeader(t *testing.T) { headers, headersHashes := createASliceOfHeaders(numHeadersToGenerate, shardId) headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: cacheSize, NumElementsToRemoveOnEviction: 1}, @@ -467,6 +478,7 @@ func TestHeadersCacher_TestEvictionRemoveCorrectHeader2(t *testing.T) { headers, headersHashes := createASliceOfHeaders(numHeadersToGenerate, shardId) headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: cacheSize, NumElementsToRemoveOnEviction: 1}, @@ -512,6 +524,7 @@ func TestHeadersPool_AddHeadersMultipleShards(t *testing.T) { headersShardMeta, headersHashesShardMeta := createASliceOfHeaders(numHeadersToGenerate, shardMeta) headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: cacheSize, NumElementsToRemoveOnEviction: numElemsToRemove}, @@ -588,6 +601,7 @@ func TestHeadersPool_Nonces(t *testing.T) { headersShard0, headersHashesShard0 := createASliceOfHeaders(numHeadersToGenerate, shardId) headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: cacheSize, NumElementsToRemoveOnEviction: numHeadersToRemove}, @@ -616,6 +630,7 @@ func TestHeadersPool_RegisterHandler(t *testing.T) { wasCalled := false headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}, @@ -640,6 +655,7 @@ func TestHeadersPool_Clear(t *testing.T) 
{ t.Parallel() headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 10}, @@ -657,6 +673,7 @@ func TestHeadersPool_IsInterfaceNil(t *testing.T) { t.Parallel() headersCacher, _ := headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 0, }, @@ -664,6 +681,7 @@ func TestHeadersPool_IsInterfaceNil(t *testing.T) { require.True(t, headersCacher.IsInterfaceNil()) headersCacher, _ = headersCache.NewHeadersPool( + nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 10, diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index b9651bf3d6a..02aa061b59f 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -80,7 +80,7 @@ func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) return nil, fmt.Errorf("%w while creating the cache for the rewards", err) } - hdrPool, err := headersCache.NewHeadersPool(mainConfig.HeadersPoolConfig) + hdrPool, err := headersCache.NewHeadersPool(log, mainConfig.HeadersPoolConfig) if err != nil { return nil, fmt.Errorf("%w while creating the cache for the headers", err) } diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index 7166715dd3c..e3d6ad11f44 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -14,7 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process/factory" - "github.com/multiversx/mx-chain-logger-go" + logger "github.com/multiversx/mx-chain-logger-go" ) var _ epochStart.RequestHandler = (*resolverRequestHandler)(nil) @@ -35,6 +35,7 @@ const uniqueValidatorInfoSuffix = "vi" // TODO move the keys definitions that are whitelisted in core and use them in InterceptedData implementations, Identifiers() function type resolverRequestHandler struct { + log logger.Logger mutEpoch sync.RWMutex epoch uint32 shardID uint32 @@ -53,6 +54,7 @@ type resolverRequestHandler struct { // NewResolverRequestHandler creates a requestHandler interface implementation with request functions func NewResolverRequestHandler( + log logger.Logger, finder dataRetriever.RequestersFinder, requestedItemsHandler dataRetriever.RequestedItemsHandler, whiteList dataRetriever.WhiteListHandler, @@ -77,7 +79,12 @@ func NewResolverRequestHandler( return nil, fmt.Errorf("%w:request interval is smaller than a millisecond", dataRetriever.ErrRequestIntervalTooSmall) } + if log == nil { + log = logger.GetOrCreate("dataretriever/requesthandlers") + } + rrh := &resolverRequestHandler{ + log: log, requestersFinder: finder, requestedItemsHandler: requestedItemsHandler, epoch: uint32(0), // will be updated after creation of the request handler @@ -97,7 +104,7 @@ func NewResolverRequestHandler( func (rrh *resolverRequestHandler) SetEpoch(epoch uint32) { rrh.mutEpoch.Lock() if rrh.epoch != epoch { - log.Debug("resolverRequestHandler.SetEpoch", "old epoch", rrh.epoch, "new epoch", epoch) + rrh.log.Debug("resolverRequestHandler.SetEpoch", "old epoch", rrh.epoch, "new epoch", epoch) } rrh.epoch = epoch rrh.mutEpoch.Unlock() @@ -121,14 +128,14 @@ func (rrh *resolverRequestHandler) requestByHashes(destShardID uint32, hashes [] if len(unrequestedHashes) == 0 { return } - log.Debug("requesting transactions from network", + 
rrh.log.Debug("requesting transactions from network", "topic", topic, "shard", destShardID, "num txs", len(unrequestedHashes), ) requester, err := rrh.requestersFinder.CrossShardRequester(topic, destShardID) if err != nil { - log.Error("requestByHashes.CrossShardRequester", + rrh.log.Error("requestByHashes.CrossShardRequester", "error", err.Error(), "topic", topic, "shard", destShardID, @@ -138,12 +145,12 @@ func (rrh *resolverRequestHandler) requestByHashes(destShardID uint32, hashes [] txRequester, ok := requester.(HashSliceRequester) if !ok { - log.Warn("wrong assertion type when creating transaction requester") + rrh.log.Warn("wrong assertion type when creating transaction requester") return } for _, txHash := range hashes { - log.Trace("requestByHashes", "hash", txHash, "topic", topic, + rrh.log.Trace("requestByHashes", "hash", txHash, "topic", topic, "shard", destShardID, "num txs", len(unrequestedHashes), "stack", string(debug.Stack())) @@ -163,7 +170,7 @@ func (rrh *resolverRequestHandler) requestHashesWithDataSplit( dataSplit := &partitioning.DataSplit{} sliceBatches, err := dataSplit.SplitDataInChunks(unrequestedHashes, rrh.maxTxsToRequest) if err != nil { - log.Debug("requestByHashes.SplitDataInChunks", + rrh.log.Debug("requestByHashes.SplitDataInChunks", "error", err.Error(), "num txs", len(unrequestedHashes), "max txs to request", rrh.maxTxsToRequest, @@ -174,7 +181,7 @@ func (rrh *resolverRequestHandler) requestHashesWithDataSplit( for _, batch := range sliceBatches { err = requester.RequestDataFromHashArray(batch, epoch) if err != nil { - log.Debug("requestByHashes.RequestDataFromHashArray", + rrh.log.Debug("requestByHashes.RequestDataFromHashArray", "error", err.Error(), "epoch", epoch, "batch size", len(batch), @@ -190,7 +197,7 @@ func (rrh *resolverRequestHandler) requestReferenceWithChunkIndex( ) { err := requester.RequestDataFromReferenceAndChunk(reference, chunkIndex) if err != nil { - log.Debug("requestByHashes.requestReferenceWithChunkIndex", + rrh.log.Debug("requestByHashes.requestReferenceWithChunkIndex", "error", err.Error(), "reference", reference, "chunk index", chunkIndex, @@ -215,7 +222,7 @@ func (rrh *resolverRequestHandler) RequestMiniBlock(destShardID uint32, minibloc return } - log.Debug("requesting miniblock from network", + rrh.log.Debug("requesting miniblock from network", "topic", factory.MiniBlocksTopic, "shard", destShardID, "hash", miniblockHash, @@ -223,7 +230,7 @@ func (rrh *resolverRequestHandler) RequestMiniBlock(destShardID uint32, minibloc requester, err := rrh.requestersFinder.CrossShardRequester(factory.MiniBlocksTopic, destShardID) if err != nil { - log.Error("RequestMiniBlock.CrossShardRequester", + rrh.log.Error("RequestMiniBlock.CrossShardRequester", "error", err.Error(), "topic", factory.MiniBlocksTopic, "shard", destShardID, @@ -236,7 +243,7 @@ func (rrh *resolverRequestHandler) RequestMiniBlock(destShardID uint32, minibloc epoch := rrh.getEpoch() err = requester.RequestDataFromHash(miniblockHash, epoch) if err != nil { - log.Debug("RequestMiniBlock.RequestDataFromHash", + rrh.log.Debug("RequestMiniBlock.RequestDataFromHash", "error", err.Error(), "epoch", epoch, "hash", miniblockHash, @@ -254,7 +261,7 @@ func (rrh *resolverRequestHandler) RequestMiniBlocks(destShardID uint32, miniblo if len(unrequestedHashes) == 0 { return } - log.Debug("requesting miniblocks from network", + rrh.log.Debug("requesting miniblocks from network", "topic", factory.MiniBlocksTopic, "shard", destShardID, "num mbs", len(unrequestedHashes), @@ -262,7 +269,7 
@@ func (rrh *resolverRequestHandler) RequestMiniBlocks(destShardID uint32, miniblo requester, err := rrh.requestersFinder.CrossShardRequester(factory.MiniBlocksTopic, destShardID) if err != nil { - log.Error("RequestMiniBlocks.CrossShardRequester", + rrh.log.Error("RequestMiniBlocks.CrossShardRequester", "error", err.Error(), "topic", factory.MiniBlocksTopic, "shard", destShardID, @@ -272,7 +279,7 @@ func (rrh *resolverRequestHandler) RequestMiniBlocks(destShardID uint32, miniblo miniBlocksRequester, ok := requester.(HashSliceRequester) if !ok { - log.Warn("wrong assertion type when creating miniblocks requester") + rrh.log.Warn("wrong assertion type when creating miniblocks requester") return } @@ -281,7 +288,7 @@ func (rrh *resolverRequestHandler) RequestMiniBlocks(destShardID uint32, miniblo epoch := rrh.getEpoch() err = miniBlocksRequester.RequestDataFromHashArray(unrequestedHashes, epoch) if err != nil { - log.Debug("RequestMiniBlocks.RequestDataFromHashArray", + rrh.log.Debug("RequestMiniBlocks.RequestDataFromHashArray", "error", err.Error(), "epoch", epoch, "num mbs", len(unrequestedHashes), @@ -299,14 +306,14 @@ func (rrh *resolverRequestHandler) RequestShardHeader(shardID uint32, hash []byt return } - log.Debug("requesting shard header from network", + rrh.log.Debug("requesting shard header from network", "shard", shardID, "hash", hash, ) headerRequester, err := rrh.getShardHeaderRequester(shardID) if err != nil { - log.Error("RequestShardHeader.getShardHeaderRequester", + rrh.log.Error("RequestShardHeader.getShardHeaderRequester", "error", err.Error(), "shard", shardID, ) @@ -318,7 +325,7 @@ func (rrh *resolverRequestHandler) RequestShardHeader(shardID uint32, hash []byt epoch := rrh.getEpoch() err = headerRequester.RequestDataFromHash(hash, epoch) if err != nil { - log.Debug("RequestShardHeader.RequestDataFromHash", + rrh.log.Debug("RequestShardHeader.RequestDataFromHash", "error", err.Error(), "epoch", epoch, "hash", hash, @@ -335,13 +342,13 @@ func (rrh *resolverRequestHandler) RequestMetaHeader(hash []byte) { return } - log.Debug("requesting meta header from network", + rrh.log.Debug("requesting meta header from network", "hash", hash, ) requester, err := rrh.getMetaHeaderRequester() if err != nil { - log.Error("RequestMetaHeader.getMetaHeaderRequester", + rrh.log.Error("RequestMetaHeader.getMetaHeaderRequester", "error", err.Error(), "hash", hash, ) @@ -350,7 +357,7 @@ func (rrh *resolverRequestHandler) RequestMetaHeader(hash []byte) { headerRequester, ok := requester.(dataRetriever.Requester) if !ok { - log.Warn("wrong assertion type when creating header requester") + rrh.log.Warn("wrong assertion type when creating header requester") return } @@ -359,7 +366,7 @@ func (rrh *resolverRequestHandler) RequestMetaHeader(hash []byte) { epoch := rrh.getEpoch() err = headerRequester.RequestDataFromHash(hash, epoch) if err != nil { - log.Debug("RequestMetaHeader.RequestDataFromHash", + rrh.log.Debug("RequestMetaHeader.RequestDataFromHash", "error", err.Error(), "epoch", epoch, "hash", hash, @@ -378,14 +385,14 @@ func (rrh *resolverRequestHandler) RequestShardHeaderByNonce(shardID uint32, non return } - log.Debug("requesting shard header by nonce from network", + rrh.log.Debug("requesting shard header by nonce from network", "shard", shardID, "nonce", nonce, ) requester, err := rrh.getShardHeaderRequester(shardID) if err != nil { - log.Error("RequestShardHeaderByNonce.getShardHeaderRequester", + rrh.log.Error("RequestShardHeaderByNonce.getShardHeaderRequester", "error", err.Error(), 
"shard", shardID, ) @@ -394,7 +401,7 @@ func (rrh *resolverRequestHandler) RequestShardHeaderByNonce(shardID uint32, non headerRequester, ok := requester.(NonceRequester) if !ok { - log.Warn("wrong assertion type when creating header requester") + rrh.log.Warn("wrong assertion type when creating header requester") return } @@ -403,7 +410,7 @@ func (rrh *resolverRequestHandler) RequestShardHeaderByNonce(shardID uint32, non epoch := rrh.getEpoch() err = headerRequester.RequestDataFromNonce(nonce, epoch) if err != nil { - log.Debug("RequestShardHeaderByNonce.RequestDataFromNonce", + rrh.log.Debug("RequestShardHeaderByNonce.RequestDataFromNonce", "error", err.Error(), "epoch", epoch, "nonce", nonce, @@ -442,7 +449,7 @@ func (rrh *resolverRequestHandler) RequestTrieNodes(destShardID uint32, hashes [ return } - log.Trace("requesting trie nodes from network", + rrh.log.Trace("requesting trie nodes from network", "topic", topic, "shard", destShardID, "num nodes", len(rrh.trieHashesAccumulator), @@ -452,7 +459,7 @@ func (rrh *resolverRequestHandler) RequestTrieNodes(destShardID uint32, hashes [ requester, err := rrh.requestersFinder.MetaCrossShardRequester(topic, destShardID) if err != nil { - log.Error("requestersFinder.MetaCrossShardRequester", + rrh.log.Error("requestersFinder.MetaCrossShardRequester", "error", err.Error(), "topic", topic, "shard", destShardID, @@ -462,7 +469,7 @@ func (rrh *resolverRequestHandler) RequestTrieNodes(destShardID uint32, hashes [ trieRequester, ok := requester.(HashSliceRequester) if !ok { - log.Warn("wrong assertion type when creating a trie nodes requester") + rrh.log.Warn("wrong assertion type when creating a trie nodes requester") return } @@ -494,7 +501,7 @@ func (rrh *resolverRequestHandler) RequestTrieNode(requestHash []byte, topic str rrh.whiteList.Add(unrequestedHashes) rrh.whiteList.Add([][]byte{requestHash}) - log.Trace("requesting trie node from network", + rrh.log.Trace("requesting trie node from network", "topic", topic, "hash", requestHash, "chunk index", chunkIndex, @@ -502,7 +509,7 @@ func (rrh *resolverRequestHandler) RequestTrieNode(requestHash []byte, topic str requester, err := rrh.requestersFinder.MetaChainRequester(topic) if err != nil { - log.Error("requestersFinder.MetaChainRequester", + rrh.log.Error("requestersFinder.MetaChainRequester", "error", err.Error(), "topic", topic, ) @@ -511,7 +518,7 @@ func (rrh *resolverRequestHandler) RequestTrieNode(requestHash []byte, topic str trieRequester, ok := requester.(ChunkRequester) if !ok { - log.Warn("wrong assertion type when creating a trie chunk requester") + rrh.log.Warn("wrong assertion type when creating a trie chunk requester") return } @@ -521,12 +528,12 @@ func (rrh *resolverRequestHandler) RequestTrieNode(requestHash []byte, topic str } func (rrh *resolverRequestHandler) logTrieHashesFromAccumulator() { - if log.GetLevel() != logger.LogTrace { + if rrh.log.GetLevel() != logger.LogTrace { return } for txHash := range rrh.trieHashesAccumulator { - log.Trace("logTrieHashesFromAccumulator", "hash", []byte(txHash)) + rrh.log.Trace("logTrieHashesFromAccumulator", "hash", []byte(txHash)) } } @@ -537,13 +544,13 @@ func (rrh *resolverRequestHandler) RequestMetaHeaderByNonce(nonce uint64) { return } - log.Debug("requesting meta header by nonce from network", + rrh.log.Debug("requesting meta header by nonce from network", "nonce", nonce, ) headerRequester, err := rrh.getMetaHeaderRequester() if err != nil { - log.Error("RequestMetaHeaderByNonce.getMetaHeaderRequester", + 
rrh.log.Error("RequestMetaHeaderByNonce.getMetaHeaderRequester", "error", err.Error(), ) return @@ -554,7 +561,7 @@ func (rrh *resolverRequestHandler) RequestMetaHeaderByNonce(nonce uint64) { epoch := rrh.getEpoch() err = headerRequester.RequestDataFromNonce(nonce, epoch) if err != nil { - log.Debug("RequestMetaHeaderByNonce.RequestDataFromNonce", + rrh.log.Debug("RequestMetaHeaderByNonce.RequestDataFromNonce", "error", err.Error(), "epoch", epoch, "nonce", nonce, @@ -571,7 +578,7 @@ func (rrh *resolverRequestHandler) RequestValidatorInfo(hash []byte) { return } - log.Debug("requesting validator info messages from network", + rrh.log.Debug("requesting validator info messages from network", "topic", common.ValidatorInfoTopic, "hash", hash, "epoch", rrh.epoch, @@ -579,7 +586,7 @@ func (rrh *resolverRequestHandler) RequestValidatorInfo(hash []byte) { requester, err := rrh.requestersFinder.MetaChainRequester(common.ValidatorInfoTopic) if err != nil { - log.Error("RequestValidatorInfo.MetaChainRequester", + rrh.log.Error("RequestValidatorInfo.MetaChainRequester", "error", err.Error(), "topic", common.ValidatorInfoTopic, "hash", hash, @@ -592,7 +599,7 @@ func (rrh *resolverRequestHandler) RequestValidatorInfo(hash []byte) { err = requester.RequestDataFromHash(hash, rrh.epoch) if err != nil { - log.Debug("RequestValidatorInfo.RequestDataFromHash", + rrh.log.Debug("RequestValidatorInfo.RequestDataFromHash", "error", err.Error(), "topic", common.ValidatorInfoTopic, "hash", hash, @@ -611,7 +618,7 @@ func (rrh *resolverRequestHandler) RequestValidatorsInfo(hashes [][]byte) { return } - log.Debug("requesting validator info messages from network", + rrh.log.Debug("requesting validator info messages from network", "topic", common.ValidatorInfoTopic, "num hashes", len(unrequestedHashes), "epoch", rrh.epoch, @@ -619,7 +626,7 @@ func (rrh *resolverRequestHandler) RequestValidatorsInfo(hashes [][]byte) { requester, err := rrh.requestersFinder.MetaChainRequester(common.ValidatorInfoTopic) if err != nil { - log.Error("RequestValidatorInfo.MetaChainRequester", + rrh.log.Error("RequestValidatorInfo.MetaChainRequester", "error", err.Error(), "topic", common.ValidatorInfoTopic, "num hashes", len(unrequestedHashes), @@ -630,7 +637,7 @@ func (rrh *resolverRequestHandler) RequestValidatorsInfo(hashes [][]byte) { validatorInfoRequester, ok := requester.(HashSliceRequester) if !ok { - log.Warn("wrong assertion type when creating a validator info requester") + rrh.log.Warn("wrong assertion type when creating a validator info requester") return } @@ -638,7 +645,7 @@ func (rrh *resolverRequestHandler) RequestValidatorsInfo(hashes [][]byte) { err = validatorInfoRequester.RequestDataFromHashArray(unrequestedHashes, rrh.epoch) if err != nil { - log.Debug("RequestValidatorInfo.RequestDataFromHash", + rrh.log.Debug("RequestValidatorInfo.RequestDataFromHash", "error", err.Error(), "topic", common.ValidatorInfoTopic, "num hashes", len(unrequestedHashes), @@ -654,7 +661,7 @@ func (rrh *resolverRequestHandler) testIfRequestIsNeeded(key []byte, suffix stri rrh.sweepIfNeeded() if rrh.requestedItemsHandler.Has(string(key) + suffix) { - log.Trace("item already requested", + rrh.log.Trace("item already requested", "key", key) return false } @@ -666,7 +673,7 @@ func (rrh *resolverRequestHandler) addRequestedItems(keys [][]byte, suffix strin for _, key := range keys { err := rrh.requestedItemsHandler.Add(string(key) + suffix) if err != nil { - log.Trace("addRequestedItems", + rrh.log.Trace("addRequestedItems", "error", err.Error(), 
"key", key) continue @@ -695,7 +702,7 @@ func (rrh *resolverRequestHandler) getShardHeaderRequester(shardID uint32) (data err = fmt.Errorf("%w, topic: %s, current shard ID: %d, cross shard ID: %d", err, factory.ShardBlocksTopic, rrh.shardID, crossShardID) - log.Warn("available requesters in container", + rrh.log.Warn("available requesters in container", "requesters", rrh.requestersFinder.RequesterKeys(), ) return nil, err @@ -730,7 +737,7 @@ func (rrh *resolverRequestHandler) RequestStartOfEpochMetaBlock(epoch uint32) { } baseTopic := factory.MetachainBlocksTopic - log.Debug("requesting header by epoch", + rrh.log.Debug("requesting header by epoch", "topic", baseTopic, "epoch", epoch, "hash", epochStartIdentifier, @@ -738,7 +745,7 @@ func (rrh *resolverRequestHandler) RequestStartOfEpochMetaBlock(epoch uint32) { requester, err := rrh.requestersFinder.MetaChainRequester(baseTopic) if err != nil { - log.Error("RequestStartOfEpochMetaBlock.MetaChainRequester", + rrh.log.Error("RequestStartOfEpochMetaBlock.MetaChainRequester", "error", err.Error(), "topic", baseTopic, ) @@ -747,7 +754,7 @@ func (rrh *resolverRequestHandler) RequestStartOfEpochMetaBlock(epoch uint32) { headerRequester, ok := requester.(EpochRequester) if !ok { - log.Warn("wrong assertion type when creating header requester") + rrh.log.Warn("wrong assertion type when creating header requester") return } @@ -755,7 +762,7 @@ func (rrh *resolverRequestHandler) RequestStartOfEpochMetaBlock(epoch uint32) { err = headerRequester.RequestDataFromEpoch(epochStartIdentifier) if err != nil { - log.Debug("RequestStartOfEpochMetaBlock.RequestDataFromEpoch", + rrh.log.Debug("RequestStartOfEpochMetaBlock.RequestDataFromEpoch", "error", err.Error(), "epochStartIdentifier", epochStartIdentifier, ) @@ -827,7 +834,7 @@ func (rrh *resolverRequestHandler) GetNumPeersToQuery(key string) (int, int, err // RequestPeerAuthenticationsByHashes asks for peer authentication messages from specific peers hashes func (rrh *resolverRequestHandler) RequestPeerAuthenticationsByHashes(destShardID uint32, hashes [][]byte) { - log.Debug("requesting peer authentication messages from network", + rrh.log.Debug("requesting peer authentication messages from network", "topic", common.PeerAuthenticationTopic, "shard", destShardID, "num hashes", len(hashes), @@ -836,7 +843,7 @@ func (rrh *resolverRequestHandler) RequestPeerAuthenticationsByHashes(destShardI requester, err := rrh.requestersFinder.MetaChainRequester(common.PeerAuthenticationTopic) if err != nil { - log.Error("RequestPeerAuthenticationsByHashes.MetaChainRequester", + rrh.log.Error("RequestPeerAuthenticationsByHashes.MetaChainRequester", "error", err.Error(), "topic", common.PeerAuthenticationTopic, "shard", destShardID, @@ -847,13 +854,13 @@ func (rrh *resolverRequestHandler) RequestPeerAuthenticationsByHashes(destShardI peerAuthRequester, ok := requester.(HashSliceRequester) if !ok { - log.Warn("wrong assertion type when creating peer authentication requester") + rrh.log.Warn("wrong assertion type when creating peer authentication requester") return } err = peerAuthRequester.RequestDataFromHashArray(hashes, rrh.epoch) if err != nil { - log.Debug("RequestPeerAuthenticationsByHashes.RequestDataFromHashArray", + rrh.log.Debug("RequestPeerAuthenticationsByHashes.RequestDataFromHashArray", "error", err.Error(), "topic", common.PeerAuthenticationTopic, "shard", destShardID, diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index 
48d27f46217..3632b943483 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -44,6 +44,7 @@ func TestNewResolverRequestHandler(t *testing.T) { t.Parallel() rrh, err := NewResolverRequestHandler( + nil, nil, &mock.RequestedItemsHandlerStub{}, &mock.WhiteListHandlerStub{}, @@ -59,6 +60,7 @@ func TestNewResolverRequestHandler(t *testing.T) { t.Parallel() rrh, err := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{}, nil, &mock.WhiteListHandlerStub{}, @@ -74,6 +76,7 @@ func TestNewResolverRequestHandler(t *testing.T) { t.Parallel() rrh, err := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{}, &mock.RequestedItemsHandlerStub{}, nil, @@ -89,6 +92,7 @@ func TestNewResolverRequestHandler(t *testing.T) { t.Parallel() rrh, err := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{}, &mock.RequestedItemsHandlerStub{}, &mock.WhiteListHandlerStub{}, @@ -104,6 +108,7 @@ func TestNewResolverRequestHandler(t *testing.T) { t.Parallel() rrh, err := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{}, &mock.RequestedItemsHandlerStub{}, &mock.WhiteListHandlerStub{}, @@ -119,6 +124,7 @@ func TestNewResolverRequestHandler(t *testing.T) { t.Parallel() rrh, err := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{}, &mock.RequestedItemsHandlerStub{}, &mock.WhiteListHandlerStub{}, @@ -146,6 +152,7 @@ func TestResolverRequestHandler_RequestTransaction(t *testing.T) { }() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { require.Fail(t, "should have not been called") @@ -172,6 +179,7 @@ func TestResolverRequestHandler_RequestTransaction(t *testing.T) { }() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return nil, errExpected @@ -199,6 +207,7 @@ func TestResolverRequestHandler_RequestTransaction(t *testing.T) { wrongTxRequester := &dataRetrieverMocks.NonceRequesterStub{} rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return wrongTxRequester, nil @@ -225,6 +234,7 @@ func TestResolverRequestHandler_RequestTransaction(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return txRequester, nil @@ -261,6 +271,7 @@ func TestResolverRequestHandler_RequestTransaction(t *testing.T) { timeSpan := time.Second timeCache := cache.NewTimeCache(timeSpan) rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return txRequester, nil @@ -309,6 +320,7 @@ func TestResolverRequestHandler_RequestTransaction(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return txRequester, nil @@ -347,6 +359,7 @@ func 
TestResolverRequestHandler_RequestMiniBlock(t *testing.T) { }() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { require.Fail(t, "should not have been called") @@ -377,6 +390,7 @@ func TestResolverRequestHandler_RequestMiniBlock(t *testing.T) { }() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return nil, errExpected @@ -408,6 +422,7 @@ func TestResolverRequestHandler_RequestMiniBlock(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return mbRequester, nil @@ -434,6 +449,7 @@ func TestResolverRequestHandler_RequestMiniBlock(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return mbRequester, nil @@ -462,6 +478,7 @@ func TestResolverRequestHandler_RequestMiniBlock(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return mbRequester, nil @@ -487,6 +504,7 @@ func TestResolverRequestHandler_RequestShardHeader(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, createRequestersFinderStubThatShouldNotBeCalled(t), &mock.RequestedItemsHandlerStub{ HasCalled: func(key string) bool { @@ -505,6 +523,7 @@ func TestResolverRequestHandler_RequestShardHeader(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, createRequestersFinderStubThatShouldNotBeCalled(t), &mock.RequestedItemsHandlerStub{}, &mock.WhiteListHandlerStub{}, @@ -525,6 +544,7 @@ func TestResolverRequestHandler_RequestShardHeader(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return mbRequester, nil @@ -551,6 +571,7 @@ func TestResolverRequestHandler_RequestShardHeader(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return mbRequester, nil @@ -576,6 +597,7 @@ func TestResolverRequestHandler_RequestMetaHeader(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{}, &mock.RequestedItemsHandlerStub{ HasCalled: func(key string) bool { @@ -601,6 +623,7 @@ func TestResolverRequestHandler_RequestMetaHeader(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return req, nil @@ -619,6 +642,7 @@ func TestResolverRequestHandler_RequestMetaHeader(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return 
nil, errExpected @@ -643,6 +667,7 @@ func TestResolverRequestHandler_RequestMetaHeader(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return req, nil @@ -669,6 +694,7 @@ func TestResolverRequestHandler_RequestMetaHeader(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return mbRequester, nil @@ -695,6 +721,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonce(t *testing.T) { called := false rrh, _ := NewResolverRequestHandler( + nil, createRequestersFinderStubThatShouldNotBeCalled(t), &mock.RequestedItemsHandlerStub{ HasCalled: func(key string) bool { @@ -716,6 +743,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonce(t *testing.T) { called := false rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, err error) { called = true @@ -743,6 +771,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonce(t *testing.T) { }() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { return nil, errExpected @@ -770,6 +799,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonce(t *testing.T) { hdrRequester := &dataRetrieverMocks.RequesterStub{} rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { return hdrRequester, nil @@ -801,6 +831,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonce(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { return hdrRequester, nil @@ -827,6 +858,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonce(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { return hdrRequester, nil @@ -852,6 +884,7 @@ func TestResolverRequestHandler_RequestMetaHeaderByNonce(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, createRequestersFinderStubThatShouldNotBeCalled(t), &mock.RequestedItemsHandlerStub{ HasCalled: func(key string) bool { @@ -870,6 +903,7 @@ func TestResolverRequestHandler_RequestMetaHeaderByNonce(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return nil, errExpected @@ -898,6 +932,7 @@ func TestResolverRequestHandler_RequestMetaHeaderByNonce(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return hdrRequester, nil @@ -924,6 +959,7 @@ func TestResolverRequestHandler_RequestMetaHeaderByNonce(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, 
&dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return hdrRequester, nil @@ -953,6 +989,7 @@ func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardRequesterSho }() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return nil, errExpected @@ -981,6 +1018,7 @@ func TestResolverRequestHandler_RequestScrWrongResolverShouldNotPanic(t *testing wrongTxRequester := &dataRetrieverMocks.NonceRequesterStub{} rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return wrongTxRequester, nil @@ -1008,6 +1046,7 @@ func TestResolverRequestHandler_RequestScrShouldRequestScr(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return txRequester, nil @@ -1050,6 +1089,7 @@ func TestResolverRequestHandler_RequestScrErrorsOnRequestShouldNotPanic(t *testi } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return txRequester, nil @@ -1085,6 +1125,7 @@ func TestResolverRequestHandler_RequestRewardShouldRequestReward(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { return txRequester, nil @@ -1123,6 +1164,7 @@ func TestRequestTrieNodes(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaCrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { return requesterMock, nil @@ -1150,6 +1192,7 @@ func TestRequestTrieNodes(t *testing.T) { localError := errors.New("test error") called := false rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaCrossShardRequesterCalled: func(baseTopic string, shId uint32) (requester dataRetriever.Requester, err error) { called = true @@ -1170,6 +1213,7 @@ func TestRequestTrieNodes(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaCrossShardRequesterCalled: func(baseTopic string, shId uint32) (requester dataRetriever.Requester, err error) { require.Fail(t, "should have not been called") @@ -1194,6 +1238,7 @@ func TestResolverRequestHandler_RequestStartOfEpochMetaBlock(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { require.Fail(t, "should not have been called") @@ -1218,6 +1263,7 @@ func TestResolverRequestHandler_RequestStartOfEpochMetaBlock(t *testing.T) { called := false rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { called = true @@ -1239,6 +1285,7 @@ func 
TestResolverRequestHandler_RequestStartOfEpochMetaBlock(t *testing.T) { called := false rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { called = true @@ -1267,6 +1314,7 @@ func TestResolverRequestHandler_RequestStartOfEpochMetaBlock(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { return requesterMock, nil @@ -1293,6 +1341,7 @@ func TestResolverRequestHandler_RequestStartOfEpochMetaBlock(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { return requesterMock, nil @@ -1326,6 +1375,7 @@ func TestResolverRequestHandler_RequestTrieNodeRequestFails(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (dataRetriever.Requester, error) { return requesterMock, nil @@ -1358,6 +1408,7 @@ func TestResolverRequestHandler_RequestTrieNodeShouldWork(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (dataRetriever.Requester, error) { return requesterMock, nil @@ -1384,6 +1435,7 @@ func TestResolverRequestHandler_RequestTrieNodeNilResolver(t *testing.T) { localError := errors.New("test error") called := false rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { called = true @@ -1406,6 +1458,7 @@ func TestResolverRequestHandler_RequestTrieNodeNotAValidResolver(t *testing.T) { called := false rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { called = true @@ -1439,6 +1492,7 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) }, } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (dataRetriever.Requester, error) { assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) @@ -1460,6 +1514,7 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) req := &dataRetrieverMocks.NonceRequesterStub{} rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (dataRetriever.Requester, error) { assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) @@ -1487,6 +1542,7 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) }, } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (dataRetriever.Requester, error) { assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) @@ -1527,6 +1583,7 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) }, } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (dataRetriever.Requester, error) { assert.Equal(t, common.PeerAuthenticationTopic, 
baseTopic) @@ -1553,6 +1610,7 @@ func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { providedHash := []byte("provided hash") rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { require.Fail(t, "should not have been called") @@ -1585,6 +1643,7 @@ func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return res, errExpected @@ -1611,6 +1670,7 @@ func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return res, nil @@ -1644,6 +1704,7 @@ func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { assert.Equal(t, common.ValidatorInfoTopic, baseTopic) @@ -1669,6 +1730,7 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { require.Fail(t, "should not have been called") @@ -1697,6 +1759,7 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return res, errExpected @@ -1723,6 +1786,7 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return res, nil @@ -1749,6 +1813,7 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { mbRequester := &dataRetrieverMocks.NonceRequesterStub{} // uncastable to HashSliceRequester wasCalled := false rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { return mbRequester, nil @@ -1782,6 +1847,7 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { assert.Equal(t, common.ValidatorInfoTopic, baseTopic) @@ -1807,6 +1873,7 @@ func TestResolverRequestHandler_RequestMiniblocks(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { require.Fail(t, "should have not been called") @@ -1826,6 +1893,7 @@ func TestResolverRequestHandler_RequestMiniblocks(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) 
(dataRetriever.Requester, error) { return nil, errExpected @@ -1845,6 +1913,7 @@ func TestResolverRequestHandler_RequestMiniblocks(t *testing.T) { nonceRequester := &dataRetrieverMocks.NonceRequesterStub{} // uncastable to HashSliceRequester rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { return nonceRequester, nil @@ -1872,6 +1941,7 @@ func TestResolverRequestHandler_RequestMiniblocks(t *testing.T) { }, } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { return mbRequester, nil @@ -1890,6 +1960,7 @@ func TestResolverRequestHandler_RequestMiniblocks(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { return &dataRetrieverMocks.HashSliceRequesterStub{}, nil @@ -1910,6 +1981,7 @@ func TestResolverRequestHandler_RequestInterval(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{}, &mock.RequestedItemsHandlerStub{}, &mock.WhiteListHandlerStub{}, @@ -1927,6 +1999,7 @@ func TestResolverRequestHandler_NumPeersToQuery(t *testing.T) { t.Parallel() rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ GetCalled: func(key string) (dataRetriever.Requester, error) { return nil, errExpected @@ -1959,6 +2032,7 @@ func TestResolverRequestHandler_NumPeersToQuery(t *testing.T) { } rrh, _ := NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{ GetCalled: func(key string) (dataRetriever.Requester, error) { return req, nil @@ -1988,6 +2062,7 @@ func TestResolverRequestHandler_IsInterfaceNil(t *testing.T) { require.True(t, rrh.IsInterfaceNil()) rrh, _ = NewResolverRequestHandler( + nil, &dataRetrieverMocks.RequestersFinderStub{}, &mock.RequestedItemsHandlerStub{}, &mock.WhiteListHandlerStub{}, diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 91d40db1a8d..a56e1bc160a 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -1311,6 +1311,7 @@ func (e *epochStartBootstrap) createRequestHandler() error { requestedItemsHandler := cache.NewTimeCache(timeBetweenRequests) e.requestHandler, err = requestHandlers.NewResolverRequestHandler( + nil, finder, requestedItemsHandler, e.whiteListHandler, diff --git a/epochStart/bootstrap/storageProcess.go b/epochStart/bootstrap/storageProcess.go index 0ec16f6548d..cf38c04bbc5 100644 --- a/epochStart/bootstrap/storageProcess.go +++ b/epochStart/bootstrap/storageProcess.go @@ -212,6 +212,7 @@ func (sesb *storageEpochStartBootstrap) createStorageRequestHandler() error { requestedItemsHandler := cache.NewTimeCache(timeBetweenRequests) sesb.requestHandler, err = requestHandlers.NewResolverRequestHandler( + nil, finder, requestedItemsHandler, sesb.whiteListHandler, diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 82d4c73f959..cdbf089f409 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -344,6 +344,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { } requestHandler, err := requestHandlers.NewResolverRequestHandler( + nil, 
requestersFinder, pcf.requestedItemsHandler, pcf.whiteListHandler, diff --git a/integrationTests/sync/basicSync/basicSync_test.go b/integrationTests/sync/basicSync/basicSync_test.go index 73b8167d64f..bd3c5cab5ac 100644 --- a/integrationTests/sync/basicSync/basicSync_test.go +++ b/integrationTests/sync/basicSync/basicSync_test.go @@ -263,7 +263,7 @@ func TestSyncWorksInShard_EmptyBlocksNoForks_With_EquivalentProofs(t *testing.T) integrationTests.UpdateRound(nodes, round) nonce++ - numRoundsToTest := 5 + numRoundsToTest := 10 for i := 0; i < numRoundsToTest; i++ { integrationTests.ProposeBlock(nodes, leaders, round, nonce) diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index 0897cc8ab54..1850dd04ec5 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -615,6 +615,7 @@ func (thn *TestHeartbeatNode) createShardRequestersContainer(args requesterscont func (thn *TestHeartbeatNode) createRequestHandler() { thn.RequestersFinder, _ = containers.NewRequestersFinder(thn.RequestersContainer, thn.ShardCoordinator) thn.RequestHandler, _ = requestHandlers.NewResolverRequestHandler( + nil, thn.RequestersFinder, thn.RequestedItemsHandler, thn.WhiteListHandler, diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 5426019d7e6..fd892648276 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -1092,7 +1092,15 @@ func (tpn *TestProcessorNode) InitializeProcessors(gasMap map[string]map[string] func (tpn *TestProcessorNode) initDataPools() { tpn.ProofsPool = proofscache.NewProofsPool() - tpn.DataPool = dataRetrieverMock.CreatePoolsHolderWithProofsPool(1, tpn.ShardCoordinator.SelfId(), tpn.ProofsPool) + + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } + + log := logger.GetOrCreate(fmt.Sprintf("dtr/hc/%s", id)) + + tpn.DataPool = dataRetrieverMock.CreatePoolsHolderWithProofsPool(log, 1, tpn.ShardCoordinator.SelfId(), tpn.ProofsPool) cacherCfg := storageunit.CacheConfig{Capacity: 10000, Type: storageunit.LRUCache, Shards: 1} suCache, _ := storageunit.NewCache(cacherCfg) tpn.WhiteListHandler, _ = interceptors.NewWhiteListDataVerifier(suCache) @@ -1540,8 +1548,16 @@ func (tpn *TestProcessorNode) initRequesters() { tpn.createShardRequestersContainer(requestersContainerFactoryArgs) } + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } + + log := logger.GetOrCreate(fmt.Sprintf("dtr/rh/%s", id)) + tpn.RequestersFinder, _ = containers.NewRequestersFinder(tpn.RequestersContainer, tpn.ShardCoordinator) tpn.RequestHandler, _ = requestHandlers.NewResolverRequestHandler( + log, tpn.RequestersFinder, tpn.RequestedItemsHandler, tpn.WhiteListHandler, diff --git a/testscommon/dataRetriever/poolFactory.go b/testscommon/dataRetriever/poolFactory.go index 43aaeb3e78f..8bb46a68529 100644 --- a/testscommon/dataRetriever/poolFactory.go +++ b/testscommon/dataRetriever/poolFactory.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon/txcachemocks" "github.com/multiversx/mx-chain-go/trie/factory" + logger "github.com/multiversx/mx-chain-logger-go" ) var peerAuthDuration = 10 * time.Second @@ -51,7 +52,10 @@ func CreateTxPool(numShards uint32, selfShard uint32) (dataRetriever.ShardedData ) } -func createPoolHolderArgs(numShards uint32, selfShard uint32) dataPool.DataPoolArgs { +func 
createPoolHolderArgs( + log logger.Logger, + numShards uint32, selfShard uint32, +) dataPool.DataPoolArgs { var err error txPool, err := CreateTxPool(numShards, selfShard) @@ -71,7 +75,7 @@ func createPoolHolderArgs(numShards uint32, selfShard uint32) dataPool.DataPoolA }) panicIfError("CreatePoolsHolder", err) - headersPool, err := headersCache.NewHeadersPool(config.HeadersPoolConfig{ + headersPool, err := headersCache.NewHeadersPool(log, config.HeadersPoolConfig{ MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100, }) @@ -163,10 +167,25 @@ func createPoolHolderArgs(numShards uint32, selfShard uint32) dataPool.DataPoolA return dataPoolArgs } +// CreatePoolsHolderWithLog - +func CreatePoolsHolderWithLog( + log logger.Logger, + numShards uint32, selfShard uint32, +) dataRetriever.PoolsHolder { + return createPoolsHolder(log, numShards, selfShard) +} + // CreatePoolsHolder - func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHolder { + return createPoolsHolder(nil, numShards, selfShard) +} + +func createPoolsHolder( + log logger.Logger, + numShards uint32, selfShard uint32, +) dataRetriever.PoolsHolder { - dataPoolArgs := createPoolHolderArgs(numShards, selfShard) + dataPoolArgs := createPoolHolderArgs(log, numShards, selfShard) holder, err := dataPool.NewDataPool(dataPoolArgs) panicIfError("CreatePoolsHolder", err) @@ -176,10 +195,11 @@ func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHo // CreatePoolsHolderWithProofsPool - func CreatePoolsHolderWithProofsPool( + log logger.Logger, numShards uint32, selfShard uint32, proofsPool dataRetriever.ProofsPool, ) dataRetriever.PoolsHolder { - dataPoolArgs := createPoolHolderArgs(numShards, selfShard) + dataPoolArgs := createPoolHolderArgs(log, numShards, selfShard) dataPoolArgs.Proofs = proofsPool holder, err := dataPool.NewDataPool(dataPoolArgs) @@ -206,7 +226,7 @@ func CreatePoolsHolderWithTxPool(txPool dataRetriever.ShardedDataCacherNotifier) }) panicIfError("CreatePoolsHolderWithTxPool", err) - headersPool, err := headersCache.NewHeadersPool(config.HeadersPoolConfig{ + headersPool, err := headersCache.NewHeadersPool(nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100, }) diff --git a/testscommon/dataRetriever/poolsHolderMock.go b/testscommon/dataRetriever/poolsHolderMock.go index 7e5cd64f5a4..108787b1c83 100644 --- a/testscommon/dataRetriever/poolsHolderMock.go +++ b/testscommon/dataRetriever/poolsHolderMock.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon/txcachemocks" + logger "github.com/multiversx/mx-chain-logger-go" ) // PoolsHolderMock - @@ -37,8 +38,17 @@ type PoolsHolderMock struct { proofs dataRetriever.ProofsPool } +// NewPoolsHolderMockWithLog - +func NewPoolsHolderMockWithLog(log logger.Logger) *PoolsHolderMock { + return newPoolsHolderMock(log) +} + // NewPoolsHolderMock - func NewPoolsHolderMock() *PoolsHolderMock { + return newPoolsHolderMock(nil) +} + +func newPoolsHolderMock(log logger.Logger) *PoolsHolderMock { var err error holder := &PoolsHolderMock{} @@ -75,7 +85,7 @@ func NewPoolsHolderMock() *PoolsHolderMock { }) panicIfError("NewPoolsHolderMock", err) - holder.headers, err = headersCache.NewHeadersPool(config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}) + holder.headers, err = headersCache.NewHeadersPool(log, 
config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}) panicIfError("NewPoolsHolderMock", err) holder.miniBlocks, err = storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) diff --git a/update/sync/syncHeaders_test.go b/update/sync/syncHeaders_test.go index 611c1069e71..d2df13f4842 100644 --- a/update/sync/syncHeaders_test.go +++ b/update/sync/syncHeaders_test.go @@ -192,7 +192,7 @@ func TestSyncEpochStartMetaHeader_ReceiveWrongHeaderTimeout(t *testing.T) { metaHash := []byte("metaHash") meta := &block.MetaBlock{Epoch: 1} args := createMockHeadersSyncHandlerArgs() - args.Cache, _ = headersCache.NewHeadersPool(config.HeadersPoolConfig{ + args.Cache, _ = headersCache.NewHeadersPool(nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 1, }) @@ -238,7 +238,7 @@ func TestSyncEpochStartMetaHeader_ReceiveHeaderOk(t *testing.T) { }, }} args := createMockHeadersSyncHandlerArgs() - args.Cache, _ = headersCache.NewHeadersPool(config.HeadersPoolConfig{ + args.Cache, _ = headersCache.NewHeadersPool(nil, config.HeadersPoolConfig{ MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 1, }) From d8c9b0a26cceacd15c26ea34db8b8a6502ac62a3 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 16 Jan 2025 15:50:07 +0200 Subject: [PATCH 06/10] do not return false if already existing proof --- consensus/spos/bls/v2/subroundEndRound.go | 1 - 1 file changed, 1 deletion(-) diff --git a/consensus/spos/bls/v2/subroundEndRound.go b/consensus/spos/bls/v2/subroundEndRound.go index b461d6ba34d..b2c179acf21 100644 --- a/consensus/spos/bls/v2/subroundEndRound.go +++ b/consensus/spos/bls/v2/subroundEndRound.go @@ -264,7 +264,6 @@ func (sr *subroundEndRound) doEndRoundJobByNode() bool { err = sr.EquivalentProofsPool().AddProof(proof) if err != nil { sr.Log.Debug("doEndRoundJobByNode.AddProof", "error", err) - return false } } From 3b4c4337176155ea31ec68b351f20a5d2ba5b198 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Sun, 26 Jan 2025 12:32:45 +0200 Subject: [PATCH 07/10] added full integration test - without equivalent proofs activation --- consensus/spos/bls/v1/subroundEndRound.go | 1 + consensus/spos/bls/v2/subroundBlock.go | 4 +- factory/consensus/consensusComponents.go | 20 +- integrationTests/consensus/consensus_test.go | 272 ++++ integrationTests/testFullNode.go | 1161 ++++++++++++++++++ integrationTests/testProcessorNode.go | 16 +- 6 files changed, 1466 insertions(+), 8 deletions(-) create mode 100644 integrationTests/testFullNode.go diff --git a/consensus/spos/bls/v1/subroundEndRound.go b/consensus/spos/bls/v1/subroundEndRound.go index c591c736aca..51c1f4a1af3 100644 --- a/consensus/spos/bls/v1/subroundEndRound.go +++ b/consensus/spos/bls/v1/subroundEndRound.go @@ -321,6 +321,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { return false } + log.Error("doEndRoundJobByLeader.SetSignature", "set sig", "sig", sig) err = header.SetSignature(sig) if err != nil { log.Debug("doEndRoundJobByLeader.SetSignature", "error", err.Error()) diff --git a/consensus/spos/bls/v2/subroundBlock.go b/consensus/spos/bls/v2/subroundBlock.go index ba5f560354c..49a20e06670 100644 --- a/consensus/spos/bls/v2/subroundBlock.go +++ b/consensus/spos/bls/v2/subroundBlock.go @@ -75,6 +75,7 @@ func checkNewSubroundBlockParams( func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { isSelfLeader := sr.IsSelfLeader() && sr.ShouldConsiderSelfKeyInConsensus() if !isSelfLeader { // is NOT self leader in this 
round? + sr.Log.Warn("is not Leader") return false } @@ -347,6 +348,7 @@ func (sr *subroundBlock) createHeader() (data.HeaderHandler, error) { if err != nil { return nil, err } + sr.Log.Debug("set timestamp for header", "timestamp", hdr.GetTimeStamp(), "header nonnce", hdr.GetNonce()) err = hdr.SetPrevRandSeed(prevRandSeed) if err != nil { @@ -369,7 +371,7 @@ func (sr *subroundBlock) createHeader() (data.HeaderHandler, error) { func (sr *subroundBlock) addProofOnHeader(header data.HeaderHandler) bool { prevBlockProof, err := sr.EquivalentProofsPool().GetProof(sr.ShardCoordinator().SelfId(), header.GetPrevHash()) if err != nil { - sr.Log.Error("failed to get proof for header", "headerHash", sr.GetData()) + sr.Log.Error("failed to get proof for header", "headerHash", header.GetPrevHash()) if header.GetNonce() == 1 { sr.Log.Error("first nonce") diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 1dca6c61bcf..9e0305d6462 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -272,7 +272,10 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } - id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey())[0:8] + id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey()) + if len(id) > 8 { + id = id[0:8] + } log := logger.GetOrCreate(fmt.Sprintf("consensus/%s", id)) @@ -349,7 +352,10 @@ func (ccf *consensusComponentsFactory) createChronology() (consensus.ChronologyH wd = &watchdog.DisabledWatchdog{} } - id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey())[0:8] + id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey()) + if len(id) > 8 { + id = id[0:8] + } logger := logger.GetOrCreate(fmt.Sprintf("cns/chr/%s", id)) @@ -482,7 +488,10 @@ func (ccf *consensusComponentsFactory) createShardBootstrapper() (process.Bootst return nil, err } - id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey())[0:8] + id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey()) + if len(id) > 8 { + id = id[0:8] + } logger := logger.GetOrCreate(fmt.Sprintf("process/sync/%s", id)) @@ -618,7 +627,10 @@ func (ccf *consensusComponentsFactory) createMetaChainBootstrapper() (process.Bo return nil, err } - id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey())[0:8] + id := hex.EncodeToString(ccf.processComponents.NodesCoordinator().GetOwnPublicKey()) + if len(id) > 8 { + id = id[0:8] + } logger := logger.GetOrCreate(fmt.Sprintf("process/sync/%s", id)) diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 5d740dc3549..55faae12bac 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -8,11 +8,13 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" "github.com/multiversx/mx-chain-core-go/data" crypto "github.com/multiversx/mx-chain-crypto-go" logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/multiversx/mx-chain-go/config" consensusComp "github.com/multiversx/mx-chain-go/factory/consensus" @@ -67,6 +69,276 @@ func TestConsensusBLSNotEnoughValidators(t *testing.T) { 
 	runConsensusWithNotEnoughValidators(t, blsConsensusType)
 }
 
+func TestConsensusBLSWithProcessing(t *testing.T) {
+	if testing.Short() {
+		t.Skip("this is not a short test")
+	}
+
+	_ = logger.SetLogLevel("*:DEBUG,process:TRACE,consensus:TRACE")
+	logger.ToggleLoggerName(true)
+
+	numKeysOnEachNode := 1
+	numMetaNodes := uint32(2)
+	numNodes := uint32(2)
+	consensusSize := uint32(2 * numKeysOnEachNode)
+	roundTime := uint64(1000)
+
+	log.Info("runFullConsensusTest",
+		"numNodes", numNodes,
+		"numKeysOnEachNode", numKeysOnEachNode,
+		"consensusSize", consensusSize,
+	)
+
+	enableEpochsConfig := integrationTests.CreateEnableEpochsConfig()
+
+	equivalentProofsActivationEpoch := uint32(0)
+
+	enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProofsActivationEpoch
+	enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProofsActivationEpoch
+
+	fmt.Println("Step 1. Setup nodes...")
+
+	nodes := integrationTests.CreateNodesWithTestConsensusNode(
+		int(numMetaNodes),
+		int(numNodes),
+		int(consensusSize),
+		roundTime,
+		blsConsensusType,
+		numKeysOnEachNode,
+		enableEpochsConfig,
+	)
+
+	// leaders := []*integrationTests.TestConsensusNode{}
+	for shardID, nodesList := range nodes {
+		// leaders = append(leaders, nodesList[0])
+
+		displayAndStartNodes(shardID, nodesList)
+	}
+
+	time.Sleep(p2pBootstrapDelay)
+
+	// round := uint64(0)
+	// nonce := uint64(0)
+	// round = integrationTests.IncrementAndPrintRound(round)
+	// integrationTests.UpdateRound(nodes, round)
+	// nonce++
+
+	// numRoundsToTest := 5
+	// for i := 0; i < numRoundsToTest; i++ {
+	// 	integrationTests.ProposeBlock(nodes, leaders, round, nonce)
+
+	// 	time.Sleep(integrationTests.SyncDelay)
+
+	// 	round = integrationTests.IncrementAndPrintRound(round)
+	// 	integrationTests.UpdateRound(nodes, round)
+	// 	nonce++
+	// }
+
+	for _, nodesList := range nodes {
+		for _, n := range nodesList {
+			statusComponents := integrationTests.GetDefaultStatusComponents()
+
+			consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{
+				Config: config.Config{
+					Consensus: config.ConsensusConfig{
+						Type: blsConsensusType,
+					},
+					ValidatorPubkeyConverter: config.PubkeyConfig{
+						Length:          96,
+						Type:            "bls",
+						SignatureLength: 48,
+					},
+					TrieSync: config.TrieSyncConfig{
+						NumConcurrentTrieSyncers:  5,
+						MaxHardCapForMissingNodes: 5,
+						TrieSyncerVersion:         2,
+						CheckNodesOnDisk:          false,
+					},
+					GeneralSettings: config.GeneralSettingsConfig{
+						SyncProcessTimeInMillis: 6000,
+					},
+				},
+				BootstrapRoundIndex:  0,
+				CoreComponents:       n.Node.GetCoreComponents(),
+				NetworkComponents:    n.Node.GetNetworkComponents(),
+				CryptoComponents:     n.Node.GetCryptoComponents(),
+				DataComponents:       n.Node.GetDataComponents(),
+				ProcessComponents:    n.Node.GetProcessComponents(),
+				StateComponents:      n.Node.GetStateComponents(),
+				StatusComponents:     statusComponents,
+				StatusCoreComponents: n.Node.GetStatusCoreComponents(),
+				ScheduledProcessor:   &consensusMocks.ScheduledProcessorStub{},
+				IsInImportMode:       n.Node.IsInImportMode(),
+			}
+
+			consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs)
+			require.Nil(t, err)
+
+			managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusFactory)
+			require.Nil(t, err)
+
+			err = managedConsensusComponents.Create()
+			require.Nil(t, err)
+		}
+	}
+
+	time.Sleep(100 * time.Second)
+
+	fmt.Println("Checking shards...")
+
+	for _, nodesList := range nodes {
+		// expectedNonce := nodesList[0].Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce()
+		expectedNonce := uint64(1)
+		for _, n := range nodesList {
+			for i := 1; i < len(nodes); i++ {
+				if check.IfNil(n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) {
+					assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i))
+				} else {
+					assert.Equal(t, expectedNonce, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce())
+				}
+			}
+		}
+	}
+}
+
+func TestConsensusBLSWithFullProcessing(t *testing.T) {
+	if testing.Short() {
+		t.Skip("this is not a short test")
+	}
+
+	_ = logger.SetLogLevel("*:DEBUG,process:TRACE,consensus:TRACE")
+	logger.ToggleLoggerName(true)
+
+	numKeysOnEachNode := 1
+	numMetaNodes := uint32(4)
+	numNodes := uint32(4)
+	consensusSize := uint32(4 * numKeysOnEachNode)
+	roundTime := uint64(1000)
+
+	// maxShards := uint32(1)
+	// shardId := uint32(0)
+	// numNodesPerShard := 3
+
+	log.Info("runFullNodesTest",
+		"numNodes", numNodes,
+		"numKeysOnEachNode", numKeysOnEachNode,
+		"consensusSize", consensusSize,
+	)
+
+	enableEpochsConfig := integrationTests.CreateEnableEpochsConfig()
+
+	equivalentProofsActivationEpoch := uint32(10)
+
+	enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProofsActivationEpoch
+	enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProofsActivationEpoch
+
+	fmt.Println("Step 1. Setup nodes...")
+
+	nodes := integrationTests.CreateNodesWithTestFullNode(
+		int(numMetaNodes),
+		int(numNodes),
+		int(consensusSize),
+		roundTime,
+		blsConsensusType,
+		numKeysOnEachNode,
+		enableEpochsConfig,
+	)
+
+	for shardID, nodesList := range nodes {
+		for _, n := range nodesList {
+			skBuff, _ := n.NodeKeys.MainKey.Sk.ToByteArray()
+			pkBuff, _ := n.NodeKeys.MainKey.Pk.ToByteArray()
+
+			encodedNodePkBuff := testPubkeyConverter.SilentEncode(pkBuff, log)
+
+			fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n",
+				shardID,
+				hex.EncodeToString(skBuff),
+				encodedNodePkBuff,
+			)
+		}
+	}
+
+	time.Sleep(p2pBootstrapDelay)
+
+	defer func() {
+		for _, nodesList := range nodes {
+			for _, n := range nodesList {
+				n.Close()
+			}
+		}
+	}()
+
+	for _, nodesList := range nodes {
+		for _, n := range nodesList {
+			statusComponents := integrationTests.GetDefaultStatusComponents()
+
+			consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{
+				Config: config.Config{
+					Consensus: config.ConsensusConfig{
+						Type: blsConsensusType,
+					},
+					ValidatorPubkeyConverter: config.PubkeyConfig{
+						Length:          96,
+						Type:            "bls",
+						SignatureLength: 48,
+					},
+					TrieSync: config.TrieSyncConfig{
+						NumConcurrentTrieSyncers:  5,
+						MaxHardCapForMissingNodes: 5,
+						TrieSyncerVersion:         2,
+						CheckNodesOnDisk:          false,
+					},
+					GeneralSettings: config.GeneralSettingsConfig{
+						SyncProcessTimeInMillis: 6000,
+					},
+				},
+				BootstrapRoundIndex:  0,
+				CoreComponents:       n.Node.GetCoreComponents(),
+				NetworkComponents:    n.Node.GetNetworkComponents(),
+				CryptoComponents:     n.Node.GetCryptoComponents(),
+				DataComponents:       n.Node.GetDataComponents(),
+				ProcessComponents:    n.Node.GetProcessComponents(),
+				StateComponents:      n.Node.GetStateComponents(),
+				StatusComponents:     statusComponents,
+				StatusCoreComponents: n.Node.GetStatusCoreComponents(),
+				ScheduledProcessor:   &consensusMocks.ScheduledProcessorStub{},
+				IsInImportMode:       n.Node.IsInImportMode(),
+			}
+
+			consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs)
+			require.Nil(t, err)
+
+			managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusFactory)
+			require.Nil(t, err)
+
+			err = managedConsensusComponents.Create()
+			require.Nil(t, err)
+		}
+	}
+
+	time.Sleep(10 * time.Second)
+
+	
fmt.Println("Checking shards...") + + for _, nodesList := range nodes { + expectedNonce := uint64(0) + if !check.IfNil(nodesList[0].Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) { + expectedNonce = nodesList[0].Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce() + } + for _, n := range nodesList { + for i := 1; i < len(nodes); i++ { + if check.IfNil(n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) { + // assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) + } else { + fmt.Println("FOUND") + assert.Equal(t, expectedNonce, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce()) + } + } + } + } +} + func initNodesAndTest( numMetaNodes, numNodes, diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go new file mode 100644 index 00000000000..9638459af8f --- /dev/null +++ b/integrationTests/testFullNode.go @@ -0,0 +1,1161 @@ +package integrationTests + +import ( + "encoding/hex" + "fmt" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/core/versioning" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/hashing" + crypto "github.com/multiversx/mx-chain-crypto-go" + mclMultiSig "github.com/multiversx/mx-chain-crypto-go/signing/mcl/multisig" + "github.com/multiversx/mx-chain-crypto-go/signing/multisig" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/enablers" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/round" + "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + epochStartDisabled "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/epochStart/shardchain" + cryptoFactory "github.com/multiversx/mx-chain-go/factory/crypto" + "github.com/multiversx/mx-chain-go/factory/peerSignatureHandler" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/keysManagement" + "github.com/multiversx/mx-chain-go/node" + "github.com/multiversx/mx-chain-go/node/nodeDebugFactory" + "github.com/multiversx/mx-chain-go/ntp" + p2pFactory "github.com/multiversx/mx-chain-go/p2p/factory" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/process/factory/interceptorscontainer" + "github.com/multiversx/mx-chain-go/process/interceptors" + disabledInterceptors "github.com/multiversx/mx-chain-go/process/interceptors/disabled" + interceptorsFactory "github.com/multiversx/mx-chain-go/process/interceptors/factory" + processMock "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/process/scToProtocol" + "github.com/multiversx/mx-chain-go/process/smartContract" + processSync "github.com/multiversx/mx-chain-go/process/sync" + 
"github.com/multiversx/mx-chain-go/process/track" + chainShardingMocks "github.com/multiversx/mx-chain-go/sharding/mock" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/blockInfoProviders" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/storage/cache" + "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/chainParameters" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + testFactory "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" + logger "github.com/multiversx/mx-chain-logger-go" + wasmConfig "github.com/multiversx/mx-chain-vm-go/config" +) + +func CreateNodesWithTestFullNode( + numMetaNodes int, + nodesPerShard int, + consensusSize int, + roundTime uint64, + consensusType string, + numKeysOnEachNode int, + enableEpochsConfig config.EnableEpochs, +) map[uint32][]*TestFullNode { + + nodes := make(map[uint32][]*TestFullNode, nodesPerShard) + cp := CreateCryptoParams(nodesPerShard, numMetaNodes, maxShards, numKeysOnEachNode) + keysMap := PubKeysMapFromNodesKeysMap(cp.NodesKeys) + validatorsMap := GenValidatorsFromPubKeys(keysMap, maxShards) + eligibleMap, _ := nodesCoordinator.NodesInfoToValidators(validatorsMap) + waitingMap := make(map[uint32][]nodesCoordinator.Validator) + connectableNodes := make(map[uint32][]Connectable, 0) + + startTime := time.Now().Unix() + testHasher := createHasher(consensusType) + + for shardID := range cp.NodesKeys { + for _, keysPair := range cp.NodesKeys[shardID] { + multiSigner, _ := multisig.NewBLSMultisig(&mclMultiSig.BlsMultiSigner{Hasher: testHasher}, cp.KeyGen) + multiSignerMock := createCustomMultiSignerMock(multiSigner) + + args := ArgsTestFullNode{ + ArgTestProcessorNode: &ArgTestProcessorNode{ + MaxShards: 2, + NodeShardId: 0, + TxSignPrivKeyShardId: 0, + WithSync: false, + EpochsConfig: &enableEpochsConfig, + NodeKeys: keysPair, + }, + ConsensusSize: consensusSize, + RoundTime: roundTime, + ConsensusType: consensusType, + EligibleMap: eligibleMap, + WaitingMap: waitingMap, + KeyGen: cp.KeyGen, + P2PKeyGen: cp.P2PKeyGen, + MultiSigner: multiSignerMock, + StartTime: startTime, + } + + tfn := NewTestFullNode(args) + nodes[shardID] = append(nodes[shardID], tfn) + connectableNodes[shardID] = append(connectableNodes[shardID], tfn) + } + } + + for shardID := range nodes { + ConnectNodes(connectableNodes[shardID]) + } + + return nodes +} + +type ArgsTestFullNode struct { + *ArgTestProcessorNode + + ConsensusSize int + RoundTime uint64 + ConsensusType string + EligibleMap map[uint32][]nodesCoordinator.Validator + 
WaitingMap map[uint32][]nodesCoordinator.Validator + KeyGen crypto.KeyGenerator + P2PKeyGen crypto.KeyGenerator + MultiSigner *cryptoMocks.MultisignerMock + StartTime int64 +} + +type TestFullNode struct { + *TestProcessorNode +} + +func NewTestFullNode(args ArgsTestFullNode) *TestFullNode { + tpn := newBaseTestProcessorNode(*args.ArgTestProcessorNode) + + tfn := &TestFullNode{ + TestProcessorNode: tpn, + } + + tfn.initTestNodeWithArgs(*args.ArgTestProcessorNode, args) + + return tfn +} + +func (tfn *TestFullNode) initNodesCoordinator( + consensusSize int, + hasher hashing.Hasher, + epochStartRegistrationHandler notifier.EpochStartNotifier, + eligibleMap map[uint32][]nodesCoordinator.Validator, + waitingMap map[uint32][]nodesCoordinator.Validator, + pkBytes []byte, + cache storage.Cacher, +) { + argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ + ChainParametersHandler: &chainParameters.ChainParametersHandlerStub{ + ChainParametersForEpochCalled: func(_ uint32) (config.ChainParametersByEpochConfig, error) { + return config.ChainParametersByEpochConfig{ + ShardConsensusGroupSize: uint32(consensusSize), + MetachainConsensusGroupSize: uint32(consensusSize), + }, nil + }, + }, + Marshalizer: TestMarshalizer, + Hasher: hasher, + Shuffler: &shardingMocks.NodeShufflerMock{}, + EpochStartNotifier: epochStartRegistrationHandler, + BootStorer: CreateMemUnit(), + NbShards: maxShards, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: pkBytes, + ConsensusGroupCache: cache, + ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardIDAsObserver: tfn.ShardCoordinator.SelfId(), + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + } + + tfn.NodesCoordinator, _ = nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) +} + +func (tpn *TestFullNode) initTestNodeWithArgs(args ArgTestProcessorNode, fullArgs ArgsTestFullNode) { + tpn.AppStatusHandler = args.AppStatusHandler + if check.IfNil(args.AppStatusHandler) { + tpn.AppStatusHandler = TestAppStatusHandler + } + + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } + + tpn.MainMessenger = CreateMessengerWithNoDiscovery(id) + + tpn.StatusMetrics = args.StatusMetrics + if check.IfNil(args.StatusMetrics) { + args.StatusMetrics = &testscommon.StatusMetricsStub{} + } + + tpn.initChainHandler() + tpn.initHeaderValidator() + + syncer := ntp.NewSyncTime(ntp.NewNTPGoogleConfig(), nil) + syncer.StartSyncingTime() + + roundHandler, _ := round.NewRound( + time.Unix(fullArgs.StartTime, 0), + syncer.CurrentTime(), + time.Millisecond*time.Duration(fullArgs.RoundTime), + syncer, + 0) + + tpn.NetworkShardingCollector = mock.NewNetworkShardingCollectorMock() + if check.IfNil(tpn.EpochNotifier) { + tpn.EpochStartNotifier = notifier.NewEpochStartSubscriptionHandler() + } + tpn.initStorage() + if check.IfNil(args.TrieStore) { + tpn.initAccountDBsWithPruningStorer() + } else { + tpn.initAccountDBs(args.TrieStore) + } + + economicsConfig := args.EconomicsConfig + if economicsConfig == nil { + economicsConfig = createDefaultEconomicsConfig() + } + + tpn.initEconomicsData(economicsConfig) + 
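+	// set up ratings data, requesters/resolvers, validator statistics, genesis blocks and the block tracker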
tpn.initRatingsData() + tpn.initRequestedItemsHandler() + tpn.initResolvers() + tpn.initRequesters() + tpn.initValidatorStatistics() + tpn.initGenesisBlocks(args) + tpn.initBlockTracker(roundHandler) + + gasMap := wasmConfig.MakeGasMapForTests() + defaults.FillGasMapInternal(gasMap, 1) + if args.GasScheduleMap != nil { + gasMap = args.GasScheduleMap + } + vmConfig := getDefaultVMConfig() + if args.VMConfig != nil { + vmConfig = args.VMConfig + } + tpn.initInnerProcessors(gasMap, vmConfig) + + if check.IfNil(args.TrieStore) { + var apiBlockchain data.ChainHandler + if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { + apiBlockchain, _ = blockchain.NewMetaChain(statusHandlerMock.NewAppStatusHandlerMock()) + } else { + apiBlockchain, _ = blockchain.NewBlockChain(statusHandlerMock.NewAppStatusHandlerMock()) + } + argsNewScQueryService := smartContract.ArgsNewSCQueryService{ + VmContainer: tpn.VMContainer, + EconomicsFee: tpn.EconomicsData, + BlockChainHook: tpn.BlockchainHook, + MainBlockChain: tpn.BlockChain, + APIBlockChain: apiBlockchain, + WasmVMChangeLocker: tpn.WasmVMChangeLocker, + Bootstrapper: tpn.Bootstrapper, + AllowExternalQueriesChan: common.GetClosedUnbufferedChannel(), + HistoryRepository: tpn.HistoryRepository, + ShardCoordinator: tpn.ShardCoordinator, + StorageService: tpn.Storage, + Marshaller: TestMarshaller, + Hasher: TestHasher, + Uint64ByteSliceConverter: TestUint64Converter, + } + tpn.SCQueryService, _ = smartContract.NewSCQueryService(argsNewScQueryService) + } else { + tpn.createFullSCQueryService(gasMap, vmConfig) + } + + testHasher := createHasher(fullArgs.ConsensusType) + epochStartRegistrationHandler := notifier.NewEpochStartSubscriptionHandler() + pkBytes, _ := tpn.NodeKeys.MainKey.Pk.ToByteArray() + consensusCache, _ := cache.NewLRUCache(10000) + + tpn.initNodesCoordinator( + fullArgs.ConsensusSize, + testHasher, + epochStartRegistrationHandler, + fullArgs.EligibleMap, + fullArgs.WaitingMap, + pkBytes, + consensusCache, + ) + + tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( + TestMarshalizer, + TestHasher, + tpn.MainMessenger, + tpn.ShardCoordinator, + tpn.OwnAccount.PeerSigHandler, + tpn.DataPool.Headers(), + tpn.MainInterceptorsContainer, + &testscommon.AlarmSchedulerStub{}, + testscommon.NewKeysHandlerSingleSignerMock( + tpn.NodeKeys.MainKey.Sk, + tpn.MainMessenger.ID(), + ), + config.ConsensusGradualBroadcastConfig{GradualIndexBroadcastDelay: []config.IndexBroadcastDelay{}}, + ) + + if args.WithSync { + tpn.initBootstrapper() + } + tpn.setGenesisBlock() + tpn.initNode(fullArgs, syncer, roundHandler) + tpn.addHandlersForCounters() + tpn.addGenesisBlocksIntoStorage() + + if args.GenesisFile != "" { + tpn.createHeartbeatWithHardforkTrigger() + } +} + +func (tpn *TestFullNode) initNode( + args ArgsTestFullNode, + syncer ntp.SyncTimer, + roundHandler consensus.RoundHandler, +) { + var err error + + statusCoreComponents := &testFactory.StatusCoreComponentsStub{ + StatusMetricsField: tpn.StatusMetrics, + AppStatusHandlerField: tpn.AppStatusHandler, + } + if tpn.EpochNotifier == nil { + tpn.EpochNotifier = forking.NewGenericEpochNotifier() + } + if tpn.EnableEpochsHandler == nil { + tpn.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(CreateEnableEpochsConfig(), tpn.EpochNotifier) + } + + var epochTrigger TestEpochStartTrigger + if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { + argsNewMetaEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + GenesisTime: time.Unix(args.StartTime, 0), + EpochStartNotifier: 
notifier.NewEpochStartSubscriptionHandler(), + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 1, + RoundsPerEpoch: 1000, + }, + Epoch: 0, + Storage: createTestStore(), + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + DataPool: tpn.DataPool, + } + epochStartTrigger, err := metachain.NewEpochStartTrigger(argsNewMetaEpochStart) + if err != nil { + fmt.Println(err.Error()) + } + epochTrigger = &metachain.TestTrigger{} + epochTrigger.SetTrigger(epochStartTrigger) + } else { + argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ + MiniBlocksPool: tpn.DataPool.MiniBlocks(), + ValidatorsInfoPool: tpn.DataPool.ValidatorsInfo(), + RequestHandler: &testscommon.RequestHandlerStub{}, + } + peerMiniBlockSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) + + argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + HeaderValidator: &mock.HeaderValidatorStub{}, + Uint64Converter: TestUint64Converter, + DataPool: tpn.DataPool, + Storage: tpn.Storage, + RequestHandler: &testscommon.RequestHandlerStub{}, + Epoch: 0, + Validity: 1, + Finality: 1, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + PeerMiniBlocksSyncer: peerMiniBlockSyncer, + RoundHandler: roundHandler, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EnableEpochsHandler: tpn.EnableEpochsHandler, + } + epochStartTrigger, err := shardchain.NewEpochStartTrigger(argsShardEpochStart) + if err != nil { + fmt.Println("NewEpochStartTrigger shard") + fmt.Println(err.Error()) + } + epochTrigger = &shardchain.TestTrigger{} + epochTrigger.SetTrigger(epochStartTrigger) + } + + tpn.EpochStartTrigger = epochTrigger + + strPk := "" + if !check.IfNil(args.HardforkPk) { + buff, err := args.HardforkPk.ToByteArray() + log.LogIfError(err) + + strPk = hex.EncodeToString(buff) + } + _ = tpn.createHardforkTrigger(strPk) + + coreComponents := GetDefaultCoreComponents(tpn.EnableEpochsHandler, tpn.EpochNotifier) + coreComponents.SyncTimerField = syncer + coreComponents.RoundHandlerField = roundHandler + + coreComponents.InternalMarshalizerField = TestMarshalizer + coreComponents.VmMarshalizerField = TestVmMarshalizer + coreComponents.TxMarshalizerField = TestTxSignMarshalizer + coreComponents.HasherField = TestHasher + coreComponents.AddressPubKeyConverterField = TestAddressPubkeyConverter + coreComponents.ValidatorPubKeyConverterField = TestValidatorPubkeyConverter + coreComponents.ChainIdCalled = func() string { + return string(tpn.ChainID) + } + coreComponents.GenesisTimeField = time.Unix(args.StartTime, 0) + coreComponents.GenesisNodesSetupField = &genesisMocks.NodesSetupStub{ + GetShardConsensusGroupSizeCalled: func() uint32 { + return uint32(args.ConsensusSize) + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return uint32(args.ConsensusSize) + }, + } + coreComponents.MinTransactionVersionCalled = func() uint32 { + return tpn.MinTransactionVersion + } + coreComponents.TxVersionCheckField = versioning.NewTxVersionChecker(tpn.MinTransactionVersion) + hardforkPubKeyBytes, _ := coreComponents.ValidatorPubKeyConverterField.Decode(hardforkPubKey) + coreComponents.HardforkTriggerPubKeyField = hardforkPubKeyBytes + coreComponents.Uint64ByteSliceConverterField = TestUint64Converter + coreComponents.EconomicsDataField = tpn.EconomicsData + coreComponents.APIEconomicsHandler = tpn.EconomicsData + coreComponents.EnableEpochsHandlerField = 
tpn.EnableEpochsHandler + coreComponents.EpochNotifierField = tpn.EpochNotifier + coreComponents.RoundNotifierField = tpn.RoundNotifier + coreComponents.WasmVMChangeLockerInternal = tpn.WasmVMChangeLocker + coreComponents.EconomicsDataField = tpn.EconomicsData + + dataComponents := GetDefaultDataComponents() + dataComponents.BlockChain = tpn.BlockChain + dataComponents.DataPool = tpn.DataPool + dataComponents.Store = tpn.Storage + + bootstrapComponents := getDefaultBootstrapComponents(tpn.ShardCoordinator, tpn.EnableEpochsHandler) + + tpn.BlockBlackListHandler = cache.NewTimeCache(TimeSpanForBadHeaders) + + if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { + tpn.ForkDetector, err = processSync.NewShardForkDetector( + log, + roundHandler, + tpn.BlockBlackListHandler, + tpn.BlockTracker, + args.StartTime, + tpn.EnableEpochsHandler, + tpn.DataPool.Proofs()) + } else { + tpn.ForkDetector, err = processSync.NewMetaForkDetector( + log, + roundHandler, + tpn.BlockBlackListHandler, + tpn.BlockTracker, + args.StartTime, + tpn.EnableEpochsHandler, + tpn.DataPool.Proofs()) + } + if err != nil { + panic(err.Error()) + } + + argsKeysHolder := keysManagement.ArgsManagedPeersHolder{ + KeyGenerator: args.KeyGen, + P2PKeyGenerator: args.P2PKeyGen, + MaxRoundsOfInactivity: 10, + PrefsConfig: config.Preferences{}, + P2PKeyConverter: p2pFactory.NewP2PKeyConverter(), + } + keysHolder, _ := keysManagement.NewManagedPeersHolder(argsKeysHolder) + + // adding provided handled keys + for _, key := range args.NodeKeys.HandledKeys { + skBytes, _ := key.Sk.ToByteArray() + _ = keysHolder.AddManagedPeer(skBytes) + } + + multiSigContainer := cryptoMocks.NewMultiSignerContainerMock(args.MultiSigner) + pubKey := tpn.NodeKeys.MainKey.Sk.GeneratePublic() + pubKeyBytes, _ := pubKey.ToByteArray() + pubKeyString := coreComponents.ValidatorPubKeyConverterField.SilentEncode(pubKeyBytes, log) + argsKeysHandler := keysManagement.ArgsKeysHandler{ + ManagedPeersHolder: keysHolder, + PrivateKey: tpn.NodeKeys.MainKey.Sk, + Pid: tpn.MainMessenger.ID(), + } + keysHandler, _ := keysManagement.NewKeysHandler(argsKeysHandler) + + signingHandlerArgs := cryptoFactory.ArgsSigningHandler{ + PubKeys: []string{pubKeyString}, + MultiSignerContainer: multiSigContainer, + KeyGenerator: args.KeyGen, + KeysHandler: keysHandler, + SingleSigner: TestSingleBlsSigner, + } + sigHandler, _ := cryptoFactory.NewSigningHandler(signingHandlerArgs) + + cryptoComponents := GetDefaultCryptoComponents() + cryptoComponents.PrivKey = tpn.NodeKeys.MainKey.Sk + cryptoComponents.PubKey = tpn.NodeKeys.MainKey.Pk + cryptoComponents.TxSig = tpn.OwnAccount.SingleSigner + cryptoComponents.BlockSig = tpn.OwnAccount.SingleSigner + cryptoComponents.MultiSigContainer = cryptoMocks.NewMultiSignerContainerMock(tpn.MultiSigner) + cryptoComponents.BlKeyGen = tpn.OwnAccount.KeygenTxSign + cryptoComponents.TxKeyGen = TestKeyGenForAccounts + + peerSigCache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) + peerSigHandler, _ := peerSignatureHandler.NewPeerSignatureHandler(peerSigCache, TestSingleBlsSigner, args.KeyGen) + cryptoComponents.PeerSignHandler = peerSigHandler + cryptoComponents.SigHandler = sigHandler + cryptoComponents.KeysHandlerField = keysHandler + + tpn.initInterceptors(coreComponents, cryptoComponents, roundHandler, tpn.EnableEpochsHandler, tpn.Storage, epochTrigger) + + if args.WithSync { + tpn.initBlockProcessorWithSync() + } else { + tpn.initBlockProcessor(coreComponents, dataComponents, args, roundHandler) + } 
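+	// collect the components created above into the holders passed to node.NewNode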
+ + processComponents := GetDefaultProcessComponents() + processComponents.ForkDetect = tpn.ForkDetector + processComponents.BlockProcess = tpn.BlockProcessor + processComponents.ReqFinder = tpn.RequestersFinder + processComponents.HeaderIntegrVerif = tpn.HeaderIntegrityVerifier + processComponents.HeaderSigVerif = tpn.HeaderSigVerifier + processComponents.BlackListHdl = tpn.BlockBlackListHandler + processComponents.NodesCoord = tpn.NodesCoordinator + processComponents.ShardCoord = tpn.ShardCoordinator + processComponents.IntContainer = tpn.MainInterceptorsContainer + processComponents.FullArchiveIntContainer = tpn.FullArchiveInterceptorsContainer + processComponents.HistoryRepositoryInternal = tpn.HistoryRepository + processComponents.WhiteListHandlerInternal = tpn.WhiteListHandler + processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs + processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.MainMessenger) + processComponents.HardforkTriggerField = tpn.HardforkTrigger + processComponents.ScheduledTxsExecutionHandlerInternal = &testscommon.ScheduledTxsExecutionStub{} + processComponents.ProcessedMiniBlocksTrackerInternal = &testscommon.ProcessedMiniBlocksTrackerStub{} + processComponents.SentSignaturesTrackerInternal = &testscommon.SentSignatureTrackerStub{} + + processComponents.RoundHandlerField = roundHandler + processComponents.EpochNotifier = tpn.EpochStartNotifier + + stateComponents := GetDefaultStateComponents() + stateComponents.Accounts = tpn.AccntState + stateComponents.AccountsAPI = tpn.AccntState + + finalProvider, _ := blockInfoProviders.NewFinalBlockInfo(dataComponents.BlockChain) + finalAccountsApi, _ := state.NewAccountsDBApi(tpn.AccntState, finalProvider) + + currentProvider, _ := blockInfoProviders.NewCurrentBlockInfo(dataComponents.BlockChain) + currentAccountsApi, _ := state.NewAccountsDBApi(tpn.AccntState, currentProvider) + + historicalAccountsApi, _ := state.NewAccountsDBApiWithHistory(tpn.AccntState) + + argsAccountsRepo := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: finalAccountsApi, + CurrentStateAccountsWrapper: currentAccountsApi, + HistoricalStateAccountsWrapper: historicalAccountsApi, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(argsAccountsRepo) + + networkComponents := GetDefaultNetworkComponents() + networkComponents.Messenger = tpn.MainMessenger + networkComponents.FullArchiveNetworkMessengerField = tpn.FullArchiveMessenger + networkComponents.PeersRatingHandlerField = tpn.PeersRatingHandler + networkComponents.PeersRatingMonitorField = tpn.PeersRatingMonitor + networkComponents.InputAntiFlood = &mock.NilAntifloodHandler{} + networkComponents.PeerHonesty = &mock.PeerHonestyHandlerStub{} + + tpn.Node, err = node.NewNode( + node.WithAddressSignatureSize(64), + node.WithValidatorSignatureSize(48), + node.WithBootstrapComponents(bootstrapComponents), + node.WithCoreComponents(coreComponents), + node.WithStatusCoreComponents(statusCoreComponents), + node.WithDataComponents(dataComponents), + node.WithProcessComponents(processComponents), + node.WithCryptoComponents(cryptoComponents), + node.WithNetworkComponents(networkComponents), + node.WithStateComponents(stateComponents), + node.WithPeerDenialEvaluator(&mock.PeerDenialEvaluatorStub{}), + node.WithStatusCoreComponents(statusCoreComponents), + node.WithGenesisTime(time.Unix(args.StartTime, 0)), + node.WithRoundDuration(args.RoundTime), + node.WithPublicKeySize(publicKeySize), + ) + log.LogIfError(err) + + err = 
nodeDebugFactory.CreateInterceptedDebugHandler( + tpn.Node, + tpn.MainInterceptorsContainer, + tpn.ResolversContainer, + tpn.RequestersFinder, + config.InterceptorResolverDebugConfig{ + Enabled: true, + CacheSize: 1000, + EnablePrint: true, + IntervalAutoPrintInSeconds: 1, + NumRequestsThreshold: 1, + NumResolveFailureThreshold: 1, + DebugLineExpiration: 1000, + }, + ) + log.LogIfError(err) +} + +func (tcn *TestFullNode) initInterceptors( + coreComponents process.CoreComponentsHolder, + cryptoComponents process.CryptoComponentsHolder, + roundHandler consensus.RoundHandler, + enableEpochsHandler common.EnableEpochsHandler, + storage dataRetriever.StorageService, + epochStartTrigger TestEpochStartTrigger, +) { + interceptorDataVerifierArgs := interceptorsFactory.InterceptedDataVerifierFactoryArgs{ + CacheSpan: time.Second * 10, + CacheExpiry: time.Second * 10, + } + + accountsAdapter := epochStartDisabled.NewAccountsAdapter() + + blockBlackListHandler := cache.NewTimeCache(TimeSpanForBadHeaders) + + genesisBlocks := make(map[uint32]data.HeaderHandler) + blockTracker := processMock.NewBlockTrackerMock(tcn.ShardCoordinator, genesisBlocks) + + whiteLstHandler, _ := disabledInterceptors.NewDisabledWhiteListDataVerifier() + + cacherVerifiedCfg := storageunit.CacheConfig{Capacity: 5000, Type: storageunit.LRUCache, Shards: 1} + cacheVerified, _ := storageunit.NewCache(cacherVerifiedCfg) + whiteListerVerifiedTxs, _ := interceptors.NewWhiteListDataVerifier(cacheVerified) + + if tcn.ShardCoordinator.SelfId() == core.MetachainShardId { + metaInterceptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ + CoreComponents: coreComponents, + CryptoComponents: cryptoComponents, + Accounts: accountsAdapter, + ShardCoordinator: tcn.ShardCoordinator, + NodesCoordinator: tcn.NodesCoordinator, + MainMessenger: tcn.MainMessenger, + FullArchiveMessenger: tcn.FullArchiveMessenger, + Store: storage, + DataPool: tcn.DataPool, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, + BlockBlackList: blockBlackListHandler, + HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, + HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: whiteLstHandler, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + AntifloodHandler: &mock.NilAntifloodHandler{}, + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + SizeCheckDelta: sizeCheckDelta, + RequestHandler: &testscommon.RequestHandlerStub{}, + PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, + SignaturesHandler: &processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + NodeOperationMode: common.NormalOperation, + InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), + } + interceptorContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorContainerFactoryArgs) + if err != nil { + fmt.Println(err.Error()) + } + + tcn.MainInterceptorsContainer, _, err = interceptorContainerFactory.Create() + if err != nil { + log.Debug("interceptor container factory Create", "error", err.Error()) + } + } else { + 
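+		// shard nodes use a shard epoch start trigger and the shard interceptors container factory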
argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ + MiniBlocksPool: tcn.DataPool.MiniBlocks(), + ValidatorsInfoPool: tcn.DataPool.ValidatorsInfo(), + RequestHandler: &testscommon.RequestHandlerStub{}, + } + peerMiniBlockSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) + argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + HeaderValidator: &mock.HeaderValidatorStub{}, + Uint64Converter: TestUint64Converter, + DataPool: tcn.DataPool, + Storage: storage, + RequestHandler: &testscommon.RequestHandlerStub{}, + Epoch: 0, + Validity: 1, + Finality: 1, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + PeerMiniBlocksSyncer: peerMiniBlockSyncer, + RoundHandler: roundHandler, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EnableEpochsHandler: enableEpochsHandler, + } + _, _ = shardchain.NewEpochStartTrigger(argsShardEpochStart) + + shardIntereptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ + CoreComponents: coreComponents, + CryptoComponents: cryptoComponents, + Accounts: accountsAdapter, + ShardCoordinator: tcn.ShardCoordinator, + NodesCoordinator: tcn.NodesCoordinator, + MainMessenger: tcn.MainMessenger, + FullArchiveMessenger: tcn.FullArchiveMessenger, + Store: storage, + DataPool: tcn.DataPool, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: &economicsmocks.EconomicsHandlerMock{}, + BlockBlackList: blockBlackListHandler, + HeaderSigVerifier: &consensusMocks.HeaderSigVerifierMock{}, + HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: whiteLstHandler, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + AntifloodHandler: &mock.NilAntifloodHandler{}, + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + SizeCheckDelta: sizeCheckDelta, + RequestHandler: &testscommon.RequestHandlerStub{}, + PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, + SignaturesHandler: &processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + NodeOperationMode: common.NormalOperation, + InterceptedDataVerifierFactory: interceptorsFactory.NewInterceptedDataVerifierFactory(interceptorDataVerifierArgs), + } + + interceptorContainerFactory, err := interceptorscontainer.NewShardInterceptorsContainerFactory(shardIntereptorContainerFactoryArgs) + if err != nil { + fmt.Println(err.Error()) + } + + tcn.MainInterceptorsContainer, _, err = interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + } +} + +func (tpn *TestFullNode) initBlockProcessor( + coreComponents *mock.CoreComponentsStub, + dataComponents *mock.DataComponentsStub, + args ArgsTestFullNode, + roundHandler consensus.RoundHandler, +) { + var err error + + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } + + log := logger.GetOrCreate(fmt.Sprintf("p/sync/%s", id)) + + accountsDb := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) + accountsDb[state.UserAccountsState] = tpn.AccntState + accountsDb[state.PeerAccountsState] = tpn.PeerState + + if tpn.EpochNotifier == nil { + tpn.EpochNotifier = 
forking.NewGenericEpochNotifier() + } + if tpn.EnableEpochsHandler == nil { + tpn.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(CreateEnableEpochsConfig(), tpn.EpochNotifier) + } + + // if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { + // tpn.ForkDetector, _ = processSync.NewShardForkDetector( + // log, + // tpn.RoundHandler, + // tpn.BlockBlackListHandler, + // tpn.BlockTracker, + // tpn.NodesSetup.GetStartTime(), + // tpn.EnableEpochsHandler, + // tpn.DataPool.Proofs()) + // } else { + // tpn.ForkDetector, _ = processSync.NewMetaForkDetector( + // log, + // tpn.RoundHandler, + // tpn.BlockBlackListHandler, + // tpn.BlockTracker, + // tpn.NodesSetup.GetStartTime(), + // tpn.EnableEpochsHandler, + // tpn.DataPool.Proofs()) + // } + + // if tpn.ForkDetector == nil { + // panic("AAAAAAAAAAAAAAAAA") + // } + + bootstrapComponents := getDefaultBootstrapComponents(tpn.ShardCoordinator, tpn.EnableEpochsHandler) + bootstrapComponents.HdrIntegrityVerifier = tpn.HeaderIntegrityVerifier + + statusComponents := GetDefaultStatusComponents() + + statusCoreComponents := &testFactory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, + } + + id = hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } + + logger := logger.GetOrCreate(fmt.Sprintf("p/b/%s", id)) + + argumentsBase := block.ArgBaseProcessor{ + CoreComponents: coreComponents, + DataComponents: dataComponents, + BootstrapComponents: bootstrapComponents, + StatusComponents: statusComponents, + StatusCoreComponents: statusCoreComponents, + Config: config.Config{}, + AccountsDB: accountsDb, + ForkDetector: tpn.ForkDetector, + NodesCoordinator: tpn.NodesCoordinator, + FeeHandler: tpn.FeeAccumulator, + RequestHandler: tpn.RequestHandler, + BlockChainHook: tpn.BlockchainHook, + HeaderValidator: tpn.HeaderValidator, + BootStorer: &mock.BoostrapStorerMock{ + PutCalled: func(round int64, bootData bootstrapStorage.BootstrapData) error { + return nil + }, + }, + BlockTracker: tpn.BlockTracker, + BlockSizeThrottler: TestBlockSizeThrottler, + HistoryRepository: tpn.HistoryRepository, + GasHandler: tpn.GasHandler, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, + ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + OutportDataProvider: &outport.OutportDataProviderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, + Logger: logger, + } + + if check.IfNil(tpn.EpochStartNotifier) { + tpn.EpochStartNotifier = notifier.NewEpochStartSubscriptionHandler() + } + + if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { + if check.IfNil(tpn.EpochStartTrigger) { + argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + GenesisTime: time.Unix(args.StartTime, 0), + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 1000, + RoundsPerEpoch: 10000, + }, + Epoch: 0, + EpochStartNotifier: tpn.EpochStartNotifier, + Storage: tpn.Storage, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + DataPool: tpn.DataPool, + } + epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) + tpn.EpochStartTrigger = &metachain.TestTrigger{} + tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) + } + + argumentsBase.EpochStartTrigger = 
tpn.EpochStartTrigger + argumentsBase.TxCoordinator = tpn.TxCoordinator + + argsStakingToPeer := scToProtocol.ArgStakingToPeer{ + PubkeyConv: TestValidatorPubkeyConverter, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + PeerState: tpn.PeerState, + BaseState: tpn.AccntState, + ArgParser: tpn.ArgsParser, + CurrTxs: tpn.DataPool.CurrentBlockTxs(), + RatingsData: tpn.RatingsData, + EnableEpochsHandler: tpn.EnableEpochsHandler, + } + scToProtocolInstance, _ := scToProtocol.NewStakingToPeer(argsStakingToPeer) + + argsEpochStartData := metachain.ArgsNewEpochStartData{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Store: tpn.Storage, + DataPool: tpn.DataPool, + BlockTracker: tpn.BlockTracker, + ShardCoordinator: tpn.ShardCoordinator, + EpochStartTrigger: tpn.EpochStartTrigger, + RequestHandler: tpn.RequestHandler, + EnableEpochsHandler: tpn.EnableEpochsHandler, + } + epochStartDataCreator, _ := metachain.NewEpochStartData(argsEpochStartData) + + economicsDataProvider := metachain.NewEpochEconomicsStatistics() + argsEpochEconomics := metachain.ArgsNewEpochEconomics{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Store: tpn.Storage, + ShardCoordinator: tpn.ShardCoordinator, + RewardsHandler: tpn.EconomicsData, + RoundTime: roundHandler, + GenesisTotalSupply: tpn.EconomicsData.GenesisTotalSupply(), + EconomicsDataNotified: economicsDataProvider, + StakingV2EnableEpoch: tpn.EnableEpochs.StakingV2EnableEpoch, + } + epochEconomics, _ := metachain.NewEndOfEpochEconomicsDataCreator(argsEpochEconomics) + + systemVM, errGet := tpn.VMContainer.Get(factory.SystemVirtualMachine) + if errGet != nil { + log.Error("initBlockProcessor tpn.VMContainer.Get", "error", errGet) + } + + argsStakingDataProvider := metachain.StakingDataProviderArgs{ + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + SystemVM: systemVM, + MinNodePrice: "1000", + } + stakingDataProvider, errRsp := metachain.NewStakingDataProvider(argsStakingDataProvider) + if errRsp != nil { + log.Error("initBlockProcessor NewRewardsStakingProvider", "error", errRsp) + } + + rewardsStorage, _ := tpn.Storage.GetStorer(dataRetriever.RewardTransactionUnit) + miniBlockStorage, _ := tpn.Storage.GetStorer(dataRetriever.MiniBlockUnit) + argsEpochRewards := metachain.RewardsCreatorProxyArgs{ + BaseRewardsCreatorArgs: metachain.BaseRewardsCreatorArgs{ + ShardCoordinator: tpn.ShardCoordinator, + PubkeyConverter: TestAddressPubkeyConverter, + RewardsStorage: rewardsStorage, + MiniBlockStorage: miniBlockStorage, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + DataPool: tpn.DataPool, + ProtocolSustainabilityAddress: testProtocolSustainabilityAddress, + NodesConfigProvider: tpn.NodesCoordinator, + UserAccountsDB: tpn.AccntState, + EnableEpochsHandler: tpn.EnableEpochsHandler, + ExecutionOrderHandler: tpn.TxExecutionOrderHandler, + }, + StakingDataProvider: stakingDataProvider, + RewardsHandler: tpn.EconomicsData, + EconomicsDataProvider: economicsDataProvider, + } + epochStartRewards, _ := metachain.NewRewardsCreatorProxy(argsEpochRewards) + + validatorInfoStorage, _ := tpn.Storage.GetStorer(dataRetriever.UnsignedTransactionUnit) + argsEpochValidatorInfo := metachain.ArgsNewValidatorInfoCreator{ + ShardCoordinator: tpn.ShardCoordinator, + ValidatorInfoStorage: validatorInfoStorage, + MiniBlockStorage: miniBlockStorage, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + DataPool: tpn.DataPool, + EnableEpochsHandler: tpn.EnableEpochsHandler, + } + epochStartValidatorInfo, _ := 
metachain.NewValidatorInfoCreator(argsEpochValidatorInfo) + + maxNodesChangeConfigProvider, _ := notifier.NewNodesConfigProvider( + tpn.EpochNotifier, + nil, + ) + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := metachain.NewAuctionListDisplayer(metachain.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachain.NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + }) + + argsAuctionListSelector := metachain.AuctionListSelectorArgs{ + ShardCoordinator: tpn.ShardCoordinator, + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + auctionListSelector, _ := metachain.NewAuctionListSelector(argsAuctionListSelector) + + argsEpochSystemSC := metachain.ArgsNewEpochStartSystemSCProcessing{ + SystemVM: systemVM, + UserAccountsDB: tpn.AccntState, + PeerAccountsDB: tpn.PeerState, + Marshalizer: TestMarshalizer, + StartRating: tpn.RatingsData.StartRating(), + ValidatorInfoCreator: tpn.ValidatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: tpn.NodesCoordinator, + EpochNotifier: tpn.EpochNotifier, + GenesisNodesConfig: tpn.NodesSetup, + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: tpn.NodesCoordinator, + ShardCoordinator: tpn.ShardCoordinator, + ESDTOwnerAddressBytes: vm.EndOfEpochAddress, + EnableEpochsHandler: tpn.EnableEpochsHandler, + AuctionListSelector: auctionListSelector, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + } + epochStartSystemSCProcessor, _ := metachain.NewSystemSCProcessor(argsEpochSystemSC) + tpn.EpochStartSystemSCProcessor = epochStartSystemSCProcessor + + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: argumentsBase, + SCToProtocol: scToProtocolInstance, + PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, + EpochEconomics: epochEconomics, + EpochStartDataCreator: epochStartDataCreator, + EpochRewardsCreator: epochStartRewards, + EpochValidatorInfoCreator: epochStartValidatorInfo, + ValidatorStatisticsProcessor: tpn.ValidatorStatisticsProcessor, + EpochSystemSCProcessor: epochStartSystemSCProcessor, + } + + tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) + } else { + if check.IfNil(tpn.EpochStartTrigger) { + argsPeerMiniBlocksSyncer := shardchain.ArgPeerMiniBlockSyncer{ + MiniBlocksPool: tpn.DataPool.MiniBlocks(), + ValidatorsInfoPool: tpn.DataPool.ValidatorsInfo(), + RequestHandler: tpn.RequestHandler, + } + peerMiniBlocksSyncer, _ := shardchain.NewPeerMiniBlockSyncer(argsPeerMiniBlocksSyncer) + argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + HeaderValidator: tpn.HeaderValidator, + Uint64Converter: TestUint64Converter, + DataPool: tpn.DataPool, + Storage: tpn.Storage, + RequestHandler: tpn.RequestHandler, + Epoch: 0, + Validity: 1, + Finality: 1, + EpochStartNotifier: tpn.EpochStartNotifier, + PeerMiniBlocksSyncer: peerMiniBlocksSyncer, + RoundHandler: tpn.RoundHandler, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EnableEpochsHandler: tpn.EnableEpochsHandler, + } + epochStartTrigger, _ := shardchain.NewEpochStartTrigger(argsShardEpochStart) + tpn.EpochStartTrigger = &shardchain.TestTrigger{} + 
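+			// inject the newly created shard epoch start trigger into the test trigger wrapper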
tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) + } + + argumentsBase.EpochStartTrigger = tpn.EpochStartTrigger + argumentsBase.BlockChainHook = tpn.BlockchainHook + argumentsBase.TxCoordinator = tpn.TxCoordinator + argumentsBase.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{} + + arguments := block.ArgShardProcessor{ + ArgBaseProcessor: argumentsBase, + } + + tpn.BlockProcessor, err = block.NewShardProcessor(arguments) + } + + if err != nil { + panic(fmt.Sprintf("error creating blockprocessor: %s", err.Error())) + } +} + +func (tpn *TestFullNode) initBlockTracker( + roundHandler consensus.RoundHandler, +) { + id := hex.EncodeToString(tpn.OwnAccount.PkTxSignBytes) + if len(id) > 8 { + id = id[0:8] + } + + log := logger.GetOrCreate(fmt.Sprintf("p/track/%s", id)) + + argBaseTracker := track.ArgBaseTracker{ + Logger: log, + Hasher: TestHasher, + HeaderValidator: tpn.HeaderValidator, + Marshalizer: TestMarshalizer, + RequestHandler: tpn.RequestHandler, + RoundHandler: roundHandler, + ShardCoordinator: tpn.ShardCoordinator, + Store: tpn.Storage, + StartHeaders: tpn.GenesisBlocks, + PoolsHolder: tpn.DataPool, + WhitelistHandler: tpn.WhiteListHandler, + FeeHandler: tpn.EconomicsData, + EnableEpochsHandler: tpn.EnableEpochsHandler, + ProofsPool: tpn.DataPool.Proofs(), + } + + var err error + if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { + arguments := track.ArgShardTracker{ + ArgBaseTracker: argBaseTracker, + } + + tpn.BlockTracker, err = track.NewShardBlockTrack(arguments) + if err != nil { + panic(err.Error()) + } + } else { + arguments := track.ArgMetaTracker{ + ArgBaseTracker: argBaseTracker, + } + + tpn.BlockTracker, err = track.NewMetaBlockTrack(arguments) + if err != nil { + panic(err.Error()) + } + } +} diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 8ac20e24030..2451b9e62ac 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -1792,7 +1792,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u ) processedMiniBlocksTracker := processedMb.NewProcessedMiniBlocksTracker() - fact, _ := shard.NewPreProcessorsContainerFactory( + fact, err := shard.NewPreProcessorsContainerFactory( tpn.ShardCoordinator, tpn.Storage, TestMarshalizer, @@ -1816,6 +1816,9 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u processedMiniBlocksTracker, tpn.TxExecutionOrderHandler, ) + if err != nil { + panic(err.Error()) + } tpn.PreProcessorsContainer, _ = fact.Create() argsTransactionCoordinator := coordinator.ArgTransactionCoordinator{ @@ -3166,18 +3169,25 @@ func (tpn *TestProcessorNode) initBlockTracker() { ProofsPool: tpn.DataPool.Proofs(), } + var err error if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { arguments := track.ArgShardTracker{ ArgBaseTracker: argBaseTracker, } - tpn.BlockTracker, _ = track.NewShardBlockTrack(arguments) + tpn.BlockTracker, err = track.NewShardBlockTrack(arguments) + if err != nil { + panic(err.Error()) + } } else { arguments := track.ArgMetaTracker{ ArgBaseTracker: argBaseTracker, } - tpn.BlockTracker, _ = track.NewMetaBlockTrack(arguments) + tpn.BlockTracker, err = track.NewMetaBlockTrack(arguments) + if err != nil { + panic(err.Error()) + } } } From 1518f7663d4b57ccc93681dc5459650f99b7a3e1 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 27 Jan 2025 17:05:32 +0200 Subject: [PATCH 08/10] merge conflicts fixes --- consensus/spos/bls/v2/subroundBlock.go | 5 ++++- 
integrationTests/consensus/consensus_test.go | 2 +- integrationTests/testProcessorNode.go | 3 +-- process/sync/baseForkDetector.go | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/consensus/spos/bls/v2/subroundBlock.go b/consensus/spos/bls/v2/subroundBlock.go index 5b90f06f886..14bd0f06043 100644 --- a/consensus/spos/bls/v2/subroundBlock.go +++ b/consensus/spos/bls/v2/subroundBlock.go @@ -454,7 +454,10 @@ func (sr *subroundBlock) isHeaderForCurrentConsensus(header data.HeaderHandler) func (sr *subroundBlock) getLeaderForHeader(headerHandler data.HeaderHandler) ([]byte, error) { nc := sr.NodesCoordinator() - prevBlockEpoch := sr.Blockchain().GetCurrentBlockHeader().GetEpoch() + prevBlockEpoch := uint32(0) + if sr.Blockchain().GetCurrentBlockHeader() != nil { + prevBlockEpoch = sr.Blockchain().GetCurrentBlockHeader().GetEpoch() + } // TODO: remove this if first block in new epoch will be validated by epoch validators // first block in epoch is validated by previous epoch validators selectionEpoch := headerHandler.GetEpoch() diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 55faae12bac..1d18f7d1913 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -329,7 +329,7 @@ func TestConsensusBLSWithFullProcessing(t *testing.T) { for _, n := range nodesList { for i := 1; i < len(nodes); i++ { if check.IfNil(n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader()) { - // assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) + assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) } else { fmt.Println("FOUND") assert.Equal(t, expectedNonce, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce()) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 619fe0a1a9d..d5f9324b13e 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -1098,9 +1098,8 @@ func (tpn *TestProcessorNode) initDataPools() { log := logger.GetOrCreate(fmt.Sprintf("dtr/hc/%s", id)) - tpn.DataPool = dataRetrieverMock.CreatePoolsHolderWithProofsPool(log, 1, tpn.ShardCoordinator.SelfId(), tpn.ProofsPool) tpn.ProofsPool = proofscache.NewProofsPool(3) - tpn.DataPool = dataRetrieverMock.CreatePoolsHolderWithProofsPool(1, tpn.ShardCoordinator.SelfId(), tpn.ProofsPool) + tpn.DataPool = dataRetrieverMock.CreatePoolsHolderWithProofsPool(log, 1, tpn.ShardCoordinator.SelfId(), tpn.ProofsPool) cacherCfg := storageunit.CacheConfig{Capacity: 10000, Type: storageunit.LRUCache, Shards: 1} suCache, _ := storageunit.NewCache(cacherCfg) tpn.WhiteListHandler, _ = interceptors.NewWhiteListDataVerifier(suCache) diff --git a/process/sync/baseForkDetector.go b/process/sync/baseForkDetector.go index 94b4c30ec6c..203ccc881c5 100644 --- a/process/sync/baseForkDetector.go +++ b/process/sync/baseForkDetector.go @@ -736,7 +736,7 @@ func (bfd *baseForkDetector) processReceivedProof(proof data.HeaderProofHandler) probableHighestNonce := bfd.computeProbableHighestNonce() bfd.setProbableHighestNonce(probableHighestNonce) - log.Debug("forkDetector.processReceivedProof", + bfd.log.Debug("forkDetector.processReceivedProof", "round", hInfo.round, "nonce", hInfo.nonce, "hash", hInfo.hash, From 976b413c852b2dbf5274cc1a9a234fc1a37616ed Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 28 Jan 2025 16:48:13 +0200 Subject: [PATCH 09/10] check for genesis block --- 
consensus/spos/bls/v2/subroundBlock.go | 2 +- process/block/baseProcess.go | 2 +- process/block/metablock.go | 4 ++-- process/block/shardblock.go | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/spos/bls/v2/subroundBlock.go b/consensus/spos/bls/v2/subroundBlock.go index 14bd0f06043..943fe246878 100644 --- a/consensus/spos/bls/v2/subroundBlock.go +++ b/consensus/spos/bls/v2/subroundBlock.go @@ -371,7 +371,7 @@ func (sr *subroundBlock) saveProofForPreviousHeaderIfNeeded(header data.HeaderHa proof := header.GetPreviousProof() err := common.VerifyProofAgainstHeader(proof, prevHeader) if err != nil { - sr.Log.Debug("saveProofForPreviousHeaderIfNeeded: invalid proof, %s", err.Error()) + sr.Log.Debug("saveProofForPreviousHeaderIfNeeded: invalid proof", "error", err) return } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 1a7bc71a5c0..2bd8467e265 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -747,7 +747,7 @@ func (bp *baseProcessor) sortHeaderHashesForCurrentBlockByNonce(usedInBlock bool } func (bp *baseProcessor) hasMissingProof(headerInfo *hdrInfo, hdrHash string) bool { - isFlagEnabledForHeader := bp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, headerInfo.hdr.GetEpoch()) + isFlagEnabledForHeader := common.ShouldBlockHavePrevProof(headerInfo.hdr, bp.enableEpochsHandler, common.EquivalentMessagesFlag) if !isFlagEnabledForHeader { return false } diff --git a/process/block/metablock.go b/process/block/metablock.go index 953d8f66cc3..47aa7ecfe04 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -446,7 +446,7 @@ func (mp *metaProcessor) checkProofsForShardData(header *block.MetaBlock) error continue } - if !mp.proofsPool.HasProof(shardData.ShardID, shardData.HeaderHash) { + if !mp.proofsPool.HasProof(shardData.ShardID, shardData.HeaderHash) && shardData.GetNonce() > 1 { return fmt.Errorf("%w for meta header hash %s", process.ErrMissingHeaderProof, hex.EncodeToString(shardData.HeaderHash)) } @@ -2243,7 +2243,7 @@ func (mp *metaProcessor) createShardInfo() ([]data.ShardDataHandler, error) { } isBlockAfterEquivalentMessagesFlag := !check.IfNil(headerInfo.hdr) && - mp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, headerInfo.hdr.GetEpoch()) + mp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, headerInfo.hdr.GetEpoch()) && headerInfo.hdr.GetNonce() > 1 hasMissingShardHdrProof := isBlockAfterEquivalentMessagesFlag && !mp.proofsPool.HasProof(headerInfo.hdr.GetShardID(), []byte(hdrHash)) if hasMissingShardHdrProof { return nil, fmt.Errorf("%w for shard header with hash %s", process.ErrMissingHeaderProof, hex.EncodeToString([]byte(hdrHash))) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index dcfe5b7ee0e..bcf2ed121b6 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -313,7 +313,7 @@ func (sp *shardProcessor) ProcessBlock( continue } - if !sp.proofsPool.HasProof(core.MetachainShardId, metaBlockHash) { + if !sp.proofsPool.HasProof(core.MetachainShardId, metaBlockHash) && header.GetNonce() > 1 { return fmt.Errorf("%w for header hash %s", process.ErrMissingHeaderProof, hex.EncodeToString(metaBlockHash)) } From 0e9adb9fd9636c19dba933a28eea9c55a425dd5a Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 28 Jan 2025 16:48:38 +0200 Subject: [PATCH 10/10] integration tests meta chain fixes --- integrationTests/consensus/consensus_test.go | 14 ++++---- 
integrationTests/testFullNode.go | 36 +++++++++++++++++++- 2 files changed, 42 insertions(+), 8 deletions(-) diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 1d18f7d1913..96fb5ae36a5 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -210,9 +210,9 @@ func TestConsensusBLSWithFullProcessing(t *testing.T) { logger.ToggleLoggerName(true) numKeysOnEachNode := 1 - numMetaNodes := uint32(4) - numNodes := uint32(4) - consensusSize := uint32(4 * numKeysOnEachNode) + numMetaNodes := uint32(2) + numNodes := uint32(2) + consensusSize := uint32(2 * numKeysOnEachNode) roundTime := uint64(1000) // maxShards := uint32(1) @@ -227,10 +227,10 @@ func TestConsensusBLSWithFullProcessing(t *testing.T) { enableEpochsConfig := integrationTests.CreateEnableEpochsConfig() - equivalentProodsActivationEpoch := uint32(10) + equivalentProofsActivationEpoch := uint32(0) - enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProodsActivationEpoch - enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProodsActivationEpoch + enableEpochsConfig.EquivalentMessagesEnableEpoch = equivalentProofsActivationEpoch + enableEpochsConfig.FixedOrderInConsensusEnableEpoch = equivalentProofsActivationEpoch fmt.Println("Step 1. Setup nodes...") @@ -332,7 +332,7 @@ func TestConsensusBLSWithFullProcessing(t *testing.T) { assert.Fail(t, fmt.Sprintf("Node with idx %d does not have a current block", i)) } else { fmt.Println("FOUND") - assert.Equal(t, expectedNonce, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce()) + assert.GreaterOrEqual(t, n.Node.GetDataComponents().Blockchain().GetCurrentBlockHeader().GetNonce(), expectedNonce) } } } diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go index 9638459af8f..0ca2d58a602 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -48,6 +48,7 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract" processSync "github.com/multiversx/mx-chain-go/process/sync" "github.com/multiversx/mx-chain-go/process/track" + "github.com/multiversx/mx-chain-go/sharding" chainShardingMocks "github.com/multiversx/mx-chain-go/sharding/mock" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" @@ -110,6 +111,7 @@ func CreateNodesWithTestFullNode( EpochsConfig: &enableEpochsConfig, NodeKeys: keysPair, }, + ShardID: shardID, ConsensusSize: consensusSize, RoundTime: roundTime, ConsensusType: consensusType, @@ -137,6 +139,7 @@ func CreateNodesWithTestFullNode( type ArgsTestFullNode struct { *ArgTestProcessorNode + ShardID uint32 ConsensusSize int RoundTime uint64 ConsensusType string @@ -150,13 +153,18 @@ type ArgsTestFullNode struct { type TestFullNode struct { *TestProcessorNode + + ShardCoordinator sharding.Coordinator } func NewTestFullNode(args ArgsTestFullNode) *TestFullNode { tpn := newBaseTestProcessorNode(*args.ArgTestProcessorNode) + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, args.ShardID) + tfn := &TestFullNode{ TestProcessorNode: tpn, + ShardCoordinator: shardCoordinator, } tfn.initTestNodeWithArgs(*args.ArgTestProcessorNode, args) @@ -345,6 +353,25 @@ func (tpn *TestFullNode) initTestNodeWithArgs(args ArgTestProcessorNode, fullArg } } +func (tpn *TestFullNode) setGenesisBlock() { + genesisBlock := tpn.GenesisBlocks[tpn.ShardCoordinator.SelfId()] + _ = tpn.BlockChain.SetGenesisHeader(genesisBlock) + 
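+	// compute and store the genesis header hash, then log it for debugging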
hash, _ := core.CalculateHash(TestMarshalizer, TestHasher, genesisBlock) + tpn.BlockChain.SetGenesisHeaderHash(hash) + log.Info("set genesis", + "shard ID", tpn.ShardCoordinator.SelfId(), + "hash", hex.EncodeToString(hash), + ) +} + +func (tpn *TestFullNode) initChainHandler() { + if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { + tpn.BlockChain = CreateMetaChain() + } else { + tpn.BlockChain = CreateShardChain() + } +} + func (tpn *TestFullNode) initNode( args ArgsTestFullNode, syncer ntp.SyncTimer, @@ -957,6 +984,10 @@ func (tpn *TestFullNode) initBlockProcessor( log.Error("initBlockProcessor tpn.VMContainer.Get", "error", errGet) } + if systemVM == nil { + systemVM, _ = mock.NewOneSCExecutorMockVM(tpn.BlockchainHook, TestHasher) + } + argsStakingDataProvider := metachain.StakingDataProviderArgs{ EnableEpochsHandler: coreComponents.EnableEpochsHandler(), SystemVM: systemVM, @@ -988,7 +1019,10 @@ func (tpn *TestFullNode) initBlockProcessor( RewardsHandler: tpn.EconomicsData, EconomicsDataProvider: economicsDataProvider, } - epochStartRewards, _ := metachain.NewRewardsCreatorProxy(argsEpochRewards) + epochStartRewards, err := metachain.NewRewardsCreatorProxy(argsEpochRewards) + if err != nil { + panic(fmt.Sprintf("error creating rewards creator proxy: %s", err.Error())) + } validatorInfoStorage, _ := tpn.Storage.GetStorer(dataRetriever.UnsignedTransactionUnit) argsEpochValidatorInfo := metachain.ArgsNewValidatorInfoCreator{