diff --git a/backend/cmd/exporter/main.go b/backend/cmd/exporter/main.go index 8be9c7fc4..426ca33e0 100644 --- a/backend/cmd/exporter/main.go +++ b/backend/cmd/exporter/main.go @@ -19,7 +19,6 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/version" "github.com/gobitfly/beaconchain/pkg/exporter/modules" "github.com/gobitfly/beaconchain/pkg/exporter/services" - "github.com/gobitfly/beaconchain/pkg/monitoring" ) func Run() { @@ -138,8 +137,8 @@ func Run() { wg.Wait() // enable light-weight db connection monitoring - monitoring.Init(false) - monitoring.Start() + // monitoring.Init(false) + // monitoring.Start() if utils.Config.TieredCacheProvider != "redis" { log.Fatal(fmt.Errorf("no cache provider set, please set TierdCacheProvider (example redis)"), "", 0) diff --git a/backend/cmd/misc/main.go b/backend/cmd/misc/main.go index 43b499bcb..12da592b2 100644 --- a/backend/cmd/misc/main.go +++ b/backend/cmd/misc/main.go @@ -138,12 +138,13 @@ func Run() { requires, ok := REQUIRES_LIST[opts.Command] if !ok { requires = misctypes.Requires{ - Bigtable: true, - Redis: true, - ClNode: true, - ElNode: true, - UserDBs: true, - NetworkDBs: true, + Bigtable: true, + Redis: true, + ClNode: true, + ElNode: true, + UserDBs: true, + NetworkDBs: true, + ClickhouseDBs: false, } } @@ -195,9 +196,11 @@ func Run() { } // clickhouse - db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&cfg.ClickHouse.WriterDatabase, &cfg.ClickHouse.ReaderDatabase, "clickhouse", "clickhouse") - defer db.ClickHouseReader.Close() - defer db.ClickHouseWriter.Close() + if requires.ClickhouseDBs { + db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&cfg.ClickHouse.WriterDatabase, &cfg.ClickHouse.ReaderDatabase, "clickhouse", "clickhouse") + defer db.ClickHouseReader.Close() + defer db.ClickHouseWriter.Close() + } // Initialize the persistent redis client if requires.Redis { @@ -1142,7 +1145,7 @@ func debugBlocks(clClient *rpc.LighthouseClient) error { } else if 
clBlock.ExecutionPayload.BlockNumber != i { log.Warnf("clBlock.ExecutionPayload.BlockNumber != i: %v != %v", clBlock.ExecutionPayload.BlockNumber, i) } else { - logFields["cl.txs"] = len(clBlock.ExecutionPayload.Transactions) + logFields["cl.txs"] = clBlock.ExecutionPayload.TransactionsCount } log.InfoWithFields(logFields, "debug block") diff --git a/backend/cmd/misc/misctypes/requires.go b/backend/cmd/misc/misctypes/requires.go index 6d2337412..178db5bd7 100644 --- a/backend/cmd/misc/misctypes/requires.go +++ b/backend/cmd/misc/misctypes/requires.go @@ -1,10 +1,11 @@ package misctypes type Requires struct { - Bigtable bool - Redis bool - ClNode bool - ElNode bool - NetworkDBs bool - UserDBs bool + Bigtable bool + Redis bool + ClNode bool + ElNode bool + NetworkDBs bool + UserDBs bool + ClickhouseDBs bool } diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index d45874650..4e338cdb0 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -214,6 +214,10 @@ func (d *DummyService) GetValidatorsFromSlices(ctx context.Context, indices []ui return getDummyData[[]t.VDBValidator](ctx) } +func (d *DummyService) GetValidatorsEffectiveBalanceTotal(ctx context.Context, indices []uint64) (uint64, error) { + return getDummyData[uint64](ctx) +} + func (d *DummyService) GetUserDashboards(ctx context.Context, userId uint64) (*t.UserDashboardsData, error) { return getDummyStruct[t.UserDashboardsData](ctx) } @@ -266,7 +270,7 @@ func (d *DummyService) GetValidatorDashboardGroupExists(ctx context.Context, das return true, nil } -func (d *DummyService) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) { +func (d *DummyService) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator, limit uint64) ([]t.VDBPostValidatorsData, error) { 
return getDummyData[[]t.VDBPostValidatorsData](ctx) } @@ -542,7 +546,7 @@ func (d *DummyService) GetValidatorDashboardGroupCount(ctx context.Context, dash return getDummyData[uint64](ctx) } -func (d *DummyService) GetValidatorDashboardValidatorsCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) { +func (d *DummyService) GetValidatorDashboardEffectiveBalanceTotal(ctx context.Context, dashboardId t.VDBId) (uint64, error) { return getDummyData[uint64](ctx) } diff --git a/backend/pkg/api/data_access/general.go b/backend/pkg/api/data_access/general.go index 39046f03c..b8e71d44a 100644 --- a/backend/pkg/api/data_access/general.go +++ b/backend/pkg/api/data_access/general.go @@ -2,13 +2,17 @@ package dataaccess import ( "context" + "database/sql" "fmt" "github.com/doug-martin/goqu/v9" "github.com/doug-martin/goqu/v9/exp" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/gobitfly/beaconchain/pkg/api/types" + t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" ) // retrieve (primary) ens name and optional name (=label) maintained by beaconcha.in, if present @@ -131,3 +135,83 @@ func applySortAndPagination(defaultColumns []types.SortColumn, primary types.Sor return queryOrder, queryWhere, nil } + +// returns the effective balances of the provided validators +// executed from the vdb premium limits pov, i.e. 
exited validators account for the EB at exit time +func (d *DataAccessService) GetValidatorsEffectiveBalances(ctx context.Context, validators []t.VDBValidator, onlyActive bool) (map[t.VDBValidator]uint64, error) { + validatorMapping, err := d.services.GetCurrentValidatorMapping() + if err != nil { + return nil, err + } + + // active + effectiveBalances := make(map[t.VDBValidator]uint64) + var validatorExitEpochs []exp.Expression + for _, validator := range validators { + if len(validatorMapping.ValidatorMetadata) <= int(validator) { + return nil, fmt.Errorf("validator index %d not found in validator mapping", validator) + } + status := constypes.ValidatorDbStatus(validatorMapping.ValidatorMetadata[validator].Status) + if !onlyActive && + (status == constypes.DbSlashed || status == constypes.DbExited) && + validatorMapping.ValidatorMetadata[validator].EffectiveBalance == 0 { + // exited & balance withdrawn, need to query latest EB before exit + if !validatorMapping.ValidatorMetadata[validator].ExitEpoch.Valid { + return nil, fmt.Errorf("validator %d has no exit epoch", validator) + } + // goqu can't pass tuples directly + epochTs := utils.EpochToTime(uint64(validatorMapping.ValidatorMetadata[validator].ExitEpoch.Int64)).Unix() + validatorExitEpochs = append(validatorExitEpochs, goqu.L("(fromUnixTimestamp(?), ?)", epochTs, validator)) + } else { + effectiveBalances[validator] = validatorMapping.ValidatorMetadata[validator].EffectiveBalance + } + } + + // exited + if len(validatorExitEpochs) > 0 { + ds := goqu.Dialect("postgres"). + Select( + // goqu.SUM(goqu.I("balance_effective_end")).As("balance_effective_end"), + goqu.I("validator_index"), + goqu.I("balance_effective_end"), + ). + From("validator_dashboard_data_epoch"). 
+ Where( + goqu.L("(epoch_timestamp, validator_index)").In(validatorExitEpochs), + ) + query, args, err := ds.Prepared(true).ToSQL() + if err != nil { + return nil, err + } + + ebsBeforeExit := []struct { + ValidatorIndex uint64 `db:"validator_index"` + EffectiveBalance uint64 `db:"balance_effective_end"` + }{} + err = d.clickhouseReader.SelectContext(ctx, &ebsBeforeExit, query, args...) + if err != nil && err != sql.ErrNoRows { + return nil, err + } + for _, eb := range ebsBeforeExit { + effectiveBalances[eb.ValidatorIndex] = eb.EffectiveBalance + } + } + return effectiveBalances, nil +} + +func (d *DataAccessService) GetValidatorsEffectiveBalanceTotal(ctx context.Context, validators []t.VDBValidator) (uint64, error) { + validatorEbs, err := d.GetValidatorsEffectiveBalances(ctx, validators, false) + if err != nil { + return 0, err + } + + var totalEb uint64 + for _, validator := range validators { + if eb, ok := validatorEbs[validator]; !ok { + return 0, fmt.Errorf("effective balance for validator %d not found", validator) + } else { + totalEb += eb + } + } + return totalEb, nil +} diff --git a/backend/pkg/api/data_access/networks.go b/backend/pkg/api/data_access/networks.go index bea2864db..7f201bdab 100644 --- a/backend/pkg/api/data_access/networks.go +++ b/backend/pkg/api/data_access/networks.go @@ -31,5 +31,15 @@ func (d *DataAccessService) GetAllNetworks() ([]types.NetworkInfo, error) { Name: "sepolia", NotificationsName: "sepolia", }, + { + ChainId: 7088110746, + Name: "pectra-devnet-5", + NotificationsName: "pectra-devnet-5", + }, + { + ChainId: 7072151312, + Name: "pectra-devnet-6", + NotificationsName: "pectra-devnet-6", + }, }, nil } diff --git a/backend/pkg/api/data_access/user.go b/backend/pkg/api/data_access/user.go index 349ceb8d3..c7c568a43 100644 --- a/backend/pkg/api/data_access/user.go +++ b/backend/pkg/api/data_access/user.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/doug-martin/goqu/v9" t 
"github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/utils" @@ -268,7 +269,11 @@ func (d *DataAccessService) GetProductSummary(ctx context.Context) (*t.ProductSu } func (d *DataAccessService) GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { - return db.GetFreeTierPerks(ctx) + perks, err := db.GetFreeTierProduct(ctx) + if err != nil { + return nil, err + } + return &perks.PremiumPerks, nil } func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64) (*t.UserDashboardsData, error) { @@ -361,6 +366,61 @@ func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64 return nil }) + // TODO could merge with above query + validatorDashboardEBs := make(map[uint64]uint64, 0) + wg.Go(func() error { + validatorsDs := goqu.Dialect("postgres"). + From(goqu.T("users_val_dashboards").As("uvd")). + LeftJoin( + goqu.T("users_val_dashboards_validators").As("uvdv"), + goqu.On(goqu.I("uvd.id").Eq(goqu.I("uvdv.dashboard_id"))), + ). + Select( + goqu.I("uvd.id").As("dashboard_id"), + goqu.I("uvdv.validator_index").As("validator_index"), + ). + Where(goqu.I("uvd.user_id").Eq(userId)) + + validatorsQuery, args, err := validatorsDs.Prepared(true).ToSQL() + if err != nil { + return err + } + + // could also scan into array + dashboardValidators := []struct { + Id uint64 `db:"dashboard_id"` + ValidatorIndex uint64 `db:"validator_index"` + }{} + err = d.alloyReader.SelectContext(ctx, &dashboardValidators, validatorsQuery, args...) 
+ if err != nil { + return err + } + dashboardValidatorsMap := make(map[uint64][]t.VDBValidator, 0) + validators := make([]t.VDBValidator, 0, len(dashboardValidators)) + for _, row := range dashboardValidators { + dashboardValidatorsMap[row.Id] = append(dashboardValidatorsMap[row.Id], row.ValidatorIndex) + validators = append(validators, row.ValidatorIndex) + } + validatorEbs, err := d.GetValidatorsEffectiveBalances(ctx, validators, false) + if err != nil { + return err + } + + for dashboard_id, validators := range dashboardValidatorsMap { + var dashboardTotalEb uint64 + for _, validator := range validators { + if eb, ok := validatorEbs[validator]; !ok { + return fmt.Errorf("effective balance for validator %d not found", validator) + } else { + dashboardTotalEb += eb + } + } + validatorDashboardEBs[dashboard_id] = dashboardTotalEb + } + + return nil + }) + err := wg.Wait() if err != nil { return nil, fmt.Errorf("error retrieving user dashboards data: %w", err) @@ -370,6 +430,7 @@ func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64 for _, validatorDashboard := range validatorDashboardMap { validatorDashboard.GroupCount = validatorDashboardCountMap[validatorDashboard.Id].GroupCount validatorDashboard.ValidatorCount = validatorDashboardCountMap[validatorDashboard.Id].ValidatorCount + validatorDashboard.EffectiveBalance = validatorDashboardEBs[validatorDashboard.Id] result.ValidatorDashboards = append(result.ValidatorDashboards, *validatorDashboard) } diff --git a/backend/pkg/api/data_access/vdb.go b/backend/pkg/api/data_access/vdb.go index 8eadc6352..8b16aa39b 100644 --- a/backend/pkg/api/data_access/vdb.go +++ b/backend/pkg/api/data_access/vdb.go @@ -29,14 +29,14 @@ type ValidatorDashboardRepository interface { GetValidatorDashboardGroupCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) GetValidatorDashboardGroupExists(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64) (bool, error) - 
AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) + AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator, limit uint64) ([]t.VDBPostValidatorsData, error) AddValidatorDashboardValidatorsByDepositAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) AddValidatorDashboardValidatorsByWithdrawalCredential(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, credential string, limit uint64) ([]t.VDBPostValidatorsData, error) AddValidatorDashboardValidatorsByGraffiti(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, graffiti string, limit uint64) ([]t.VDBPostValidatorsData, error) RemoveValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) error GetValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBId, groupId int64, cursor string, colSort t.Sort[enums.VDBManageValidatorsColumn], search string, limit uint64) ([]t.VDBManageValidatorsTableRow, *t.Paging, error) - GetValidatorDashboardValidatorsCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) + GetValidatorDashboardEffectiveBalanceTotal(ctx context.Context, dashboardId t.VDBId) (uint64, error) CreateValidatorDashboardPublicId(ctx context.Context, dashboardId t.VDBIdPrimary, name string, shareGroups bool) (*t.VDBPublicId, error) GetValidatorDashboardPublicId(ctx context.Context, publicDashboardId t.VDBIdPublic) (*t.VDBPublicId, error) diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index 92772772b..27e91cb98 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -758,7 +758,7 @@ func (d *DataAccessService) 
GetValidatorDashboardGroupExists(ctx context.Context return groupExists, err } -func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) { +func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator, limitEB uint64) ([]t.VDBPostValidatorsData, error) { result := []t.VDBPostValidatorsData{} if len(validators) == 0 { @@ -766,6 +766,49 @@ func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context, return nil, nil } + // determine new validators and check for their EBs + var existingValidators []uint64 + existingValidatorsDs := goqu.Dialect("postgres"). + From(goqu.Dialect("postgres"). + From(goqu.L("unnest(?::int[])", pq.Array(validators)).As("validator_index")). + Select("*").As("req")). + SelectDistinct(goqu.I("uvdv.validator_index")). + InnerJoin( + goqu.T("users_val_dashboards_validators").As("uvdv"), + goqu.On(goqu.I("req.validator_index").Eq(goqu.I("uvdv.validator_index"))), + ). + Where( + goqu.I("uvdv.dashboard_id").Eq(dashboardId), + ) + existingValidatorsQuery, args, err := existingValidatorsDs.Prepared(true).ToSQL() + if err != nil { + return nil, fmt.Errorf("error preparing query: %w", err) + } + err = d.alloyReader.SelectContext(ctx, &existingValidators, existingValidatorsQuery, args...) 
+ if err != nil { + return nil, err + } + existingValidatorsMap := utils.SliceToMap(existingValidators) + + newValidators := make([]uint64, 0, len(validators)) + for _, validator := range validators { + if _, ok := existingValidatorsMap[validator]; !ok { + newValidators = append(newValidators, validator) + } + } + + if len(newValidators) > 0 && limitEB > 0 { + // only insert new validators until the eb limit is reached + newValidatorEbs, err := d.GetValidatorsEffectiveBalances(ctx, newValidators, false) + if err != nil { + return nil, err + } + newValidators = d.applyEBFiler(newValidatorEbs, limitEB) + } + + // keep existing validators so we can update their group + validators = append(existingValidators, newValidators...) + tx, err := d.writerDb.BeginTxx(ctx, nil) if err != nil { return nil, fmt.Errorf("error starting db transactions to insert validators for a dashboard: %w", err) @@ -829,7 +872,7 @@ func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context, // Updates the group for validators already in the dashboard linked to the deposit address. // Adds up to limit new validators associated with the deposit address, if not already in the dashboard. 
-func (d *DataAccessService) AddValidatorDashboardValidatorsByDepositAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) { +func (d *DataAccessService) AddValidatorDashboardValidatorsByDepositAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, ebLimit uint64) ([]t.VDBPostValidatorsData, error) { result := []t.VDBPostValidatorsData{} addressParsed, err := hex.DecodeString(strings.TrimPrefix(address, "0x")) @@ -837,30 +880,54 @@ func (d *DataAccessService) AddValidatorDashboardValidatorsByDepositAddress(ctx return nil, err } - uniqueValidatorIndexesQuery := ` - (SELECT - DISTINCT uvdv.validator_index - FROM validators v - JOIN eth1_deposits d ON v.pubkey = d.publickey - JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index - WHERE uvdv.dashboard_id = $1 AND d.from_address = $2) + baseDs := goqu.Dialect("postgres"). + From(goqu.T("validators").As("v")). + SelectDistinct(goqu.I("v.validatorindex")). + InnerJoin( + goqu.T("eth1_deposits").As("d"), + goqu.On(goqu.I("v.pubkey").Eq(goqu.I("d.publickey"))), + ). + Where( + goqu.I("d.from_address").Eq(addressParsed), + ) - UNION + existingValidatorsDs := baseDs. + InnerJoin( + goqu.T("users_val_dashboards_validators").As("uvdv"), + goqu.On(goqu.I("v.validatorindex").Eq(goqu.I("uvdv.validator_index"))), + ). + Where( + goqu.I("uvdv.dashboard_id").Eq(dashboardId), + ) - (SELECT - DISTINCT v.validatorindex AS validator_index - FROM validators v - JOIN eth1_deposits d ON v.pubkey = d.publickey - LEFT JOIN users_val_dashboards_validators uvdv - ON v.validatorindex = uvdv.validator_index AND uvdv.dashboard_id = $1 - WHERE d.from_address = $2 AND uvdv.validator_index IS NULL - ORDER BY validator_index - LIMIT $3)` + var uniqueValidatorIndexesDs *goqu.SelectDataset + if ebLimit > 0 { + newValidatorsDs := baseDs. 
+ LeftJoin( + goqu.T("users_val_dashboards_validators").As("uvdv"), + goqu.On( + goqu.I("v.validatorindex").Eq(goqu.I("uvdv.validator_index")), + goqu.I("uvdv.dashboard_id").Eq(dashboardId), + ), + ). + Where( + goqu.I("uvdv.validator_index").IsNull(), + ) + err := d.applyNewValidatorsDSEBFilter(ctx, newValidatorsDs, ebLimit) + if err != nil { + return nil, err + } + uniqueValidatorIndexesDs = existingValidatorsDs.Union(newValidatorsDs) + } - addValidatorsQuery := d.getAddValidatorsQuery(uniqueValidatorIndexesQuery) + addValidatorsDs := d.getAddValidatorsQuery(uniqueValidatorIndexesDs, uint64(dashboardId), groupId) + addValidatorsQuery, args, err := addValidatorsDs.Prepared(true).ToSQL() + if err != nil { + return nil, fmt.Errorf("error preparing query: %w", err) + } var validators []uint64 - err = d.alloyWriter.SelectContext(ctx, &validators, addValidatorsQuery, dashboardId, addressParsed, limit, groupId) + err = d.alloyWriter.SelectContext(ctx, &validators, addValidatorsQuery, args...) if err != nil { return nil, err } @@ -877,7 +944,7 @@ func (d *DataAccessService) AddValidatorDashboardValidatorsByDepositAddress(ctx // Updates the group for validators already in the dashboard linked to the withdrawal address. // Adds up to limit new validators associated with the withdrawal address, if not already in the dashboard. 
-func (d *DataAccessService) AddValidatorDashboardValidatorsByWithdrawalCredential(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, credential string, limit uint64) ([]t.VDBPostValidatorsData, error) { +func (d *DataAccessService) AddValidatorDashboardValidatorsByWithdrawalCredential(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, credential string, ebLimit uint64) ([]t.VDBPostValidatorsData, error) { result := []t.VDBPostValidatorsData{} addressParsed, err := hex.DecodeString(strings.TrimPrefix(credential, "0x")) @@ -885,28 +952,50 @@ func (d *DataAccessService) AddValidatorDashboardValidatorsByWithdrawalCredentia return nil, err } - uniqueValidatorIndexesQuery := ` - (SELECT - DISTINCT uvdv.validator_index - FROM validators v - JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index - WHERE uvdv.dashboard_id = $1 AND v.withdrawalcredentials = $2) + baseDs := goqu.Dialect("postgres"). + From(goqu.T("validators").As("v")). + SelectDistinct(goqu.I("v.validatorindex")). + Where( + goqu.I("v.withdrawalcredentials").Eq(addressParsed), + ) - UNION + existingValidatorsDs := baseDs. + InnerJoin( + goqu.T("users_val_dashboards_validators").As("uvdv"), + goqu.On(goqu.I("v.validatorindex").Eq(goqu.I("uvdv.validator_index"))), + ). + Where( + goqu.I("uvdv.dashboard_id").Eq(dashboardId), + ) - (SELECT - DISTINCT v.validatorindex AS validator_index - FROM validators v - LEFT JOIN users_val_dashboards_validators uvdv - ON v.validatorindex = uvdv.validator_index AND uvdv.dashboard_id = $1 - WHERE v.withdrawalcredentials = $2 AND uvdv.validator_index IS NULL - ORDER BY v.validatorindex - LIMIT $3)` + var uniqueValidatorIndexesDs *goqu.SelectDataset + if ebLimit > 0 { + newValidatorsDs := baseDs. + LeftJoin( + goqu.T("users_val_dashboards_validators").As("uvdv"), + goqu.On( + goqu.I("v.validatorindex").Eq(goqu.I("uvdv.validator_index")), + goqu.I("uvdv.dashboard_id").Eq(dashboardId), + ), + ). 
+ Where( + goqu.I("uvdv.validator_index").IsNull(), + ) + err := d.applyNewValidatorsDSEBFilter(ctx, newValidatorsDs, ebLimit) + if err != nil { + return nil, err + } + uniqueValidatorIndexesDs = existingValidatorsDs.Union(newValidatorsDs) + } - addValidatorsQuery := d.getAddValidatorsQuery(uniqueValidatorIndexesQuery) + addValidatorsDs := d.getAddValidatorsQuery(uniqueValidatorIndexesDs, uint64(dashboardId), groupId) + addValidatorsQuery, args, err := addValidatorsDs.Prepared(true).ToSQL() + if err != nil { + return nil, fmt.Errorf("error preparing query: %w", err) + } var validators []uint64 - err = d.alloyWriter.SelectContext(ctx, &validators, addValidatorsQuery, dashboardId, addressParsed, limit, groupId) + err = d.alloyWriter.SelectContext(ctx, &validators, addValidatorsQuery, args...) if err != nil { return nil, err } @@ -923,30 +1012,53 @@ func (d *DataAccessService) AddValidatorDashboardValidatorsByWithdrawalCredentia // Update the group for validators already in the dashboard linked to the graffiti (via produced block). // Add up to limit new validators associated with the graffiti, if not already in the dashboard. 
-func (d *DataAccessService) AddValidatorDashboardValidatorsByGraffiti(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, graffiti string, limit uint64) ([]t.VDBPostValidatorsData, error) { +func (d *DataAccessService) AddValidatorDashboardValidatorsByGraffiti(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, graffiti string, ebLimit uint64) ([]t.VDBPostValidatorsData, error) { result := []t.VDBPostValidatorsData{} - uniqueValidatorIndexesQuery := ` - (SELECT - DISTINCT uvdv.validator_index - FROM blocks b - JOIN users_val_dashboards_validators uvdv ON b.proposer = uvdv.validator_index - WHERE uvdv.dashboard_id = $1 AND b.graffiti_text = $2) - - UNION - - (SELECT DISTINCT b.proposer AS validator_index - FROM blocks b - LEFT JOIN users_val_dashboards_validators uvdv - ON b.proposer = uvdv.validator_index AND uvdv.dashboard_id = $1 - WHERE b.graffiti_text = $2 AND uvdv.validator_index IS NULL - ORDER BY b.proposer - LIMIT $3)` - - addValidatorsQuery := d.getAddValidatorsQuery(uniqueValidatorIndexesQuery) + baseDs := goqu.Dialect("postgres"). + From(goqu.T("blocks").As("b")). + SelectDistinct(goqu.I("b.proposer")). + Where( + goqu.I("b.graffiti_text").Eq(graffiti), + ) + + existingValidatorsDs := baseDs. + InnerJoin( + goqu.T("users_val_dashboards_validators").As("uvdv"), + goqu.On(goqu.I("b.proposer").Eq(goqu.I("uvdv.validator_index"))), + ). + Where( + goqu.I("uvdv.dashboard_id").Eq(dashboardId), + ) + + var uniqueValidatorIndexesDs *goqu.SelectDataset + if ebLimit > 0 { + newValidatorsDs := baseDs. + LeftJoin( + goqu.T("users_val_dashboards_validators").As("uvdv"), + goqu.On( + goqu.I("b.proposer").Eq(goqu.I("uvdv.validator_index")), + goqu.I("uvdv.dashboard_id").Eq(dashboardId), + ), + ). 
+ Where( + goqu.I("uvdv.validator_index").IsNull(), + ) + err := d.applyNewValidatorsDSEBFilter(ctx, newValidatorsDs, ebLimit) + if err != nil { + return nil, err + } + uniqueValidatorIndexesDs = existingValidatorsDs.Union(newValidatorsDs) + } + + addValidatorsDs := d.getAddValidatorsQuery(uniqueValidatorIndexesDs, uint64(dashboardId), groupId) + addValidatorsQuery, args, err := addValidatorsDs.Prepared(true).ToSQL() + if err != nil { + return nil, fmt.Errorf("error preparing query: %w", err) + } var validators []uint64 - err := d.alloyWriter.SelectContext(ctx, &validators, addValidatorsQuery, dashboardId, graffiti, limit, groupId) + err = d.alloyWriter.SelectContext(ctx, &validators, addValidatorsQuery, args...) if err != nil { return nil, err } @@ -961,20 +1073,89 @@ func (d *DataAccessService) AddValidatorDashboardValidatorsByGraffiti(ctx contex return result, nil } -func (d *DataAccessService) getAddValidatorsQuery(uniqueValidatorIndexesQuery string) string { - return fmt.Sprintf(` - WITH unique_validator_indexes AS ( - %s - ) - INSERT INTO users_val_dashboards_validators (dashboard_id, group_id, validator_index) - SELECT $1 AS dashboard_id, $4 AS group_id, validator_index - FROM unique_validator_indexes - ON CONFLICT (dashboard_id, validator_index) DO UPDATE - SET - dashboard_id = EXCLUDED.dashboard_id, - group_id = EXCLUDED.group_id, - validator_index = EXCLUDED.validator_index - RETURNING validator_index`, uniqueValidatorIndexesQuery) +func (d *DataAccessService) applyEBFiler(validatorEbs map[t.VDBValidator]uint64, ebLimit uint64) []t.VDBValidator { + // Decide which new validators to add: + // a) insert by lowest index until ebLimit reached + // b) insert by lowest effective balance until ebLimit reached + // c) insert the combination of validators which gets closest to the ebLimit (knapsack problem) + // TODO prefer active validators & insert exited last in all 3 cases + newValidatorsList := maps.Keys(validatorEbs) + + sort.Slice(newValidatorsList, func(i, 
j int) bool { + // use a) as sec. sort + if validatorEbs[newValidatorsList[i]] == validatorEbs[newValidatorsList[j]] { + return newValidatorsList[i] < newValidatorsList[j] // a) + } + return validatorEbs[newValidatorsList[i]] < validatorEbs[newValidatorsList[j]] // b) + }) + + var newEbAccumulator uint64 + for i, validator := range newValidatorsList { + if newEbAccumulator+validatorEbs[validator] > ebLimit { + newValidatorsList = newValidatorsList[:i] + break + } + newEbAccumulator += validatorEbs[validator] + } + return newValidatorsList +} + +// takes a goqu ds of new validators to add to a vdb, +// determines whether they fit in the ebLimit and shrinks selection if not +func (d *DataAccessService) applyNewValidatorsDSEBFilter(ctx context.Context, newValidatorsDs *goqu.SelectDataset, ebLimit uint64) error { + var newValidators []uint64 + newValidatorsQuery, args, err := newValidatorsDs.Prepared(true).ToSQL() + if err != nil { + return fmt.Errorf("error preparing query: %w", err) + } + + err = d.alloyReader.SelectContext(ctx, &newValidators, newValidatorsQuery, args...) + if err != nil { + return err + } + validatorEbs, err := d.GetValidatorsEffectiveBalances(ctx, newValidators, false) + if err != nil { + return err + } + + filteredValidators := d.applyEBFiler(validatorEbs, ebLimit) + if len(filteredValidators) < len(newValidators) { + //nolint:staticcheck + newValidatorsDs = goqu.Dialect("postgres"). + From(goqu.L("unnest(?::int[])", pq.Array(filteredValidators)).As("validator_index")). + Select("*") + } + return nil +} + +func (d *DataAccessService) getAddValidatorsQuery(uniqueValidatorIndexesQuery *goqu.SelectDataset, dashboardId, groupId uint64) *goqu.InsertDataset { + // 0. update group for existing ones + // 1. get all that would be an option (latest EB only > 0, not present in dashboard yet) up to max available limit + // 2. add non-exited first + // 3. if max eb not reached yet and empty ones available: get exit epoch from vm + // 4. 
get EB at exit epoch and add until full + + return goqu.Dialect("postgres"). + Insert("users_val_dashboards_validators").Cols("dashboard_id", "group_id", "validator_index"). + With("unique_validator_indexes", uniqueValidatorIndexesQuery). + FromQuery( + goqu.Dialect("postgres"). + From(goqu.I("unique_validator_indexes")). + Select( + goqu.V(dashboardId).As("dashboard_id"), + goqu.V(groupId).As("group_id"), + goqu.L("validatorindex"), + ), + ). + OnConflict(goqu.DoUpdate( + "dashboard_id, validator_index", + goqu.Record{ + "dashboard_id": goqu.L("EXCLUDED.dashboard_id"), + "group_id": goqu.L("EXCLUDED.group_id"), + "validator_index": goqu.L("EXCLUDED.validator_index"), + }, + )). + Returning("validator_index") } func (d *DataAccessService) RemoveValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) error { @@ -999,14 +1180,23 @@ func (d *DataAccessService) RemoveValidatorDashboardValidators(ctx context.Conte return err } -func (d *DataAccessService) GetValidatorDashboardValidatorsCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) { - var count uint64 - err := d.alloyReader.GetContext(ctx, &count, ` - SELECT COUNT(*) - FROM users_val_dashboards_validators - WHERE dashboard_id = $1 - `, dashboardId) - return count, err +func (d *DataAccessService) GetValidatorDashboardEffectiveBalanceTotal(ctx context.Context, dashboardId t.VDBId) (uint64, error) { + var validators []t.VDBValidator + + if dashboardId.Validators == nil { + err := d.alloyReader.SelectContext(ctx, &validators, ` + SELECT validator_index + FROM users_val_dashboards_validators + WHERE dashboard_id = $1 + `, dashboardId.Id) + if err != nil { + return 0, err + } + } else { + validators = dashboardId.Validators + } + + return d.GetValidatorsEffectiveBalanceTotal(ctx, validators) } func (d *DataAccessService) CreateValidatorDashboardPublicId(ctx context.Context, dashboardId t.VDBIdPrimary, name string, shareGroups bool) (*t.VDBPublicId, 
error) { diff --git a/backend/pkg/api/handlers/handler_service.go b/backend/pkg/api/handlers/handler_service.go index 65d2768ac..c5193511a 100644 --- a/backend/pkg/api/handlers/handler_service.go +++ b/backend/pkg/api/handlers/handler_service.go @@ -128,8 +128,12 @@ func (h *HandlerService) getDashboardId(ctx context.Context, dashboardIdParam in if len(validators) == 0 { return nil, newNotFoundErr("no validators found for given id") } - if len(validators) > maxValidatorsInList { - return nil, newBadRequestErr("too many validators in list, maximum is %d", maxValidatorsInList) + validatorEb, err := h.daService.GetValidatorDashboardEffectiveBalanceTotal(ctx, types.VDBId{Validators: validators}) + if err != nil { + return nil, err + } + if validatorEb > maxEBInList { + return nil, newBadRequestErr("validators in list contain too much ETH, maximum is %d", maxEBInList) } return &types.VDBId{Validators: validators}, nil } diff --git a/backend/pkg/api/handlers/input_validation.go b/backend/pkg/api/handlers/input_validation.go index e35561c26..ca3418894 100644 --- a/backend/pkg/api/handlers/input_validation.go +++ b/backend/pkg/api/handlers/input_validation.go @@ -45,7 +45,7 @@ var ( const ( maxNameLength = 50 - maxValidatorsInList = 20 + maxEBInList uint64 = 20 * 32 * 1e9 maxQueryLimit uint64 = 100 defaultReturnLimit uint64 = 10 sortOrderAscending = "asc" diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index fcf3a3883..2b7dad740 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -611,17 +611,17 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW returnForbidden(w, r, errors.New("bulk adding not allowed with current subscription plan")) return } - dashboardLimit := userInfo.PremiumPerks.ValidatorsPerDashboard - existingValidatorCount, err := h.getDataAccessor(ctx).GetValidatorDashboardValidatorsCount(ctx, dashboardId) + limitEB := 
userInfo.PremiumPerks.EffectiveBalancePerDashboard + existingEB, err := h.getDataAccessor(ctx).GetValidatorDashboardEffectiveBalanceTotal(ctx, types.VDBId{Id: dashboardId}) if err != nil { handleErr(w, r, err) return } - var limit uint64 + var spaceLeftEB uint64 if isUserAdmin(userInfo) { - limit = math.MaxUint32 // no limit for admins - } else if dashboardLimit >= existingValidatorCount { - limit = dashboardLimit - existingValidatorCount + spaceLeftEB = math.MaxUint64 // no limit for admins + } else if limitEB > existingEB { + spaceLeftEB = limitEB - existingEB } var data []types.VDBPostValidatorsData @@ -638,10 +638,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, err) return } - if len(validators) > int(limit) { - validators = validators[:limit] - } - data, dataErr = h.getDataAccessor(ctx).AddValidatorDashboardValidators(ctx, dashboardId, groupId, validators) + data, dataErr = h.getDataAccessor(ctx).AddValidatorDashboardValidators(ctx, dashboardId, groupId, validators, spaceLeftEB) case req.DepositAddress != "": depositAddress := v.checkRegex(reEthereumAddress, req.DepositAddress, "deposit_address") @@ -649,7 +646,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.getDataAccessor(ctx).AddValidatorDashboardValidatorsByDepositAddress(ctx, dashboardId, groupId, depositAddress, limit) + data, dataErr = h.getDataAccessor(ctx).AddValidatorDashboardValidatorsByDepositAddress(ctx, dashboardId, groupId, depositAddress, spaceLeftEB) case req.WithdrawalCredential != "": withdrawalCredential := v.checkRegex(reWithdrawalCredential, req.WithdrawalCredential, "withdrawal_credential") @@ -657,7 +654,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.getDataAccessor(ctx).AddValidatorDashboardValidatorsByWithdrawalCredential(ctx, dashboardId, groupId, 
withdrawalCredential, limit) + data, dataErr = h.getDataAccessor(ctx).AddValidatorDashboardValidatorsByWithdrawalCredential(ctx, dashboardId, groupId, withdrawalCredential, spaceLeftEB) case req.Graffiti != "": graffiti := v.checkRegex(reGraffiti, req.Graffiti, "graffiti") @@ -665,7 +662,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.getDataAccessor(ctx).AddValidatorDashboardValidatorsByGraffiti(ctx, dashboardId, groupId, graffiti, limit) + data, dataErr = h.getDataAccessor(ctx).AddValidatorDashboardValidatorsByGraffiti(ctx, dashboardId, groupId, graffiti, spaceLeftEB) } if dataErr != nil { @@ -1015,7 +1012,7 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri returnConflict(w, r, errors.New("maximum number of groups in dashboards reached")) return } - if dashboardInfo.ValidatorCount >= userInfo.PremiumPerks.ValidatorsPerDashboard { + if dashboardInfo.EffectiveBalance >= userInfo.PremiumPerks.EffectiveBalancePerDashboard { returnConflict(w, r, errors.New("maximum number of validators in dashboards reached")) return } diff --git a/backend/pkg/api/types/dashboard.go b/backend/pkg/api/types/dashboard.go index 340a21b01..8ed1568bc 100644 --- a/backend/pkg/api/types/dashboard.go +++ b/backend/pkg/api/types/dashboard.go @@ -5,14 +5,15 @@ type AccountDashboard struct { Name string `json:"name"` } type ValidatorDashboard struct { - Id uint64 `json:"id" extensions:"x-order=1"` - Name string `json:"name" extensions:"x-order=2"` - Network uint64 `json:"network" extensions:"x-order=3"` - PublicIds []VDBPublicId `json:"public_ids,omitempty" extensions:"x-order=4"` - IsArchived bool `json:"is_archived" extensions:"x-order=5"` - ArchivedReason string `json:"archived_reason,omitempty" tstype:"'user' | 'dashboard_limit' | 'validator_limit' | 'group_limit'" extensions:"x-order=6"` - ValidatorCount uint64 `json:"validator_count" extensions:"x-order=7"` - GroupCount 
uint64 `json:"group_count" extensions:"x-order=8"` + Id uint64 `json:"id" extensions:"x-order=1"` + Name string `json:"name" extensions:"x-order=2"` + Network uint64 `json:"network" extensions:"x-order=3"` + PublicIds []VDBPublicId `json:"public_ids,omitempty" extensions:"x-order=4"` + IsArchived bool `json:"is_archived" extensions:"x-order=5"` + ArchivedReason string `json:"archived_reason,omitempty" tstype:"'user' | 'dashboard_limit' | 'validator_limit' | 'group_limit'" extensions:"x-order=6"` + EffectiveBalance uint64 `json:"effective_balance" extensions:"x-order=7"` + ValidatorCount uint64 `json:"validator_count" extensions:"x-order=8"` + GroupCount uint64 `json:"group_count" extensions:"x-order=9"` } type UserDashboardsData struct { diff --git a/backend/pkg/api/types/search.go b/backend/pkg/api/types/search.go index c57b68cce..f10464037 100644 --- a/backend/pkg/api/types/search.go +++ b/backend/pkg/api/types/search.go @@ -30,9 +30,10 @@ type SearchValidatorsByGraffiti struct { } type SearchResult struct { - Type string `json:"type"` - ChainId uint64 `json:"chain_id"` - Value interface{} `json:"value"` + Type string `json:"type"` + ChainId uint64 `json:"chain_id"` + TotalEffectiveBalance uint64 `json:"total_effective_balance"` + Value interface{} `json:"value"` } type InternalPostSearchResponse struct { diff --git a/backend/pkg/api/types/user.go b/backend/pkg/api/types/user.go index 8e4a30dc7..63ccda660 100644 --- a/backend/pkg/api/types/user.go +++ b/backend/pkg/api/types/user.go @@ -62,7 +62,8 @@ const ProductStoreEthpool ProductStore = "ethpool" const ProductStoreCustom ProductStore = "custom" type ProductSummary struct { - ValidatorsPerDashboardLimit uint64 `json:"validators_per_dashboard_limit"` + ValidatorsPerDashboardLimit uint64 `json:"validators_per_dashboard_limit"` // remove after Pectra + EffectiveBalancePerDashboardLimit uint64 `json:"effective_balance_per_dashboard_limit"` StripePublicKey string `json:"stripe_public_key"` ApiProducts []ApiProduct 
`json:"api_products"` PremiumProducts []PremiumProduct `json:"premium_products"` @@ -106,20 +107,22 @@ type PremiumProduct struct { } type ExtraDashboardValidatorsPremiumAddon struct { - ProductName string `json:"product_name"` - ExtraDashboardValidators uint64 `json:"extra_dashboard_validators"` - PricePerYearEur float64 `json:"price_per_year_eur"` - PricePerMonthEur float64 `json:"price_per_month_eur"` - ProductIdMonthly string `json:"product_id_monthly"` - ProductIdYearly string `json:"product_id_yearly"` - StripePriceIdMonthly string `json:"stripe_price_id_monthly"` - StripePriceIdYearly string `json:"stripe_price_id_yearly"` + ProductName string `json:"product_name"` + ExtraDashboardValidators uint64 `json:"extra_dashboard_validators"` // remove after Pectra + ExtraDashboardEffectiveBalance uint64 `json:"extra_dashboard_effective_balance"` + PricePerYearEur float64 `json:"price_per_year_eur"` + PricePerMonthEur float64 `json:"price_per_month_eur"` + ProductIdMonthly string `json:"product_id_monthly"` + ProductIdYearly string `json:"product_id_yearly"` + StripePriceIdMonthly string `json:"stripe_price_id_monthly"` + StripePriceIdYearly string `json:"stripe_price_id_yearly"` } type PremiumPerks struct { AdFree bool `json:"ad_free"` // note that this is somhow redunant, since there is already ApiPerks.NoAds ValidatorDashboards uint64 `json:"validator_dashboards"` - ValidatorsPerDashboard uint64 `json:"validators_per_dashboard"` + ValidatorsPerDashboard uint64 `json:"validators_per_dashboard"` // remove after Pectra + EffectiveBalancePerDashboard uint64 `json:"effective_balance_per_dashboard"` ValidatorGroupsPerDashboard uint64 `json:"validator_groups_per_dashboard"` ShareCustomDashboards bool `json:"share_custom_dashboards"` ManageDashboardViaApi bool `json:"manage_dashboard_via_api"` diff --git a/backend/pkg/commons/config/config.go b/backend/pkg/commons/config/config.go index bd135f56d..019231b08 100644 --- a/backend/pkg/commons/config/config.go +++ 
b/backend/pkg/commons/config/config.go @@ -31,3 +31,12 @@ var GnosisChainYml string //go:embed holesky.chain.yml var HoleskyChainYml string + +//go:embed mekong.chain.yml +var MekongChainYml string + +//go:embed pectra-devnet-5.chain.yml +var PectraDevnet5ChainYml string + +//go:embed pectra-devnet-6.chain.yml +var PectraDevnet6ChainYml string diff --git a/backend/pkg/commons/config/mekong.chain.yml b/backend/pkg/commons/config/mekong.chain.yml new file mode 100644 index 000000000..eed63477c --- /dev/null +++ b/backend/pkg/commons/config/mekong.chain.yml @@ -0,0 +1,153 @@ +# Extends the mainnet preset +PRESET_BASE: mainnet +CONFIG_NAME: mekong # needs to exist because of Prysm. Otherwise it conflicts with mainnet genesis + +# Genesis +# --------------------------------------------------------------- +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 100000 +# 2024-Nov-05 03:59:00 PM UTC +MIN_GENESIS_TIME: 1730822340 +GENESIS_FORK_VERSION: 0x10637624 +GENESIS_DELAY: 60 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x20637624 +ALTAIR_FORK_EPOCH: 0 +# Merge +BELLATRIX_FORK_VERSION: 0x30637624 +BELLATRIX_FORK_EPOCH: 0 +TERMINAL_TOTAL_DIFFICULTY: 0 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x40637624 +CAPELLA_FORK_EPOCH: 0 + +# DENEB +DENEB_FORK_VERSION: 0x50637624 +DENEB_FORK_EPOCH: 0 + +# Electra +ELECTRA_FORK_VERSION: 0x60637624 +ELECTRA_FORK_EPOCH: 256 + +# Fulu +FULU_FORK_VERSION: 0x70000000 +FULU_FORK_EPOCH: 99999 + +# EIP7594 - Peerdas +EIP7594_FORK_VERSION: 0x70000000 +EIP7594_FORK_EPOCH: 99999 + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds 
+SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 12 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 2 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 30000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 128 +# [New in Deneb:EIP7514] 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# `2` epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 7078815900 +DEPOSIT_NETWORK_ID: 7078815900 +DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 + +# Networking +# --------------------------------------------------------------- +# `10 * 2**20` (= 10485760, 10 MiB) +GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) +MAX_CHUNK_SIZE: 10485760 +# 5s +TTFB_TIMEOUT: 5 +# 10s +RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) 
+ ATTESTATION_SUBNET_EXTRA_BITS +ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 +## `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 + +# Whisk +# `Epoch(2**8)` +WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256 +# `Epoch(2)` +WHISK_PROPOSER_SELECTION_GAP: 2 + +# EIP7594 +NUMBER_OF_COLUMNS: 128 +MAX_CELLS_IN_EXTENDED_MATRIX: 768 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 + +# [New in Electra:EIP7251] +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 # 2**7 * 10**9 (= 128,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 # 2**8 * 10**9 (= 256,000,000,000) \ No newline at end of file diff --git a/backend/pkg/commons/config/pectra-devnet-5.chain.yml b/backend/pkg/commons/config/pectra-devnet-5.chain.yml new file mode 100644 index 000000000..d5b6b1ca2 --- /dev/null +++ b/backend/pkg/commons/config/pectra-devnet-5.chain.yml @@ -0,0 +1,164 @@ +# Extends the mainnet preset +PRESET_BASE: mainnet +CONFIG_NAME: testnet # needs to exist because of Prysm. 
Otherwise it conflicts with mainnet genesis + +# Genesis +# --------------------------------------------------------------- +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 11300 +# 2025-Jan-16 01:30:00 PM UTC +MIN_GENESIS_TIME: 1737034200 +GENESIS_FORK_VERSION: 0x10710240 +GENESIS_DELAY: 60 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x20710240 +ALTAIR_FORK_EPOCH: 0 +# Merge +BELLATRIX_FORK_VERSION: 0x30710240 +BELLATRIX_FORK_EPOCH: 0 +TERMINAL_TOTAL_DIFFICULTY: 0 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x40710240 +CAPELLA_FORK_EPOCH: 0 + +# DENEB +DENEB_FORK_VERSION: 0x50710240 +DENEB_FORK_EPOCH: 0 + +# Electra +ELECTRA_FORK_VERSION: 0x60710240 +ELECTRA_FORK_EPOCH: 4 + +# Fulu +FULU_FORK_VERSION: 0x70710240 +FULU_FORK_EPOCH: 999999 + + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 12 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 2 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 128 +# [New in Deneb:EIP7514] 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + +# Fork choice +# 
--------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# `2` epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 7088110746 +DEPOSIT_NETWORK_ID: 7088110746 +DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 + +# Networking +# --------------------------------------------------------------- +# `10 * 2**20` (= 10485760, 10 MiB) +GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) +MAX_CHUNK_SIZE: 10485760 +# 5s +TTFB_TIMEOUT: 5 +# 10s +RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 +## `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 + +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(6)` +TARGET_BLOBS_PER_BLOCK_ELECTRA: 6 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB 
* MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + +# Whisk +# `Epoch(2**8)` +WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256 +# `Epoch(2)` +WHISK_PROPOSER_SELECTION_GAP: 2 + +# Fulu +NUMBER_OF_COLUMNS: 128 +MAX_CELLS_IN_EXTENDED_MATRIX: 768 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 +TARGET_BLOBS_PER_BLOCK_FULU: 9 +MAX_BLOBS_PER_BLOCK_FULU: 12 +# `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_FULU` +MAX_REQUEST_BLOB_SIDECARS_FULU: 1536 diff --git a/backend/pkg/commons/config/pectra-devnet-6.chain.yml b/backend/pkg/commons/config/pectra-devnet-6.chain.yml new file mode 100644 index 000000000..36931cb9b --- /dev/null +++ b/backend/pkg/commons/config/pectra-devnet-6.chain.yml @@ -0,0 +1,172 @@ +# Extends the mainnet preset +PRESET_BASE: mainnet +CONFIG_NAME: testnet # needs to exist because of Prysm. Otherwise it conflicts with mainnet genesis + +# Genesis +# --------------------------------------------------------------- +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 71000 +# 2025-Feb-03 05:30:00 PM UTC +MIN_GENESIS_TIME: 1738603800 +GENESIS_FORK_VERSION: 0x10585557 +GENESIS_DELAY: 60 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x20585557 +ALTAIR_FORK_EPOCH: 0 +# Merge +BELLATRIX_FORK_VERSION: 0x30585557 +BELLATRIX_FORK_EPOCH: 0 +TERMINAL_TOTAL_DIFFICULTY: 0 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x40585557 +CAPELLA_FORK_EPOCH: 0 + +# DENEB +DENEB_FORK_VERSION: 0x50585557 +DENEB_FORK_EPOCH: 0 + +# Electra +ELECTRA_FORK_VERSION: 0x60585557 +ELECTRA_FORK_EPOCH: 10 + +# Fulu +FULU_FORK_VERSION: 0x70585557 
+FULU_FORK_EPOCH: 999999 + +# EIP7732 +EIP7732_FORK_VERSION: 0x80000000 +EIP7732_FORK_EPOCH: 99999 + +# EIP7805 +EIP7805_FORK_VERSION: 0x90000000 +EIP7805_FORK_EPOCH: 99999 + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 12 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 2 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 128 +# [New in Deneb:EIP7514] 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# `2` epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 7072151312 +DEPOSIT_NETWORK_ID: 7072151312 +DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 + +# Networking +# --------------------------------------------------------------- +# `10 * 2**20` (= 10485760, 10 MiB) +GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) +MAX_CHUNK_SIZE: 10485760 +# 5s +TTFB_TIMEOUT: 5 +# 10s +RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms 
+MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 +## `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 + +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(6)` +TARGET_BLOBS_PER_BLOCK_ELECTRA: 6 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + +# Whisk +# `Epoch(2**8)` +WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256 +# `Epoch(2)` +WHISK_PROPOSER_SELECTION_GAP: 2 + +# Fulu +NUMBER_OF_COLUMNS: 128 +NUMBER_OF_CUSTODY_GROUPS: 128 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + +# EIP7732 +MAX_REQUEST_PAYLOADS: 128 \ No newline at end of file diff --git a/backend/pkg/commons/db/bigtable.go b/backend/pkg/commons/db/bigtable.go index ff4de7833..b772c9d10 100644 --- a/backend/pkg/commons/db/bigtable.go +++ b/backend/pkg/commons/db/bigtable.go @@ -547,9 +547,11 @@ func (bigtable *Bigtable) SaveValidatorBalances(epoch uint64, validators []*type balanceEncoded := make([]byte, 8) binary.LittleEndian.PutUint64(balanceEncoded, validator.Balance) - effectiveBalanceEncoded := uint8(validator.EffectiveBalance / 
1e9) // we can encode the effective balance in 1 byte as it is capped at 32ETH and only decrements in 1 ETH steps - combined := append(balanceEncoded, effectiveBalanceEncoded) + effectiveBalanceEncoded := make([]byte, 8) + binary.LittleEndian.PutUint64(effectiveBalanceEncoded, validator.EffectiveBalance) + + combined := append(balanceEncoded, effectiveBalanceEncoded...) mut := &gcp_bigtable.Mutation{} mut.Set(VALIDATOR_BALANCES_FAMILY, "b", ts, combined) key := fmt.Sprintf("%s:%s:%s:%s", bigtable.chainId, bigtable.validatorIndexToKey(validator.Index), VALIDATOR_BALANCES_FAMILY, epochKey) diff --git a/backend/pkg/commons/db/db.go b/backend/pkg/commons/db/db.go index 8a16a64dc..d6dd3a82d 100644 --- a/backend/pkg/commons/db/db.go +++ b/backend/pkg/commons/db/db.go @@ -2504,3 +2504,97 @@ func CopyToTable[T []any](tableName string, columns []string, data []T) error { } return nil } + +func HasEventsForEpoch(epoch uint64) (bool, error) { + if epoch == 0 { + return true, nil + } + + firstSlot := (epoch - 1) * utils.Config.Chain.ClConfig.SlotsPerEpoch + lastSlot := (epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch) - 1 + var count uint64 + err := ReaderDb.Get(&count, ` + SELECT + COUNT(*) + FROM + consensus_layer_events + WHERE + slot >= $1 AND slot <= $2`, firstSlot, lastSlot) + if err != nil { + return false, fmt.Errorf("error checking for events for epoch: %w", err) + } + + return count > 0, nil +} + +func TransformSwitchToCompoundingRequests(firstSlot, lastSlot uint64, tx *sqlx.Tx) (int64, error) { + res, err := tx.Exec(` + INSERT INTO blocks_switch_to_compounding_requests (block_slot, block_root, request_index, address, validator_index) + SELECT + slot AS slot, + block_root AS block_root, + event_index AS request_index, + decode((data->>'address'), 'base64') AS address, + (data->>'index')::int AS validator_index + FROM consensus_layer_events WHERE event_name = 'SwitchToCompoundingEvent' AND slot >= $1 AND slot <= $2 ON CONFLICT DO NOTHING; + `, firstSlot, lastSlot) + 
if err != nil { + return 0, fmt.Errorf("error transforming switch to compounding requests: %w", err) + } + + switchToCompoundingRequestsProcessed, err := res.RowsAffected() + if err != nil { + return 0, fmt.Errorf("error getting the amount of processed switch to compounding requests: %w", err) + } + + return switchToCompoundingRequestsProcessed, nil +} + +func TransformConsolidationRequests(firstSlot, lastSlot uint64, tx *sqlx.Tx) (int64, error) { + res, err := tx.Exec(` + INSERT INTO blocks_consolidation_requests (block_slot, block_root, request_index, source_index, target_index, amount_consolidated) + SELECT + slot AS slot, + block_root AS block_root, + event_index AS request_index, + (data->>'source_index')::int AS source_index, + (data->>'target_index')::int AS target_index, + (data->>'amount')::bigint AS amount_consolidated + FROM consensus_layer_events WHERE event_name = 'ConsolidationProcessedEvent' AND slot >= $1 AND slot <= $2 ON CONFLICT DO NOTHING; + `, firstSlot, lastSlot) + if err != nil { + return 0, fmt.Errorf("error transforming consolidation requests: %w", err) + } + + consolidationRequestsProcessed, err := res.RowsAffected() + if err != nil { + return 0, fmt.Errorf("error getting the amount of processed consolidation requests: %w", err) + } + + return consolidationRequestsProcessed, nil +} + +func TransformDepositRequests(firstSlot, lastSlot uint64, tx *sqlx.Tx) (int64, error) { + res, err := tx.Exec(` + INSERT INTO blocks_deposit_requests (block_slot, block_root, request_index, pubkey, withdrawal_credentials, amount, signature) + SELECT + slot AS block_slot, + block_root AS block_root, + event_index AS request_index, + decode((data->>'pubkey'), 'base64') AS pubkey, + decode((data->>'withdrawal_credentials'), 'base64')::bytea AS withdrawal_credentials, + (data->>'amount')::bigint AS amount, + decode((data->>'signature'), 'base64')::bytea AS signature + FROM consensus_layer_events WHERE event_name = 'DepositProcessedEvent' AND slot >= $1 AND slot <= $2 ON CONFLICT DO NOTHING; 
+`, firstSlot, lastSlot) + if err != nil { + return 0, fmt.Errorf("error transforming deposit requests: %w", err) + } + + depositRequestsProcessed, err := res.RowsAffected() + if err != nil { + return 0, fmt.Errorf("error getting the amount of processed deposit requests: %w", err) + } + + return depositRequestsProcessed, nil +} diff --git a/backend/pkg/commons/db/migrations/postgres/20250131063102_pectra_tables.sql b/backend/pkg/commons/db/migrations/postgres/20250131063102_pectra_tables.sql new file mode 100644 index 000000000..bf547c166 --- /dev/null +++ b/backend/pkg/commons/db/migrations/postgres/20250131063102_pectra_tables.sql @@ -0,0 +1,88 @@ +-- +goose Up +-- +goose StatementBegin +SELECT + 'creating blocks_consolidation_requests table'; + +CREATE TABLE + IF NOT EXISTS blocks_consolidation_requests ( + block_slot INT NOT NULL, + block_root BYTEA NOT NULL, + request_index INT NOT NULL, + source_address BYTEA NOT NULL, + source_index INT NOT NULL, + target_index INT NOT NULL, + PRIMARY KEY (block_slot, block_root, request_index) + ); + +SELECT + 'creating blocks_withdrawal_requests table'; + +CREATE TABLE + IF NOT EXISTS blocks_withdrawal_requests ( + block_slot INT NOT NULL, + block_root BYTEA NOT NULL, + request_index INT NOT NULL, + source_address BYTEA NOT NULL, + validator_pubkey BYTEA NOT NULL, + amount BIGINT NOT NULL, + PRIMARY KEY (block_slot, block_root, request_index) + ); + +SELECT + 'creating blocks_deposit_requests table'; + +CREATE TABLE + IF NOT EXISTS blocks_deposit_requests ( + block_slot INT NOT NULL, + block_root BYTEA NOT NULL, + request_index INT NOT NULL, + pubkey BYTEA NOT NULL, + withdrawal_credentials BYTEA NOT NULL, + amount BIGINT NOT NULL, + signature BYTEA NOT NULL, + PRIMARY KEY (block_slot, block_root, request_index) + ); + +SELECT + 'creating blocks_switch_to_compounding_requests table'; + +CREATE TABLE + IF NOT EXISTS blocks_switch_to_compounding_requests ( + block_slot int not null, + block_root bytea not null, + 
request_index int not null, + address bytea not null, + validator_index int not null, + primary key (block_slot, block_root, request_index) + ); + +SELECT + 'updating blocks_attestations table'; + +ALTER TABLE blocks_attestations +ADD COLUMN IF NOT EXISTS committeebits BYTEA; + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT + 'dropping blocks_consolidation_requests table'; + +DROP TABLE IF EXISTS blocks_consolidation_requests; + +SELECT + 'dropping blocks_withdrawal_requests table'; + +DROP TABLE IF EXISTS blocks_withdrawal_requests; + +SELECT + 'dropping blocks_deposit_requests table'; + +DROP TABLE IF EXISTS blocks_deposit_requests; + +SELECT + 'dropping blocks_switch_to_compounding_requests table'; + +DROP TABLE IF EXISTS blocks_switch_to_compounding_requests; + +-- +goose StatementEnd \ No newline at end of file diff --git a/backend/pkg/commons/db/migrations/postgres/20250131124812_change_network_data_type.sql b/backend/pkg/commons/db/migrations/postgres/20250131124812_change_network_data_type.sql new file mode 100644 index 000000000..fb661f650 --- /dev/null +++ b/backend/pkg/commons/db/migrations/postgres/20250131124812_change_network_data_type.sql @@ -0,0 +1,10 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE users_val_dashboards ALTER COLUMN network TYPE BIGINT; +ALTER TABLE network_notifications_history ALTER COLUMN network TYPE BIGINT; +-- +goose StatementEnd + +-- +goose Down +SELECT 'down SQL query - we do not revert the network id to SMALLINT as this could cause an out of range error'; +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/backend/pkg/commons/db/user.go b/backend/pkg/commons/db/user.go index f3de874d0..111972ca2 100644 --- a/backend/pkg/commons/db/user.go +++ b/backend/pkg/commons/db/user.go @@ -21,47 +21,15 @@ const week = 7 * day const month = 30 * day const maxJsInt uint64 = 9007199254740991 // 2^53-1 (max safe int in JS) -var freeTierProduct t.PremiumProduct = t.PremiumProduct{ - ProductName: 
"Free", - PremiumPerks: t.PremiumPerks{ - AdFree: false, - ValidatorDashboards: 1, - ValidatorsPerDashboard: 20, - ValidatorGroupsPerDashboard: 1, - ShareCustomDashboards: false, - ManageDashboardViaApi: false, - BulkAdding: false, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 0, - Hourly: 12 * hour, - Daily: 0, - Weekly: 0, - }, - EmailNotificationsPerDay: 10, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 1, - WebhookEndpoints: 1, - MobileAppCustomThemes: false, - MobileAppWidget: false, - MonitorMachines: 1, - MachineMonitoringHistorySeconds: 3600 * 3, - NotificationsMachineCustomThreshold: false, - NotificationsValidatorDashboardGroupEfficiency: false, - }, - PricePerMonthEur: 0, - PricePerYearEur: 0, - ProductIdMonthly: "premium_free", - ProductIdYearly: "premium_free.yearly", -} - var adminPerks = t.PremiumPerks{ - AdFree: false, // admins want to see ads to check ad configuration - ValidatorDashboards: maxJsInt, - ValidatorsPerDashboard: maxJsInt, - ValidatorGroupsPerDashboard: maxJsInt, - ShareCustomDashboards: true, - ManageDashboardViaApi: true, - BulkAdding: true, + AdFree: false, // admins want to see ads to check ad configuration + ValidatorDashboards: maxJsInt, + ValidatorsPerDashboard: maxJsInt, + EffectiveBalancePerDashboard: maxJsInt, + ValidatorGroupsPerDashboard: maxJsInt, + ShareCustomDashboards: true, + ManageDashboardViaApi: true, + BulkAdding: true, ChartHistorySeconds: t.ChartHistorySeconds{ Epoch: maxJsInt, Hourly: maxJsInt, @@ -233,6 +201,7 @@ func GetUserInfo(ctx context.Context, userId uint64, userDbReader *sqlx.DB) (*t. 
foundAddon = true for i := 0; i < addon.Quantity; i++ { userInfo.PremiumPerks.ValidatorsPerDashboard += p.ExtraDashboardValidators + userInfo.PremiumPerks.EffectiveBalancePerDashboard += p.ExtraDashboardEffectiveBalance userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ ProductId: utils.PriceIdToProductId(addon.PriceId), ProductName: p.ProductName, @@ -253,6 +222,10 @@ func GetUserInfo(ctx context.Context, userId uint64, userDbReader *sqlx.DB) (*t. userInfo.PremiumPerks.ValidatorsPerDashboard = productSummary.ValidatorsPerDashboardLimit } + if productSummary.EffectiveBalancePerDashboardLimit < userInfo.PremiumPerks.EffectiveBalancePerDashboard { + userInfo.PremiumPerks.EffectiveBalancePerDashboard = productSummary.EffectiveBalancePerDashboardLimit + } + if userInfo.UserGroup == t.UserGroupAdmin { userInfo.PremiumPerks = adminPerks } @@ -260,10 +233,21 @@ func GetUserInfo(ctx context.Context, userId uint64, userDbReader *sqlx.DB) (*t. return userInfo, nil } +func premiumLimitNetworkfactor() uint64 { + networkFactor := uint64(1) + if utils.Config.Chain.Id == 100 { // gnosis + networkFactor = 5 + } + return networkFactor +} + func GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { // TODO @patrick post-beta put into db instead of hardcoding here and make it configurable - return &t.ProductSummary{ - ValidatorsPerDashboardLimit: 102_000, - StripePublicKey: utils.Config.Frontend.Stripe.PublicKey, + freeTierProduct, err := GetFreeTierProduct(ctx) + factor := premiumLimitNetworkfactor() + summary := t.ProductSummary{ + ValidatorsPerDashboardLimit: 102_000, + EffectiveBalancePerDashboardLimit: uint64(102_000 * 32 * utils.Config.Frontend.ClCurrencyDivisor), + StripePublicKey: utils.Config.Frontend.Stripe.PublicKey, ApiProducts: []t.ApiProduct{ // TODO @patrick post-beta this data is not final yet { ProductId: "api_free", @@ -331,17 +315,18 @@ func GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { // TODO }, }, 
PremiumProducts: []t.PremiumProduct{ - freeTierProduct, + *freeTierProduct, { ProductName: "Guppy", PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 1, - ValidatorsPerDashboard: 100, - ValidatorGroupsPerDashboard: 3, - ShareCustomDashboards: true, - ManageDashboardViaApi: false, - BulkAdding: true, + AdFree: true, + ValidatorDashboards: 1, + ValidatorsPerDashboard: 100 * factor, + EffectiveBalancePerDashboard: uint64(100*32*utils.Config.Frontend.ClCurrencyDivisor) * factor, + ValidatorGroupsPerDashboard: 3, + ShareCustomDashboards: true, + ManageDashboardViaApi: false, + BulkAdding: true, ChartHistorySeconds: t.ChartHistorySeconds{ Epoch: day, Hourly: 7 * day, @@ -369,13 +354,14 @@ func GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { // TODO { ProductName: "Dolphin", PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 2, - ValidatorsPerDashboard: 300, - ValidatorGroupsPerDashboard: 10, - ShareCustomDashboards: true, - ManageDashboardViaApi: false, - BulkAdding: true, + AdFree: true, + ValidatorDashboards: 2, + ValidatorsPerDashboard: 300 * factor, + EffectiveBalancePerDashboard: uint64(300*32*utils.Config.Frontend.ClCurrencyDivisor) * factor, + ValidatorGroupsPerDashboard: 10, + ShareCustomDashboards: true, + ManageDashboardViaApi: false, + BulkAdding: true, ChartHistorySeconds: t.ChartHistorySeconds{ Epoch: 5 * day, Hourly: month, @@ -403,13 +389,14 @@ func GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { // TODO { ProductName: "Orca", PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 2, - ValidatorsPerDashboard: 1000, - ValidatorGroupsPerDashboard: 30, - ShareCustomDashboards: true, - ManageDashboardViaApi: true, - BulkAdding: true, + AdFree: true, + ValidatorDashboards: 2, + ValidatorsPerDashboard: 1000 * factor, + EffectiveBalancePerDashboard: uint64(1000*32*utils.Config.Frontend.ClCurrencyDivisor) * factor, + ValidatorGroupsPerDashboard: 30, + ShareCustomDashboards: 
true, + ManageDashboardViaApi: true, + BulkAdding: true, ChartHistorySeconds: t.ChartHistorySeconds{ Epoch: 3 * week, Hourly: 6 * month, @@ -438,29 +425,67 @@ func GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { // TODO }, ExtraDashboardValidatorsPremiumAddon: []t.ExtraDashboardValidatorsPremiumAddon{ { - ProductName: "1k extra valis per dashboard", - ExtraDashboardValidators: 1000, - PricePerMonthEur: 74.99, - PricePerYearEur: 719.88, - ProductIdMonthly: "vdb_addon_1k", - ProductIdYearly: "vdb_addon_1k.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon1k, - StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon1kYearly, + ProductName: fmt.Sprintf("%d effective balance increase per dashboard", 1_000*32*factor), + ExtraDashboardValidators: 1_000 * factor, + ExtraDashboardEffectiveBalance: uint64(1_000*32*utils.Config.Frontend.ClCurrencyDivisor) * factor, + PricePerMonthEur: 74.99, + PricePerYearEur: 719.88, + ProductIdMonthly: "vdb_addon_1k", + ProductIdYearly: "vdb_addon_1k.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon1k, + StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon1kYearly, }, { - ProductName: "10k extra valis per dashboard", - ExtraDashboardValidators: 10000, - PricePerMonthEur: 449.99, - PricePerYearEur: 4319.88, - ProductIdMonthly: "vdb_addon_10k", - ProductIdYearly: "vdb_addon_10k.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon10k, - StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon10kYearly, + ProductName: fmt.Sprintf("%d effective balance increase per dashboard", 10_000*32*factor), + ExtraDashboardValidators: 10_000 * factor, + ExtraDashboardEffectiveBalance: uint64(10_000*32*utils.Config.Frontend.ClCurrencyDivisor) * factor, + PricePerMonthEur: 449.99, + PricePerYearEur: 4319.88, + ProductIdMonthly: "vdb_addon_10k", + ProductIdYearly: "vdb_addon_10k.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon10k, + 
StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon10kYearly, }, }, - }, nil + } + + return &summary, err } -func GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { - return &freeTierProduct.PremiumPerks, nil +func GetFreeTierProduct(ctx context.Context) (*t.PremiumProduct, error) { + factor := premiumLimitNetworkfactor() + + return &t.PremiumProduct{ + ProductName: "Free", + PremiumPerks: t.PremiumPerks{ + AdFree: false, + ValidatorDashboards: 1, + ValidatorsPerDashboard: 20 * factor, + EffectiveBalancePerDashboard: 20 * 32 * 1e9 * factor, + ValidatorGroupsPerDashboard: 1, + ShareCustomDashboards: false, + ManageDashboardViaApi: false, + BulkAdding: false, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 0, + Hourly: 12 * hour, + Daily: 0, + Weekly: 0, + }, + EmailNotificationsPerDay: 10, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 1, + WebhookEndpoints: 1, + MobileAppCustomThemes: false, + MobileAppWidget: false, + MonitorMachines: 1, + MachineMonitoringHistorySeconds: 3600 * 3, + NotificationsMachineCustomThreshold: false, + NotificationsValidatorDashboardGroupEfficiency: false, + }, + PricePerMonthEur: 0, + PricePerYearEur: 0, + ProductIdMonthly: "premium_free", + ProductIdYearly: "premium_free.yearly", + }, nil } diff --git a/backend/pkg/commons/rpc/erigon.go b/backend/pkg/commons/rpc/erigon.go index eeacf95e6..f21ee5e78 100644 --- a/backend/pkg/commons/rpc/erigon.go +++ b/backend/pkg/commons/rpc/erigon.go @@ -713,7 +713,7 @@ func (client *ErigonClient) GetERC20TokenMetadata(token []byte) (*types.ERC20Met return ret, err } - if err == nil && len(ret.Decimals) == 0 && ret.Symbol == "" && len(ret.TotalSupply) == 0 { + if len(ret.Decimals) == 0 && ret.Symbol == "" && len(ret.TotalSupply) == 0 { // it's possible that a token contract implements the ERC20 interfaces but does not return any values; we use a backup in this case ret = &types.ERC20Metadata{ Decimals: []byte{0x0}, diff --git 
a/backend/pkg/commons/rpc/geth.go b/backend/pkg/commons/rpc/geth.go index 379f82d50..ccbd85340 100644 --- a/backend/pkg/commons/rpc/geth.go +++ b/backend/pkg/commons/rpc/geth.go @@ -443,7 +443,7 @@ func (client *GethClient) GetERC20TokenMetadata(token []byte) (*types.ERC20Metad return ret, err } - if err == nil && len(ret.Decimals) == 0 && ret.Symbol == "" && len(ret.TotalSupply) == 0 { + if len(ret.Decimals) == 0 && ret.Symbol == "" && len(ret.TotalSupply) == 0 { // it's possible that a token contract implements the ERC20 interfaces but does not return any values; we use a backup in this case ret = &types.ERC20Metadata{ Decimals: []byte{0x0}, diff --git a/backend/pkg/commons/rpc/interfaces.go b/backend/pkg/commons/rpc/interfaces.go index 8459f62ad..cf2e847e2 100644 --- a/backend/pkg/commons/rpc/interfaces.go +++ b/backend/pkg/commons/rpc/interfaces.go @@ -21,6 +21,7 @@ type Client interface { GetBalancesForEpoch(epoch int64) (map[uint64]uint64, error) GetValidatorState(epoch uint64) (*constypes.StandardValidatorsResponse, error) GetBlockHeader(slot uint64) (*constypes.StandardBeaconHeaderResponse, error) + GetStandardBeaconState(stateID any) (*constypes.StandardBeaconStateResponse, error) } type Eth1Client interface { diff --git a/backend/pkg/commons/rpc/lighthouse.go b/backend/pkg/commons/rpc/lighthouse.go index d87f8e0f8..f9c5a77cf 100644 --- a/backend/pkg/commons/rpc/lighthouse.go +++ b/backend/pkg/commons/rpc/lighthouse.go @@ -178,12 +178,21 @@ func (lc *LighthouseClient) GetEpochAssignments(epoch uint64) (*types.EpochAssig parsedCommittees, err := lc.cl.GetCommittees(depStateRoot, &epoch, nil, nil) if err != nil { - return nil, fmt.Errorf("error retrieving committees data: %w", err) + if strings.Contains(err.Error(), "epoch out of bounds, too far in future") { + log.Warnf("error retrieving committees data for epoch %v: %v, trying slot based retrieval", epoch, err) + parsedCommittees, err = lc.cl.GetCommittees(epoch*utils.Config.ClConfig.SlotsPerEpoch, &epoch, 
nil, nil) + if err != nil { + return nil, fmt.Errorf("error retrieving committees data: %w", err) + } + } else { + return nil, fmt.Errorf("error retrieving committees data: %w", err) + } } assignments := &types.EpochAssignments{ - ProposerAssignments: make(map[uint64]uint64), - AttestorAssignments: make(map[string]uint64), + ProposerAssignments: make(map[uint64]uint64), + AttestorAssignments: make(map[string]uint64), + AttestationCommitteeLengths: make(map[string]uint64), } // propose @@ -193,6 +202,7 @@ func (lc *LighthouseClient) GetEpochAssignments(epoch uint64) (*types.EpochAssig // attest for _, committee := range parsedCommittees.Data { + assignments.AttestationCommitteeLengths[fmt.Sprintf("%d-%d", committee.Slot, committee.Index)] = uint64(len(committee.Validators)) for i, valIndex := range committee.Validators { k := utils.FormatAttestorAssignmentKey(committee.Slot, committee.Index, uint64(i)) assignments.AttestorAssignments[k] = uint64(valIndex) @@ -616,6 +626,10 @@ func (lc *LighthouseClient) GetBlockBySlot(slot uint64) (*types.Block, error) { Deposits: make([]*types.Deposit, 0), VoluntaryExits: make([]*types.VoluntaryExit, 0), SyncAggregate: nil, + ExecutionRequests: &types.ExecutionRequests{ + Consolidations: make([]*types.ConsolidationExecutionRequest, 0), + Withdrawals: make([]*types.WithdrawalExecutionRequest, 0), + }, } if isFirstSlotOfEpoch { @@ -679,7 +693,7 @@ func (lc *LighthouseClient) GetBlockBySlot(slot uint64) (*types.Block, error) { } // for the first slot of an epoch, also retrieve the epoch assignments - if block.Slot%utils.Config.Chain.ClConfig.SlotsPerEpoch == 0 { + if isFirstSlotOfEpoch { var err error block.EpochAssignments, err = lc.GetEpochAssignments(block.Slot / utils.Config.Chain.ClConfig.SlotsPerEpoch) if err != nil { @@ -743,27 +757,69 @@ func (lc *LighthouseClient) blockFromResponse(parsedHeaders *constypes.StandardB SignedBLSToExecutionChange: make([]*types.SignedBLSToExecutionChange, 
len(parsedBlock.Message.Body.SignedBLSToExecutionChange)), BlobKZGCommitments: make([][]byte, len(parsedBlock.Message.Body.BlobKZGCommitments)), BlobKZGProofs: make([][]byte, len(parsedBlock.Message.Body.BlobKZGCommitments)), - AttestationDuties: make(map[types.ValidatorIndex][]types.Slot), - SyncDuties: make(map[types.ValidatorIndex]bool), + ExecutionRequests: &types.ExecutionRequests{ + Deposits: make([]*types.DepositExecutionRequest, len(parsedBlock.Message.Body.ExecutionRequests.Deposits)), + Consolidations: make([]*types.ConsolidationExecutionRequest, len(parsedBlock.Message.Body.ExecutionRequests.Consolidations)), + Withdrawals: make([]*types.WithdrawalExecutionRequest, len(parsedBlock.Message.Body.ExecutionRequests.Withdrawals)), + }, + AttestationDuties: make(map[types.ValidatorIndex][]types.Slot), + SyncDuties: make(map[types.ValidatorIndex]bool), } for i, c := range parsedBlock.Message.Body.BlobKZGCommitments { block.BlobKZGCommitments[i] = c } + if len(parsedBlock.Message.Body.ExecutionRequests.Consolidations) > 0 { + for i, consolidation := range parsedBlock.Message.Body.ExecutionRequests.Consolidations { + block.ExecutionRequests.Consolidations[i] = &types.ConsolidationExecutionRequest{ + SourceAddress: consolidation.SourceAddress, + SourcePubkey: consolidation.SourcePubkey, + TargetPubkey: consolidation.TargetPubkey, + } + } + } + + if len(parsedBlock.Message.Body.ExecutionRequests.Withdrawals) > 0 { + for i, withdrawal := range parsedBlock.Message.Body.ExecutionRequests.Withdrawals { + block.ExecutionRequests.Withdrawals[i] = &types.WithdrawalExecutionRequest{ + SourceAddress: withdrawal.SourceAddress, + ValidatorPubkey: withdrawal.ValidatorPubkey, + Amount: withdrawal.Amount, + } + } + } + + if len(parsedBlock.Message.Body.ExecutionRequests.Deposits) > 0 { + for i, deposit := range parsedBlock.Message.Body.ExecutionRequests.Deposits { + block.ExecutionRequests.Deposits[i] = &types.DepositExecutionRequest{ + Pubkey: deposit.Pubkey, + 
WithdrawalCredentials: deposit.WithdrawalCredentials, + Amount: deposit.Amount, + Signature: deposit.Signature, + Index: deposit.Index, + } + } + } + if len(parsedBlock.Message.Body.BlobKZGCommitments) > 0 { res, err := lc.GetBlobSidecars(fmt.Sprintf("%#x", block.BlockRoot)) if err != nil { - return nil, err - } - if len(res.Data) != len(parsedBlock.Message.Body.BlobKZGCommitments) { - return nil, fmt.Errorf("error constructing block at slot %v: len(blob_sidecars) != len(block.blob_kzg_commitments): %v != %v", block.Slot, len(res.Data), len(parsedBlock.Message.Body.BlobKZGCommitments)) - } - for i, d := range res.Data { - if !bytes.Equal(d.KzgCommitment, block.BlobKZGCommitments[i]) { - return nil, fmt.Errorf("error constructing block at slot %v: unequal kzg_commitments at index %v: %#x != %#x", block.Slot, i, d.KzgCommitment, block.BlobKZGCommitments[i]) + // TODO: remove this hack for mekong!!! + for i := 0; i < len(block.BlobKZGProofs); i++ { + block.BlobKZGProofs[i] = []byte{} + } + //return nil, err + } else { + if len(res.Data) != len(parsedBlock.Message.Body.BlobKZGCommitments) { + return nil, fmt.Errorf("error constructing block at slot %v: len(blob_sidecars) != len(block.blob_kzg_commitments): %v != %v", block.Slot, len(res.Data), len(parsedBlock.Message.Body.BlobKZGCommitments)) + } + for i, d := range res.Data { + if !bytes.Equal(d.KzgCommitment, block.BlobKZGCommitments[i]) { + return nil, fmt.Errorf("error constructing block at slot %v: unequal kzg_commitments at index %v: %#x != %#x", block.Slot, i, d.KzgCommitment, block.BlobKZGCommitments[i]) + } + block.BlobKZGProofs[i] = d.KzgProof } - block.BlobKZGProofs[i] = d.KzgProof } } @@ -798,43 +854,6 @@ func (lc *LighthouseClient) blockFromResponse(parsedHeaders *constypes.StandardB } if payload := parsedBlock.Message.Body.ExecutionPayload; payload != nil && !bytes.Equal(payload.ParentHash, make([]byte, 32)) { - txs := make([]*types.Transaction, 0, len(payload.Transactions)) - for i, rawTx := range 
payload.Transactions { - tx := &types.Transaction{Raw: rawTx} - var decTx gethtypes.Transaction - if err := decTx.UnmarshalBinary(rawTx); err != nil { - return nil, fmt.Errorf("error parsing tx %d block %x: %w", i, payload.BlockHash, err) - } else { - h := decTx.Hash() - tx.TxHash = h[:] - tx.AccountNonce = decTx.Nonce() - // big endian - tx.Price = decTx.GasPrice().Bytes() - tx.GasLimit = decTx.Gas() - sender, err := lc.signer.Sender(&decTx) - if err != nil { - return nil, fmt.Errorf("transaction with invalid sender (slot: %v, tx-hash: %x): %w", slot, h, err) - } - tx.Sender = sender.Bytes() - if v := decTx.To(); v != nil { - tx.Recipient = v.Bytes() - } else { - tx.Recipient = []byte{} - } - tx.Amount = decTx.Value().Bytes() - tx.Payload = decTx.Data() - tx.MaxPriorityFeePerGas = decTx.GasTipCap().Uint64() - tx.MaxFeePerGas = decTx.GasFeeCap().Uint64() - - if decTx.BlobGasFeeCap() != nil { - tx.MaxFeePerBlobGas = decTx.BlobGasFeeCap().Uint64() - } - for _, h := range decTx.BlobHashes() { - tx.BlobVersionedHashes = append(tx.BlobVersionedHashes, h.Bytes()) - } - } - txs = append(txs, tx) - } withdrawals := make([]*types.Withdrawals, 0, len(payload.Withdrawals)) for _, w := range payload.Withdrawals { withdrawals = append(withdrawals, &types.Withdrawals{ @@ -846,23 +865,23 @@ func (lc *LighthouseClient) blockFromResponse(parsedHeaders *constypes.StandardB } block.ExecutionPayload = &types.ExecutionPayload{ - ParentHash: payload.ParentHash, - FeeRecipient: payload.FeeRecipient, - StateRoot: payload.StateRoot, - ReceiptsRoot: payload.ReceiptsRoot, - LogsBloom: payload.LogsBloom, - Random: payload.PrevRandao, - BlockNumber: payload.BlockNumber, - GasLimit: payload.GasLimit, - GasUsed: payload.GasUsed, - Timestamp: payload.Timestamp, - ExtraData: payload.ExtraData, - BaseFeePerGas: payload.BaseFeePerGas, - BlockHash: payload.BlockHash, - Transactions: txs, - Withdrawals: withdrawals, - BlobGasUsed: payload.BlobGasUsed, - ExcessBlobGas: payload.ExcessBlobGas, + 
ParentHash: payload.ParentHash, + FeeRecipient: payload.FeeRecipient, + StateRoot: payload.StateRoot, + ReceiptsRoot: payload.ReceiptsRoot, + LogsBloom: payload.LogsBloom, + Random: payload.PrevRandao, + BlockNumber: payload.BlockNumber, + GasLimit: payload.GasLimit, + GasUsed: payload.GasUsed, + Timestamp: payload.Timestamp, + ExtraData: payload.ExtraData, + BaseFeePerGas: payload.BaseFeePerGas, + BlockHash: payload.BlockHash, + TransactionsCount: len(payload.Transactions), + Withdrawals: withdrawals, + BlobGasUsed: payload.BlobGasUsed, + ExcessBlobGas: payload.ExcessBlobGas, } } @@ -933,6 +952,7 @@ func (lc *LighthouseClient) blockFromResponse(parsedHeaders *constypes.StandardB for i, attestation := range parsedBlock.Message.Body.Attestations { a := &types.Attestation{ AggregationBits: attestation.AggregationBits, + CommitteeBits: attestation.CommitteeBits, Attesters: []uint64{}, Data: &types.AttestationData{ Slot: attestation.Data.Slot, @@ -951,24 +971,52 @@ func (lc *LighthouseClient) blockFromResponse(parsedHeaders *constypes.StandardB } aggregationBits := bitfield.Bitlist(a.AggregationBits) + committeeBits := bitfield.Bitvector64(a.CommitteeBits) + assignments, err := lc.GetEpochAssignments(a.Data.Slot / utils.Config.Chain.ClConfig.SlotsPerEpoch) if err != nil { return nil, fmt.Errorf("error receiving epoch assignment for epoch %v: %w", a.Data.Slot/utils.Config.Chain.ClConfig.SlotsPerEpoch, err) } + if len(a.CommitteeBits) == 0 { + for i := uint64(0); i < aggregationBits.Len(); i++ { + if aggregationBits.BitAt(i) { + validator, found := assignments.AttestorAssignments[utils.FormatAttestorAssignmentKey(a.Data.Slot, uint64(a.Data.CommitteeIndex), i)] + if !found { // This should never happen! 
+ validator = 0 + log.Fatal(fmt.Errorf("error retrieving assigned validator for attestation %v of block %v for slot %v committee index %v member index %v", i, block.Slot, a.Data.Slot, a.Data.CommitteeIndex, i), "", 0) + } + a.Attesters = append(a.Attesters, validator) - for i := uint64(0); i < aggregationBits.Len(); i++ { - if aggregationBits.BitAt(i) { - validator, found := assignments.AttestorAssignments[utils.FormatAttestorAssignmentKey(a.Data.Slot, uint64(a.Data.CommitteeIndex), i)] - if !found { // This should never happen! - validator = 0 - log.Fatal(fmt.Errorf("error retrieving assigned validator for attestation %v of block %v for slot %v committee index %v member index %v", i, block.Slot, a.Data.Slot, a.Data.CommitteeIndex, i), "", 0) + if block.AttestationDuties[types.ValidatorIndex(validator)] == nil { + block.AttestationDuties[types.ValidatorIndex(validator)] = []types.Slot{types.Slot(a.Data.Slot)} + } else { + block.AttestationDuties[types.ValidatorIndex(validator)] = append(block.AttestationDuties[types.ValidatorIndex(validator)], types.Slot(a.Data.Slot)) + } } - a.Attesters = append(a.Attesters, validator) - - if block.AttestationDuties[types.ValidatorIndex(validator)] == nil { - block.AttestationDuties[types.ValidatorIndex(validator)] = []types.Slot{types.Slot(a.Data.Slot)} - } else { - block.AttestationDuties[types.ValidatorIndex(validator)] = append(block.AttestationDuties[types.ValidatorIndex(validator)], types.Slot(a.Data.Slot)) + } + } else { + attestationsBitsOffset := uint64(0) + for i := uint64(0); i < committeeBits.Len(); i++ { + if committeeBits.BitAt(i) { + committeeLength := assignments.AttestationCommitteeLengths[fmt.Sprintf("%d-%d", a.Data.Slot, i)] + for j := uint64(0); j < committeeLength; j++ { + if aggregationBits.BitAt(attestationsBitsOffset + j) { + validator, found := assignments.AttestorAssignments[utils.FormatAttestorAssignmentKey(a.Data.Slot, i, j)] + if !found { // This should never happen! 
+ validator = 0 + log.Fatal(fmt.Errorf("error retrieving assigned validator for attestation %v of block %v for slot %v committee index %v member index %v", i, block.Slot, a.Data.Slot, a.Data.CommitteeIndex, i), "", 0) + } + //log.Infof("attestation %v of block %v for slot %v committee index %v member index %v validator %v", i, block.Slot, a.Data.Slot, i, attestationsBitsOffset+j, validator) + a.Attesters = append(a.Attesters, validator) + + if block.AttestationDuties[types.ValidatorIndex(validator)] == nil { + block.AttestationDuties[types.ValidatorIndex(validator)] = []types.Slot{types.Slot(a.Data.Slot)} + } else { + block.AttestationDuties[types.ValidatorIndex(validator)] = append(block.AttestationDuties[types.ValidatorIndex(validator)], types.Slot(a.Data.Slot)) + } + } + } + attestationsBitsOffset += committeeLength } } } @@ -1128,3 +1176,7 @@ type ExecutionPayload struct { BlobGasUsed constypes.Uint64Str `json:"blob_gas_used"` ExcessBlobGas constypes.Uint64Str `json:"excess_blob_gas"` } + +func (lc *LighthouseClient) GetStandardBeaconState(stateID any) (*constypes.StandardBeaconStateResponse, error) { + return lc.cl.GetState(stateID) +} diff --git a/backend/pkg/commons/types/chain.go b/backend/pkg/commons/types/chain.go index 36321267e..1a211abc0 100644 --- a/backend/pkg/commons/types/chain.go +++ b/backend/pkg/commons/types/chain.go @@ -28,6 +28,8 @@ type ClChainConfig struct { CappellaForkEpoch uint64 `yaml:"CAPELLA_FORK_EPOCH"` DenebForkVersion string `yaml:"DENEB_FORK_VERSION"` DenebForkEpoch uint64 `yaml:"DENEB_FORK_EPOCH"` + ElectraForkVersion string `yaml:"ELECTRA_FORK_VERSION"` + ElectraForkEpoch uint64 `yaml:"ELECTRA_FORK_EPOCH"` Eip6110ForkVersion string `yaml:"EIP6110_FORK_VERSION"` Eip6110ForkEpoch uint64 `yaml:"EIP6110_FORK_EPOCH"` Eip7002ForkVersion string `yaml:"EIP7002_FORK_VERSION"` diff --git a/backend/pkg/commons/types/exporter.go b/backend/pkg/commons/types/exporter.go index 197638472..ea3e41727 100644 --- 
a/backend/pkg/commons/types/exporter.go +++ b/backend/pkg/commons/types/exporter.go @@ -130,11 +130,47 @@ type Block struct { ExcessBlobGas uint64 BlobKZGCommitments [][]byte BlobKZGProofs [][]byte + ExecutionRequests *ExecutionRequests + AttestationDuties map[ValidatorIndex][]Slot SyncDuties map[ValidatorIndex]bool Finalized bool EpochAssignments *EpochAssignments Validators []*Validator + QueuedExecutionRequest *ExecutionRequests + ProcessedExecutionRequests *ExecutionRequests +} + +type ExecutionRequests struct { + Deposits []*DepositExecutionRequest + Withdrawals []*WithdrawalExecutionRequest + Consolidations []*ConsolidationExecutionRequest +} + +type DepositExecutionRequest struct { + Pubkey []byte + WithdrawalCredentials []byte + Amount uint64 + Signature []byte + Index uint64 + SlotFromState uint64 +} + +type WithdrawalExecutionRequest struct { + SourceAddress []byte + ValidatorPubkey []byte + ValidatorIndex uint64 + Amount uint64 + WithdrawableEpoch uint64 +} + +type ConsolidationExecutionRequest struct { + SourceAddress []byte + SourcePubkey []byte + SourceIndex uint64 + TargetPubkey []byte + TargetIndex uint64 + AmountConsolidated uint64 } type SignedBLSToExecutionChange struct { @@ -170,23 +206,23 @@ type Transaction struct { } type ExecutionPayload struct { - ParentHash []byte - FeeRecipient []byte - StateRoot []byte - ReceiptsRoot []byte - LogsBloom []byte - Random []byte - BlockNumber uint64 - GasLimit uint64 - GasUsed uint64 - Timestamp uint64 - ExtraData []byte - BaseFeePerGas uint64 - BlockHash []byte - Transactions []*Transaction - Withdrawals []*Withdrawals - BlobGasUsed uint64 - ExcessBlobGas uint64 + ParentHash []byte + FeeRecipient []byte + StateRoot []byte + ReceiptsRoot []byte + LogsBloom []byte + Random []byte + BlockNumber uint64 + GasLimit uint64 + GasUsed uint64 + Timestamp uint64 + ExtraData []byte + BaseFeePerGas uint64 + BlockHash []byte + TransactionsCount int + Withdrawals []*Withdrawals + BlobGasUsed uint64 + ExcessBlobGas uint64 
} type Withdrawals struct { @@ -243,6 +279,7 @@ type IndexedAttestation struct { // Attestation is a struct to hold attestation header data type Attestation struct { AggregationBits []byte + CommitteeBits []byte Attesters []uint64 Data *AttestationData Signature []byte @@ -297,9 +334,10 @@ type CanonBlock struct { // EpochAssignments is a struct to hold epoch assignment data type EpochAssignments struct { - ProposerAssignments map[uint64]uint64 - AttestorAssignments map[string]uint64 - SyncAssignments []uint64 + ProposerAssignments map[uint64]uint64 + AttestorAssignments map[string]uint64 + AttestationCommitteeLengths map[string]uint64 + SyncAssignments []uint64 } // ELDeposit is a struct to hold execution layer deposit data diff --git a/backend/pkg/commons/utils/config.go b/backend/pkg/commons/utils/config.go index 134f926fc..d07bea4e9 100644 --- a/backend/pkg/commons/utils/config.go +++ b/backend/pkg/commons/utils/config.go @@ -133,6 +133,10 @@ func ReadConfig(cfg *types.Config, path string) error { cfg.Chain.GenesisTimestamp = 1638993340 case "holesky": cfg.Chain.GenesisTimestamp = 1695902400 + case "pectra-devnet-5": + cfg.Chain.GenesisTimestamp = 1737034260 + case "pectra-devnet-6": + cfg.Chain.GenesisTimestamp = 1738603860 default: return fmt.Errorf("tried to set known genesis-timestamp, but unknown chain-name") } @@ -291,6 +295,12 @@ func setELConfig(cfg *types.Config) error { err = yaml.Unmarshal([]byte(config.GnosisChainYml), &minimalCfg) case "holesky": err = yaml.Unmarshal([]byte(config.HoleskyChainYml), &minimalCfg) + case "mekong": + err = yaml.Unmarshal([]byte(config.MekongChainYml), &minimalCfg) + case "pectra-devnet-5": + err = yaml.Unmarshal([]byte(config.PectraDevnet5ChainYml), &minimalCfg) + case "pectra-devnet-6": + err = yaml.Unmarshal([]byte(config.PectraDevnet6ChainYml), &minimalCfg) default: return fmt.Errorf("tried to set known chain-config, but unknown chain-name: %v (path: %v)", cfg.Chain.Name, cfg.Chain.ElConfigPath) } @@ -341,6 +351,10 
@@ func setCLConfig(cfg *types.Config) error { err = yaml.Unmarshal([]byte(config.GnosisChainYml), &cfg.Chain.ClConfig) case "holesky": err = yaml.Unmarshal([]byte(config.HoleskyChainYml), &cfg.Chain.ClConfig) + case "pectra-devnet-5": + err = yaml.Unmarshal([]byte(config.PectraDevnet5ChainYml), &cfg.Chain.ClConfig) + case "pectra-devnet-6": + err = yaml.Unmarshal([]byte(config.PectraDevnet6ChainYml), &cfg.Chain.ClConfig) default: return fmt.Errorf("tried to set known chain-config, but unknown chain-name: %v (path: %v)", cfg.Chain.Name, cfg.Chain.ClConfigPath) } @@ -378,6 +392,10 @@ func setCLConfig(cfg *types.Config) error { log.Warnf("DenebForkEpoch not set, defaulting to maxForkEpoch") jr.Data.DenebForkEpoch = &maxForkEpoch } + if jr.Data.ElectraForkEpoch == nil { + log.Warnf("ElectraForkEpoch not set, defaulting to maxForkEpoch") + jr.Data.ElectraForkEpoch = &maxForkEpoch + } chainCfg := types.ClChainConfig{ PresetBase: jr.Data.PresetBase, @@ -397,6 +415,8 @@ func setCLConfig(cfg *types.Config) error { CappellaForkEpoch: *jr.Data.CapellaForkEpoch, DenebForkVersion: jr.Data.DenebForkVersion, DenebForkEpoch: *jr.Data.DenebForkEpoch, + ElectraForkVersion: jr.Data.ElectraForkVersion, + ElectraForkEpoch: *jr.Data.ElectraForkEpoch, SecondsPerSlot: uint64(jr.Data.SecondsPerSlot), SecondsPerEth1Block: uint64(jr.Data.SecondsPerEth1Block), MinValidatorWithdrawabilityDelay: uint64(jr.Data.MinValidatorWithdrawabilityDelay), diff --git a/backend/pkg/commons/utils/units.go b/backend/pkg/commons/utils/units.go index ec92d6f47..3978942b8 100644 --- a/backend/pkg/commons/utils/units.go +++ b/backend/pkg/commons/utils/units.go @@ -30,3 +30,7 @@ func GWeiToWei(gwei *big.Int) decimal.Decimal { func GWeiBytesToWei(gwei []byte) decimal.Decimal { return GWeiToWei(new(big.Int).SetBytes(gwei)) } + +func EtherToGwei(ether *big.Int) decimal.Decimal { + return decimal.NewFromBigInt(ether, 0).Mul(decimal.NewFromInt(params.GWei)) +} diff --git a/backend/pkg/commons/utils/utils.go 
b/backend/pkg/commons/utils/utils.go index 20c45e829..d931cb570 100644 --- a/backend/pkg/commons/utils/utils.go +++ b/backend/pkg/commons/utils/utils.go @@ -406,3 +406,10 @@ func FirstN(input string, n int) string { } return input[:n] } + +func Min(a, b uint64) uint64 { + if a < b { + return a + } + return b +} diff --git a/backend/pkg/consapi/client.go b/backend/pkg/consapi/client.go index 017083078..ccaca28b4 100644 --- a/backend/pkg/consapi/client.go +++ b/backend/pkg/consapi/client.go @@ -62,6 +62,9 @@ type ClientInt interface { // /eth/v1/events GetEvents(topics []types.EventTopic) chan *types.EventResponse + + // /eth/v1/debug/beacon/states/%v + GetState(stateID any) (*types.StandardBeaconStateResponse, error) } type NodeClient struct { Endpoint string diff --git a/backend/pkg/consapi/client_node.go b/backend/pkg/consapi/client_node.go index a0c136d01..344b64d76 100644 --- a/backend/pkg/consapi/client_node.go +++ b/backend/pkg/consapi/client_node.go @@ -178,3 +178,8 @@ func (r *NodeClient) GetEvents(topics []types.EventTopic) chan *types.EventRespo }() return responseCh } + +func (r *NodeClient) GetState(stateID any) (*types.StandardBeaconStateResponse, error) { + requestURL := fmt.Sprintf("%s/eth/v1/debug/beacon/states/%v", r.Endpoint, stateID) + return network.Get[types.StandardBeaconStateResponse](r.httpClient, requestURL) +} diff --git a/backend/pkg/consapi/types/slot.go b/backend/pkg/consapi/types/slot.go index cfc72acfa..c9c5d41c5 100644 --- a/backend/pkg/consapi/types/slot.go +++ b/backend/pkg/consapi/types/slot.go @@ -1,6 +1,8 @@ package types -import "github.com/ethereum/go-ethereum/common/hexutil" +import ( + "github.com/ethereum/go-ethereum/common/hexutil" +) // /eth/v2/beacon/blocks/{block_id} type StandardBeaconSlotResponse struct { @@ -37,11 +39,38 @@ type AnySignedBlock struct { // present only after deneb BlobKZGCommitments []hexutil.Bytes `json:"blob_kzg_commitments"` + + // present only after pectra + ExecutionRequests struct { + Deposits 
[]DepositExecutionRequest `json:"deposits"` + Withdrawals []WithdrawalExecutionRequest `json:"withdrawals"` + Consolidations []ConsolidationExecutionRequest `json:"consolidations"` + } `json:"execution_requests"` } `json:"body"` } `json:"message"` Signature hexutil.Bytes `json:"signature"` } +type DepositExecutionRequest struct { + Pubkey hexutil.Bytes `json:"pubkey"` + WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"` + Amount uint64 `json:"amount,string"` + Signature hexutil.Bytes `json:"signature"` + Index uint64 `json:"index,string"` +} + +type WithdrawalExecutionRequest struct { + SourceAddress hexutil.Bytes `json:"source_address"` + ValidatorPubkey hexutil.Bytes `json:"validator_pubkey"` + Amount uint64 `json:"amount,string"` +} + +type ConsolidationExecutionRequest struct { + SourceAddress hexutil.Bytes `json:"source_address"` + SourcePubkey hexutil.Bytes `json:"source_pubkey"` + TargetPubkey hexutil.Bytes `json:"target_pubkey"` +} + type ProposerSlashing struct { SignedHeader1 struct { Message struct { @@ -120,6 +149,7 @@ func (a *AttesterSlashing) GetSlashedIndices() []uint64 { type Attestation struct { AggregationBits hexutil.Bytes `json:"aggregation_bits"` + CommitteeBits hexutil.Bytes `json:"committee_bits"` Signature hexutil.Bytes `json:"signature"` Data struct { Slot uint64 `json:"slot,string"` diff --git a/backend/pkg/consapi/types/spec.go b/backend/pkg/consapi/types/spec.go index 181fd7b65..fe6e06f4b 100644 --- a/backend/pkg/consapi/types/spec.go +++ b/backend/pkg/consapi/types/spec.go @@ -23,6 +23,8 @@ type StandardSpec struct { CapellaForkEpoch *uint64 `json:"CAPELLA_FORK_EPOCH,string"` DenebForkVersion string `json:"DENEB_FORK_VERSION"` DenebForkEpoch *uint64 `json:"DENEB_FORK_EPOCH,string"` + ElectraForkVersion string `json:"ELECTRA_FORK_VERSION"` + ElectraForkEpoch *uint64 `json:"ELECTRA_FORK_EPOCH,string"` SecondsPerSlot int64 `json:"SECONDS_PER_SLOT,string"` SecondsPerEth1Block int64 `json:"SECONDS_PER_ETH1_BLOCK,string"` 
MinValidatorWithdrawabilityDelay int64 `json:"MIN_VALIDATOR_WITHDRAWABILITY_DELAY,string"` diff --git a/backend/pkg/consapi/types/state.go b/backend/pkg/consapi/types/state.go new file mode 100644 index 000000000..868d91166 --- /dev/null +++ b/backend/pkg/consapi/types/state.go @@ -0,0 +1,37 @@ +package types + +import "github.com/ethereum/go-ethereum/common/hexutil" + +type StandardBeaconStateResponse struct { + ExecutionOptimistic bool `json:"execution_optimistic"` + Finalized bool `json:"finalized"` + Data struct { + Balances []Uint64Str `json:"balances"` + Validators []struct { + Pubkey hexutil.Bytes `json:"pubkey"` + WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"` + EffectiveBalance uint64 `json:"effective_balance,string"` + Slashed bool `json:"slashed"` + ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch,string"` + ActivationEpoch uint64 `json:"activation_epoch,string"` + ExitEpoch uint64 `json:"exit_epoch,string"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch,string"` + } `json:"validators"` + PendingConsolidations []struct { + SourceIndex uint64 `json:"source_index,string"` + TargetIndex uint64 `json:"target_index,string"` + } `json:"pending_consolidations"` + PendingPartialWithdrawals []struct { + ValidatorIndex uint64 `json:"validator_index,string"` + Amount uint64 `json:"amount,string"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch,string"` + } `json:"pending_partial_withdrawals"` + PendingDeposits []struct { + Pubkey hexutil.Bytes `json:"pubkey"` + WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"` + Amount uint64 `json:"amount,string"` + Signature hexutil.Bytes `json:"signature"` + Slot uint64 `json:"slot,string"` + } `json:"pending_deposits"` + } `json:"data"` +} diff --git a/backend/pkg/consapi/types/validator.go b/backend/pkg/consapi/types/validator.go index 4a2cd0674..ad27c38e9 100644 --- a/backend/pkg/consapi/types/validator.go +++ b/backend/pkg/consapi/types/validator.go @@
-1,6 +1,8 @@ package types -import "github.com/ethereum/go-ethereum/common/hexutil" +import ( + "github.com/ethereum/go-ethereum/common/hexutil" +) const ( // Node statuses diff --git a/backend/pkg/exporter/db/db.go b/backend/pkg/exporter/db/db.go index 033d652ea..bd909b387 100644 --- a/backend/pkg/exporter/db/db.go +++ b/backend/pkg/exporter/db/db.go @@ -59,7 +59,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1) ON CONFLICT (block_hash) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtExecutionPayload: %w", err) } defer stmtExecutionPayload.Close() @@ -68,7 +68,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35, $36, $37, $38, $39, $40) ON CONFLICT (slot, blockroot) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtBlock: %w", err) } defer stmtBlock.Close() @@ -77,7 +77,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (block_slot, block_root, withdrawalindex) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtWithdrawals: %w", err) } defer stmtWithdrawals.Close() @@ -86,7 +86,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (block_slot, block_root, validatorindex) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtBLSChange: %w", err) } defer stmtBLSChange.Close() @@ -95,7 +95,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) ON CONFLICT (block_slot, block_index) DO NOTHING`) if err != nil { - return err + return 
fmt.Errorf("error preparing stmtProposerSlashing: %w", err) } defer stmtProposerSlashing.Close() @@ -104,16 +104,16 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21) ON CONFLICT (block_slot, block_index) DO UPDATE SET attestation1_indices = excluded.attestation1_indices, attestation2_indices = excluded.attestation2_indices`) if err != nil { - return err + return fmt.Errorf("error preparing stmtAttesterSlashing: %w", err) } defer stmtAttesterSlashing.Close() stmtAttestations, err := tx.Prepare(` - INSERT INTO blocks_attestations (block_slot, block_index, block_root, aggregationbits, validators, signature, slot, committeeindex, beaconblockroot, source_epoch, source_root, target_epoch, target_root) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) + INSERT INTO blocks_attestations (block_slot, block_index, block_root, aggregationbits, validators, signature, slot, committeeindex, beaconblockroot, source_epoch, source_root, target_epoch, target_root, committeebits) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) ON CONFLICT (block_slot, block_index) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtAttestations: %w", err) } defer stmtAttestations.Close() @@ -122,7 +122,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) ON CONFLICT (block_slot, block_index) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtDeposits: %w", err) } defer stmtDeposits.Close() @@ -131,7 +131,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (block_root, index) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtBlobs: %w", err) } defer stmtBlobs.Close() @@ -140,7 
+140,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (block_slot, block_index) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtVoluntaryExits: %w", err) } defer stmtVoluntaryExits.Close() @@ -149,7 +149,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4) ON CONFLICT (epoch, validatorindex, proposerslot) DO UPDATE SET status = excluded.status`) if err != nil { - return err + return fmt.Errorf("error preparing stmtProposalAssignments: %w", err) } defer stmtProposalAssignments.Close() @@ -211,7 +211,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo ExtraData []byte BaseFeePerGas *uint64 BlockHash []byte - TxCount *int64 + TxCount *int WithdrawalCount *int64 BlobGasUsed *uint64 ExcessBlobGas *uint64 @@ -221,7 +221,6 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo execData := new(exectionPayloadData) if b.ExecutionPayload != nil { - txCount := int64(len(b.ExecutionPayload.Transactions)) withdrawalCount := int64(len(b.ExecutionPayload.Withdrawals)) blobTxCount := int64(len(b.BlobKZGCommitments)) execData = &exectionPayloadData{ @@ -238,7 +237,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo ExtraData: b.ExecutionPayload.ExtraData, BaseFeePerGas: &b.ExecutionPayload.BaseFeePerGas, BlockHash: b.ExecutionPayload.BlockHash, - TxCount: &txCount, + TxCount: &b.ExecutionPayload.TransactionsCount, WithdrawalCount: &withdrawalCount, BlobGasUsed: &b.ExecutionPayload.BlobGasUsed, ExcessBlobGas: &b.ExecutionPayload.ExcessBlobGas, @@ -328,8 +327,9 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo return fmt.Errorf("error executing stmtAttesterSlashing for block %v index %v: %w", b.Slot, i, err) } } + for i, a := range b.Attestations { - _, err = 
stmtAttestations.Exec(b.Slot, i, b.BlockRoot, a.AggregationBits, pq.Array(a.Attesters), a.Signature, a.Data.Slot, a.Data.CommitteeIndex, a.Data.BeaconBlockRoot, a.Data.Source.Epoch, a.Data.Source.Root, a.Data.Target.Epoch, a.Data.Target.Root) + _, err = stmtAttestations.Exec(b.Slot, i, b.BlockRoot, a.AggregationBits, pq.Array(a.Attesters), a.Signature, a.Data.Slot, a.Data.CommitteeIndex, a.Data.BeaconBlockRoot, a.Data.Source.Epoch, a.Data.Source.Root, a.Data.Target.Epoch, a.Data.Target.Root, a.CommitteeBits) if err != nil { return fmt.Errorf("error executing stmtAttestations for block %v index %v: %w", b.Slot, i, err) } diff --git a/backend/pkg/exporter/modules/execution_rewards_finalizer.go b/backend/pkg/exporter/modules/execution_rewards_finalizer.go index 3659648cc..64f58c5b0 100644 --- a/backend/pkg/exporter/modules/execution_rewards_finalizer.go +++ b/backend/pkg/exporter/modules/execution_rewards_finalizer.go @@ -80,7 +80,7 @@ func (d *executionRewardsFinalizer) maintainTable() (err error) { var latestFinalizedSlot int64 err = db.ReaderDb.Get(&latestFinalizedSlot, ` SELECT - max(slot) + COALESCE(max(slot), 0) FROM blocks WHERE diff --git a/backend/pkg/exporter/modules/slot_exporter.go b/backend/pkg/exporter/modules/slot_exporter.go index da431b5e1..03b337444 100644 --- a/backend/pkg/exporter/modules/slot_exporter.go +++ b/backend/pkg/exporter/modules/slot_exporter.go @@ -101,11 +101,13 @@ func (d *slotExporterData) OnHead(_ *constypes.StandardEventHeadResponse) (err e defer utils.Rollback(tx) if d.FirstRun { + log.Infof("performing first run consistency checks") // get all slots we currently have in the database dbSlots, err := db.GetAllSlots(tx) if err != nil { return fmt.Errorf("error retrieving all db slots: %w", err) } + log.Info("retrieved all exported slots from the database") if len(dbSlots) > 0 { if dbSlots[0] != 0 { @@ -122,6 +124,7 @@ func (d *slotExporterData) OnHead(_ *constypes.StandardEventHeadResponse) (err e } if len(dbSlots) > 1 { + 
log.Info("performing gap checks") // export any gaps we might have (for whatever reason) for slotIndex := 1; slotIndex < len(dbSlots); slotIndex++ { previousSlot := dbSlots[slotIndex-1] @@ -170,7 +173,7 @@ func (d *slotExporterData) OnHead(_ *constypes.StandardEventHeadResponse) (err e slotsExported++ // in case of large export runs, export at most 10 epochs per tx - if slotsExported == int(utils.Config.Chain.ClConfig.SlotsPerEpoch)*10 { + if slotsExported == int(utils.Config.Chain.ClConfig.SlotsPerEpoch)*1 { // TODO: change this back to 10 err := tx.Commit() if err != nil { @@ -354,8 +357,43 @@ func ExportSlot(client rpc.Client, slot uint64, isHeadEpoch bool, tx *sqlx.Tx) e if block.EpochAssignments != nil { // export the epoch assignments as they are included in the first slot of an epoch epoch := utils.EpochOfSlot(block.Slot) - log.Infof("exporting duties & balances for epoch %v", epoch) + log.Infof("checking that events have been loaded for epoch %v", epoch) + for ; ; time.Sleep(time.Second) { + exported, err := db.HasEventsForEpoch(epoch) + if err != nil { + return fmt.Errorf("error retrieving events for epoch %v: %w", epoch, err) + } + if exported { + break + } + log.Infof("events for epoch %v have not been loaded yet, waiting", epoch) + } + log.Infof("events for epoch %v have been loaded, transforming consolidations & deposits", epoch) + + if epoch > 0 { + firstSlot := (epoch - 1) * utils.Config.Chain.ClConfig.SlotsPerEpoch + lastSlot := (epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch) - 1 + + switchToCompoundingRequestsProcessed, err := db.TransformSwitchToCompoundingRequests(firstSlot, lastSlot, tx) + if err != nil { + return fmt.Errorf("error transforming switch to compounding requests for epoch %v: %w", epoch, err) + } + log.Infof("transformed switch to compounding requests for epoch %v, processed %d requests", epoch, switchToCompoundingRequestsProcessed) + consolidationRequestsProcessed, err := db.TransformConsolidationRequests(firstSlot, lastSlot, tx) + if
err != nil { + return fmt.Errorf("error transforming consolidation requests for epoch %v: %w", epoch, err) + } + log.Infof("transformed consolidations for epoch %v, processed %d requests", epoch, consolidationRequestsProcessed) + + depositRequestsProcessed, err := db.TransformDepositRequests(firstSlot, lastSlot, tx) + if err != nil { + return fmt.Errorf("error transforming deposit requests for epoch %v: %w", epoch, err) + } + log.Infof("transformed deposits for epoch %v, processed %d requests", epoch, depositRequestsProcessed) + } + + log.Infof("exporting duties & balances for epoch %v", epoch) // prepare the duties for export to bigtable syncDutiesEpoch := make(map[types.Slot]map[types.ValidatorIndex]bool) attDutiesEpoch := make(map[types.Slot]map[types.ValidatorIndex][]types.Slot) @@ -478,8 +516,9 @@ func ExportSlot(client rpc.Client, slot uint64, isHeadEpoch bool, tx *sqlx.Tx) e } return nil }) + // if we are exporting the head epoch, update the validator db table - if isHeadEpoch { + if isHeadEpoch || epoch%5 == 0 { // this function sets exports the validator status into the db // and also updates the status field in the validators array err := edb.SaveValidators(epoch, block.Validators, client, 10000, tx) diff --git a/frontend/types/api/dashboard.ts b/frontend/types/api/dashboard.ts index 16a840350..8d16e6652 100644 --- a/frontend/types/api/dashboard.ts +++ b/frontend/types/api/dashboard.ts @@ -16,6 +16,7 @@ export interface ValidatorDashboard { public_ids?: VDBPublicId[]; is_archived: boolean; archived_reason?: 'user' | 'dashboard_limit' | 'validator_limit' | 'group_limit'; + effective_balance: number /* uint64 */; validator_count: number /* uint64 */; group_count: number /* uint64 */; } diff --git a/frontend/types/api/search.ts b/frontend/types/api/search.ts index 4c1beaca9..8209baf69 100644 --- a/frontend/types/api/search.ts +++ b/frontend/types/api/search.ts @@ -29,6 +29,7 @@ export interface SearchValidatorsByGraffiti { export interface SearchResult { 
type: string; chain_id: number /* uint64 */; + total_effective_balance: number /* uint64 */; value: any; } export interface InternalPostSearchResponse { diff --git a/frontend/types/api/user.ts b/frontend/types/api/user.ts index 6b8dbcd6b..481ac8a74 100644 --- a/frontend/types/api/user.ts +++ b/frontend/types/api/user.ts @@ -54,7 +54,8 @@ export const ProductStoreAndroidPlaystore: ProductStore = "android-playstore"; export const ProductStoreEthpool: ProductStore = "ethpool"; export const ProductStoreCustom: ProductStore = "custom"; export interface ProductSummary { - validators_per_dashboard_limit: number /* uint64 */; + validators_per_dashboard_limit: number /* uint64 */; // remove after Pectra + effective_balance_per_dashboard_limit: number /* uint64 */; stripe_public_key: string; api_products: ApiProduct[]; premium_products: PremiumProduct[]; @@ -94,7 +95,8 @@ export interface PremiumProduct { } export interface ExtraDashboardValidatorsPremiumAddon { product_name: string; - extra_dashboard_validators: number /* uint64 */; + extra_dashboard_validators: number /* uint64 */; // remove after Pectra + extra_dashboard_effective_balance: number /* uint64 */; price_per_year_eur: number /* float64 */; price_per_month_eur: number /* float64 */; product_id_monthly: string; @@ -105,7 +107,8 @@ export interface ExtraDashboardValidatorsPremiumAddon { export interface PremiumPerks { ad_free: boolean; // note that this is somhow redunant, since there is already ApiPerks.NoAds validator_dashboards: number /* uint64 */; - validators_per_dashboard: number /* uint64 */; + validators_per_dashboard: number /* uint64 */; // remove after Pectra + effective_balance_per_dashboard: number /* uint64 */; validator_groups_per_dashboard: number /* uint64 */; share_custom_dashboards: boolean; manage_dashboard_via_api: boolean; diff --git a/frontend/types/network.ts b/frontend/types/network.ts index c5e0d651e..9a8bddf4e 100644 --- a/frontend/types/network.ts +++ b/frontend/types/network.ts @@ 
-16,7 +16,8 @@ const ChainIDs = { Gnosis: 100, Holesky: 17000, - + Pectra_Devnet_5: 7088110746, + Pectra_Devnet_6: 7072151312, Sepolia: 11155111, } as const @@ -112,6 +113,46 @@ export const ChainInfo: Record = { slotsPerEpoch: 32, timeStampSlot0: 1695902400, }, + [ChainIDs.Pectra_Devnet_5]: { + clCurrency: 'ETH', + description: 'Devnet', + elCurrency: 'ETH', + family: ChainFamily.Ethereum, + mainCurrency: 'ETH', + mainNet: ChainIDs.Ethereum, + name: 'Ethereum Pectra Devnet 5', + nameParts: [ + 'Ethereum', + 'Pectra', + 'Devnet', + '5', + ], + priority: 41, + secondsPerSlot: 12, + shortName: 'Pectra', + slotsPerEpoch: 32, + timeStampSlot0: 1737034260, + }, + [ChainIDs.Pectra_Devnet_6]: { + clCurrency: 'ETH', + description: 'Devnet', + elCurrency: 'ETH', + family: ChainFamily.Ethereum, + mainCurrency: 'ETH', + mainNet: ChainIDs.Ethereum, + name: 'Ethereum Pectra Devnet 6', + nameParts: [ + 'Ethereum', + 'Pectra', + 'Devnet', + '6', + ], + priority: 42, + secondsPerSlot: 12, + shortName: 'Pectra', + slotsPerEpoch: 32, + timeStampSlot0: 1738603860, + }, [ChainIDs.Sepolia]: { clCurrency: 'ETH', description: 'Testnet',