diff --git a/.github/workflows/backend-converted-types-check.yml b/.github/workflows/backend-converted-types-check.yml new file mode 100644 index 000000000..c6f09fdac --- /dev/null +++ b/.github/workflows/backend-converted-types-check.yml @@ -0,0 +1,46 @@ +name: Backend-Converted-Types-Check +on: + push: + paths: + - 'backend/**' + - 'frontend/types/api/**' + branches: + - main + - staging + pull_request: + paths: + - 'backend/**' + - 'frontend/types/api/**' + branches: + - '*' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: read + checks: write + +jobs: + build: + name: converted-types-check + runs-on: self-hosted + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version-file: 'backend/go.mod' + cache-dependency-path: 'backend/go.sum' + - name: Check if all backend-types have been converted to frontend-types + working-directory: backend + run: | + currHash=$(find ../frontend/types/api -type f -print0 | sort -z | xargs -0 sha1sum | sha256sum | head -c 64) + make frontend-types + newHash=$(find ../frontend/types/api -type f -print0 | sort -z | xargs -0 sha1sum | sha256sum | head -c 64) + if [ "$currHash" != "$newHash" ]; then + echo "frontend-types have changed, please commit the changes" + exit 1 + fi + diff --git a/backend/cmd/misc/main.go b/backend/cmd/misc/main.go index 215c5d01c..5555c2e7e 100644 --- a/backend/cmd/misc/main.go +++ b/backend/cmd/misc/main.go @@ -564,6 +564,40 @@ func collectNotifications(startEpoch uint64) error { if len(notifications[0]) > 0 { spew.Dump(notifications[0]) } + + emails, err := notification.RenderEmailsForUserEvents(0, notifications) + if err != nil { + return err + } + + for _, email := range emails { + // if email.Address == "" { + log.Infof("to: %v", email.Address) + log.Infof("subject: %v", email.Subject) + log.Infof("body: %v", email.Email.Body) + log.Info("-----") + // } + } + + // pushMessages, err := notification.RenderPushMessagesForUserEvents(0, notifications) + // if err != nil { + // return err + // } + + // for _, pushMessage := range pushMessages { + // message := pushMessage.Messages[0] + // log.Infof("title: %v body: %v", message.Notification.Title, message.Notification.Body) + + // if message.Token == "" { + // log.Info("sending test message") + + // err = notification.SendPushBatch(pushMessage.UserId, []*messaging.Message{message}, false) + // if err != nil { + // log.Error(err, "error sending firebase batch job", 0) + // } + // } + // } + return nil } @@ -606,7 +640,7 @@ func collectUserDbNotifications(startEpoch uint64) error { if message.Token == "" { log.Info("sending test message") - err = notification.SendPushBatch(pushMessage.UserId, []*messaging.Message{message}, false) + err = notification.SendPushBatch(pushMessage.UserId, []*messaging.Message{message}, true) if err != nil { log.Error(err, "error sending firebase batch job", 0) } diff --git a/backend/go.mod b/backend/go.mod index c9eda313c..ff2204794 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -64,7 +64,7 @@ require ( github.com/prysmaticlabs/go-ssz v0.0.0-20210121151755-f6208871c388 github.com/rocket-pool/rocketpool-go v1.8.3-0.20240618173422-783b8668f5b4 github.com/rocket-pool/smartnode v1.13.6 - github.com/shopspring/decimal v1.3.1 + github.com/shopspring/decimal v1.4.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d diff --git a/backend/go.sum 
b/backend/go.sum index 0ab17372b..481c0f1c4 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -873,6 +873,8 @@ github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9Nz github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= diff --git a/backend/pkg/api/data_access/header.go b/backend/pkg/api/data_access/header.go index 5658c9157..1a91ad832 100644 --- a/backend/pkg/api/data_access/header.go +++ b/backend/pkg/api/data_access/header.go @@ -1,9 +1,15 @@ package dataaccess import ( + "context" + "database/sql" + "fmt" + t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/cache" + "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/price" + "github.com/gobitfly/beaconchain/pkg/commons/utils" ) func (d *DataAccessService) GetLatestSlot() (uint64, error) { @@ -26,6 +32,43 @@ func (d *DataAccessService) GetBlockHeightAt(slot uint64) (uint64, error) { return d.dummy.GetBlockHeightAt(slot) } +// returns the block number of the latest existing block at or before the given slot +func (d *DataAccessService) GetLatestBlockHeightForSlot(ctx context.Context, slot uint64) (uint64, error) { + query := `SELECT MAX(exec_block_number) FROM blocks WHERE slot <= $1` + res := uint64(0) + err := d.alloyReader.GetContext(ctx, &res, query, slot) + if err != nil { + if err == sql.ErrNoRows { + log.Warnf("no EL block found at or before slot %d", slot) + return 0, nil + } + return 0, fmt.Errorf("failed to get latest existing block height at or before slot %d: %w", slot, err) + } + return res, nil +} + +func (d *DataAccessService) GetLatestBlockHeightsForEpoch(ctx context.Context, epoch uint64) ([]uint64, error) { + // use 2 epochs as safety margin + query := ` + WITH recent_blocks AS ( + SELECT slot, exec_block_number + FROM blocks + WHERE slot < $1 + ORDER BY slot DESC + LIMIT $2 * 2 + ) + SELECT MAX(exec_block_number) OVER (ORDER BY slot) AS block + FROM recent_blocks + ORDER BY slot DESC + LIMIT $2` + res := []uint64{} + err := d.alloyReader.SelectContext(ctx, &res, query, (epoch+1)*utils.Config.Chain.ClConfig.SlotsPerEpoch, utils.Config.Chain.ClConfig.SlotsPerEpoch) + if err != nil { + return nil, fmt.Errorf("failed to get latest existing block heights for slots in epoch %d: %w", epoch, err) + } + return res, nil +} + func (d *DataAccessService) GetLatestExchangeRates() ([]t.EthConversionRate, error) { result := []t.EthConversionRate{} diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index 7351de0c6..af9a76f04 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -8,7 +8,6 @@ import ( "encoding/gob" "fmt" "io" - "maps" "regexp" "slices" "sort" @@ -19,6 +18,7 @@ import ( "github.com/doug-martin/goqu/v9" 
"github.com/doug-martin/goqu/v9/exp" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/params" "github.com/go-redis/redis/v8" "github.com/gobitfly/beaconchain/pkg/api/enums" @@ -28,6 +28,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/gobitfly/beaconchain/pkg/notification" + n "github.com/gobitfly/beaconchain/pkg/notification" "github.com/lib/pq" "github.com/shopspring/decimal" "golang.org/x/sync/errgroup" @@ -64,6 +65,7 @@ func (*DataAccessService) registerNotificationInterfaceTypes() { gob.Register(¬ification.ValidatorProposalNotification{}) gob.Register(¬ification.ValidatorAttestationNotification{}) gob.Register(¬ification.ValidatorIsOfflineNotification{}) + gob.Register(¬ification.ValidatorIsOnlineNotification{}) gob.Register(¬ification.ValidatorGotSlashedNotification{}) gob.Register(¬ification.ValidatorWithdrawalNotification{}) gob.Register(¬ification.NetworkNotification{}) @@ -243,7 +245,7 @@ func (d *DataAccessService) GetNotificationOverview(ctx context.Context, userId return err } - err = d.alloyReader.GetContext(ctx, &response, querySql, args...) + err = d.userReader.GetContext(ctx, &response, querySql, args...) return err }) @@ -284,7 +286,6 @@ func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userI )). Where( goqu.Ex{"uvd.user_id": userId}, - goqu.L("uvd.network = ANY(?)", pq.Array(chainIds)), ). GroupBy( goqu.I("uvdnh.epoch"), @@ -294,6 +295,12 @@ func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userI goqu.I("uvdg.name"), ) + if chainIds != nil { + vdbQuery = vdbQuery.Where( + goqu.L("uvd.network = ANY(?)", pq.Array(chainIds)), + ) + } + // TODO account dashboards /*adbQuery := goqu.Dialect("postgres"). From(goqu.T("adb_notifications_history").As("anh")). 
@@ -387,18 +394,15 @@ func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userI func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64, search string) (*t.NotificationValidatorDashboardDetail, error) { notificationDetails := t.NotificationValidatorDashboardDetail{ ValidatorOffline: []uint64{}, - GroupOffline: []t.NotificationEventGroup{}, - ProposalMissed: []t.IndexBlocks{}, + ProposalMissed: []t.IndexSlots{}, ProposalDone: []t.IndexBlocks{}, - UpcomingProposals: []t.IndexBlocks{}, + UpcomingProposals: []t.IndexSlots{}, Slashed: []uint64{}, SyncCommittee: []uint64{}, AttestationMissed: []t.IndexEpoch{}, - Withdrawal: []t.IndexBlocks{}, + Withdrawal: []t.NotificationEventWithdrawal{}, ValidatorOfflineReminder: []uint64{}, - GroupOfflineReminder: []t.NotificationEventGroup{}, ValidatorBackOnline: []t.NotificationEventValidatorBackOnline{}, - GroupBackOnline: []t.NotificationEventGroupBackOnline{}, MinimumCollateralReached: []t.Address{}, MaximumCollateralReached: []t.Address{}, } @@ -419,38 +423,45 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context searchIndexSet[searchIndex] = true } - result := []byte{} - query := `SELECT details FROM users_val_dashboards_notifications_history WHERE dashboard_id = $1 AND group_id = $2 AND epoch = $3` - err := d.alloyReader.GetContext(ctx, &result, query, dashboardId, groupId, epoch) + // ------------------------------------- + // dashboard and group name + query := `SELECT + uvd.name AS dashboard_name, + uvdg.name AS group_name + FROM + users_val_dashboards uvd + INNER JOIN + users_val_dashboards_groups uvdg ON uvdg.dashboard_id = uvd.id + WHERE uvd.id = $1 AND uvdg.id = $2` + err := d.alloyReader.GetContext(ctx, ¬ificationDetails, query, dashboardId, groupId) if err != nil { if err == sql.ErrNoRows { return ¬ificationDetails, nil } return nil, err } - if len(result) == 0 { - return ¬ificationDetails, nil + if notificationDetails.GroupName == "" { + notificationDetails.GroupName = t.DefaultGroupName } - - buf := bytes.NewBuffer(result) - gz, err := gzip.NewReader(buf) - if err != nil { - return nil, err + if notificationDetails.DashboardName == "" { + notificationDetails.DashboardName = t.DefaultDashboardName } - defer gz.Close() - // might need to loop if we get memory issues with large dashboards and can't ReadAll - decompressedData, err := io.ReadAll(gz) + // ------------------------------------- + // retrieve notification events + eventTypesEncodedList := [][]byte{} + query = `SELECT details FROM users_val_dashboards_notifications_history WHERE dashboard_id = $1 AND group_id = $2 AND epoch = $3` + err = d.alloyReader.SelectContext(ctx, &eventTypesEncodedList, query, dashboardId, groupId, epoch) if err != nil { return nil, err } + if len(eventTypesEncodedList) == 0 { + return ¬ificationDetails, nil + } - decoder := gob.NewDecoder(bytes.NewReader(decompressedData)) - - notifications := []types.Notification{} - err = decoder.Decode(¬ifications) + latestBlocks, err := d.GetLatestBlockHeightsForEpoch(ctx, epoch) if err != nil { - return nil, err + return nil, fmt.Errorf("error getting latest block height: %w", err) } type ProposalInfo struct { @@ -461,124 +472,164 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context proposalsInfo := make(map[t.VDBValidator]*ProposalInfo) addressMapping := make(map[string]*t.Address) - for _, not := range notifications { - switch not.GetEventName() 
{ - case types.ValidatorMissedProposalEventName, types.ValidatorExecutedProposalEventName /*, types.ValidatorScheduledProposalEventName*/ : - // aggregate proposals - curNotification, ok := not.(*notification.ValidatorProposalNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorProposalNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - if _, ok := proposalsInfo[curNotification.ValidatorIndex]; !ok { - proposalsInfo[curNotification.ValidatorIndex] = &ProposalInfo{} - } - prop := proposalsInfo[curNotification.ValidatorIndex] - switch curNotification.Status { - case 0: - prop.Scheduled = append(prop.Scheduled, curNotification.Slot) - case 1: - prop.Proposed = append(prop.Proposed, curNotification.Block) - case 2: - prop.Missed = append(prop.Missed, curNotification.Slot) - } - case types.ValidatorMissedAttestationEventName: - curNotification, ok := not.(*notification.ValidatorAttestationNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorAttestationNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - if curNotification.Status != 0 { - continue - } - notificationDetails.AttestationMissed = append(notificationDetails.AttestationMissed, t.IndexEpoch{Index: curNotification.ValidatorIndex, Epoch: curNotification.Epoch}) - case types.ValidatorGotSlashedEventName: - curNotification, ok := not.(*notification.ValidatorGotSlashedNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorGotSlashedNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - notificationDetails.Slashed = append(notificationDetails.Slashed, curNotification.ValidatorIndex) - case types.ValidatorIsOfflineEventName: - curNotification, ok := not.(*notification.ValidatorIsOfflineNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorIsOfflineNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - if curNotification.IsOffline { + contractStatusRequests := make([]db.ContractInteractionAtRequest, 0) + for _, eventTypesEncoded := range eventTypesEncodedList { + buf := bytes.NewBuffer(eventTypesEncoded) + gz, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + defer gz.Close() + + // might need to loop if we get memory issues + eventTypes, err := io.ReadAll(gz) + if err != nil { + return nil, err + } + + decoder := gob.NewDecoder(bytes.NewReader(eventTypes)) + + notifications := []types.Notification{} + err = decoder.Decode(¬ifications) + if err != nil { + return nil, err + } + + for _, notification := range notifications { + switch notification.GetEventName() { + case types.ValidatorMissedProposalEventName, types.ValidatorExecutedProposalEventName /*, types.ValidatorScheduledProposalEventName*/ : + // aggregate proposals + curNotification, ok := notification.(*n.ValidatorProposalNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorProposalNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + if _, ok := proposalsInfo[curNotification.ValidatorIndex]; !ok { + proposalsInfo[curNotification.ValidatorIndex] = &ProposalInfo{} + } + prop := proposalsInfo[curNotification.ValidatorIndex] + switch curNotification.Status { + case 0: + prop.Scheduled = append(prop.Scheduled, 
curNotification.Slot) + case 1: + prop.Proposed = append(prop.Proposed, curNotification.Block) + case 2: + prop.Missed = append(prop.Missed, curNotification.Slot) + } + case types.ValidatorMissedAttestationEventName: + curNotification, ok := notification.(*n.ValidatorAttestationNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorAttestationNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + if curNotification.Status != 0 { + continue + } + notificationDetails.AttestationMissed = append(notificationDetails.AttestationMissed, t.IndexEpoch{Index: curNotification.ValidatorIndex, Epoch: curNotification.Epoch}) + case types.ValidatorGotSlashedEventName: + curNotification, ok := notification.(*n.ValidatorGotSlashedNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorGotSlashedNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.Slashed = append(notificationDetails.Slashed, curNotification.ValidatorIndex) + case types.ValidatorIsOfflineEventName: + curNotification, ok := notification.(*n.ValidatorIsOfflineNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorIsOfflineNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } notificationDetails.ValidatorOffline = append(notificationDetails.ValidatorOffline, curNotification.ValidatorIndex) - } else { - // TODO EpochCount is not correct, missing / cumbersome to retrieve from backend - using "back online since" instead atm + // TODO not present in backend yet + //notificationDetails.ValidatorOfflineReminder = ... + case types.ValidatorIsOnlineEventName: + curNotification, ok := notification.(*n.ValidatorIsOnlineNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorIsOnlineNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } notificationDetails.ValidatorBackOnline = append(notificationDetails.ValidatorBackOnline, t.NotificationEventValidatorBackOnline{Index: curNotification.ValidatorIndex, EpochCount: curNotification.Epoch}) + case types.ValidatorGroupIsOfflineEventName: + // TODO type / collection not present yet, skipping + /*curNotification, ok := not.(*notification.validatorGroupIsOfflineNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to validatorGroupIsOfflineNotification") + } + if curNotification.Status == 0 { + notificationDetails.GroupOffline = ... + notificationDetails.GroupOfflineReminder = ... + } else { + notificationDetails.GroupBackOnline = ... 
+ } + */ + case types.ValidatorReceivedWithdrawalEventName: + curNotification, ok := notification.(*n.ValidatorWithdrawalNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorWithdrawalNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + // incorrect formatting TODO rework the Address and ContractInteractionAtRequest types to use clear string formatting (or prob go-ethereum common.Address) + contractStatusRequests = append(contractStatusRequests, db.ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", curNotification.Address), + Block: int64(latestBlocks[curNotification.Slot%utils.Config.Chain.ClConfig.SlotsPerEpoch]), + TxIdx: -1, + TraceIdx: -1, + }) + addr := t.Address{Hash: t.Hash(hexutil.Encode(curNotification.Address))} + addressMapping[hexutil.Encode(curNotification.Address)] = &addr + notificationDetails.Withdrawal = append(notificationDetails.Withdrawal, t.NotificationEventWithdrawal{ + Index: curNotification.ValidatorIndex, + Amount: decimal.NewFromUint64(curNotification.Amount), + Address: addr, + }) + case types.NetworkLivenessIncreasedEventName, + types.EthClientUpdateEventName, + types.MonitoringMachineOfflineEventName, + types.MonitoringMachineDiskAlmostFullEventName, + types.MonitoringMachineCpuLoadEventName, + types.MonitoringMachineMemoryUsageEventName, + types.TaxReportEventName: + // not vdb notifications, skip + case types.ValidatorDidSlashEventName: + case types.RocketpoolCommissionThresholdEventName, + types.RocketpoolNewClaimRoundStartedEventName: + // these could maybe returned later (?) + case types.RocketpoolCollateralMinReachedEventName, types.RocketpoolCollateralMaxReachedEventName: + _, ok := notification.(*n.RocketpoolNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to RocketpoolNotification") + } + addr := t.Address{Hash: t.Hash(notification.GetEventFilter()), IsContract: true} + addressMapping[notification.GetEventFilter()] = &addr + if notification.GetEventName() == types.RocketpoolCollateralMinReachedEventName { + notificationDetails.MinimumCollateralReached = append(notificationDetails.MinimumCollateralReached, addr) + } else { + notificationDetails.MaximumCollateralReached = append(notificationDetails.MaximumCollateralReached, addr) + } + case types.SyncCommitteeSoonEventName: + curNotification, ok := notification.(*n.SyncCommitteeSoonNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to SyncCommitteeSoonNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.SyncCommittee = append(notificationDetails.SyncCommittee, curNotification.ValidatorIndex) + default: + log.Debugf("Unhandled notification type: %s", notification.GetEventName()) } - // TODO not present in backend yet - //notificationDetails.ValidatorOfflineReminder = ... - case types.ValidatorGroupIsOfflineEventName: - // TODO type / collection not present yet, skipping - /*curNotification, ok := not.(*notification.validatorGroupIsOfflineNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to validatorGroupIsOfflineNotification") - } - if curNotification.Status == 0 { - notificationDetails.GroupOffline = ... - notificationDetails.GroupOfflineReminder = ... - } else { - notificationDetails.GroupBackOnline = ... 
- } - */ - case types.ValidatorReceivedWithdrawalEventName: - curNotification, ok := not.(*notification.ValidatorWithdrawalNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorWithdrawalNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - // TODO might need to take care of automatic + exit withdrawal happening in the same epoch ? - notificationDetails.Withdrawal = append(notificationDetails.Withdrawal, t.IndexBlocks{Index: curNotification.ValidatorIndex, Blocks: []uint64{curNotification.Slot}}) - case types.NetworkLivenessIncreasedEventName, - types.EthClientUpdateEventName, - types.MonitoringMachineOfflineEventName, - types.MonitoringMachineDiskAlmostFullEventName, - types.MonitoringMachineCpuLoadEventName, - types.MonitoringMachineMemoryUsageEventName, - types.TaxReportEventName: - // not vdb notifications, skip - case types.ValidatorDidSlashEventName: - case types.RocketpoolCommissionThresholdEventName, - types.RocketpoolNewClaimRoundStartedEventName: - // these could maybe returned later (?) - case types.RocketpoolCollateralMinReachedEventName, types.RocketpoolCollateralMaxReachedEventName: - _, ok := not.(*notification.RocketpoolNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to RocketpoolNotification") - } - addr := t.Address{Hash: t.Hash(not.GetEventFilter()), IsContract: true} - addressMapping[not.GetEventFilter()] = &addr - if not.GetEventName() == types.RocketpoolCollateralMinReachedEventName { - notificationDetails.MinimumCollateralReached = append(notificationDetails.MinimumCollateralReached, addr) - } else { - notificationDetails.MaximumCollateralReached = append(notificationDetails.MaximumCollateralReached, addr) - } - case types.SyncCommitteeSoonEventName: - curNotification, ok := not.(*notification.SyncCommitteeSoonNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to SyncCommitteeSoonNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - notificationDetails.SyncCommittee = append(notificationDetails.SyncCommittee, curNotification.ValidatorIndex) - default: - log.Debugf("Unhandled notification type: %s", not.GetEventName()) } } @@ -588,10 +639,10 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context notificationDetails.ProposalDone = append(notificationDetails.ProposalDone, t.IndexBlocks{Index: validatorIndex, Blocks: proposalInfo.Proposed}) } if len(proposalInfo.Scheduled) > 0 { - notificationDetails.UpcomingProposals = append(notificationDetails.UpcomingProposals, t.IndexBlocks{Index: validatorIndex, Blocks: proposalInfo.Scheduled}) + notificationDetails.UpcomingProposals = append(notificationDetails.UpcomingProposals, t.IndexSlots{Index: validatorIndex, Slots: proposalInfo.Scheduled}) } if len(proposalInfo.Missed) > 0 { - notificationDetails.ProposalMissed = append(notificationDetails.ProposalMissed, t.IndexBlocks{Index: validatorIndex, Blocks: proposalInfo.Missed}) + notificationDetails.ProposalMissed = append(notificationDetails.ProposalMissed, t.IndexSlots{Index: validatorIndex, Slots: proposalInfo.Missed}) } } @@ -599,6 +650,14 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context if err := d.GetNamesAndEnsForAddresses(ctx, addressMapping); err != nil { return nil, err } + contractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusRequests) + if err != nil { + return nil, err + } + 
contractStatusPerAddress := make(map[string]int) + for i, contractStatus := range contractStatusRequests { + contractStatusPerAddress["0x"+contractStatus.Address] = i + } for i := range notificationDetails.MinimumCollateralReached { if address, ok := addressMapping[string(notificationDetails.MinimumCollateralReached[i].Hash)]; ok { notificationDetails.MinimumCollateralReached[i] = *address @@ -609,6 +668,13 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context notificationDetails.MaximumCollateralReached[i] = *address } } + for i := range notificationDetails.Withdrawal { + if address, ok := addressMapping[string(notificationDetails.Withdrawal[i].Address.Hash)]; ok { + notificationDetails.Withdrawal[i].Address = *address + } + contractStatus := contractStatuses[contractStatusPerAddress[string(notificationDetails.Withdrawal[i].Address.Hash)]] + notificationDetails.Withdrawal[i].Address.IsContract = contractStatus == types.CONTRACT_CREATION || contractStatus == types.CONTRACT_PRESENT + } return ¬ificationDetails, nil } @@ -997,9 +1063,9 @@ func (d *DataAccessService) GetRocketPoolNotifications(ctx context.Context, user // switch notification.EventType { // case types.RocketpoolNewClaimRoundStartedEventName: // resultEntry.EventType = "reward_round" - // case types.RocketpoolCollateralMinReached: + // case types.RocketpoolCollateralMinReachedEventName: // resultEntry.EventType = "collateral_min" - // case types.RocketpoolCollateralMaxReached: + // case types.RocketpoolCollateralMaxReachedEventName: // resultEntry.EventType = "collateral_max" // default: // return nil, nil, fmt.Errorf("invalid event name for rocketpool notification: %v", notification.EventType) @@ -1439,10 +1505,10 @@ func (d *DataAccessService) UpdateNotificationSettingsGeneral(ctx context.Contex // Collect the machine and rocketpool events to set and delete //Machine events - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineOfflineSubscribed, userId, string(types.MonitoringMachineOfflineEventName), "", epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineStorageUsageSubscribed, userId, string(types.MonitoringMachineDiskAlmostFullEventName), "", epoch, settings.MachineStorageUsageThreshold) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineCpuUsageSubscribed, userId, string(types.MonitoringMachineCpuLoadEventName), "", epoch, settings.MachineCpuUsageThreshold) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineMemoryUsageSubscribed, userId, string(types.MonitoringMachineMemoryUsageEventName), "", epoch, settings.MachineMemoryUsageThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineOfflineSubscribed, userId, types.MonitoringMachineOfflineEventName, "", "", epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineStorageUsageSubscribed, userId, types.MonitoringMachineDiskAlmostFullEventName, "", "", epoch, settings.MachineStorageUsageThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineCpuUsageSubscribed, userId, types.MonitoringMachineCpuLoadEventName, "", "", epoch, settings.MachineCpuUsageThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineMemoryUsageSubscribed, userId, types.MonitoringMachineMemoryUsageEventName, "", "", epoch, settings.MachineMemoryUsageThreshold) // Insert all the events or update the threshold if they already exist if len(eventsToInsert) > 0 { @@ -1517,14 
+1583,10 @@ func (d *DataAccessService) UpdateNotificationSettingsNetworks(ctx context.Conte } defer utils.Rollback(tx) - eventName := fmt.Sprintf("%s:%s", networkName, types.NetworkGasAboveThresholdEventName) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGasAboveSubscribed, userId, eventName, "", epoch, settings.GasAboveThreshold.Div(decimal.NewFromInt(params.GWei)).InexactFloat64()) - eventName = fmt.Sprintf("%s:%s", networkName, types.NetworkGasBelowThresholdEventName) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGasBelowSubscribed, userId, eventName, "", epoch, settings.GasBelowThreshold.Div(decimal.NewFromInt(params.GWei)).InexactFloat64()) - eventName = fmt.Sprintf("%s:%s", networkName, types.NetworkParticipationRateThresholdEventName) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsParticipationRateSubscribed, userId, eventName, "", epoch, settings.ParticipationRateThreshold) - eventName = fmt.Sprintf("%s:%s", networkName, types.RocketpoolNewClaimRoundStartedEventName) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsNewRewardRoundSubscribed, userId, eventName, "", epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGasAboveSubscribed, userId, types.NetworkGasAboveThresholdEventName, networkName, "", epoch, settings.GasAboveThreshold.Div(decimal.NewFromInt(params.GWei)).InexactFloat64()) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGasBelowSubscribed, userId, types.NetworkGasBelowThresholdEventName, networkName, "", epoch, settings.GasBelowThreshold.Div(decimal.NewFromInt(params.GWei)).InexactFloat64()) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsParticipationRateSubscribed, userId, types.NetworkParticipationRateThresholdEventName, networkName, "", epoch, settings.ParticipationRateThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsNewRewardRoundSubscribed, userId, types.RocketpoolNewClaimRoundStartedEventName, networkName, "", epoch, 0) // Insert all the events or update the threshold if they already exist if len(eventsToInsert) > 0 { @@ -1765,28 +1827,28 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex // ------------------------------------- // Evaluate the data - type NotificationSettingsDashboardsInfo struct { - IsAccountDashboard bool // if false it's a validator dashboard - DashboardId uint64 - DashboardName string - GroupId uint64 - GroupName string - // if it's a validator dashboard, Settings is NotificationSettingsAccountDashboard, otherwise NotificationSettingsValidatorDashboard - Settings interface{} - ChainIds []uint64 - } - settingsMap := make(map[string]*NotificationSettingsDashboardsInfo) + resultMap := make(map[string]*t.NotificationSettingsDashboardsTableRow) for _, event := range events { - eventSplit := strings.Split(event.Filter, ":") - if len(eventSplit) != 3 { + eventFilterSplit := strings.Split(event.Filter, ":") + if len(eventFilterSplit) != 3 { continue } - dashboardType := eventSplit[0] + dashboardType := eventFilterSplit[0] + + eventNameSplit := strings.Split(string(event.Name), ":") + if len(eventNameSplit) != 2 && dashboardType == ValidatorDashboardEventPrefix { + return nil, nil, fmt.Errorf("invalid event name formatting for val dashboard notification: expected {network:event_name}, got %v", event.Name) + } - if _, ok := settingsMap[event.Filter]; !ok { + eventName := event.Name + if len(eventNameSplit) == 2 { + eventName = 
types.EventName(eventNameSplit[1]) + } + + if _, ok := resultMap[event.Filter]; !ok { if dashboardType == ValidatorDashboardEventPrefix { - settingsMap[event.Filter] = &NotificationSettingsDashboardsInfo{ + resultMap[event.Filter] = &t.NotificationSettingsDashboardsTableRow{ Settings: t.NotificationSettingsValidatorDashboard{ GroupOfflineThreshold: GroupOfflineThresholdDefault, MaxCollateralThreshold: MaxCollateralThresholdDefault, @@ -1794,7 +1856,7 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex }, } } else if dashboardType == AccountDashboardEventPrefix { - settingsMap[event.Filter] = &NotificationSettingsDashboardsInfo{ + resultMap[event.Filter] = &t.NotificationSettingsDashboardsTableRow{ Settings: t.NotificationSettingsAccountDashboard{ ERC20TokenTransfersValueThreshold: ERC20TokenTransfersValueThresholdDefault, }, @@ -1802,9 +1864,9 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex } } - switch settings := settingsMap[event.Filter].Settings.(type) { + switch settings := resultMap[event.Filter].Settings.(type) { case t.NotificationSettingsValidatorDashboard: - switch event.Name { + switch eventName { case types.ValidatorIsOfflineEventName: settings.IsValidatorOfflineSubscribed = true case types.GroupIsOfflineEventName: @@ -1812,7 +1874,7 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex settings.GroupOfflineThreshold = event.Threshold case types.ValidatorMissedAttestationEventName: settings.IsAttestationsMissedSubscribed = true - case types.ValidatorProposalEventName: + case types.ValidatorMissedProposalEventName, types.ValidatorExecutedProposalEventName: settings.IsBlockProposalSubscribed = true case types.ValidatorUpcomingProposalEventName: settings.IsUpcomingBlockProposalSubscribed = true @@ -1822,16 +1884,16 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex settings.IsWithdrawalProcessedSubscribed = true case types.ValidatorGotSlashedEventName: settings.IsSlashedSubscribed = true - case types.RocketpoolCollateralMinReached: + case types.RocketpoolCollateralMinReachedEventName: settings.IsMinCollateralSubscribed = true settings.MinCollateralThreshold = event.Threshold - case types.RocketpoolCollateralMaxReached: + case types.RocketpoolCollateralMaxReachedEventName: settings.IsMaxCollateralSubscribed = true settings.MaxCollateralThreshold = event.Threshold } - settingsMap[event.Filter].Settings = settings + resultMap[event.Filter].Settings = settings case t.NotificationSettingsAccountDashboard: - switch event.Name { + switch eventName { case types.IncomingTransactionEventName: settings.IsIncomingTransactionsSubscribed = true case types.OutgoingTransactionEventName: @@ -1844,7 +1906,7 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex case types.ERC1155TokenTransferEventName: settings.IsERC1155TokenTransfersSubscribed = true } - settingsMap[event.Filter].Settings = settings + resultMap[event.Filter].Settings = settings } } @@ -1852,8 +1914,8 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex for _, valDashboard := range valDashboards { key := fmt.Sprintf("%s:%d:%d", ValidatorDashboardEventPrefix, valDashboard.DashboardId, valDashboard.GroupId) - if _, ok := settingsMap[key]; !ok { - settingsMap[key] = &NotificationSettingsDashboardsInfo{ + if _, ok := resultMap[key]; !ok { + resultMap[key] = &t.NotificationSettingsDashboardsTableRow{ Settings: 
t.NotificationSettingsValidatorDashboard{ GroupOfflineThreshold: GroupOfflineThresholdDefault, MaxCollateralThreshold: MaxCollateralThresholdDefault, @@ -1863,15 +1925,15 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex } // Set general info - settingsMap[key].IsAccountDashboard = false - settingsMap[key].DashboardId = valDashboard.DashboardId - settingsMap[key].DashboardName = valDashboard.DashboardName - settingsMap[key].GroupId = valDashboard.GroupId - settingsMap[key].GroupName = valDashboard.GroupName - settingsMap[key].ChainIds = []uint64{valDashboard.Network} + resultMap[key].IsAccountDashboard = false + resultMap[key].DashboardId = valDashboard.DashboardId + resultMap[key].DashboardName = valDashboard.DashboardName + resultMap[key].GroupId = valDashboard.GroupId + resultMap[key].GroupName = valDashboard.GroupName + resultMap[key].ChainIds = []uint64{valDashboard.Network} // Set the settings - if valSettings, ok := settingsMap[key].Settings.(*t.NotificationSettingsValidatorDashboard); ok { + if valSettings, ok := resultMap[key].Settings.(*t.NotificationSettingsValidatorDashboard); ok { valSettings.WebhookUrl = valDashboard.WebhookUrl.String valSettings.IsWebhookDiscordEnabled = valDashboard.IsWebhookDiscordEnabled.Bool valSettings.IsRealTimeModeEnabled = valDashboard.IsRealTimeModeEnabled.Bool @@ -1882,8 +1944,8 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex for _, accDashboard := range accDashboards { key := fmt.Sprintf("%s:%d:%d", AccountDashboardEventPrefix, accDashboard.DashboardId, accDashboard.GroupId) - if _, ok := settingsMap[key]; !ok { - settingsMap[key] = &NotificationSettingsDashboardsInfo{ + if _, ok := resultMap[key]; !ok { + resultMap[key] = &t.NotificationSettingsDashboardsTableRow{ Settings: t.NotificationSettingsAccountDashboard{ ERC20TokenTransfersValueThreshold: ERC20TokenTransfersValueThresholdDefault, }, @@ -1891,15 +1953,15 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex } // Set general info - settingsMap[key].IsAccountDashboard = true - settingsMap[key].DashboardId = accDashboard.DashboardId - settingsMap[key].DashboardName = accDashboard.DashboardName - settingsMap[key].GroupId = accDashboard.GroupId - settingsMap[key].GroupName = accDashboard.GroupName - settingsMap[key].ChainIds = accDashboard.SubscribedChainIds + resultMap[key].IsAccountDashboard = true + resultMap[key].DashboardId = accDashboard.DashboardId + resultMap[key].DashboardName = accDashboard.DashboardName + resultMap[key].GroupId = accDashboard.GroupId + resultMap[key].GroupName = accDashboard.GroupName + resultMap[key].ChainIds = accDashboard.SubscribedChainIds // Set the settings - if accSettings, ok := settingsMap[key].Settings.(*t.NotificationSettingsAccountDashboard); ok { + if accSettings, ok := resultMap[key].Settings.(*t.NotificationSettingsAccountDashboard); ok { accSettings.WebhookUrl = accDashboard.WebhookUrl.String accSettings.IsWebhookDiscordEnabled = accDashboard.IsWebhookDiscordEnabled.Bool accSettings.IsIgnoreSpamTransactionsEnabled = accDashboard.IsIgnoreSpamTransactionsEnabled @@ -1910,74 +1972,63 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex // Apply filter if search != "" { lowerSearch := strings.ToLower(search) - for key, setting := range settingsMap { - if !strings.HasPrefix(strings.ToLower(setting.DashboardName), lowerSearch) && - !strings.HasPrefix(strings.ToLower(setting.GroupName), lowerSearch) { - delete(settingsMap, key) 
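A hedged sketch of the string encodings this settings code splits and rebuilds: event_filter is "<dashboardPrefix>:<dashboardId>:<groupId>" (built with fmt.Sprintf("%s:%d:%d", ...) as above) and, for network-scoped validator-dashboard events, event_name is "<network>:<event_name>" (applied by AddOrRemoveEvent further below). The helper names and the literal prefix/event/network values here are illustrative only; the real constants such as ValidatorDashboardEventPrefix and the types.EventName values live elsewhere in the codebase.

package main

import (
	"fmt"
	"strings"
)

// buildEventFilter mirrors the fmt.Sprintf("%s:%d:%d", prefix, dashboardId, groupId) keys used above.
func buildEventFilter(prefix string, dashboardID, groupID uint64) string {
	return fmt.Sprintf("%s:%d:%d", prefix, dashboardID, groupID)
}

// splitEventName undoes the "<network>:<event>" prefixing applied when network-scoped events are stored.
func splitEventName(name string) (network, event string) {
	if parts := strings.Split(name, ":"); len(parts) == 2 {
		return parts[0], parts[1]
	}
	return "", name // event names without a network prefix are returned unchanged
}

func main() {
	fmt.Println(buildEventFilter("vdb", 7, 0))                 // vdb:7:0 ("vdb" is an assumed prefix value)
	fmt.Println(splitEventName("gnosis:validator_is_offline")) // gnosis validator_is_offline (illustrative values)
}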
+ for key, resultEntry := range resultMap { + if !strings.HasPrefix(strings.ToLower(resultEntry.DashboardName), lowerSearch) && + !strings.HasPrefix(strings.ToLower(resultEntry.GroupName), lowerSearch) { + delete(resultMap, key) } } } // Convert to a slice for sorting and paging - settings := slices.Collect(maps.Values(settingsMap)) + for _, resultEntry := range resultMap { + result = append(result, *resultEntry) + } // ------------------------------------- // Sort // Each row is uniquely defined by the dashboardId, groupId, and isAccountDashboard so the sort order is DashboardName/GroupName => DashboardId => GroupId => IsAccountDashboard - var primarySortParam func(resultEntry *NotificationSettingsDashboardsInfo) string + var primarySortParam func(resultEntry t.NotificationSettingsDashboardsTableRow) string switch colSort.Column { case enums.NotificationSettingsDashboardColumns.DashboardName: - primarySortParam = func(resultEntry *NotificationSettingsDashboardsInfo) string { return resultEntry.DashboardName } + primarySortParam = func(resultEntry t.NotificationSettingsDashboardsTableRow) string { return resultEntry.DashboardName } case enums.NotificationSettingsDashboardColumns.GroupName: - primarySortParam = func(resultEntry *NotificationSettingsDashboardsInfo) string { return resultEntry.GroupName } + primarySortParam = func(resultEntry t.NotificationSettingsDashboardsTableRow) string { return resultEntry.GroupName } default: return nil, nil, fmt.Errorf("invalid sort column for notification subscriptions: %v", colSort.Column) } - sort.Slice(settings, func(i, j int) bool { + sort.Slice(result, func(i, j int) bool { if isReverseDirection { - if primarySortParam(settings[i]) == primarySortParam(settings[j]) { - if settings[i].DashboardId == settings[j].DashboardId { - if settings[i].GroupId == settings[j].GroupId { - return settings[i].IsAccountDashboard + if primarySortParam(result[i]) == primarySortParam(result[j]) { + if result[i].DashboardId == result[j].DashboardId { + if result[i].GroupId == result[j].GroupId { + return result[i].IsAccountDashboard } - return settings[i].GroupId > settings[j].GroupId + return result[i].GroupId > result[j].GroupId } - return settings[i].DashboardId > settings[j].DashboardId + return result[i].DashboardId > result[j].DashboardId } - return primarySortParam(settings[i]) > primarySortParam(settings[j]) + return primarySortParam(result[i]) > primarySortParam(result[j]) } else { - if primarySortParam(settings[i]) == primarySortParam(settings[j]) { - if settings[i].DashboardId == settings[j].DashboardId { - if settings[i].GroupId == settings[j].GroupId { - return settings[j].IsAccountDashboard + if primarySortParam(result[i]) == primarySortParam(result[j]) { + if result[i].DashboardId == result[j].DashboardId { + if result[i].GroupId == result[j].GroupId { + return result[j].IsAccountDashboard } - return settings[i].GroupId < settings[j].GroupId + return result[i].GroupId < result[j].GroupId } - return settings[i].DashboardId < settings[j].DashboardId + return result[i].DashboardId < result[j].DashboardId } - return primarySortParam(settings[i]) < primarySortParam(settings[j]) + return primarySortParam(result[i]) < primarySortParam(result[j]) } }) - // ------------------------------------- - // Convert to the final result format - for _, setting := range settings { - result = append(result, t.NotificationSettingsDashboardsTableRow{ - IsAccountDashboard: setting.IsAccountDashboard, - DashboardId: setting.DashboardId, - GroupId: setting.GroupId, - GroupName: 
setting.GroupName, - Settings: setting.Settings, - ChainIds: setting.ChainIds, - }) - } - // ------------------------------------- // Paging // Find the index for the cursor and limit the data if currentCursor.IsValid() { - for idx, row := range settings { + for idx, row := range result { if row.DashboardId == currentCursor.DashboardId && row.GroupId == currentCursor.GroupId && row.IsAccountDashboard == currentCursor.IsAccountDashboard { @@ -2017,6 +2068,30 @@ func (d *DataAccessService) UpdateNotificationSettingsValidatorDashboard(ctx con var eventsToInsert []goqu.Record var eventsToDelete []goqu.Expression + // Get the network for the validator dashboard + var chainId uint64 + err := d.alloyReader.GetContext(ctx, &chainId, `SELECT network FROM users_val_dashboards WHERE id = $1 AND user_id = $2`, dashboardId, userId) + if err != nil { + return fmt.Errorf("error getting network for validator dashboard: %w", err) + } + + networks, err := d.GetAllNetworks() + if err != nil { + return err + } + + networkName := "" + for _, network := range networks { + if network.ChainId == chainId { + networkName = network.Name + break + } + } + if networkName == "" { + return fmt.Errorf("network with chain id %d to update general notification settings not found", chainId) + } + + // Add and remove the events in users_subscriptions tx, err := d.userWriter.BeginTxx(ctx, nil) if err != nil { return fmt.Errorf("error starting db transactions to update validator dashboard notification settings: %w", err) @@ -2025,16 +2100,18 @@ func (d *DataAccessService) UpdateNotificationSettingsValidatorDashboard(ctx con eventFilter := fmt.Sprintf("%s:%d:%d", ValidatorDashboardEventPrefix, dashboardId, groupId) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsValidatorOfflineSubscribed, userId, string(types.ValidatorIsOfflineEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGroupOfflineSubscribed, userId, string(types.GroupIsOfflineEventName), eventFilter, epoch, settings.GroupOfflineThreshold) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsAttestationsMissedSubscribed, userId, string(types.ValidatorMissedAttestationEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsBlockProposalSubscribed, userId, string(types.ValidatorProposalEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsUpcomingBlockProposalSubscribed, userId, string(types.ValidatorUpcomingProposalEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsSyncSubscribed, userId, string(types.SyncCommitteeSoon), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsWithdrawalProcessedSubscribed, userId, string(types.ValidatorReceivedWithdrawalEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsSlashedSubscribed, userId, string(types.ValidatorGotSlashedEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMaxCollateralSubscribed, userId, string(types.RocketpoolCollateralMaxReached), eventFilter, epoch, settings.MaxCollateralThreshold) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMinCollateralSubscribed, userId, string(types.RocketpoolCollateralMinReached), eventFilter, epoch, settings.MinCollateralThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, 
settings.IsValidatorOfflineSubscribed, userId, types.ValidatorIsOfflineEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGroupOfflineSubscribed, userId, types.GroupIsOfflineEventName, networkName, eventFilter, epoch, settings.GroupOfflineThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsAttestationsMissedSubscribed, userId, types.ValidatorMissedAttestationEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsUpcomingBlockProposalSubscribed, userId, types.ValidatorUpcomingProposalEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsSyncSubscribed, userId, types.SyncCommitteeSoon, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsWithdrawalProcessedSubscribed, userId, types.ValidatorReceivedWithdrawalEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsSlashedSubscribed, userId, types.ValidatorGotSlashedEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMaxCollateralSubscribed, userId, types.RocketpoolCollateralMaxReachedEventName, networkName, eventFilter, epoch, settings.MaxCollateralThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMinCollateralSubscribed, userId, types.RocketpoolCollateralMinReachedEventName, networkName, eventFilter, epoch, settings.MinCollateralThreshold) + // Set two events for IsBlockProposalSubscribed + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsBlockProposalSubscribed, userId, types.ValidatorMissedProposalEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsBlockProposalSubscribed, userId, types.ValidatorExecutedProposalEventName, networkName, eventFilter, epoch, 0) // Insert all the events or update the threshold if they already exist if len(eventsToInsert) > 0 { @@ -2110,11 +2187,11 @@ func (d *DataAccessService) UpdateNotificationSettingsAccountDashboard(ctx conte // eventFilter := fmt.Sprintf("%s:%d:%d", AccountDashboardEventPrefix, dashboardId, groupId) - // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsIncomingTransactionsSubscribed, userId, string(types.IncomingTransactionEventName), eventFilter, epoch, 0) - // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsOutgoingTransactionsSubscribed, userId, string(types.OutgoingTransactionEventName), eventFilter, epoch, 0) - // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC20TokenTransfersSubscribed, userId, string(types.ERC20TokenTransferEventName), eventFilter, epoch, settings.ERC20TokenTransfersValueThreshold) - // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC721TokenTransfersSubscribed, userId, string(types.ERC721TokenTransferEventName), eventFilter, epoch, 0) - // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC1155TokenTransfersSubscribed, userId, string(types.ERC1155TokenTransferEventName), eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsIncomingTransactionsSubscribed, userId, types.IncomingTransactionEventName, "", eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsOutgoingTransactionsSubscribed, userId, types.OutgoingTransactionEventName, "", 
eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC20TokenTransfersSubscribed, userId, types.ERC20TokenTransferEventName, "", eventFilter, epoch, settings.ERC20TokenTransfersValueThreshold) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC721TokenTransfersSubscribed, userId, types.ERC721TokenTransferEventName, "", eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC1155TokenTransfersSubscribed, userId, types.ERC1155TokenTransferEventName, "", eventFilter, epoch, 0) // // Insert all the events or update the threshold if they already exist // if len(eventsToInsert) > 0 { @@ -2176,11 +2253,16 @@ func (d *DataAccessService) UpdateNotificationSettingsAccountDashboard(ctx conte return d.dummy.UpdateNotificationSettingsAccountDashboard(ctx, userId, dashboardId, groupId, settings) } -func (d *DataAccessService) AddOrRemoveEvent(eventsToInsert *[]goqu.Record, eventsToDelete *[]goqu.Expression, isSubscribed bool, userId uint64, eventName string, eventFilter string, epoch int64, threshold float64) { +func (d *DataAccessService) AddOrRemoveEvent(eventsToInsert *[]goqu.Record, eventsToDelete *[]goqu.Expression, isSubscribed bool, userId uint64, eventName types.EventName, network, eventFilter string, epoch int64, threshold float64) { + fullEventName := string(eventName) + if network != "" { + fullEventName = fmt.Sprintf("%s:%s", network, eventName) + } + if isSubscribed { - event := goqu.Record{"user_id": userId, "event_name": eventName, "event_filter": eventFilter, "created_ts": goqu.L("NOW()"), "created_epoch": epoch, "event_threshold": threshold} + event := goqu.Record{"user_id": userId, "event_name": fullEventName, "event_filter": eventFilter, "created_ts": goqu.L("NOW()"), "created_epoch": epoch, "event_threshold": threshold} *eventsToInsert = append(*eventsToInsert, event) } else { - *eventsToDelete = append(*eventsToDelete, goqu.Ex{"user_id": userId, "event_name": eventName, "event_filter": eventFilter}) + *eventsToDelete = append(*eventsToDelete, goqu.Ex{"user_id": userId, "event_name": fullEventName, "event_filter": eventFilter}) } } diff --git a/backend/pkg/api/data_access/user.go b/backend/pkg/api/data_access/user.go index 906742ecf..62e689ed4 100644 --- a/backend/pkg/api/data_access/user.go +++ b/backend/pkg/api/data_access/user.go @@ -4,10 +4,10 @@ import ( "context" "database/sql" "fmt" - "math" "time" t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -258,460 +258,16 @@ func (d *DataAccessService) GetUserIdByResetHash(ctx context.Context, hash strin return result, err } -var adminPerks = t.PremiumPerks{ - AdFree: false, // admins want to see ads to check ad configuration - ValidatorDashboards: maxJsInt, - ValidatorsPerDashboard: maxJsInt, - ValidatorGroupsPerDashboard: maxJsInt, - ShareCustomDashboards: true, - ManageDashboardViaApi: true, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: maxJsInt, - Hourly: maxJsInt, - Daily: maxJsInt, - Weekly: maxJsInt, - }, - EmailNotificationsPerDay: maxJsInt, - ConfigureNotificationsViaApi: true, - ValidatorGroupNotifications: maxJsInt, - WebhookEndpoints: maxJsInt, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: maxJsInt, - MachineMonitoringHistorySeconds: maxJsInt, - NotificationsMachineCustomThreshold: true, - 
NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, -} - func (d *DataAccessService) GetUserInfo(ctx context.Context, userId uint64) (*t.UserInfo, error) { - // TODO @patrick post-beta improve and unmock - userInfo := &t.UserInfo{ - Id: userId, - ApiKeys: []string{}, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 10, - UnitsPerMonth: 10, - ApiKeys: 4, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - Subscriptions: []t.UserSubscription{}, - } - - productSummary, err := d.GetProductSummary(ctx) - if err != nil { - return nil, fmt.Errorf("error getting productSummary: %w", err) - } - - result := struct { - Email string `db:"email"` - UserGroup string `db:"user_group"` - }{} - err = d.userReader.GetContext(ctx, &result, `SELECT email, COALESCE(user_group, '') as user_group FROM users WHERE id = $1`, userId) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, fmt.Errorf("%w: user not found", ErrNotFound) - } - return nil, err - } - userInfo.Email = result.Email - userInfo.UserGroup = result.UserGroup - - userInfo.Email = utils.CensorEmail(userInfo.Email) - - err = d.userReader.SelectContext(ctx, &userInfo.ApiKeys, `SELECT api_key FROM api_keys WHERE user_id = $1`, userId) - if err != nil && err != sql.ErrNoRows { - return nil, fmt.Errorf("error getting userApiKeys for user %v: %w", userId, err) - } - - premiumProduct := struct { - ProductId string `db:"product_id"` - Store string `db:"store"` - Start time.Time `db:"start"` - End time.Time `db:"end"` - }{} - err = d.userReader.GetContext(ctx, &premiumProduct, ` - SELECT - COALESCE(uas.product_id, '') AS product_id, - COALESCE(uas.store, '') AS store, - COALESCE(to_timestamp((uss.payload->>'current_period_start')::bigint),uas.created_at) AS start, - COALESCE(to_timestamp((uss.payload->>'current_period_end')::bigint),uas.expires_at) AS end - FROM users_app_subscriptions uas - LEFT JOIN users_stripe_subscriptions uss ON uss.subscription_id = uas.subscription_id - WHERE uas.user_id = $1 AND uas.active = true AND product_id IN ('orca.yearly', 'orca', 'dolphin.yearly', 'dolphin', 'guppy.yearly', 'guppy', 'whale', 'goldfish', 'plankton') - ORDER BY CASE uas.product_id - WHEN 'orca.yearly' THEN 1 - WHEN 'orca' THEN 2 - WHEN 'dolphin.yearly' THEN 3 - WHEN 'dolphin' THEN 4 - WHEN 'guppy.yearly' THEN 5 - WHEN 'guppy' THEN 6 - WHEN 'whale' THEN 7 - WHEN 'goldfish' THEN 8 - WHEN 'plankton' THEN 9 - ELSE 10 -- For any other product_id values - END, uas.id DESC - LIMIT 1`, userId) - if err != nil { - if err != sql.ErrNoRows { - return nil, fmt.Errorf("error getting premiumProduct for userId %v: %w", userId, err) - } - premiumProduct.ProductId = "premium_free" - premiumProduct.Store = "" - } - - foundProduct := false - for _, p := range productSummary.PremiumProducts { - effectiveProductId := premiumProduct.ProductId - productName := p.ProductName - switch premiumProduct.ProductId { - case "whale": - effectiveProductId = "dolphin" - productName = "Whale" - case "goldfish": - effectiveProductId = "guppy" - productName = "Goldfish" - case "plankton": - effectiveProductId = "guppy" - productName = "Plankton" - } - if p.ProductIdMonthly == effectiveProductId || p.ProductIdYearly == effectiveProductId { - userInfo.PremiumPerks = p.PremiumPerks - foundProduct = true - - store := t.ProductStoreStripe - switch premiumProduct.Store { - case "ios-appstore": - store = t.ProductStoreIosAppstore - case "android-playstore": - store = 
t.ProductStoreAndroidPlaystore - case "ethpool": - store = t.ProductStoreEthpool - case "manuall": - store = t.ProductStoreCustom - } - - if effectiveProductId != "premium_free" { - userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ - ProductId: premiumProduct.ProductId, - ProductName: productName, - ProductCategory: t.ProductCategoryPremium, - ProductStore: store, - Start: premiumProduct.Start.Unix(), - End: premiumProduct.End.Unix(), - }) - } - break - } - } - if !foundProduct { - return nil, fmt.Errorf("product %s not found", premiumProduct.ProductId) - } - - premiumAddons := []struct { - PriceId string `db:"price_id"` - Start time.Time `db:"start"` - End time.Time `db:"end"` - Quantity int `db:"quantity"` - }{} - err = d.userReader.SelectContext(ctx, &premiumAddons, ` - SELECT - price_id, - to_timestamp((uss.payload->>'current_period_start')::bigint) AS start, - to_timestamp((uss.payload->>'current_period_end')::bigint) AS end, - COALESCE((uss.payload->>'quantity')::int,1) AS quantity - FROM users_stripe_subscriptions uss - INNER JOIN users u ON u.stripe_customer_id = uss.customer_id - WHERE u.id = $1 AND uss.active = true AND uss.purchase_group = 'addon'`, userId) - if err != nil { - return nil, fmt.Errorf("error getting premiumAddons for userId %v: %w", userId, err) - } - for _, addon := range premiumAddons { - foundAddon := false - for _, p := range productSummary.ExtraDashboardValidatorsPremiumAddon { - if p.StripePriceIdMonthly == addon.PriceId || p.StripePriceIdYearly == addon.PriceId { - foundAddon = true - for i := 0; i < addon.Quantity; i++ { - userInfo.PremiumPerks.ValidatorsPerDashboard += p.ExtraDashboardValidators - userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ - ProductId: utils.PriceIdToProductId(addon.PriceId), - ProductName: p.ProductName, - ProductCategory: t.ProductCategoryPremiumAddon, - ProductStore: t.ProductStoreStripe, - Start: addon.Start.Unix(), - End: addon.End.Unix(), - }) - } - } - } - if !foundAddon { - return nil, fmt.Errorf("addon not found: %v", addon.PriceId) - } - } - - if productSummary.ValidatorsPerDashboardLimit < userInfo.PremiumPerks.ValidatorsPerDashboard { - userInfo.PremiumPerks.ValidatorsPerDashboard = productSummary.ValidatorsPerDashboardLimit - } - - if userInfo.UserGroup == t.UserGroupAdmin { - userInfo.PremiumPerks = adminPerks - } - - return userInfo, nil -} - -const hour uint64 = 3600 -const day = 24 * hour -const week = 7 * day -const month = 30 * day -const maxJsInt uint64 = 9007199254740991 // 2^53-1 (max safe int in JS) - -var freeTierProduct t.PremiumProduct = t.PremiumProduct{ - ProductName: "Free", - PremiumPerks: t.PremiumPerks{ - AdFree: false, - ValidatorDashboards: 1, - ValidatorsPerDashboard: 20, - ValidatorGroupsPerDashboard: 1, - ShareCustomDashboards: false, - ManageDashboardViaApi: false, - BulkAdding: false, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 0, - Hourly: 12 * hour, - Daily: 0, - Weekly: 0, - }, - EmailNotificationsPerDay: 5, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 1, - WebhookEndpoints: 1, - MobileAppCustomThemes: false, - MobileAppWidget: false, - MonitorMachines: 1, - MachineMonitoringHistorySeconds: 3600 * 3, - NotificationsMachineCustomThreshold: false, - NotificationsValidatorDashboardRealTimeMode: false, - NotificationsValidatorDashboardGroupOffline: false, - }, - PricePerMonthEur: 0, - PricePerYearEur: 0, - ProductIdMonthly: "premium_free", - ProductIdYearly: "premium_free.yearly", + return 
db.GetUserInfo(ctx, userId, d.userReader) } func (d *DataAccessService) GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { - // TODO @patrick post-beta put into db instead of hardcoding here and make it configurable - return &t.ProductSummary{ - ValidatorsPerDashboardLimit: 102_000, - StripePublicKey: utils.Config.Frontend.Stripe.PublicKey, - ApiProducts: []t.ApiProduct{ // TODO @patrick post-beta this data is not final yet - { - ProductId: "api_free", - ProductName: "Free", - PricePerMonthEur: 0, - PricePerYearEur: 0 * 12, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 10, - UnitsPerMonth: 10_000_000, - ApiKeys: 2, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "iron", - ProductName: "Iron", - PricePerMonthEur: 1.99, - PricePerYearEur: math.Floor(1.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 20, - UnitsPerMonth: 20_000_000, - ApiKeys: 10, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "silver", - ProductName: "Silver", - PricePerMonthEur: 2.99, - PricePerYearEur: math.Floor(2.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 30, - UnitsPerMonth: 100_000_000, - ApiKeys: 20, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "gold", - ProductName: "Gold", - PricePerMonthEur: 3.99, - PricePerYearEur: math.Floor(3.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 40, - UnitsPerMonth: 200_000_000, - ApiKeys: 40, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - }, - PremiumProducts: []t.PremiumProduct{ - freeTierProduct, - { - ProductName: "Guppy", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 1, - ValidatorsPerDashboard: 100, - ValidatorGroupsPerDashboard: 3, - ShareCustomDashboards: true, - ManageDashboardViaApi: false, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: day, - Hourly: 7 * day, - Daily: month, - Weekly: 0, - }, - EmailNotificationsPerDay: 15, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 3, - WebhookEndpoints: 3, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 2, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - NotificationsMachineCustomThreshold: true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, - }, - PricePerMonthEur: 9.99, - PricePerYearEur: 107.88, - ProductIdMonthly: "guppy", - ProductIdYearly: "guppy.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Guppy, - StripePriceIdYearly: utils.Config.Frontend.Stripe.GuppyYearly, - }, - { - ProductName: "Dolphin", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 2, - ValidatorsPerDashboard: 300, - ValidatorGroupsPerDashboard: 10, - ShareCustomDashboards: true, - ManageDashboardViaApi: false, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 5 * day, - Hourly: month, - Daily: 2 * month, - Weekly: 8 * week, - }, - EmailNotificationsPerDay: 20, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 10, - WebhookEndpoints: 10, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 10, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - NotificationsMachineCustomThreshold: 
true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, - }, - PricePerMonthEur: 29.99, - PricePerYearEur: 311.88, - ProductIdMonthly: "dolphin", - ProductIdYearly: "dolphin.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Dolphin, - StripePriceIdYearly: utils.Config.Frontend.Stripe.DolphinYearly, - }, - { - ProductName: "Orca", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 2, - ValidatorsPerDashboard: 1000, - ValidatorGroupsPerDashboard: 30, - ShareCustomDashboards: true, - ManageDashboardViaApi: true, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 3 * week, - Hourly: 6 * month, - Daily: 12 * month, - Weekly: maxJsInt, - }, - EmailNotificationsPerDay: 50, - ConfigureNotificationsViaApi: true, - ValidatorGroupNotifications: 60, - WebhookEndpoints: 30, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 10, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - NotificationsMachineCustomThreshold: true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, - }, - PricePerMonthEur: 49.99, - PricePerYearEur: 479.88, - ProductIdMonthly: "orca", - ProductIdYearly: "orca.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Orca, - StripePriceIdYearly: utils.Config.Frontend.Stripe.OrcaYearly, - IsPopular: true, - }, - }, - ExtraDashboardValidatorsPremiumAddon: []t.ExtraDashboardValidatorsPremiumAddon{ - { - ProductName: "1k extra valis per dashboard", - ExtraDashboardValidators: 1000, - PricePerMonthEur: 74.99, - PricePerYearEur: 719.88, - ProductIdMonthly: "vdb_addon_1k", - ProductIdYearly: "vdb_addon_1k.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon1k, - StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon1kYearly, - }, - { - ProductName: "10k extra valis per dashboard", - ExtraDashboardValidators: 10000, - PricePerMonthEur: 449.99, - PricePerYearEur: 4319.88, - ProductIdMonthly: "vdb_addon_10k", - ProductIdYearly: "vdb_addon_10k.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon10k, - StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon10kYearly, - }, - }, - }, nil + return db.GetProductSummary(ctx) } func (d *DataAccessService) GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { - return &freeTierProduct.PremiumPerks, nil + return db.GetFreeTierPerks(ctx) } func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64) (*t.UserDashboardsData, error) { diff --git a/backend/pkg/api/data_access/vdb_summary.go b/backend/pkg/api/data_access/vdb_summary.go index 58e29ad2a..7f51b5bed 100644 --- a/backend/pkg/api/data_access/vdb_summary.go +++ b/backend/pkg/api/data_access/vdb_summary.go @@ -867,8 +867,8 @@ func (d *DataAccessService) internal_getElClAPR(ctx context.Context, dashboardId if err != nil { return decimal.Zero, 0, decimal.Zero, 0, err } - elIncomeFloat, _ := elIncome.Float64() - elAPR = ((elIncomeFloat / float64(aprDivisor)) / (float64(32e18) * float64(rewardsResultTable.ValidatorCount))) * 24.0 * 365.0 * 100.0 + elIncomeFloat, _ := elIncome.Float64() // EL income is in ETH + elAPR = ((elIncomeFloat / float64(aprDivisor)) / (float64(32) * float64(rewardsResultTable.ValidatorCount))) * 24.0 * 365.0 * 100.0 if math.IsNaN(elAPR) { elAPR = 0 } @@ -1093,17 +1093,17 @@ func (d *DataAccessService) GetLatestExportedChartTs(ctx context.Context, aggreg var dateColumn string switch aggregation { case 
enums.IntervalEpoch: - table = "validator_dashboard_data_epoch" - dateColumn = "epoch_timestamp" + table = "view_validator_dashboard_data_epoch_max_ts" + dateColumn = "t" case enums.IntervalHourly: - table = "validator_dashboard_data_hourly" - dateColumn = "hour" + table = "view_validator_dashboard_data_hourly_max_ts" + dateColumn = "t" case enums.IntervalDaily: - table = "validator_dashboard_data_daily" - dateColumn = "day" + table = "view_validator_dashboard_data_daily_max_ts" + dateColumn = "t" case enums.IntervalWeekly: - table = "validator_dashboard_data_weekly" - dateColumn = "week" + table = "view_validator_dashboard_data_weekly_max_ts" + dateColumn = "t" default: return 0, fmt.Errorf("unexpected aggregation type: %v", aggregation) } diff --git a/backend/pkg/api/handlers/auth.go b/backend/pkg/api/handlers/auth.go index 777b84ca9..3f3c2f3b3 100644 --- a/backend/pkg/api/handlers/auth.go +++ b/backend/pkg/api/handlers/auth.go @@ -3,14 +3,10 @@ package handlers import ( "cmp" "context" - "crypto/sha256" - "encoding/binary" - "encoding/hex" "errors" "fmt" "html" "net/http" - "strconv" "strings" "time" @@ -41,7 +37,7 @@ const authEmailExpireTime = time.Minute * 30 type ctxKey string const ctxUserIdKey ctxKey = "user_id" -const ctxIsMockEnabledKey ctxKey = "is_mock_enabled" +const ctxIsMockedKey ctxKey = "is_mocked" var errBadCredentials = newUnauthorizedErr("invalid email or password") @@ -86,7 +82,7 @@ func (h *HandlerService) purgeAllSessionsForUser(ctx context.Context, userId uin // TODO move to service? func (h *HandlerService) sendConfirmationEmail(ctx context.Context, userId uint64, email string) error { // 1. check last confirmation time to enforce ratelimit - lastTs, err := h.dai.GetEmailConfirmationTime(ctx, userId) + lastTs, err := h.daService.GetEmailConfirmationTime(ctx, userId) if err != nil { return errors.New("error getting confirmation-ts") } @@ -96,7 +92,7 @@ func (h *HandlerService) sendConfirmationEmail(ctx context.Context, userId uint6 // 2. update confirmation hash (before sending so there's no hash mismatch on failure) confirmationHash := utils.RandomString(40) - err = h.dai.UpdateEmailConfirmationHash(ctx, userId, email, confirmationHash) + err = h.daService.UpdateEmailConfirmationHash(ctx, userId, email, confirmationHash) if err != nil { return errors.New("error updating confirmation hash") } @@ -117,7 +113,7 @@ Best regards, } // 4. update confirmation time (only after mail was sent) - err = h.dai.UpdateEmailConfirmationTime(ctx, userId) + err = h.daService.UpdateEmailConfirmationTime(ctx, userId) if err != nil { // shouldn't present this as error to user, confirmation works fine log.Error(err, "error updating email confirmation time, rate limiting won't be enforced", 0, nil) @@ -129,7 +125,7 @@ Best regards, func (h *HandlerService) sendPasswordResetEmail(ctx context.Context, userId uint64, email string) error { // 0. check if password resets are allowed // (can be forbidden by admin (not yet in v2)) - passwordResetAllowed, err := h.dai.IsPasswordResetAllowed(ctx, userId) + passwordResetAllowed, err := h.daService.IsPasswordResetAllowed(ctx, userId) if err != nil { return err } @@ -138,7 +134,7 @@ func (h *HandlerService) sendPasswordResetEmail(ctx context.Context, userId uint } // 1. 
check last confirmation time to enforce ratelimit - lastTs, err := h.dai.GetPasswordResetTime(ctx, userId) + lastTs, err := h.daService.GetPasswordResetTime(ctx, userId) if err != nil { return errors.New("error getting confirmation-ts") } @@ -148,7 +144,7 @@ func (h *HandlerService) sendPasswordResetEmail(ctx context.Context, userId uint // 2. update reset hash (before sending so there's no hash mismatch on failure) resetHash := utils.RandomString(40) - err = h.dai.UpdatePasswordResetHash(ctx, userId, resetHash) + err = h.daService.UpdatePasswordResetHash(ctx, userId, resetHash) if err != nil { return errors.New("error updating confirmation hash") } @@ -169,7 +165,7 @@ Best regards, } // 4. update reset time (only after mail was sent) - err = h.dai.UpdatePasswordResetTime(ctx, userId) + err = h.daService.UpdatePasswordResetTime(ctx, userId) if err != nil { // shouldn't present this as error to user, reset works fine log.Error(err, "error updating password reset time, rate limiting won't be enforced", 0, nil) @@ -198,7 +194,7 @@ func (h *HandlerService) GetUserIdByApiKey(r *http.Request) (uint64, error) { if apiKey == "" { return 0, newUnauthorizedErr("missing api key") } - userId, err := h.dai.GetUserIdByApiKey(r.Context(), apiKey) + userId, err := h.daService.GetUserIdByApiKey(r.Context(), apiKey) if errors.Is(err, dataaccess.ErrNotFound) { err = newUnauthorizedErr("api key not found") } @@ -247,7 +243,7 @@ func (h *HandlerService) InternalPostUsers(w http.ResponseWriter, r *http.Reques return } - _, err := h.dai.GetUserByEmail(r.Context(), email) + _, err := h.daService.GetUserByEmail(r.Context(), email) if !errors.Is(err, dataaccess.ErrNotFound) { if err == nil { returnConflict(w, r, errors.New("email already registered")) @@ -270,7 +266,7 @@ func (h *HandlerService) InternalPostUsers(w http.ResponseWriter, r *http.Reques } // add user - userId, err := h.dai.CreateUser(r.Context(), email, string(passwordHash)) + userId, err := h.daService.CreateUser(r.Context(), email, string(passwordHash)) if err != nil { handleErr(w, r, err) return @@ -295,12 +291,12 @@ func (h *HandlerService) InternalPostUserConfirm(w http.ResponseWriter, r *http. return } - userId, err := h.dai.GetUserIdByConfirmationHash(r.Context(), confirmationHash) + userId, err := h.daService.GetUserIdByConfirmationHash(r.Context(), confirmationHash) if err != nil { handleErr(w, r, err) return } - confirmationTime, err := h.dai.GetEmailConfirmationTime(r.Context(), userId) + confirmationTime, err := h.daService.GetEmailConfirmationTime(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -310,7 +306,7 @@ func (h *HandlerService) InternalPostUserConfirm(w http.ResponseWriter, r *http. 
return } - err = h.dai.UpdateUserEmail(r.Context(), userId) + err = h.daService.UpdateUserEmail(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -342,7 +338,7 @@ func (h *HandlerService) InternalPostUserPasswordReset(w http.ResponseWriter, r return } - userId, err := h.dai.GetUserByEmail(r.Context(), email) + userId, err := h.daService.GetUserByEmail(r.Context(), email) if err != nil { if err == dataaccess.ErrNotFound { // don't leak if email is registered @@ -380,12 +376,12 @@ func (h *HandlerService) InternalPostUserPasswordResetHash(w http.ResponseWriter } // check token validity - userId, err := h.dai.GetUserIdByResetHash(r.Context(), resetToken) + userId, err := h.daService.GetUserIdByResetHash(r.Context(), resetToken) if err != nil { handleErr(w, r, err) return } - resetTime, err := h.dai.GetPasswordResetTime(r.Context(), userId) + resetTime, err := h.daService.GetPasswordResetTime(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -401,20 +397,20 @@ func (h *HandlerService) InternalPostUserPasswordResetHash(w http.ResponseWriter handleErr(w, r, errors.New("error hashing password")) return } - err = h.dai.UpdateUserPassword(r.Context(), userId, string(passwordHash)) + err = h.daService.UpdateUserPassword(r.Context(), userId, string(passwordHash)) if err != nil { handleErr(w, r, err) return } // if email is not confirmed, confirm since they clicked a link emailed to them - userInfo, err := h.dai.GetUserCredentialInfo(r.Context(), userId) + userInfo, err := h.daService.GetUserCredentialInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } if !userInfo.EmailConfirmed { - err = h.dai.UpdateUserEmail(r.Context(), userId) + err = h.daService.UpdateUserEmail(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -449,7 +445,7 @@ func (h *HandlerService) InternalPostLogin(w http.ResponseWriter, r *http.Reques } // fetch user - userId, err := h.dai.GetUserByEmail(r.Context(), email) + userId, err := h.daService.GetUserByEmail(r.Context(), email) if err != nil { if errors.Is(err, dataaccess.ErrNotFound) { err = errBadCredentials @@ -457,7 +453,7 @@ func (h *HandlerService) InternalPostLogin(w http.ResponseWriter, r *http.Reques handleErr(w, r, err) return } - user, err := h.dai.GetUserCredentialInfo(r.Context(), userId) + user, err := h.daService.GetUserCredentialInfo(r.Context(), userId) if err != nil { if errors.Is(err, dataaccess.ErrNotFound) { err = errBadCredentials @@ -532,7 +528,7 @@ func (h *HandlerService) InternalPostMobileAuthorize(w http.ResponseWriter, r *h } // check if oauth app exists to validate whether redirect uri is valid - appInfo, err := h.dai.GetAppDataFromRedirectUri(req.RedirectURI) + appInfo, err := h.daService.GetAppDataFromRedirectUri(req.RedirectURI) if err != nil { callback := req.RedirectURI + "?error=invalid_request&error_description=missing_redirect_uri" + state http.Redirect(w, r, callback, http.StatusSeeOther) @@ -549,7 +545,7 @@ func (h *HandlerService) InternalPostMobileAuthorize(w http.ResponseWriter, r *h session := h.scs.Token(r.Context()) sanitizedDeviceName := html.EscapeString(clientName) - err = h.dai.AddUserDevice(userInfo.Id, utils.HashAndEncode(session+session), clientID, sanitizedDeviceName, appInfo.ID) + err = h.daService.AddUserDevice(userInfo.Id, utils.HashAndEncode(session+session), clientID, sanitizedDeviceName, appInfo.ID) if err != nil { log.Warnf("Error adding user device: %v", err) callback := req.RedirectURI + "?error=invalid_request&error_description=server_error" + 
state @@ -589,7 +585,7 @@ func (h *HandlerService) InternalPostMobileEquivalentExchange(w http.ResponseWri } // Get user info - user, err := h.dai.GetUserCredentialInfo(r.Context(), userID) + user, err := h.daService.GetUserCredentialInfo(r.Context(), userID) if err != nil { if errors.Is(err, dataaccess.ErrNotFound) { err = errBadCredentials @@ -612,7 +608,7 @@ func (h *HandlerService) InternalPostMobileEquivalentExchange(w http.ResponseWri // invalidate old refresh token and replace with hashed session id sanitizedDeviceName := html.EscapeString(req.DeviceName) - err = h.dai.MigrateMobileSession(refreshTokenHashed, utils.HashAndEncode(session+session), req.DeviceID, sanitizedDeviceName) // salted with session + err = h.daService.MigrateMobileSession(refreshTokenHashed, utils.HashAndEncode(session+session), req.DeviceID, sanitizedDeviceName) // salted with session if err != nil { handleErr(w, r, err) return @@ -653,7 +649,7 @@ func (h *HandlerService) InternalPostUsersMeNotificationSettingsPairedDevicesTok return } - err = h.dai.AddMobileNotificationToken(user.Id, deviceID, req.Token) + err = h.daService.AddMobileNotificationToken(user.Id, deviceID, req.Token) if err != nil { handleErr(w, r, err) return @@ -693,7 +689,7 @@ func (h *HandlerService) InternalHandleMobilePurchase(w http.ResponseWriter, r * return } - subscriptionCount, err := h.dai.GetAppSubscriptionCount(user.Id) + subscriptionCount, err := h.daService.GetAppSubscriptionCount(user.Id) if err != nil { handleErr(w, r, err) return @@ -724,7 +720,7 @@ func (h *HandlerService) InternalHandleMobilePurchase(w http.ResponseWriter, r * } } - err = h.dai.AddMobilePurchase(nil, user.Id, req, validationResult, "") + err = h.daService.AddMobilePurchase(nil, user.Id, req, validationResult, "") if err != nil { handleErr(w, r, err) return @@ -755,7 +751,7 @@ func (h *HandlerService) InternalDeleteUser(w http.ResponseWriter, r *http.Reque } // TODO allow if user has any subsciptions etc? - err = h.dai.RemoveUser(r.Context(), user.Id) + err = h.daService.RemoveUser(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -777,7 +773,7 @@ func (h *HandlerService) InternalPostUserEmail(w http.ResponseWriter, r *http.Re handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserCredentialInfo(r.Context(), user.Id) + userInfo, err := h.daService.GetUserCredentialInfo(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -809,7 +805,7 @@ func (h *HandlerService) InternalPostUserEmail(w http.ResponseWriter, r *http.Re return } - _, err = h.dai.GetUserByEmail(r.Context(), newEmail) + _, err = h.daService.GetUserByEmail(r.Context(), newEmail) if !errors.Is(err, dataaccess.ErrNotFound) { if err == nil { handleErr(w, r, newConflictErr("email already registered")) @@ -856,7 +852,7 @@ func (h *HandlerService) InternalPutUserPassword(w http.ResponseWriter, r *http. return } // user doesn't contain password, fetch from db - userData, err := h.dai.GetUserCredentialInfo(r.Context(), user.Id) + userData, err := h.daService.GetUserCredentialInfo(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -892,7 +888,7 @@ func (h *HandlerService) InternalPutUserPassword(w http.ResponseWriter, r *http. } // change password - err = h.dai.UpdateUserPassword(r.Context(), user.Id, string(passwordHash)) + err = h.daService.UpdateUserPassword(r.Context(), user.Id, string(passwordHash)) if err != nil { handleErr(w, r, err) return @@ -906,187 +902,3 @@ func (h *HandlerService) InternalPutUserPassword(w http.ResponseWriter, r *http. 
returnNoContent(w, r) } - -// Middlewares - -func hashUint64(data uint64) [32]byte { - // Convert uint64 to a byte slice - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, data) - - // Compute SHA-256 hash - hash := sha256.Sum256(buf) - return hash -} - -func checkHash(data uint64, hashStr string) bool { - // Decode the hexadecimal string into a byte slice - hashToCheck, err := hex.DecodeString(hashStr) - if err != nil { - return false - } - - // Hash the uint64 value - computedHash := hashUint64(data) - - // Compare the computed hash with the provided hash - return string(computedHash[:]) == string(hashToCheck) -} - -// returns a middleware that stores user id in context, using the provided function -func StoreUserIdMiddleware(next http.Handler, userIdFunc func(r *http.Request) (uint64, error)) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - userId, err := userIdFunc(r) - if err != nil { - if errors.Is(err, errUnauthorized) { - // if next handler requires authentication, it should return 'unauthorized' itself - next.ServeHTTP(w, r) - } else { - handleErr(w, r, err) - } - return - } - - // if user id matches a given hash, allow access without checking dashboard access and return mock data - // TODO: move to config, exposing this in source code is a minor security risk for now - validHashes := []string{ - "2cab06069254b5555b617efa1d17f0748324270bb587b73422e6840d59ff322c", - "fc624cf355b84bc583661552982894621568b59c0a1c92ab0c1e03ed3bbf649b", - "03e7fb02cbc33eb45e98ab50b4bcad7fc338e5edfb5eca33ad9eb7d13d4ff106", - } - for _, hash := range validHashes { - if checkHash(userId, hash) { - ctx := r.Context() - ctx = context.WithValue(ctx, ctxIsMockEnabledKey, true) - r = r.WithContext(ctx) - } - } - ctx := r.Context() - ctx = context.WithValue(ctx, ctxUserIdKey, userId) - r = r.WithContext(ctx) - next.ServeHTTP(w, r) - }) -} - -func (h *HandlerService) StoreUserIdBySessionMiddleware(next http.Handler) http.Handler { - return StoreUserIdMiddleware(next, func(r *http.Request) (uint64, error) { - return h.GetUserIdBySession(r) - }) -} - -func (h *HandlerService) StoreUserIdByApiKeyMiddleware(next http.Handler) http.Handler { - return StoreUserIdMiddleware(next, func(r *http.Request) (uint64, error) { - return h.GetUserIdByApiKey(r) - }) -} - -// returns a middleware that checks if user has access to dashboard when a primary id is used -func (h *HandlerService) VDBAuthMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // if mock data is used, no need to check access - if isMockEnabled, ok := r.Context().Value(ctxIsMockEnabledKey).(bool); ok && isMockEnabled { - next.ServeHTTP(w, r) - return - } - var err error - dashboardId, err := strconv.ParseUint(mux.Vars(r)["dashboard_id"], 10, 64) - if err != nil { - // if primary id is not used, no need to check access - next.ServeHTTP(w, r) - return - } - // primary id is used -> user needs to have access to dashboard - - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - - // store user id in context - ctx := r.Context() - ctx = context.WithValue(ctx, ctxUserIdKey, userId) - r = r.WithContext(ctx) - - dashboardUser, err := h.dai.GetValidatorDashboardUser(r.Context(), types.VDBIdPrimary(dashboardId)) - if err != nil { - handleErr(w, r, err) - return - } - - if dashboardUser.UserId != userId { - // user does not have access to dashboard - // the proper error would be 403 Forbidden, but we don't want to 
leak information so we return 404 Not Found - handleErr(w, r, newNotFoundErr("dashboard with id %v not found", dashboardId)) - return - } - - next.ServeHTTP(w, r) - }) -} - -// Common middleware logic for checking user premium perks -func (h *HandlerService) PremiumPerkCheckMiddleware(next http.Handler, hasRequiredPerk func(premiumPerks types.PremiumPerks) bool) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // get user id from context - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - - // get user info - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) - if err != nil { - handleErr(w, r, err) - return - } - - // check if user has the required premium perk - if !hasRequiredPerk(userInfo.PremiumPerks) { - handleErr(w, r, newForbiddenErr("users premium perks do not allow usage of this endpoint")) - return - } - - next.ServeHTTP(w, r) - }) -} - -// Middleware for managing dashboards via API -func (h *HandlerService) ManageDashboardsViaApiCheckMiddleware(next http.Handler) http.Handler { - return h.PremiumPerkCheckMiddleware(next, func(premiumPerks types.PremiumPerks) bool { - return premiumPerks.ManageDashboardViaApi - }) -} - -// Middleware for managing notifications via API -func (h *HandlerService) ManageNotificationsViaApiCheckMiddleware(next http.Handler) http.Handler { - return h.PremiumPerkCheckMiddleware(next, func(premiumPerks types.PremiumPerks) bool { - return premiumPerks.ConfigureNotificationsViaApi - }) -} - -// middleware check to return if specified dashboard is not archived (and accessible) -func (h *HandlerService) VDBArchivedCheckMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - if len(dashboardId.Validators) > 0 { - next.ServeHTTP(w, r) - return - } - dashboard, err := h.dai.GetValidatorDashboardInfo(r.Context(), dashboardId.Id) - if err != nil { - handleErr(w, r, err) - return - } - if dashboard.IsArchived { - handleErr(w, r, newForbiddenErr("dashboard with id %v is archived", dashboardId)) - return - } - next.ServeHTTP(w, r) - }) -} diff --git a/backend/pkg/api/handlers/backward_compat.go b/backend/pkg/api/handlers/backward_compat.go index f02161a00..e9d823c69 100644 --- a/backend/pkg/api/handlers/backward_compat.go +++ b/backend/pkg/api/handlers/backward_compat.go @@ -43,7 +43,7 @@ func (h *HandlerService) getTokenByRefresh(r *http.Request, refreshToken string) log.Infof("refresh token: %v, claims: %v, hashed refresh: %v", refreshToken, unsafeClaims, refreshTokenHashed) // confirm all claims via db lookup and refreshtoken check - userID, err := h.dai.GetUserIdByRefreshToken(unsafeClaims.UserID, unsafeClaims.AppID, unsafeClaims.DeviceID, refreshTokenHashed) + userID, err := h.daService.GetUserIdByRefreshToken(unsafeClaims.UserID, unsafeClaims.AppID, unsafeClaims.DeviceID, refreshTokenHashed) if err != nil { if err == sql.ErrNoRows { return 0, "", dataaccess.ErrNotFound diff --git a/backend/pkg/api/handlers/handler_service.go b/backend/pkg/api/handlers/handler_service.go new file mode 100644 index 000000000..4e7435351 --- /dev/null +++ b/backend/pkg/api/handlers/handler_service.go @@ -0,0 +1,541 @@ +package handlers + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + + 
"github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/invopop/jsonschema" + + "github.com/alexedwards/scs/v2" + dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" + "github.com/gobitfly/beaconchain/pkg/api/enums" + "github.com/gobitfly/beaconchain/pkg/api/services" + types "github.com/gobitfly/beaconchain/pkg/api/types" +) + +type HandlerService struct { + daService dataaccess.DataAccessor + daDummy dataaccess.DataAccessor + scs *scs.SessionManager + isPostMachineMetricsEnabled bool // if more config options are needed, consider having the whole config in here +} + +func NewHandlerService(dataAccessor dataaccess.DataAccessor, dummy dataaccess.DataAccessor, sessionManager *scs.SessionManager, enablePostMachineMetrics bool) *HandlerService { + if allNetworks == nil { + networks, err := dataAccessor.GetAllNetworks() + if err != nil { + log.Fatal(err, "error getting networks for handler", 0, nil) + } + allNetworks = networks + } + + return &HandlerService{ + daService: dataAccessor, + daDummy: dummy, + scs: sessionManager, + isPostMachineMetricsEnabled: enablePostMachineMetrics, + } +} + +// getDataAccessor returns the correct data accessor based on the request context. +// if the request is mocked, the data access dummy is returned; otherwise the data access service. +// should only be used if getting mocked data for the endpoint is appropriate +func (h *HandlerService) getDataAccessor(r *http.Request) dataaccess.DataAccessor { + if isMocked(r) { + return h.daDummy + } + return h.daService +} + +// all networks available in the system, filled on startup in NewHandlerService +var allNetworks []types.NetworkInfo + +// -------------------------------------- +// errors + +var ( + errMsgParsingId = errors.New("error parsing parameter 'dashboard_id'") + errBadRequest = errors.New("bad request") + errInternalServer = errors.New("internal server error") + errUnauthorized = errors.New("unauthorized") + errForbidden = errors.New("forbidden") + errConflict = errors.New("conflict") + errTooManyRequests = errors.New("too many requests") + errGone = errors.New("gone") +) + +// -------------------------------------- +// utility functions + +type validatorSet struct { + Indexes []types.VDBValidator + PublicKeys []string +} + +// parseDashboardId is a helper function to validate the string dashboard id param. +func parseDashboardId(id string) (interface{}, error) { + var v validationError + if reInteger.MatchString(id) { + // given id is a normal id + id := v.checkUint(id, "dashboard_id") + if v.hasErrors() { + return nil, v + } + return types.VDBIdPrimary(id), nil + } + if reValidatorDashboardPublicId.MatchString(id) { + // given id is a public id + return types.VDBIdPublic(id), nil + } + // given id must be an encoded set of validators + decodedId, err := base64.RawURLEncoding.DecodeString(id) + if err != nil { + return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) + } + indexes, publicKeys := v.checkValidatorList(string(decodedId), forbidEmpty) + if v.hasErrors() { + return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) + } + return validatorSet{Indexes: indexes, PublicKeys: publicKeys}, nil +} + +// getDashboardId is a helper function to convert the dashboard id param to a VDBId. +// precondition: dashboardIdParam must be a valid dashboard id and either a primary id, public id, or list of validators. 
+func (h *HandlerService) getDashboardId(ctx context.Context, dashboardIdParam interface{}) (*types.VDBId, error) { + switch dashboardId := dashboardIdParam.(type) { + case types.VDBIdPrimary: + return &types.VDBId{Id: dashboardId, Validators: nil}, nil + case types.VDBIdPublic: + dashboardInfo, err := h.daService.GetValidatorDashboardPublicId(ctx, dashboardId) + if err != nil { + return nil, err + } + return &types.VDBId{Id: types.VDBIdPrimary(dashboardInfo.DashboardId), Validators: nil, AggregateGroups: !dashboardInfo.ShareSettings.ShareGroups}, nil + case validatorSet: + validators, err := h.daService.GetValidatorsFromSlices(dashboardId.Indexes, dashboardId.PublicKeys) + if err != nil { + return nil, err + } + if len(validators) == 0 { + return nil, newNotFoundErr("no validators found for given id") + } + if len(validators) > maxValidatorsInList { + return nil, newBadRequestErr("too many validators in list, maximum is %d", maxValidatorsInList) + } + return &types.VDBId{Validators: validators}, nil + } + return nil, errMsgParsingId +} + +// handleDashboardId is a helper function to both validate the dashboard id param and convert it to a VDBId. +// it should be used as the last validation step for all internal dashboard GET-handlers. +// Modifying handlers (POST, PUT, DELETE) should only accept primary dashboard ids and just use checkPrimaryDashboardId. +func (h *HandlerService) handleDashboardId(ctx context.Context, param string) (*types.VDBId, error) { + // validate dashboard id param + dashboardIdParam, err := parseDashboardId(param) + if err != nil { + return nil, err + } + // convert to VDBId + dashboardId, err := h.getDashboardId(ctx, dashboardIdParam) + if err != nil { + return nil, err + } + + return dashboardId, nil +} + +const chartDatapointLimit uint64 = 200 + +type ChartTimeDashboardLimits struct { + MinAllowedTs uint64 + LatestExportedTs uint64 + MaxAllowedInterval uint64 +} + +// helper function to retrieve allowed chart timestamp boundaries according to the users premium perks at the current point in time +func (h *HandlerService) getCurrentChartTimeLimitsForDashboard(ctx context.Context, dashboardId *types.VDBId, aggregation enums.ChartAggregation) (ChartTimeDashboardLimits, error) { + limits := ChartTimeDashboardLimits{} + var err error + premiumPerks, err := h.getDashboardPremiumPerks(ctx, *dashboardId) + if err != nil { + return limits, err + } + + maxAge := getMaxChartAge(aggregation, premiumPerks.ChartHistorySeconds) // can be max int for unlimited, always check for underflows + if maxAge == 0 { + return limits, newConflictErr("requested aggregation is not available for dashboard owner's premium subscription") + } + limits.LatestExportedTs, err = h.daService.GetLatestExportedChartTs(ctx, aggregation) + if err != nil { + return limits, err + } + limits.MinAllowedTs = limits.LatestExportedTs - min(maxAge, limits.LatestExportedTs) // min to prevent underflow + secondsPerEpoch := uint64(12 * 32) // TODO: fetch dashboards chain id and use correct value for network once available + limits.MaxAllowedInterval = chartDatapointLimit*uint64(aggregation.Duration(secondsPerEpoch).Seconds()) - 1 // -1 to make sure we don't go over the limit + + return limits, nil +} + +// getDashboardPremiumPerks gets the premium perks of the dashboard OWNER or if it's a guest dashboard, it returns free tier premium perks +func (h *HandlerService) getDashboardPremiumPerks(ctx context.Context, id types.VDBId) (*types.PremiumPerks, error) { + // for guest dashboards, return free tier perks + if 
id.Validators != nil { + perk, err := h.daService.GetFreeTierPerks(ctx) + if err != nil { + return nil, err + } + return perk, nil + } + // could be made into a single query if needed + dashboardUser, err := h.daService.GetValidatorDashboardUser(ctx, id.Id) + if err != nil { + return nil, err + } + userInfo, err := h.daService.GetUserInfo(ctx, dashboardUser.UserId) + if err != nil { + return nil, err + } + + return &userInfo.PremiumPerks, nil +} + +// getMaxChartAge returns the maximum age of a chart in seconds based on the given aggregation type and premium perks +func getMaxChartAge(aggregation enums.ChartAggregation, perkSeconds types.ChartHistorySeconds) uint64 { + aggregations := enums.ChartAggregations + switch aggregation { + case aggregations.Epoch: + return perkSeconds.Epoch + case aggregations.Hourly: + return perkSeconds.Hourly + case aggregations.Daily: + return perkSeconds.Daily + case aggregations.Weekly: + return perkSeconds.Weekly + default: + return 0 + } +} + +func isUserAdmin(user *types.UserInfo) bool { + if user == nil { + return false + } + return user.UserGroup == types.UserGroupAdmin +} + +// -------------------------------------- +// Response handling + +func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { + w.Header().Set("Content-Type", "application/json") + if response == nil { + w.WriteHeader(statusCode) + return + } + jsonData, err := json.Marshal(response) + if err != nil { + logApiError(r, fmt.Errorf("error encoding json data: %w", err), 0, + log.Fields{ + "data": fmt.Sprintf("%+v", response), + }) + w.WriteHeader(http.StatusInternalServerError) + response = types.ApiErrorResponse{ + Error: "error encoding json data", + } + if err = json.NewEncoder(w).Encode(response); err != nil { + // there seems to be an error with the lib + logApiError(r, fmt.Errorf("error encoding error response after failed encoding: %w", err), 0) + } + return + } + w.WriteHeader(statusCode) + if _, err = w.Write(jsonData); err != nil { + // already returned wrong status code to user, can't prevent that + logApiError(r, fmt.Errorf("error writing response data: %w", err), 0) + } +} + +func returnError(w http.ResponseWriter, r *http.Request, code int, err error) { + response := types.ApiErrorResponse{ + Error: err.Error(), + } + writeResponse(w, r, code, response) +} + +func returnOk(w http.ResponseWriter, r *http.Request, data interface{}) { + writeResponse(w, r, http.StatusOK, data) +} + +func returnCreated(w http.ResponseWriter, r *http.Request, data interface{}) { + writeResponse(w, r, http.StatusCreated, data) +} + +func returnNoContent(w http.ResponseWriter, r *http.Request) { + writeResponse(w, r, http.StatusNoContent, nil) +} + +// Errors + +func returnBadRequest(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusBadRequest, err) +} + +func returnUnauthorized(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusUnauthorized, err) +} + +func returnNotFound(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusNotFound, err) +} + +func returnConflict(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusConflict, err) +} + +func returnForbidden(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusForbidden, err) +} + +func returnTooManyRequests(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusTooManyRequests, err) +} + +func returnGone(w 
http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusGone, err) +} + +const maxBodySize = 10 * 1024 + +func logApiError(r *http.Request, err error, callerSkip int, additionalInfos ...log.Fields) { + body, _ := io.ReadAll(io.LimitReader(r.Body, maxBodySize)) + requestFields := log.Fields{ + "request_endpoint": r.Method + " " + r.URL.Path, + "request_query": r.URL.RawQuery, + "request_body": string(body), + } + log.Error(err, "error handling request", callerSkip+1, append(additionalInfos, requestFields)...) +} + +func handleErr(w http.ResponseWriter, r *http.Request, err error) { + _, isValidationError := err.(validationError) + switch { + case isValidationError, errors.Is(err, errBadRequest): + returnBadRequest(w, r, err) + case errors.Is(err, dataaccess.ErrNotFound): + returnNotFound(w, r, err) + case errors.Is(err, errUnauthorized): + returnUnauthorized(w, r, err) + case errors.Is(err, errForbidden): + returnForbidden(w, r, err) + case errors.Is(err, errConflict): + returnConflict(w, r, err) + case errors.Is(err, services.ErrWaiting): + returnError(w, r, http.StatusServiceUnavailable, err) + case errors.Is(err, errTooManyRequests): + returnTooManyRequests(w, r, err) + case errors.Is(err, errGone): + returnGone(w, r, err) + case errors.Is(err, context.Canceled): + if r.Context().Err() != context.Canceled { // only return error if the request context was canceled + logApiError(r, err, 1) + returnError(w, r, http.StatusInternalServerError, err) + } + default: + logApiError(r, err, 1) + // TODO: don't return the error message to the user in production + returnError(w, r, http.StatusInternalServerError, err) + } +} + +// -------------------------------------- +// Error Helpers + +func errWithMsg(err error, format string, args ...interface{}) error { + return fmt.Errorf("%w: %s", err, fmt.Sprintf(format, args...)) +} + +//nolint:nolintlint +//nolint:unparam +func newBadRequestErr(format string, args ...interface{}) error { + return errWithMsg(errBadRequest, format, args...) +} + +//nolint:unparam +func newInternalServerErr(format string, args ...interface{}) error { + return errWithMsg(errInternalServer, format, args...) +} + +//nolint:unparam +func newUnauthorizedErr(format string, args ...interface{}) error { + return errWithMsg(errUnauthorized, format, args...) +} + +func newForbiddenErr(format string, args ...interface{}) error { + return errWithMsg(errForbidden, format, args...) +} + +//nolint:unparam +func newConflictErr(format string, args ...interface{}) error { + return errWithMsg(errConflict, format, args...) +} + +//nolint:nolintlint +//nolint:unparam +func newNotFoundErr(format string, args ...interface{}) error { + return errWithMsg(dataaccess.ErrNotFound, format, args...) +} + +func newTooManyRequestsErr(format string, args ...interface{}) error { + return errWithMsg(errTooManyRequests, format, args...) +} + +func newGoneErr(format string, args ...interface{}) error { + return errWithMsg(errGone, format, args...) +} + +// -------------------------------------- +// misc. 
helper functions + +// maps different types of validator dashboard summary validators to a common format +func mapVDBIndices(indices interface{}) ([]types.VDBSummaryValidatorsData, error) { + if indices == nil { + return nil, errors.New("no data found when mapping") + } + + switch v := indices.(type) { + case *types.VDBGeneralSummaryValidators: + // deposited, online, offline, slashing, slashed, exited, withdrawn, pending, exiting, withdrawing + return []types.VDBSummaryValidatorsData{ + mapUintSlice("deposited", v.Deposited), + mapUintSlice("online", v.Online), + mapUintSlice("offline", v.Offline), + mapUintSlice("slashing", v.Slashing), + mapUintSlice("slashed", v.Slashed), + mapUintSlice("exited", v.Exited), + mapUintSlice("withdrawn", v.Withdrawn), + mapIndexTimestampSlice("pending", v.Pending), + mapIndexTimestampSlice("exiting", v.Exiting), + mapIndexTimestampSlice("withdrawing", v.Withdrawing), + }, nil + + case *types.VDBSyncSummaryValidators: + return []types.VDBSummaryValidatorsData{ + mapUintSlice("sync_current", v.Current), + mapUintSlice("sync_upcoming", v.Upcoming), + mapSlice("sync_past", v.Past, + func(v types.VDBValidatorSyncPast) (uint64, []uint64) { return v.Index, []uint64{v.Count} }, + ), + }, nil + + case *types.VDBSlashingsSummaryValidators: + return []types.VDBSummaryValidatorsData{ + mapSlice("got_slashed", v.GotSlashed, + func(v types.VDBValidatorGotSlashed) (uint64, []uint64) { return v.Index, []uint64{v.SlashedBy} }, + ), + mapSlice("has_slashed", v.HasSlashed, + func(v types.VDBValidatorHasSlashed) (uint64, []uint64) { return v.Index, v.SlashedIndices }, + ), + }, nil + + case *types.VDBProposalSummaryValidators: + return []types.VDBSummaryValidatorsData{ + mapIndexBlocksSlice("proposal_proposed", v.Proposed), + mapIndexBlocksSlice("proposal_missed", v.Missed), + }, nil + + default: + return nil, fmt.Errorf("unsupported indices type") + } +} + +// maps different types of validator dashboard summary validators to a common format +func mapSlice[T any](category string, validators []T, getIndexAndDutyObjects func(validator T) (index uint64, dutyObjects []uint64)) types.VDBSummaryValidatorsData { + validatorsData := make([]types.VDBSummaryValidator, len(validators)) + for i, validator := range validators { + index, dutyObjects := getIndexAndDutyObjects(validator) + validatorsData[i] = types.VDBSummaryValidator{Index: index, DutyObjects: dutyObjects} + } + return types.VDBSummaryValidatorsData{ + Category: category, + Validators: validatorsData, + } +} +func mapUintSlice(category string, validators []uint64) types.VDBSummaryValidatorsData { + return mapSlice(category, validators, + func(v uint64) (uint64, []uint64) { return v, nil }, + ) +} + +func mapIndexTimestampSlice(category string, validators []types.IndexTimestamp) types.VDBSummaryValidatorsData { + return mapSlice(category, validators, + func(v types.IndexTimestamp) (uint64, []uint64) { return v.Index, []uint64{v.Timestamp} }, + ) +} + +func mapIndexBlocksSlice(category string, validators []types.IndexBlocks) types.VDBSummaryValidatorsData { + return mapSlice(category, validators, + func(v types.IndexBlocks) (uint64, []uint64) { return v.Index, v.Blocks }, + ) +} + +// -------------------------------------- +// intOrString is a custom type that can be unmarshalled from either an int or a string (strings will also be parsed to int if possible). +// if unmarshaling throws no errors one of the two fields will be set, the other will be nil. 
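+// For example, the JSON values 5 and "5" both populate intValue with 5, while the string "latest" leaves
+// intValue nil and stores "latest" in strValue.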
+type intOrString struct { + intValue *uint64 + strValue *string +} + +func (v *intOrString) UnmarshalJSON(data []byte) error { + // Attempt to unmarshal as uint64 first + var intValue uint64 + if err := json.Unmarshal(data, &intValue); err == nil { + v.intValue = &intValue + return nil + } + + // If unmarshalling as uint64 fails, try to unmarshal as string + var strValue string + if err := json.Unmarshal(data, &strValue); err == nil { + if parsedInt, err := strconv.ParseUint(strValue, 10, 64); err == nil { + v.intValue = &parsedInt + } else { + v.strValue = &strValue + } + return nil + } + + // If both unmarshalling attempts fail, return an error + return fmt.Errorf("failed to unmarshal intOrString from json: %s", string(data)) +} + +func (v intOrString) String() string { + if v.intValue != nil { + return strconv.FormatUint(*v.intValue, 10) + } + if v.strValue != nil { + return *v.strValue + } + return "" +} + +func (intOrString) JSONSchema() *jsonschema.Schema { + return &jsonschema.Schema{ + OneOf: []*jsonschema.Schema{ + {Type: "string"}, {Type: "integer"}, + }, + } +} + +func isMocked(r *http.Request) bool { + isMocked, ok := r.Context().Value(ctxIsMockedKey).(bool) + return ok && isMocked +} diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/input_validation.go similarity index 51% rename from backend/pkg/api/handlers/common.go rename to backend/pkg/api/handlers/input_validation.go index 053426d46..16478449a 100644 --- a/backend/pkg/api/handlers/common.go +++ b/backend/pkg/api/handlers/input_validation.go @@ -3,10 +3,7 @@ package handlers import ( "bytes" "cmp" - "context" - "encoding/base64" "encoding/json" - "errors" "fmt" "io" "net/http" @@ -16,46 +13,14 @@ import ( "strings" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/api/enums" + "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gorilla/mux" "github.com/invopop/jsonschema" "github.com/shopspring/decimal" "github.com/xeipuuv/gojsonschema" - - "github.com/alexedwards/scs/v2" - dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" - "github.com/gobitfly/beaconchain/pkg/api/enums" - "github.com/gobitfly/beaconchain/pkg/api/services" - types "github.com/gobitfly/beaconchain/pkg/api/types" ) -type HandlerService struct { - dai dataaccess.DataAccessor - dummy dataaccess.DataAccessor - scs *scs.SessionManager - isPostMachineMetricsEnabled bool // if more config options are needed, consider having the whole config in here -} - -func NewHandlerService(dataAccessor dataaccess.DataAccessor, dummy dataaccess.DataAccessor, sessionManager *scs.SessionManager, enablePostMachineMetrics bool) *HandlerService { - if allNetworks == nil { - networks, err := dataAccessor.GetAllNetworks() - if err != nil { - log.Fatal(err, "error getting networks for handler", 0, nil) - } - allNetworks = networks - } - - return &HandlerService{ - dai: dataAccessor, - dummy: dummy, - scs: sessionManager, - isPostMachineMetricsEnabled: enablePostMachineMetrics, - } -} - -// all networks available in the system, filled on startup in NewHandlerService -var allNetworks []types.NetworkInfo - // -------------------------------------- var ( @@ -93,23 +58,6 @@ const ( MaxArchivedDashboardsCount = 10 ) -var ( - errMsgParsingId = errors.New("error parsing parameter 'dashboard_id'") - errBadRequest = errors.New("bad request") - errInternalServer = errors.New("internal server error") - errUnauthorized = errors.New("unauthorized") - 
errForbidden = errors.New("forbidden") - errConflict = errors.New("conflict") - errTooManyRequests = errors.New("too many requests") - errGone = errors.New("gone") -) - -type Paging struct { - cursor string - limit uint64 - search string -} - // All changes to common functions MUST NOT break any public handler behavior (not in effect yet) // -------------------------------------- @@ -146,6 +94,8 @@ func (v *validationError) hasErrors() bool { return v != nil && len(*v) > 0 } +// -------------------------------------- + func (v *validationError) checkRegex(regex *regexp.Regexp, param, paramName string) string { if !regex.MatchString(param) { v.add(paramName, fmt.Sprintf(`given value '%s' has incorrect format`, param)) @@ -307,143 +257,10 @@ func (v *validationError) checkAdConfigurationKeys(keysString string) []string { return keys } -type validatorSet struct { - Indexes []types.VDBValidator - PublicKeys []string -} - -// parseDashboardId is a helper function to validate the string dashboard id param. -func parseDashboardId(id string) (interface{}, error) { - var v validationError - if reInteger.MatchString(id) { - // given id is a normal id - id := v.checkUint(id, "dashboard_id") - if v.hasErrors() { - return nil, v - } - return types.VDBIdPrimary(id), nil - } - if reValidatorDashboardPublicId.MatchString(id) { - // given id is a public id - return types.VDBIdPublic(id), nil - } - // given id must be an encoded set of validators - decodedId, err := base64.RawURLEncoding.DecodeString(id) - if err != nil { - return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) - } - indexes, publicKeys := v.checkValidatorList(string(decodedId), forbidEmpty) - if v.hasErrors() { - return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) - } - return validatorSet{Indexes: indexes, PublicKeys: publicKeys}, nil -} - -// getDashboardId is a helper function to convert the dashboard id param to a VDBId. -// precondition: dashboardIdParam must be a valid dashboard id and either a primary id, public id, or list of validators. -func (h *HandlerService) getDashboardId(ctx context.Context, dashboardIdParam interface{}) (*types.VDBId, error) { - switch dashboardId := dashboardIdParam.(type) { - case types.VDBIdPrimary: - return &types.VDBId{Id: dashboardId, Validators: nil}, nil - case types.VDBIdPublic: - dashboardInfo, err := h.dai.GetValidatorDashboardPublicId(ctx, dashboardId) - if err != nil { - return nil, err - } - return &types.VDBId{Id: types.VDBIdPrimary(dashboardInfo.DashboardId), Validators: nil, AggregateGroups: !dashboardInfo.ShareSettings.ShareGroups}, nil - case validatorSet: - validators, err := h.dai.GetValidatorsFromSlices(dashboardId.Indexes, dashboardId.PublicKeys) - if err != nil { - return nil, err - } - if len(validators) == 0 { - return nil, newNotFoundErr("no validators found for given id") - } - if len(validators) > maxValidatorsInList { - return nil, newBadRequestErr("too many validators in list, maximum is %d", maxValidatorsInList) - } - return &types.VDBId{Validators: validators}, nil - } - return nil, errMsgParsingId -} - -// handleDashboardId is a helper function to both validate the dashboard id param and convert it to a VDBId. -// it should be used as the last validation step for all internal dashboard GET-handlers. -// Modifying handlers (POST, PUT, DELETE) should only accept primary dashboard ids and just use checkPrimaryDashboardId. 
-func (h *HandlerService) handleDashboardId(ctx context.Context, param string) (*types.VDBId, error) { - // validate dashboard id param - dashboardIdParam, err := parseDashboardId(param) - if err != nil { - return nil, err - } - // convert to VDBId - dashboardId, err := h.getDashboardId(ctx, dashboardIdParam) - if err != nil { - return nil, err - } - - return dashboardId, nil -} - -const chartDatapointLimit uint64 = 200 - -type ChartTimeDashboardLimits struct { - MinAllowedTs uint64 - LatestExportedTs uint64 - MaxAllowedInterval uint64 -} - -// helper function to retrieve allowed chart timestamp boundaries according to the users premium perks at the current point in time -func (h *HandlerService) getCurrentChartTimeLimitsForDashboard(ctx context.Context, dashboardId *types.VDBId, aggregation enums.ChartAggregation) (ChartTimeDashboardLimits, error) { - limits := ChartTimeDashboardLimits{} - var err error - premiumPerks, err := h.getDashboardPremiumPerks(ctx, *dashboardId) - if err != nil { - return limits, err - } - - maxAge := getMaxChartAge(aggregation, premiumPerks.ChartHistorySeconds) // can be max int for unlimited, always check for underflows - if maxAge == 0 { - return limits, newConflictErr("requested aggregation is not available for dashboard owner's premium subscription") - } - limits.LatestExportedTs, err = h.dai.GetLatestExportedChartTs(ctx, aggregation) - if err != nil { - return limits, err - } - limits.MinAllowedTs = limits.LatestExportedTs - min(maxAge, limits.LatestExportedTs) // min to prevent underflow - secondsPerEpoch := uint64(12 * 32) // TODO: fetch dashboards chain id and use correct value for network once available - limits.MaxAllowedInterval = chartDatapointLimit*uint64(aggregation.Duration(secondsPerEpoch).Seconds()) - 1 // -1 to make sure we don't go over the limit - - return limits, nil -} - func (v *validationError) checkPrimaryDashboardId(param string) types.VDBIdPrimary { return types.VDBIdPrimary(v.checkUint(param, "dashboard_id")) } -// getDashboardPremiumPerks gets the premium perks of the dashboard OWNER or if it's a guest dashboard, it returns free tier premium perks -func (h *HandlerService) getDashboardPremiumPerks(ctx context.Context, id types.VDBId) (*types.PremiumPerks, error) { - // for guest dashboards, return free tier perks - if id.Validators != nil { - perk, err := h.dai.GetFreeTierPerks(ctx) - if err != nil { - return nil, err - } - return perk, nil - } - // could be made into a single query if needed - dashboardUser, err := h.dai.GetValidatorDashboardUser(ctx, id.Id) - if err != nil { - return nil, err - } - userInfo, err := h.dai.GetUserInfo(ctx, dashboardUser.UserId) - if err != nil { - return nil, err - } - - return &userInfo.PremiumPerks, nil -} - // helper function to unify handling of block detail request validation func (h *HandlerService) validateBlockRequest(r *http.Request, paramName string) (uint64, uint64, error) { var v validationError @@ -454,9 +271,9 @@ func (h *HandlerService) validateBlockRequest(r *http.Request, paramName string) // possibly add other values like "genesis", "finalized", hardforks etc. 
later case "latest": if paramName == "block" { - value, err = h.dai.GetLatestBlock() + value, err = h.daService.GetLatestBlock() } else if paramName == "slot" { - value, err = h.dai.GetLatestSlot() + value, err = h.daService.GetLatestSlot() } if err != nil { return 0, 0, err @@ -484,7 +301,6 @@ func (v *validationError) checkExistingGroupId(param string) uint64 { return v.checkUint(param, "group_id") } -//nolint:unparam func splitParameters(params string, delim rune) []string { // This splits the string by delim and removes empty strings f := func(c rune) bool { @@ -531,6 +347,12 @@ func (v *validationError) checkUintMinMax(param string, min uint64, max uint64, return checkMinMax(v, v.checkUint(param, paramName), min, max, paramName) } +type Paging struct { + cursor string + limit uint64 + search string +} + func (v *validationError) checkPagingParams(q url.Values) Paging { paging := Paging{ cursor: q.Get("cursor"), @@ -579,7 +401,7 @@ func checkSort[T enums.EnumFactory[T]](v *validationError, sortString string) *t if sortString == "" { return &types.Sort[T]{Column: c, Desc: defaultDesc} } - sortSplit := strings.Split(sortString, ":") + sortSplit := splitParameters(sortString, ':') if len(sortSplit) > 2 { v.add("sort", fmt.Sprintf("given value '%s' for parameter 'sort' is not valid, expected format is '[:(asc|desc)]'", sortString)) return nil @@ -679,11 +501,7 @@ func (v *validationError) checkNetworkParameter(param string) uint64 { return v.checkNetwork(intOrString{strValue: ¶m}) } -//nolint:unused func (v *validationError) checkNetworksParameter(param string) []uint64 { - if param == "" { - v.add("networks", "list of networks must not be empty") - } var chainIds []uint64 for _, network := range splitParameters(param, ',') { chainIds = append(chainIds, v.checkNetworkParameter(network)) @@ -738,334 +556,3 @@ func (v *validationError) checkTimestamps(r *http.Request, chartLimits ChartTime return afterTs, beforeTs } } - -// getMaxChartAge returns the maximum age of a chart in seconds based on the given aggregation type and premium perks -func getMaxChartAge(aggregation enums.ChartAggregation, perkSeconds types.ChartHistorySeconds) uint64 { - aggregations := enums.ChartAggregations - switch aggregation { - case aggregations.Epoch: - return perkSeconds.Epoch - case aggregations.Hourly: - return perkSeconds.Hourly - case aggregations.Daily: - return perkSeconds.Daily - case aggregations.Weekly: - return perkSeconds.Weekly - default: - return 0 - } -} - -func isUserAdmin(user *types.UserInfo) bool { - if user == nil { - return false - } - return user.UserGroup == types.UserGroupAdmin -} - -// -------------------------------------- -// Response handling - -func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { - w.Header().Set("Content-Type", "application/json") - if response == nil { - w.WriteHeader(statusCode) - return - } - jsonData, err := json.Marshal(response) - if err != nil { - logApiError(r, fmt.Errorf("error encoding json data: %w", err), 0, - log.Fields{ - "data": fmt.Sprintf("%+v", response), - }) - w.WriteHeader(http.StatusInternalServerError) - response = types.ApiErrorResponse{ - Error: "error encoding json data", - } - if err = json.NewEncoder(w).Encode(response); err != nil { - // there seems to be an error with the lib - logApiError(r, fmt.Errorf("error encoding error response after failed encoding: %w", err), 0) - } - return - } - w.WriteHeader(statusCode) - if _, err = w.Write(jsonData); err != nil { - // already returned wrong 
status code to user, can't prevent that - logApiError(r, fmt.Errorf("error writing response data: %w", err), 0) - } -} - -func returnError(w http.ResponseWriter, r *http.Request, code int, err error) { - response := types.ApiErrorResponse{ - Error: err.Error(), - } - writeResponse(w, r, code, response) -} - -func returnOk(w http.ResponseWriter, r *http.Request, data interface{}) { - writeResponse(w, r, http.StatusOK, data) -} - -func returnCreated(w http.ResponseWriter, r *http.Request, data interface{}) { - writeResponse(w, r, http.StatusCreated, data) -} - -func returnNoContent(w http.ResponseWriter, r *http.Request) { - writeResponse(w, r, http.StatusNoContent, nil) -} - -// Errors - -func returnBadRequest(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusBadRequest, err) -} - -func returnUnauthorized(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusUnauthorized, err) -} - -func returnNotFound(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusNotFound, err) -} - -func returnConflict(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusConflict, err) -} - -func returnForbidden(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusForbidden, err) -} - -func returnTooManyRequests(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusTooManyRequests, err) -} - -func returnGone(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusGone, err) -} - -const maxBodySize = 10 * 1024 - -func logApiError(r *http.Request, err error, callerSkip int, additionalInfos ...log.Fields) { - body, _ := io.ReadAll(io.LimitReader(r.Body, maxBodySize)) - requestFields := log.Fields{ - "request_endpoint": r.Method + " " + r.URL.Path, - "request_query": r.URL.RawQuery, - "request_body": string(body), - } - log.Error(err, "error handling request", callerSkip+1, append(additionalInfos, requestFields)...) -} - -func handleErr(w http.ResponseWriter, r *http.Request, err error) { - _, isValidationError := err.(validationError) - switch { - case isValidationError, errors.Is(err, errBadRequest): - returnBadRequest(w, r, err) - case errors.Is(err, dataaccess.ErrNotFound): - returnNotFound(w, r, err) - case errors.Is(err, errUnauthorized): - returnUnauthorized(w, r, err) - case errors.Is(err, errForbidden): - returnForbidden(w, r, err) - case errors.Is(err, errConflict): - returnConflict(w, r, err) - case errors.Is(err, services.ErrWaiting): - returnError(w, r, http.StatusServiceUnavailable, err) - case errors.Is(err, errTooManyRequests): - returnTooManyRequests(w, r, err) - case errors.Is(err, errGone): - returnGone(w, r, err) - default: - logApiError(r, err, 1) - // TODO: don't return the error message to the user in production - returnError(w, r, http.StatusInternalServerError, err) - } -} - -// -------------------------------------- -// Error Helpers - -func errWithMsg(err error, format string, args ...interface{}) error { - return fmt.Errorf("%w: %s", err, fmt.Sprintf(format, args...)) -} - -//nolint:nolintlint -//nolint:unparam -func newBadRequestErr(format string, args ...interface{}) error { - return errWithMsg(errBadRequest, format, args...) -} - -//nolint:unparam -func newInternalServerErr(format string, args ...interface{}) error { - return errWithMsg(errInternalServer, format, args...) 
-} - -//nolint:unparam -func newUnauthorizedErr(format string, args ...interface{}) error { - return errWithMsg(errUnauthorized, format, args...) -} - -func newForbiddenErr(format string, args ...interface{}) error { - return errWithMsg(errForbidden, format, args...) -} - -//nolint:unparam -func newConflictErr(format string, args ...interface{}) error { - return errWithMsg(errConflict, format, args...) -} - -//nolint:nolintlint -//nolint:unparam -func newNotFoundErr(format string, args ...interface{}) error { - return errWithMsg(dataaccess.ErrNotFound, format, args...) -} - -func newTooManyRequestsErr(format string, args ...interface{}) error { - return errWithMsg(errTooManyRequests, format, args...) -} - -func newGoneErr(format string, args ...interface{}) error { - return errWithMsg(errGone, format, args...) -} - -// -------------------------------------- -// misc. helper functions - -// maps different types of validator dashboard summary validators to a common format -func mapVDBIndices(indices interface{}) ([]types.VDBSummaryValidatorsData, error) { - if indices == nil { - return nil, errors.New("no data found when mapping") - } - - switch v := indices.(type) { - case *types.VDBGeneralSummaryValidators: - // deposited, online, offline, slashing, slashed, exited, withdrawn, pending, exiting, withdrawing - return []types.VDBSummaryValidatorsData{ - mapUintSlice("deposited", v.Deposited), - mapUintSlice("online", v.Online), - mapUintSlice("offline", v.Offline), - mapUintSlice("slashing", v.Slashing), - mapUintSlice("slashed", v.Slashed), - mapUintSlice("exited", v.Exited), - mapUintSlice("withdrawn", v.Withdrawn), - mapIndexTimestampSlice("pending", v.Pending), - mapIndexTimestampSlice("exiting", v.Exiting), - mapIndexTimestampSlice("withdrawing", v.Withdrawing), - }, nil - - case *types.VDBSyncSummaryValidators: - return []types.VDBSummaryValidatorsData{ - mapUintSlice("sync_current", v.Current), - mapUintSlice("sync_upcoming", v.Upcoming), - mapSlice("sync_past", v.Past, - func(v types.VDBValidatorSyncPast) (uint64, []uint64) { return v.Index, []uint64{v.Count} }, - ), - }, nil - - case *types.VDBSlashingsSummaryValidators: - return []types.VDBSummaryValidatorsData{ - mapSlice("got_slashed", v.GotSlashed, - func(v types.VDBValidatorGotSlashed) (uint64, []uint64) { return v.Index, []uint64{v.SlashedBy} }, - ), - mapSlice("has_slashed", v.HasSlashed, - func(v types.VDBValidatorHasSlashed) (uint64, []uint64) { return v.Index, v.SlashedIndices }, - ), - }, nil - - case *types.VDBProposalSummaryValidators: - return []types.VDBSummaryValidatorsData{ - mapIndexBlocksSlice("proposal_proposed", v.Proposed), - mapIndexBlocksSlice("proposal_missed", v.Missed), - }, nil - - default: - return nil, fmt.Errorf("unsupported indices type") - } -} - -// maps different types of validator dashboard summary validators to a common format -func mapSlice[T any](category string, validators []T, getIndexAndDutyObjects func(validator T) (index uint64, dutyObjects []uint64)) types.VDBSummaryValidatorsData { - validatorsData := make([]types.VDBSummaryValidator, len(validators)) - for i, validator := range validators { - index, dutyObjects := getIndexAndDutyObjects(validator) - validatorsData[i] = types.VDBSummaryValidator{Index: index, DutyObjects: dutyObjects} - } - return types.VDBSummaryValidatorsData{ - Category: category, - Validators: validatorsData, - } -} -func mapUintSlice(category string, validators []uint64) types.VDBSummaryValidatorsData { - return mapSlice(category, validators, - func(v uint64) 
(uint64, []uint64) { return v, nil }, - ) -} - -func mapIndexTimestampSlice(category string, validators []types.IndexTimestamp) types.VDBSummaryValidatorsData { - return mapSlice(category, validators, - func(v types.IndexTimestamp) (uint64, []uint64) { return v.Index, []uint64{v.Timestamp} }, - ) -} - -func mapIndexBlocksSlice(category string, validators []types.IndexBlocks) types.VDBSummaryValidatorsData { - return mapSlice(category, validators, - func(v types.IndexBlocks) (uint64, []uint64) { return v.Index, v.Blocks }, - ) -} - -// -------------------------------------- -// intOrString is a custom type that can be unmarshalled from either an int or a string (strings will also be parsed to int if possible). -// if unmarshaling throws no errors one of the two fields will be set, the other will be nil. -type intOrString struct { - intValue *uint64 - strValue *string -} - -func (v *intOrString) UnmarshalJSON(data []byte) error { - // Attempt to unmarshal as uint64 first - var intValue uint64 - if err := json.Unmarshal(data, &intValue); err == nil { - v.intValue = &intValue - return nil - } - - // If unmarshalling as uint64 fails, try to unmarshal as string - var strValue string - if err := json.Unmarshal(data, &strValue); err == nil { - if parsedInt, err := strconv.ParseUint(strValue, 10, 64); err == nil { - v.intValue = &parsedInt - } else { - v.strValue = &strValue - } - return nil - } - - // If both unmarshalling attempts fail, return an error - return fmt.Errorf("failed to unmarshal intOrString from json: %s", string(data)) -} - -func (v intOrString) String() string { - if v.intValue != nil { - return strconv.FormatUint(*v.intValue, 10) - } - if v.strValue != nil { - return *v.strValue - } - return "" -} - -func (intOrString) JSONSchema() *jsonschema.Schema { - return &jsonschema.Schema{ - OneOf: []*jsonschema.Schema{ - {Type: "string"}, {Type: "integer"}, - }, - } -} - -func isMockEnabled(r *http.Request) bool { - isMockEnabled, ok := r.Context().Value(ctxIsMockEnabledKey).(bool) - if !ok { - return false - } - return isMockEnabled -} diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index 41f549b8f..3093352f9 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -14,7 +14,7 @@ import ( // Premium Plans func (h *HandlerService) InternalGetProductSummary(w http.ResponseWriter, r *http.Request) { - data, err := h.dai.GetProductSummary(r.Context()) + data, err := h.daService.GetProductSummary(r.Context()) if err != nil { handleErr(w, r, err) return @@ -29,7 +29,7 @@ func (h *HandlerService) InternalGetProductSummary(w http.ResponseWriter, r *htt // API Ratelimit Weights func (h *HandlerService) InternalGetRatelimitWeights(w http.ResponseWriter, r *http.Request) { - data, err := h.dai.GetApiWeights(r.Context()) + data, err := h.daService.GetApiWeights(r.Context()) if err != nil { handleErr(w, r, err) return @@ -44,19 +44,19 @@ func (h *HandlerService) InternalGetRatelimitWeights(w http.ResponseWriter, r *h // Latest State func (h *HandlerService) InternalGetLatestState(w http.ResponseWriter, r *http.Request) { - latestSlot, err := h.dai.GetLatestSlot() + latestSlot, err := h.daService.GetLatestSlot() if err != nil { handleErr(w, r, err) return } - finalizedEpoch, err := h.dai.GetLatestFinalizedEpoch() + finalizedEpoch, err := h.daService.GetLatestFinalizedEpoch() if err != nil { handleErr(w, r, err) return } - exchangeRates, err := h.dai.GetLatestExchangeRates() + exchangeRates, err := 
h.daService.GetLatestExchangeRates() if err != nil { handleErr(w, r, err) return @@ -74,7 +74,7 @@ func (h *HandlerService) InternalGetLatestState(w http.ResponseWriter, r *http.R } func (h *HandlerService) InternalGetRocketPool(w http.ResponseWriter, r *http.Request) { - data, err := h.dai.GetRocketPoolOverview(r.Context()) + data, err := h.daService.GetRocketPoolOverview(r.Context()) if err != nil { handleErr(w, r, err) return @@ -126,7 +126,7 @@ func (h *HandlerService) InternalPostAdConfigurations(w http.ResponseWriter, r * return } - err = h.dai.CreateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) + err = h.daService.CreateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) if err != nil { handleErr(w, r, err) return @@ -156,7 +156,7 @@ func (h *HandlerService) InternalGetAdConfigurations(w http.ResponseWriter, r *h return } - data, err := h.dai.GetAdConfigurations(r.Context(), keys) + data, err := h.daService.GetAdConfigurations(r.Context(), keys) if err != nil { handleErr(w, r, err) return @@ -202,7 +202,7 @@ func (h *HandlerService) InternalPutAdConfiguration(w http.ResponseWriter, r *ht return } - err = h.dai.UpdateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) + err = h.daService.UpdateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) if err != nil { handleErr(w, r, err) return @@ -232,7 +232,7 @@ func (h *HandlerService) InternalDeleteAdConfiguration(w http.ResponseWriter, r return } - err = h.dai.RemoveAdConfiguration(r.Context(), key) + err = h.daService.RemoveAdConfiguration(r.Context(), key) if err != nil { handleErr(w, r, err) return @@ -251,7 +251,7 @@ func (h *HandlerService) InternalGetUserInfo(w http.ResponseWriter, r *http.Requ handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(r.Context(), user.Id) + userInfo, err := h.daService.GetUserInfo(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -383,7 +383,7 @@ func (h *HandlerService) InternalGetValidatorDashboardMobileValidators(w http.Re handleErr(w, r, v) return } - data, paging, err := h.dai.GetValidatorDashboardMobileValidators(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.daService.GetValidatorDashboardMobileValidators(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -512,7 +512,7 @@ func (h *HandlerService) InternalGetValidatorDashboardMobileWidget(w http.Respon handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.daService.GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -521,7 +521,7 @@ func (h *HandlerService) InternalGetValidatorDashboardMobileWidget(w http.Respon returnForbidden(w, r, errors.New("user does not have access to mobile app widget")) return } - data, err := h.dai.GetValidatorDashboardMobileWidget(r.Context(), dashboardId) + data, err := h.daService.GetValidatorDashboardMobileWidget(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -545,7 +545,7 @@ func (h *HandlerService) 
InternalGetMobileLatestBundle(w http.ResponseWriter, r handleErr(w, r, v) return } - stats, err := h.dai.GetLatestBundleForNativeVersion(r.Context(), nativeVersion) + stats, err := h.daService.GetLatestBundleForNativeVersion(r.Context(), nativeVersion) if err != nil { handleErr(w, r, err) return @@ -570,7 +570,7 @@ func (h *HandlerService) InternalPostMobileBundleDeliveries(w http.ResponseWrite handleErr(w, r, v) return } - err := h.dai.IncrementBundleDeliveryCount(r.Context(), bundleVersion) + err := h.daService.IncrementBundleDeliveryCount(r.Context(), bundleVersion) if err != nil { handleErr(w, r, err) return @@ -671,7 +671,7 @@ func (h *HandlerService) InternalGetBlock(w http.ResponseWriter, r *http.Request return } - data, err := h.dai.GetBlock(r.Context(), chainId, block) + data, err := h.daService.GetBlock(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -690,7 +690,7 @@ func (h *HandlerService) InternalGetBlockOverview(w http.ResponseWriter, r *http return } - data, err := h.dai.GetBlockOverview(r.Context(), chainId, block) + data, err := h.daService.GetBlockOverview(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -708,7 +708,7 @@ func (h *HandlerService) InternalGetBlockTransactions(w http.ResponseWriter, r * return } - data, err := h.dai.GetBlockTransactions(r.Context(), chainId, block) + data, err := h.daService.GetBlockTransactions(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -727,7 +727,7 @@ func (h *HandlerService) InternalGetBlockVotes(w http.ResponseWriter, r *http.Re return } - data, err := h.dai.GetBlockVotes(r.Context(), chainId, block) + data, err := h.daService.GetBlockVotes(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -746,7 +746,7 @@ func (h *HandlerService) InternalGetBlockAttestations(w http.ResponseWriter, r * return } - data, err := h.dai.GetBlockAttestations(r.Context(), chainId, block) + data, err := h.daService.GetBlockAttestations(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -765,7 +765,7 @@ func (h *HandlerService) InternalGetBlockWithdrawals(w http.ResponseWriter, r *h return } - data, err := h.dai.GetBlockWithdrawals(r.Context(), chainId, block) + data, err := h.daService.GetBlockWithdrawals(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -784,7 +784,7 @@ func (h *HandlerService) InternalGetBlockBlsChanges(w http.ResponseWriter, r *ht return } - data, err := h.dai.GetBlockBlsChanges(r.Context(), chainId, block) + data, err := h.daService.GetBlockBlsChanges(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -803,7 +803,7 @@ func (h *HandlerService) InternalGetBlockVoluntaryExits(w http.ResponseWriter, r return } - data, err := h.dai.GetBlockVoluntaryExits(r.Context(), chainId, block) + data, err := h.daService.GetBlockVoluntaryExits(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -822,7 +822,7 @@ func (h *HandlerService) InternalGetBlockBlobs(w http.ResponseWriter, r *http.Re return } - data, err := h.dai.GetBlockBlobs(r.Context(), chainId, block) + data, err := h.daService.GetBlockBlobs(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -844,7 +844,7 @@ func (h *HandlerService) InternalGetSlot(w http.ResponseWriter, r *http.Request) return } - data, err := h.dai.GetSlot(r.Context(), chainId, block) + data, err := h.daService.GetSlot(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ 
-863,7 +863,7 @@ func (h *HandlerService) InternalGetSlotOverview(w http.ResponseWriter, r *http. return } - data, err := h.dai.GetSlotOverview(r.Context(), chainId, block) + data, err := h.daService.GetSlotOverview(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -881,7 +881,7 @@ func (h *HandlerService) InternalGetSlotTransactions(w http.ResponseWriter, r *h return } - data, err := h.dai.GetSlotTransactions(r.Context(), chainId, block) + data, err := h.daService.GetSlotTransactions(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -900,7 +900,7 @@ func (h *HandlerService) InternalGetSlotVotes(w http.ResponseWriter, r *http.Req return } - data, err := h.dai.GetSlotVotes(r.Context(), chainId, block) + data, err := h.daService.GetSlotVotes(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -919,7 +919,7 @@ func (h *HandlerService) InternalGetSlotAttestations(w http.ResponseWriter, r *h return } - data, err := h.dai.GetSlotAttestations(r.Context(), chainId, block) + data, err := h.daService.GetSlotAttestations(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -938,7 +938,7 @@ func (h *HandlerService) InternalGetSlotWithdrawals(w http.ResponseWriter, r *ht return } - data, err := h.dai.GetSlotWithdrawals(r.Context(), chainId, block) + data, err := h.daService.GetSlotWithdrawals(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -957,7 +957,7 @@ func (h *HandlerService) InternalGetSlotBlsChanges(w http.ResponseWriter, r *htt return } - data, err := h.dai.GetSlotBlsChanges(r.Context(), chainId, block) + data, err := h.daService.GetSlotBlsChanges(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -976,7 +976,7 @@ func (h *HandlerService) InternalGetSlotVoluntaryExits(w http.ResponseWriter, r return } - data, err := h.dai.GetSlotVoluntaryExits(r.Context(), chainId, block) + data, err := h.daService.GetSlotVoluntaryExits(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -995,7 +995,7 @@ func (h *HandlerService) InternalGetSlotBlobs(w http.ResponseWriter, r *http.Req return } - data, err := h.dai.GetSlotBlobs(r.Context(), chainId, block) + data, err := h.daService.GetSlotBlobs(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return diff --git a/backend/pkg/api/handlers/machine_metrics.go b/backend/pkg/api/handlers/machine_metrics.go index 9ac1dd0cd..9bef843d5 100644 --- a/backend/pkg/api/handlers/machine_metrics.go +++ b/backend/pkg/api/handlers/machine_metrics.go @@ -30,7 +30,7 @@ func (h *HandlerService) PublicGetUserMachineMetrics(w http.ResponseWriter, r *h return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.daService.GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -51,7 +51,7 @@ func (h *HandlerService) PublicGetUserMachineMetrics(w http.ResponseWriter, r *h offset = 0 } - data, err := h.dai.GetUserMachineMetrics(r.Context(), userId, int(limit), int(offset)) + data, err := h.daService.GetUserMachineMetrics(r.Context(), userId, int(limit), int(offset)) if err != nil { handleErr(w, r, err) return @@ -77,13 +77,13 @@ func (h *HandlerService) LegacyPostUserMachineMetrics(w http.ResponseWriter, r * return } - userID, err := h.dai.GetUserIdByApiKey(r.Context(), apiKey) + userID, err := h.daService.GetUserIdByApiKey(r.Context(), apiKey) if err != nil { returnBadRequest(w, r, fmt.Errorf("no user found with api key")) return } - userInfo, err 
:= h.dai.GetUserInfo(r.Context(), userID) + userInfo, err := h.daService.GetUserInfo(r.Context(), userID) if err != nil { handleErr(w, r, err) return @@ -200,7 +200,7 @@ func (h *HandlerService) internal_processMachine(context context.Context, machin } } - return h.dai.PostUserMachineMetrics(context, userInfo.Id, machine, parsedMeta.Process, data) + return h.daService.PostUserMachineMetrics(context, userInfo.Id, machine, parsedMeta.Process, data) } func DecodeMapStructure(input interface{}, output interface{}) error { diff --git a/backend/pkg/api/handlers/middlewares.go b/backend/pkg/api/handlers/middlewares.go new file mode 100644 index 000000000..54238cc59 --- /dev/null +++ b/backend/pkg/api/handlers/middlewares.go @@ -0,0 +1,200 @@ +package handlers + +import ( + "context" + "errors" + "net/http" + "slices" + "strconv" + + "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gorilla/mux" +) + +// Middlewares + +// middleware that stores user id in context, using the provided function +func StoreUserIdMiddleware(next http.Handler, userIdFunc func(r *http.Request) (uint64, error)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + userId, err := userIdFunc(r) + if err != nil { + if errors.Is(err, errUnauthorized) { + // if next handler requires authentication, it should return 'unauthorized' itself + next.ServeHTTP(w, r) + } else { + handleErr(w, r, err) + } + return + } + + // store user id in context + ctx := r.Context() + ctx = context.WithValue(ctx, ctxUserIdKey, userId) + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + }) +} + +// middleware that stores user id in context, using the session to get the user id +func (h *HandlerService) StoreUserIdBySessionMiddleware(next http.Handler) http.Handler { + return StoreUserIdMiddleware(next, func(r *http.Request) (uint64, error) { + return h.GetUserIdBySession(r) + }) +} + +// middleware that stores user id in context, using the api key to get the user id +func (h *HandlerService) StoreUserIdByApiKeyMiddleware(next http.Handler) http.Handler { + return StoreUserIdMiddleware(next, func(r *http.Request) (uint64, error) { + return h.GetUserIdByApiKey(r) + }) +} + +// middleware that checks if user has access to dashboard when a primary id is used +func (h *HandlerService) VDBAuthMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // if mock data is used, no need to check access + if isMockEnabled, ok := r.Context().Value(ctxIsMockedKey).(bool); ok && isMockEnabled { + next.ServeHTTP(w, r) + return + } + var err error + dashboardId, err := strconv.ParseUint(mux.Vars(r)["dashboard_id"], 10, 64) + if err != nil { + // if primary id is not used, no need to check access + next.ServeHTTP(w, r) + return + } + // primary id is used -> user needs to have access to dashboard + + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + + // store user id in context + ctx := r.Context() + ctx = context.WithValue(ctx, ctxUserIdKey, userId) + r = r.WithContext(ctx) + + dashboardUser, err := h.daService.GetValidatorDashboardUser(r.Context(), types.VDBIdPrimary(dashboardId)) + if err != nil { + handleErr(w, r, err) + return + } + + if dashboardUser.UserId != userId { + // user does not have access to dashboard + // the proper error would be 403 Forbidden, but we don't want to leak information so we return 404 Not Found + handleErr(w, r, newNotFoundErr("dashboard with id %v not found", dashboardId)) + return + 
} + + next.ServeHTTP(w, r) + }) +} + +// Common middleware logic for checking user premium perks +func (h *HandlerService) PremiumPerkCheckMiddleware(next http.Handler, hasRequiredPerk func(premiumPerks types.PremiumPerks) bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // get user id from context + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + + // get user info + userInfo, err := h.daService.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + + // check if user has the required premium perk + if !hasRequiredPerk(userInfo.PremiumPerks) { + handleErr(w, r, newForbiddenErr("users premium perks do not allow usage of this endpoint")) + return + } + + next.ServeHTTP(w, r) + }) +} + +// Middleware for managing dashboards via API +func (h *HandlerService) ManageDashboardsViaApiCheckMiddleware(next http.Handler) http.Handler { + return h.PremiumPerkCheckMiddleware(next, func(premiumPerks types.PremiumPerks) bool { + return premiumPerks.ManageDashboardViaApi + }) +} + +// Middleware for managing notifications via API +func (h *HandlerService) ManageNotificationsViaApiCheckMiddleware(next http.Handler) http.Handler { + return h.PremiumPerkCheckMiddleware(next, func(premiumPerks types.PremiumPerks) bool { + return premiumPerks.ConfigureNotificationsViaApi + }) +} + +// middleware check to return if specified dashboard is not archived (and accessible) +func (h *HandlerService) VDBArchivedCheckMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if isMockEnabled, ok := r.Context().Value(ctxIsMockedKey).(bool); ok && isMockEnabled { + next.ServeHTTP(w, r) + return + } + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + if len(dashboardId.Validators) > 0 { + next.ServeHTTP(w, r) + return + } + dashboard, err := h.daService.GetValidatorDashboardInfo(r.Context(), dashboardId.Id) + if err != nil { + handleErr(w, r, err) + return + } + if dashboard.IsArchived { + handleErr(w, r, newForbiddenErr("dashboard with id %v is archived", dashboardId)) + return + } + next.ServeHTTP(w, r) + }) +} + +// middleware that checks for `is_mocked` query param and stores it in the request context. +// should bypass auth checks if the flag is set and cause handlers to return mocked data. +// only allowed for users in the admin or dev group. +// note that mocked data is only returned by handlers that check for it. 
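Wiring of these middlewares is not shown in this diff; as a rough sketch of how they might be attached to a gorilla/mux subrouter (path prefix, function name, and middleware ordering are assumptions; the is_mocked middleware commented above is defined right after this sketch):

// hypothetical route registration; only the middleware methods, PublicGetValidatorDashboard
// and the "dashboard_id" variable name come from this diff, the rest is illustrative
func (h *HandlerService) exampleRegisterDashboardRoutes(router *mux.Router) {
	vdbRouter := router.PathPrefix("/validator-dashboards/{dashboard_id}").Subrouter()
	vdbRouter.Use(
		h.StoreUserIdBySessionMiddleware, // resolve user id from the session, if any
		h.StoreIsMockedFlagMiddleware,    // admin/dev only: set the is_mocked context flag
		h.VDBAuthMiddleware,              // primary-id dashboards must belong to the user
		h.VDBArchivedCheckMiddleware,     // reject archived dashboards
	)
	vdbRouter.HandleFunc("", h.PublicGetValidatorDashboard).Methods(http.MethodGet)
}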
+func (h *HandlerService) StoreIsMockedFlagMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + isMocked, _ := strconv.ParseBool(r.URL.Query().Get("is_mocked")) + if !isMocked { + next.ServeHTTP(w, r) + return + } + // fetch user group + userId, err := h.GetUserIdBySession(r) + if err != nil { + handleErr(w, r, err) + return + } + userCredentials, err := h.daService.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + allowedGroups := []string{types.UserGroupAdmin, types.UserGroupDev} + if !slices.Contains(allowedGroups, userCredentials.UserGroup) { + handleErr(w, r, newForbiddenErr("user is not allowed to use mock data")) + return + } + // store isMocked flag in context + ctx := r.Context() + ctx = context.WithValue(ctx, ctxIsMockedKey, true) + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + }) +} diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index 53b84e270..98d1b8b55 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -47,7 +47,7 @@ func (h *HandlerService) PublicGetHealthz(w http.ResponseWriter, r *http.Request } ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) defer cancel() - data := h.dai.GetHealthz(ctx, showAll) + data := h.getDataAccessor(r).GetHealthz(ctx, showAll) responseCode := http.StatusOK if data.TotalOkPercentage != 1 { @@ -74,7 +74,7 @@ func (h *HandlerService) PublicGetUserDashboards(w http.ResponseWriter, r *http. handleErr(w, r, err) return } - data, err := h.dai.GetUserDashboards(r.Context(), userId) + data, err := h.getDataAccessor(r).GetUserDashboards(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -177,12 +177,12 @@ func (h *HandlerService) PublicPostValidatorDashboards(w http.ResponseWriter, r return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - dashboardCount, err := h.dai.GetUserValidatorDashboardCount(r.Context(), userId, true) + dashboardCount, err := h.getDataAccessor(r).GetUserValidatorDashboardCount(r.Context(), userId, true) if err != nil { handleErr(w, r, err) return @@ -192,7 +192,7 @@ func (h *HandlerService) PublicPostValidatorDashboards(w http.ResponseWriter, r return } - data, err := h.dai.CreateValidatorDashboard(r.Context(), userId, name, chainId) + data, err := h.getDataAccessor(r).CreateValidatorDashboard(r.Context(), userId, name, chainId) if err != nil { handleErr(w, r, err) return @@ -232,10 +232,10 @@ func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *h // set name depending on dashboard id var name string if reInteger.MatchString(dashboardIdParam) { - name, err = h.dai.GetValidatorDashboardName(r.Context(), dashboardId.Id) + name, err = h.getDataAccessor(r).GetValidatorDashboardName(r.Context(), dashboardId.Id) } else if reValidatorDashboardPublicId.MatchString(dashboardIdParam) { var publicIdInfo *types.VDBPublicId - publicIdInfo, err = h.dai.GetValidatorDashboardPublicId(r.Context(), types.VDBIdPublic(dashboardIdParam)) + publicIdInfo, err = h.getDataAccessor(r).GetValidatorDashboardPublicId(r.Context(), types.VDBIdPublic(dashboardIdParam)) name = publicIdInfo.Name } if err != nil { @@ -249,7 +249,7 @@ func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *h handleErr(w, r, err) return } - data, err := 
h.dai.GetValidatorDashboardOverview(r.Context(), *dashboardId, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardOverview(r.Context(), *dashboardId, protocolModes) if err != nil { handleErr(w, r, err) return @@ -281,7 +281,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboard(w http.ResponseWriter, r handleErr(w, r, v) return } - err := h.dai.RemoveValidatorDashboard(r.Context(), dashboardId) + err := h.getDataAccessor(r).RemoveValidatorDashboard(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -317,7 +317,7 @@ func (h *HandlerService) PublicPutValidatorDashboardName(w http.ResponseWriter, handleErr(w, r, v) return } - data, err := h.dai.UpdateValidatorDashboardName(r.Context(), dashboardId, name) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardName(r.Context(), dashboardId, name) if err != nil { handleErr(w, r, err) return @@ -364,12 +364,12 @@ func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWrite handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(ctx, userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(ctx, userId) if err != nil { handleErr(w, r, err) return } - groupCount, err := h.dai.GetValidatorDashboardGroupCount(ctx, dashboardId) + groupCount, err := h.getDataAccessor(r).GetValidatorDashboardGroupCount(ctx, dashboardId) if err != nil { handleErr(w, r, err) return @@ -379,7 +379,7 @@ func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWrite return } - data, err := h.dai.CreateValidatorDashboardGroup(ctx, dashboardId, name) + data, err := h.getDataAccessor(r).CreateValidatorDashboardGroup(ctx, dashboardId, name) if err != nil { handleErr(w, r, err) return @@ -423,7 +423,7 @@ func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter handleErr(w, r, v) return } - groupExists, err := h.dai.GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) + groupExists, err := h.getDataAccessor(r).GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -432,7 +432,7 @@ func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter returnNotFound(w, r, errors.New("group not found")) return } - data, err := h.dai.UpdateValidatorDashboardGroup(r.Context(), dashboardId, groupId, name) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardGroup(r.Context(), dashboardId, groupId, name) if err != nil { handleErr(w, r, err) return @@ -470,7 +470,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWrit returnBadRequest(w, r, errors.New("cannot delete default group")) return } - groupExists, err := h.dai.GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) + groupExists, err := h.getDataAccessor(r).GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -479,7 +479,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWrit returnNotFound(w, r, errors.New("group not found")) return } - err = h.dai.RemoveValidatorDashboardGroup(r.Context(), dashboardId, groupId) + err = h.getDataAccessor(r).RemoveValidatorDashboardGroup(r.Context(), dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -544,7 +544,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW } ctx := r.Context() - groupExists, err := h.dai.GetValidatorDashboardGroupExists(ctx, dashboardId, groupId) + groupExists, err := 
h.getDataAccessor(r).GetValidatorDashboardGroupExists(ctx, dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -558,7 +558,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(ctx, userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(ctx, userId) if err != nil { handleErr(w, r, err) return @@ -568,7 +568,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW return } dashboardLimit := userInfo.PremiumPerks.ValidatorsPerDashboard - existingValidatorCount, err := h.dai.GetValidatorDashboardValidatorsCount(ctx, dashboardId) + existingValidatorCount, err := h.getDataAccessor(r).GetValidatorDashboardValidatorsCount(ctx, dashboardId) if err != nil { handleErr(w, r, err) return @@ -589,7 +589,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - validators, err := h.dai.GetValidatorsFromSlices(indices, pubkeys) + validators, err := h.getDataAccessor(r).GetValidatorsFromSlices(indices, pubkeys) if err != nil { handleErr(w, r, err) return @@ -597,7 +597,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW if len(validators) > int(limit) { validators = validators[:limit] } - data, dataErr = h.dai.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validators) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidators(ctx, dashboardId, groupId, validators) case req.DepositAddress != "": depositAddress := v.checkRegex(reEthereumAddress, req.DepositAddress, "deposit_address") @@ -605,7 +605,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByDepositAddress(ctx, dashboardId, groupId, depositAddress, limit) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidatorsByDepositAddress(ctx, dashboardId, groupId, depositAddress, limit) case req.WithdrawalAddress != "": withdrawalAddress := v.checkRegex(reWithdrawalCredential, req.WithdrawalAddress, "withdrawal_address") @@ -613,7 +613,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByWithdrawalAddress(ctx, dashboardId, groupId, withdrawalAddress, limit) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidatorsByWithdrawalAddress(ctx, dashboardId, groupId, withdrawalAddress, limit) case req.Graffiti != "": graffiti := v.checkRegex(reGraffiti, req.Graffiti, "graffiti") @@ -621,7 +621,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByGraffiti(ctx, dashboardId, groupId, graffiti, limit) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidatorsByGraffiti(ctx, dashboardId, groupId, graffiti, limit) } if dataErr != nil { @@ -663,7 +663,7 @@ func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWr handleErr(w, r, v) return } - data, paging, err := h.dai.GetValidatorDashboardValidators(r.Context(), *dashboardId, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardValidators(r.Context(), *dashboardId, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { 
handleErr(w, r, err) return @@ -703,12 +703,12 @@ func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.Respons handleErr(w, r, v) return } - validators, err := h.dai.GetValidatorsFromSlices(indices, publicKeys) + validators, err := h.getDataAccessor(r).GetValidatorsFromSlices(indices, publicKeys) if err != nil { handleErr(w, r, err) return } - err = h.dai.RemoveValidatorDashboardValidators(r.Context(), dashboardId, validators) + err = h.getDataAccessor(r).RemoveValidatorDashboardValidators(r.Context(), dashboardId, validators) if err != nil { handleErr(w, r, err) return @@ -749,7 +749,7 @@ func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWr handleErr(w, r, v) return } - publicIdCount, err := h.dai.GetValidatorDashboardPublicIdCount(r.Context(), dashboardId) + publicIdCount, err := h.getDataAccessor(r).GetValidatorDashboardPublicIdCount(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -759,7 +759,7 @@ func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWr return } - data, err := h.dai.CreateValidatorDashboardPublicId(r.Context(), dashboardId, name, req.ShareSettings.ShareGroups) + data, err := h.getDataAccessor(r).CreateValidatorDashboardPublicId(r.Context(), dashboardId, name, req.ShareSettings.ShareGroups) if err != nil { handleErr(w, r, err) return @@ -805,7 +805,7 @@ func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWrit handleErr(w, r, v) return } - fetchedId, err := h.dai.GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) + fetchedId, err := h.getDataAccessor(r).GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) if err != nil { handleErr(w, r, err) return @@ -815,7 +815,7 @@ func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWrit return } - data, err := h.dai.UpdateValidatorDashboardPublicId(r.Context(), publicDashboardId, name, req.ShareSettings.ShareGroups) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardPublicId(r.Context(), publicDashboardId, name, req.ShareSettings.ShareGroups) if err != nil { handleErr(w, r, err) return @@ -847,7 +847,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW handleErr(w, r, v) return } - fetchedId, err := h.dai.GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) + fetchedId, err := h.getDataAccessor(r).GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) if err != nil { handleErr(w, r, err) return @@ -857,7 +857,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW return } - err = h.dai.RemoveValidatorDashboardPublicId(r.Context(), publicDashboardId) + err = h.getDataAccessor(r).RemoveValidatorDashboardPublicId(r.Context(), publicDashboardId) if err != nil { handleErr(w, r, err) return @@ -896,7 +896,7 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri } // check conditions for changing archival status - dashboardInfo, err := h.dai.GetValidatorDashboardInfo(r.Context(), dashboardId) + dashboardInfo, err := h.getDataAccessor(r).GetValidatorDashboardInfo(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -914,13 +914,13 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri handleErr(w, r, err) return } - dashboardCount, err := h.dai.GetUserValidatorDashboardCount(r.Context(), userId, !req.IsArchived) + dashboardCount, err := h.getDataAccessor(r).GetUserValidatorDashboardCount(r.Context(), 
userId, !req.IsArchived) if err != nil { handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -950,7 +950,7 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri archivedReason = &enums.VDBArchivedReasons.User } - data, err := h.dai.UpdateValidatorDashboardArchiving(r.Context(), dashboardId, archivedReason) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardArchiving(r.Context(), dashboardId, archivedReason) if err != nil { handleErr(w, r, err) return @@ -984,7 +984,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSlotViz(w http.ResponseWrite handleErr(w, r, v) return } - data, err := h.dai.GetValidatorDashboardSlotViz(r.Context(), *dashboardId, groupIds) + data, err := h.getDataAccessor(r).GetValidatorDashboardSlotViz(r.Context(), *dashboardId, groupIds) if err != nil { handleErr(w, r, err) return @@ -1029,7 +1029,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWrite return } - data, paging, err := h.dai.GetValidatorDashboardSummary(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardSummary(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1074,7 +1074,7 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.Response return } - data, err := h.dai.GetValidatorDashboardGroupSummary(r.Context(), *dashboardId, groupId, period, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardGroupSummary(r.Context(), *dashboardId, groupId, period, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1127,7 +1127,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryChart(w http.Response return } - data, err := h.dai.GetValidatorDashboardSummaryChart(ctx, *dashboardId, groupIds, efficiencyType, aggregation, afterTs, beforeTs) + data, err := h.getDataAccessor(r).GetValidatorDashboardSummaryChart(ctx, *dashboardId, groupIds, efficiencyType, aggregation, afterTs, beforeTs) if err != nil { handleErr(w, r, err) return @@ -1171,13 +1171,13 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.Res duties := enums.ValidatorDuties switch duty { case duties.None: - indices, err = h.dai.GetValidatorDashboardSummaryValidators(r.Context(), *dashboardId, groupId) + indices, err = h.getDataAccessor(r).GetValidatorDashboardSummaryValidators(r.Context(), *dashboardId, groupId) case duties.Sync: - indices, err = h.dai.GetValidatorDashboardSyncSummaryValidators(r.Context(), *dashboardId, groupId, period) + indices, err = h.getDataAccessor(r).GetValidatorDashboardSyncSummaryValidators(r.Context(), *dashboardId, groupId, period) case duties.Slashed: - indices, err = h.dai.GetValidatorDashboardSlashingsSummaryValidators(r.Context(), *dashboardId, groupId, period) + indices, err = h.getDataAccessor(r).GetValidatorDashboardSlashingsSummaryValidators(r.Context(), *dashboardId, groupId, period) case duties.Proposal: - indices, err = h.dai.GetValidatorDashboardProposalSummaryValidators(r.Context(), *dashboardId, groupId, period) + indices, err = h.getDataAccessor(r).GetValidatorDashboardProposalSummaryValidators(r.Context(), *dashboardId, groupId, period) } if err != nil { 
handleErr(w, r, err) @@ -1227,7 +1227,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRewards(w http.ResponseWrite return } - data, paging, err := h.dai.GetValidatorDashboardRewards(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardRewards(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1268,7 +1268,7 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupRewards(w http.Response return } - data, err := h.dai.GetValidatorDashboardGroupRewards(r.Context(), *dashboardId, groupId, epoch, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardGroupRewards(r.Context(), *dashboardId, groupId, epoch, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1304,7 +1304,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRewardsChart(w http.Response return } - data, err := h.dai.GetValidatorDashboardRewardsChart(r.Context(), *dashboardId, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardRewardsChart(r.Context(), *dashboardId, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1350,7 +1350,7 @@ func (h *HandlerService) PublicGetValidatorDashboardDuties(w http.ResponseWriter return } - data, paging, err := h.dai.GetValidatorDashboardDuties(r.Context(), *dashboardId, epoch, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardDuties(r.Context(), *dashboardId, epoch, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1392,7 +1392,7 @@ func (h *HandlerService) PublicGetValidatorDashboardBlocks(w http.ResponseWriter return } - data, paging, err := h.dai.GetValidatorDashboardBlocks(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardBlocks(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1442,7 +1442,7 @@ func (h *HandlerService) PublicGetValidatorDashboardHeatmap(w http.ResponseWrite return } - data, err := h.dai.GetValidatorDashboardHeatmap(r.Context(), *dashboardId, protocolModes, aggregation, afterTs, beforeTs) + data, err := h.getDataAccessor(r).GetValidatorDashboardHeatmap(r.Context(), *dashboardId, protocolModes, aggregation, afterTs, beforeTs) if err != nil { handleErr(w, r, err) return @@ -1492,7 +1492,7 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupHeatmap(w http.Response return } - data, err := h.dai.GetValidatorDashboardGroupHeatmap(r.Context(), *dashboardId, groupId, protocolModes, aggregation, requestedTimestamp) + data, err := h.getDataAccessor(r).GetValidatorDashboardGroupHeatmap(r.Context(), *dashboardId, groupId, protocolModes, aggregation, requestedTimestamp) if err != nil { handleErr(w, r, err) return @@ -1527,7 +1527,7 @@ func (h *HandlerService) PublicGetValidatorDashboardExecutionLayerDeposits(w htt return } - data, paging, err := h.dai.GetValidatorDashboardElDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardElDeposits(r.Context(), *dashboardId, 
pagingParams.cursor, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1563,7 +1563,7 @@ func (h *HandlerService) PublicGetValidatorDashboardConsensusLayerDeposits(w htt return } - data, paging, err := h.dai.GetValidatorDashboardClDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardClDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1592,7 +1592,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalConsensusLayerDeposits( handleErr(w, r, err) return } - data, err := h.dai.GetValidatorDashboardTotalClDeposits(r.Context(), *dashboardId) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalClDeposits(r.Context(), *dashboardId) if err != nil { handleErr(w, r, err) return @@ -1620,7 +1620,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalExecutionLayerDeposits( handleErr(w, r, err) return } - data, err := h.dai.GetValidatorDashboardTotalElDeposits(r.Context(), *dashboardId) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalElDeposits(r.Context(), *dashboardId) if err != nil { handleErr(w, r, err) return @@ -1662,7 +1662,7 @@ func (h *HandlerService) PublicGetValidatorDashboardWithdrawals(w http.ResponseW return } - data, paging, err := h.dai.GetValidatorDashboardWithdrawals(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardWithdrawals(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1699,7 +1699,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalWithdrawals(w http.Resp return } - data, err := h.dai.GetValidatorDashboardTotalWithdrawals(r.Context(), *dashboardId, pagingParams.search, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalWithdrawals(r.Context(), *dashboardId, pagingParams.search, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1739,7 +1739,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRocketPool(w http.ResponseWr return } - data, paging, err := h.dai.GetValidatorDashboardRocketPool(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardRocketPool(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1774,7 +1774,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalRocketPool(w http.Respo return } - data, err := h.dai.GetValidatorDashboardTotalRocketPool(r.Context(), *dashboardId, pagingParams.search) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalRocketPool(r.Context(), *dashboardId, pagingParams.search) if err != nil { handleErr(w, r, err) return @@ -1817,7 +1817,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRocketPoolMinipools(w http.R return } - data, paging, err := h.dai.GetValidatorDashboardRocketPoolMinipools(r.Context(), *dashboardId, nodeAddress, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardRocketPoolMinipools(r.Context(), *dashboardId, nodeAddress, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, 
err) return @@ -1847,7 +1847,7 @@ func (h *HandlerService) PublicGetUserNotifications(w http.ResponseWriter, r *ht handleErr(w, r, err) return } - data, err := h.dai.GetNotificationOverview(r.Context(), userId) + data, err := h.getDataAccessor(r).GetNotificationOverview(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -1882,20 +1882,13 @@ func (h *HandlerService) PublicGetUserNotificationDashboards(w http.ResponseWrit q := r.URL.Query() pagingParams := v.checkPagingParams(q) sort := checkSort[enums.NotificationDashboardsColumn](&v, q.Get("sort")) - chainId := v.checkNetworkParameter(q.Get("network")) - chainIds := []uint64{chainId} - // TODO replace with "networks" once multiple networks are supported - //chainIds := v.checkNetworksParameter(q.Get("networks")) + chainIds := v.checkNetworksParameter(q.Get("networks")) if v.hasErrors() { handleErr(w, r, v) return } - dataAccessor := h.dai - if isMockEnabled(r) { - dataAccessor = h.dummy - } - data, paging, err := dataAccessor.GetDashboardNotifications(r.Context(), userId, chainIds, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetDashboardNotifications(r.Context(), userId, chainIds, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1931,11 +1924,7 @@ func (h *HandlerService) PublicGetUserNotificationsValidatorDashboard(w http.Res handleErr(w, r, v) return } - dataAccessor := h.dai - if isMockEnabled(r) { - dataAccessor = h.dummy - } - data, err := dataAccessor.GetValidatorDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch, search) + data, err := h.getDataAccessor(r).GetValidatorDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch, search) if err != nil { handleErr(w, r, err) return @@ -1970,7 +1959,7 @@ func (h *HandlerService) PublicGetUserNotificationsAccountDashboard(w http.Respo handleErr(w, r, v) return } - data, err := h.dai.GetAccountDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch, search) + data, err := h.getDataAccessor(r).GetAccountDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch, search) if err != nil { handleErr(w, r, err) return @@ -2008,7 +1997,7 @@ func (h *HandlerService) PublicGetUserNotificationMachines(w http.ResponseWriter handleErr(w, r, v) return } - data, paging, err := h.dai.GetMachineNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetMachineNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -2047,7 +2036,7 @@ func (h *HandlerService) PublicGetUserNotificationClients(w http.ResponseWriter, handleErr(w, r, v) return } - data, paging, err := h.dai.GetClientNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetClientNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -2086,7 +2075,7 @@ func (h *HandlerService) PublicGetUserNotificationRocketPool(w http.ResponseWrit handleErr(w, r, v) return } - data, paging, err := h.dai.GetRocketPoolNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := 
h.getDataAccessor(r).GetRocketPoolNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -2124,7 +2113,7 @@ func (h *HandlerService) PublicGetUserNotificationNetworks(w http.ResponseWriter handleErr(w, r, v) return } - data, paging, err := h.dai.GetNetworkNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetNetworkNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -2152,19 +2141,19 @@ func (h *HandlerService) PublicGetUserNotificationSettings(w http.ResponseWriter handleErr(w, r, err) return } - data, err := h.dai.GetNotificationSettings(r.Context(), userId) + data, err := h.getDataAccessor(r).GetNotificationSettings(r.Context(), userId) if err != nil { handleErr(w, r, err) return } // check premium perks - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - defaultSettings, err := h.dai.GetNotificationSettingsDefaultValues(r.Context()) + defaultSettings, err := h.getDataAccessor(r).GetNotificationSettingsDefaultValues(r.Context()) if err != nil { handleErr(w, r, err) return @@ -2227,12 +2216,12 @@ func (h *HandlerService) PublicPutUserNotificationSettingsGeneral(w http.Respons } // check premium perks - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - defaultSettings, err := h.dai.GetNotificationSettingsDefaultValues(r.Context()) + defaultSettings, err := h.getDataAccessor(r).GetNotificationSettingsDefaultValues(r.Context()) if err != nil { handleErr(w, r, err) return @@ -2248,7 +2237,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsGeneral(w http.Respons return } - err = h.dai.UpdateNotificationSettingsGeneral(r.Context(), userId, req) + err = h.getDataAccessor(r).UpdateNotificationSettingsGeneral(r.Context(), userId, req) if err != nil { handleErr(w, r, err) return @@ -2313,7 +2302,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsNetworks(w http.Respon IsNewRewardRoundSubscribed: req.IsNewRewardRoundSubscribed, } - err = h.dai.UpdateNotificationSettingsNetworks(r.Context(), userId, chainId, settings) + err = h.getDataAccessor(r).UpdateNotificationSettingsNetworks(r.Context(), userId, chainId, settings) if err != nil { handleErr(w, r, err) return @@ -2362,7 +2351,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsPairedDevices(w http.R handleErr(w, r, v) return } - err = h.dai.UpdateNotificationSettingsPairedDevice(r.Context(), userId, pairedDeviceId, name, req.IsNotificationsEnabled) + err = h.getDataAccessor(r).UpdateNotificationSettingsPairedDevice(r.Context(), userId, pairedDeviceId, name, req.IsNotificationsEnabled) if err != nil { handleErr(w, r, err) return @@ -2402,7 +2391,7 @@ func (h *HandlerService) PublicDeleteUserNotificationSettingsPairedDevices(w htt handleErr(w, r, v) return } - err = h.dai.DeleteNotificationSettingsPairedDevice(r.Context(), userId, pairedDeviceId) + err = h.getDataAccessor(r).DeleteNotificationSettingsPairedDevice(r.Context(), userId, pairedDeviceId) if err != nil { handleErr(w, r, err) return @@ -2442,7 +2431,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsClient(w http.Response handleErr(w, r, v) return 
} - data, err := h.dai.UpdateNotificationSettingsClients(r.Context(), userId, clientId, req.IsSubscribed) + data, err := h.getDataAccessor(r).UpdateNotificationSettingsClients(r.Context(), userId, clientId, req.IsSubscribed) if err != nil { handleErr(w, r, err) return @@ -2480,19 +2469,19 @@ func (h *HandlerService) PublicGetUserNotificationSettingsDashboards(w http.Resp handleErr(w, r, v) return } - data, paging, err := h.dai.GetNotificationSettingsDashboards(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetNotificationSettingsDashboards(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return } // if users premium perks do not allow subscriptions, set them to false in the response // TODO: once stripe payments run in v2, this should be removed and the notification settings should be updated upon a tier change instead - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - defaultSettings, err := h.dai.GetNotificationSettingsDefaultValues(r.Context()) + defaultSettings, err := h.getDataAccessor(r).GetNotificationSettingsDefaultValues(r.Context()) if err != nil { handleErr(w, r, err) return @@ -2559,7 +2548,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsValidatorDashboard(w h handleErr(w, r, v) return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -2573,7 +2562,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsValidatorDashboard(w h return } - err = h.dai.UpdateNotificationSettingsValidatorDashboard(r.Context(), userId, dashboardId, groupId, req) + err = h.getDataAccessor(r).UpdateNotificationSettingsValidatorDashboard(r.Context(), userId, dashboardId, groupId, req) if err != nil { handleErr(w, r, err) return @@ -2646,7 +2635,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsAccountDashboard(w htt IsERC721TokenTransfersSubscribed: req.IsERC721TokenTransfersSubscribed, IsERC1155TokenTransfersSubscribed: req.IsERC1155TokenTransfersSubscribed, } - err = h.dai.UpdateNotificationSettingsAccountDashboard(r.Context(), userId, dashboardId, groupId, settings) + err = h.getDataAccessor(r).UpdateNotificationSettingsAccountDashboard(r.Context(), userId, dashboardId, groupId, settings) if err != nil { handleErr(w, r, err) return diff --git a/backend/pkg/api/handlers/search_handlers.go b/backend/pkg/api/handlers/search_handlers.go index 3b8f058a1..49d43acdd 100644 --- a/backend/pkg/api/handlers/search_handlers.go +++ b/backend/pkg/api/handlers/search_handlers.go @@ -120,208 +120,163 @@ func (h *HandlerService) InternalPostSearch(w http.ResponseWriter, r *http.Reque // Search Helper Functions func (h *HandlerService) handleSearch(ctx context.Context, input string, searchType searchTypeKey, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil + switch searchType { + case validatorByIndex: + return h.handleSearchValidatorByIndex(ctx, input, chainId) + case validatorByPublicKey: + return h.handleSearchValidatorByPublicKey(ctx, input, chainId) + case validatorsByDepositAddress: + return h.handleSearchValidatorsByDepositAddress(ctx, input, chainId) + case validatorsByDepositEnsName: + return 
h.handleSearchValidatorsByDepositEnsName(ctx, input, chainId) + case validatorsByWithdrawalCredential: + return h.handleSearchValidatorsByWithdrawalCredential(ctx, input, chainId) + case validatorsByWithdrawalAddress: + return h.handleSearchValidatorsByWithdrawalAddress(ctx, input, chainId) + case validatorsByWithdrawalEns: + return h.handleSearchValidatorsByWithdrawalEnsName(ctx, input, chainId) + case validatorsByGraffiti: + return h.handleSearchValidatorsByGraffiti(ctx, input, chainId) default: - switch searchType { - case validatorByIndex: - return h.handleSearchValidatorByIndex(ctx, input, chainId) - case validatorByPublicKey: - return h.handleSearchValidatorByPublicKey(ctx, input, chainId) - case validatorsByDepositAddress: - return h.handleSearchValidatorsByDepositAddress(ctx, input, chainId) - case validatorsByDepositEnsName: - return h.handleSearchValidatorsByDepositEnsName(ctx, input, chainId) - case validatorsByWithdrawalCredential: - return h.handleSearchValidatorsByWithdrawalCredential(ctx, input, chainId) - case validatorsByWithdrawalAddress: - return h.handleSearchValidatorsByWithdrawalAddress(ctx, input, chainId) - case validatorsByWithdrawalEns: - return h.handleSearchValidatorsByWithdrawalEnsName(ctx, input, chainId) - case validatorsByGraffiti: - return h.handleSearchValidatorsByGraffiti(ctx, input, chainId) - default: - return nil, errors.New("invalid search type") - } + return nil, errors.New("invalid search type") } } func (h *HandlerService) handleSearchValidatorByIndex(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - index, err := strconv.ParseUint(input, 10, 64) - if err != nil { - // input should've been checked by the regex before, this should never happen - return nil, err - } - result, err := h.dai.GetSearchValidatorByIndex(ctx, chainId, index) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorByIndex), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.PublicKey), - NumValue: &result.Index, - }, nil + index, err := strconv.ParseUint(input, 10, 64) + if err != nil { + // input should've been checked by the regex before, this should never happen + return nil, err + } + result, err := h.daService.GetSearchValidatorByIndex(ctx, chainId, index) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorByIndex), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.PublicKey), + NumValue: &result.Index, + }, nil } func (h *HandlerService) handleSearchValidatorByPublicKey(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - publicKey, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) - if err != nil { - // input should've been checked by the regex before, this should never happen - return nil, err - } - result, err := h.dai.GetSearchValidatorByPublicKey(ctx, chainId, publicKey) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorByPublicKey), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.PublicKey), - NumValue: &result.Index, - }, nil + publicKey, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) + if err != nil { + // input should've been checked by the regex before, this should never happen + return nil, err } + result, err := h.daService.GetSearchValidatorByPublicKey(ctx, chainId, publicKey) + if 
err != nil { + return nil, err + } + + return &types.SearchResult{ + Type: string(validatorByPublicKey), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.PublicKey), + NumValue: &result.Index, + }, nil } func (h *HandlerService) handleSearchValidatorsByDepositAddress(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - address, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) - if err != nil { - return nil, err - } - result, err := h.dai.GetSearchValidatorsByDepositAddress(ctx, chainId, address) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByDepositAddress), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.Address), - NumValue: &result.Count, - }, nil + address, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) + if err != nil { + return nil, err + } + result, err := h.daService.GetSearchValidatorsByDepositAddress(ctx, chainId, address) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByDepositAddress), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.Address), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByDepositEnsName(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - result, err := h.dai.GetSearchValidatorsByDepositEnsName(ctx, chainId, input) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByDepositEnsName), - ChainId: chainId, - StrValue: result.EnsName, - HashValue: "0x" + hex.EncodeToString(result.Address), - NumValue: &result.Count, - }, nil + result, err := h.daService.GetSearchValidatorsByDepositEnsName(ctx, chainId, input) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByDepositEnsName), + ChainId: chainId, + StrValue: result.EnsName, + HashValue: "0x" + hex.EncodeToString(result.Address), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByWithdrawalCredential(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - withdrawalCredential, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) - if err != nil { - return nil, err - } - result, err := h.dai.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByWithdrawalCredential), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), - NumValue: &result.Count, - }, nil + withdrawalCredential, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) + if err != nil { + return nil, err } + result, err := h.daService.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) + if err != nil { + return nil, err + } + + return &types.SearchResult{ + Type: string(validatorsByWithdrawalCredential), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByWithdrawalAddress(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - withdrawalString := 
"010000000000000000000000" + strings.TrimPrefix(input, "0x") - withdrawalCredential, err := hex.DecodeString(withdrawalString) - if err != nil { - return nil, err - } - result, err := h.dai.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByWithdrawalAddress), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), - NumValue: &result.Count, - }, nil + withdrawalString := "010000000000000000000000" + strings.TrimPrefix(input, "0x") + withdrawalCredential, err := hex.DecodeString(withdrawalString) + if err != nil { + return nil, err + } + result, err := h.daService.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByWithdrawalAddress), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByWithdrawalEnsName(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - result, err := h.dai.GetSearchValidatorsByWithdrawalEnsName(ctx, chainId, input) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByWithdrawalEns), - ChainId: chainId, - StrValue: result.EnsName, - HashValue: "0x" + hex.EncodeToString(result.Address), - NumValue: &result.Count, - }, nil + result, err := h.daService.GetSearchValidatorsByWithdrawalEnsName(ctx, chainId, input) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByWithdrawalEns), + ChainId: chainId, + StrValue: result.EnsName, + HashValue: "0x" + hex.EncodeToString(result.Address), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByGraffiti(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - result, err := h.dai.GetSearchValidatorsByGraffiti(ctx, chainId, input) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByGraffiti), - ChainId: chainId, - StrValue: result.Graffiti, - NumValue: &result.Count, - }, nil + result, err := h.daService.GetSearchValidatorsByGraffiti(ctx, chainId, input) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByGraffiti), + ChainId: chainId, + StrValue: result.Graffiti, + NumValue: &result.Count, + }, nil } // -------------------------------------- diff --git a/backend/pkg/api/router.go b/backend/pkg/api/router.go index 36beea764..946fca724 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -39,6 +39,11 @@ func NewApiRouter(dataAccessor dataaccess.DataAccessor, dummy dataaccess.DataAcc publicRouter.Use(handlerService.StoreUserIdByApiKeyMiddleware) internalRouter.Use(handlerService.StoreUserIdBySessionMiddleware) + if cfg.DeploymentType != "production" { + publicRouter.Use(handlerService.StoreIsMockedFlagMiddleware) + internalRouter.Use(handlerService.StoreIsMockedFlagMiddleware) + } + addRoutes(handlerService, publicRouter, internalRouter, cfg) addLegacyRoutes(handlerService, legacyRouter) diff --git a/backend/pkg/api/types/common.go b/backend/pkg/api/types/common.go index a57ebf982..24e2b9522 100644 --- 
a/backend/pkg/api/types/common.go +++ b/backend/pkg/api/types/common.go @@ -142,8 +142,8 @@ type ChartHistorySeconds struct { } type IndexEpoch struct { - Index uint64 - Epoch uint64 + Index uint64 `json:"index"` + Epoch uint64 `json:"epoch"` } type IndexBlocks struct { @@ -151,6 +151,11 @@ type IndexBlocks struct { Blocks []uint64 `json:"blocks"` } +type IndexSlots struct { + Index uint64 `json:"index"` + Slots []uint64 `json:"slots"` +} + type ValidatorStateCounts struct { Online uint64 `json:"online"` Offline uint64 `json:"offline"` diff --git a/backend/pkg/api/types/data_access.go b/backend/pkg/api/types/data_access.go index 6bcf30472..b364702bc 100644 --- a/backend/pkg/api/types/data_access.go +++ b/backend/pkg/api/types/data_access.go @@ -17,6 +17,7 @@ const DefaultGroupId = 0 const AllGroups = -1 const NetworkAverage = -2 const DefaultGroupName = "default" +const DefaultDashboardName = DefaultGroupName type Sort[T enums.Enum] struct { Column T diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index f9eb491f9..36ca1b1b6 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -36,7 +36,7 @@ type NotificationDashboardsTableRow struct { ChainId uint64 `db:"chain_id" json:"chain_id"` Epoch uint64 `db:"epoch" json:"epoch"` DashboardId uint64 `db:"dashboard_id" json:"dashboard_id"` - DashboardName string `db:"dashboard_name" json:"-"` // not exported, internal use only + DashboardName string `db:"dashboard_name" json:"dashboard_name"` GroupId uint64 `db:"group_id" json:"group_id"` GroupName string `db:"group_name" json:"group_name"` EntityCount uint64 `db:"entity_count" json:"entity_count"` @@ -48,35 +48,33 @@ type InternalGetUserNotificationDashboardsResponse ApiPagingResponse[Notificatio // ------------------------------------------------------------ // Validator Dashboard Notification Detail -type NotificationEventGroup struct { - GroupName string `json:"group_name"` - DashboardID uint64 `json:"dashboard_id"` -} -type NotificationEventGroupBackOnline struct { - GroupName string `json:"group_name"` - DashboardID uint64 `json:"dashboard_id"` - EpochCount uint64 `json:"epoch_count"` -} - type NotificationEventValidatorBackOnline struct { Index uint64 `json:"index"` EpochCount uint64 `json:"epoch_count"` } +type NotificationEventWithdrawal struct { + Index uint64 `json:"index"` + Amount decimal.Decimal `json:"amount"` + Address Address `json:"address"` +} + type NotificationValidatorDashboardDetail struct { + DashboardName string `db:"dashboard_name" json:"dashboard_name"` + GroupName string `db:"group_name" json:"group_name"` ValidatorOffline []uint64 `json:"validator_offline"` // validator indices - GroupOffline []NotificationEventGroup `json:"group_offline"` // TODO not filled yet - ProposalMissed []IndexBlocks `json:"proposal_missed"` + GroupOffline bool `json:"group_offline"` // TODO not filled yet + ProposalMissed []IndexSlots `json:"proposal_missed"` ProposalDone []IndexBlocks `json:"proposal_done"` - UpcomingProposals []IndexBlocks `json:"upcoming_proposals"` + UpcomingProposals []IndexSlots `json:"upcoming_proposals"` Slashed []uint64 `json:"slashed"` // validator indices SyncCommittee []uint64 `json:"sync_committee"` // validator indices AttestationMissed []IndexEpoch `json:"attestation_missed"` // index (epoch) - Withdrawal []IndexBlocks `json:"withdrawal"` + Withdrawal []NotificationEventWithdrawal `json:"withdrawal"` ValidatorOfflineReminder []uint64 `json:"validator_offline_reminder"` // 
validator indices; TODO not filled yet - GroupOfflineReminder []NotificationEventGroup `json:"group_offline_reminder"` // TODO not filled yet + GroupOfflineReminder bool `json:"group_offline_reminder"` // TODO not filled yet ValidatorBackOnline []NotificationEventValidatorBackOnline `json:"validator_back_online"` - GroupBackOnline []NotificationEventGroupBackOnline `json:"group_back_online"` // TODO not filled yet + GroupBackOnline uint64 `json:"group_back_online"` // TODO not filled yet MinimumCollateralReached []Address `json:"min_collateral_reached"` // node addresses MaximumCollateralReached []Address `json:"max_collateral_reached"` // node addresses } @@ -242,6 +240,7 @@ type InternalPutUserNotificationSettingsAccountDashboardResponse ApiDataResponse type NotificationSettingsDashboardsTableRow struct { IsAccountDashboard bool `json:"is_account_dashboard"` // if false it's a validator dashboard DashboardId uint64 `json:"dashboard_id"` + DashboardName string `json:"dashboard_name"` GroupId uint64 `json:"group_id"` GroupName string `json:"group_name"` // if it's a validator dashboard, Settings is NotificationSettingsAccountDashboard, otherwise NotificationSettingsValidatorDashboard diff --git a/backend/pkg/api/types/user.go b/backend/pkg/api/types/user.go index 47dd0b3fd..d31ac8e0e 100644 --- a/backend/pkg/api/types/user.go +++ b/backend/pkg/api/types/user.go @@ -1,6 +1,7 @@ package types const UserGroupAdmin = "ADMIN" +const UserGroupDev = "DEV" type UserInfo struct { Id uint64 `json:"id"` diff --git a/backend/pkg/commons/db/user.go b/backend/pkg/commons/db/user.go new file mode 100644 index 000000000..fd7712bfa --- /dev/null +++ b/backend/pkg/commons/db/user.go @@ -0,0 +1,471 @@ +package db + +import ( + "context" + "database/sql" + "errors" + "fmt" + "math" + "time" + + t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/jmoiron/sqlx" +) + +var ErrNotFound = errors.New("not found") + +const hour uint64 = 3600 +const day = 24 * hour +const week = 7 * day +const month = 30 * day +const maxJsInt uint64 = 9007199254740991 // 2^53-1 (max safe int in JS) + +var freeTierProduct t.PremiumProduct = t.PremiumProduct{ + ProductName: "Free", + PremiumPerks: t.PremiumPerks{ + AdFree: false, + ValidatorDashboards: 1, + ValidatorsPerDashboard: 20, + ValidatorGroupsPerDashboard: 1, + ShareCustomDashboards: false, + ManageDashboardViaApi: false, + BulkAdding: false, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 0, + Hourly: 12 * hour, + Daily: 0, + Weekly: 0, + }, + EmailNotificationsPerDay: 10, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 1, + WebhookEndpoints: 1, + MobileAppCustomThemes: false, + MobileAppWidget: false, + MonitorMachines: 1, + MachineMonitoringHistorySeconds: 3600 * 3, + NotificationsMachineCustomThreshold: false, + NotificationsValidatorDashboardRealTimeMode: false, + NotificationsValidatorDashboardGroupOffline: false, + }, + PricePerMonthEur: 0, + PricePerYearEur: 0, + ProductIdMonthly: "premium_free", + ProductIdYearly: "premium_free.yearly", +} + +var adminPerks = t.PremiumPerks{ + AdFree: false, // admins want to see ads to check ad configuration + ValidatorDashboards: maxJsInt, + ValidatorsPerDashboard: maxJsInt, + ValidatorGroupsPerDashboard: maxJsInt, + ShareCustomDashboards: true, + ManageDashboardViaApi: true, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: maxJsInt, + Hourly: maxJsInt, + Daily: maxJsInt, + Weekly: maxJsInt, + }, + 
EmailNotificationsPerDay: maxJsInt, + ConfigureNotificationsViaApi: true, + ValidatorGroupNotifications: maxJsInt, + WebhookEndpoints: maxJsInt, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: maxJsInt, + MachineMonitoringHistorySeconds: maxJsInt, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardRealTimeMode: true, + NotificationsValidatorDashboardGroupOffline: true, +} + +func GetUserInfo(ctx context.Context, userId uint64, userDbReader *sqlx.DB) (*t.UserInfo, error) { + // TODO @patrick post-beta improve and unmock + userInfo := &t.UserInfo{ + Id: userId, + ApiKeys: []string{}, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 10, + UnitsPerMonth: 10, + ApiKeys: 4, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + Subscriptions: []t.UserSubscription{}, + } + + productSummary, err := GetProductSummary(ctx) + if err != nil { + return nil, fmt.Errorf("error getting productSummary: %w", err) + } + + result := struct { + Email string `db:"email"` + UserGroup string `db:"user_group"` + }{} + err = userDbReader.GetContext(ctx, &result, `SELECT email, COALESCE(user_group, '') as user_group FROM users WHERE id = $1`, userId) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("%w: user not found", ErrNotFound) + } + return nil, err + } + userInfo.Email = result.Email + userInfo.UserGroup = result.UserGroup + + userInfo.Email = utils.CensorEmail(userInfo.Email) + + err = userDbReader.SelectContext(ctx, &userInfo.ApiKeys, `SELECT api_key FROM api_keys WHERE user_id = $1`, userId) + if err != nil && err != sql.ErrNoRows { + return nil, fmt.Errorf("error getting userApiKeys for user %v: %w", userId, err) + } + + premiumProduct := struct { + ProductId string `db:"product_id"` + Store string `db:"store"` + Start time.Time `db:"start"` + End time.Time `db:"end"` + }{} + err = userDbReader.GetContext(ctx, &premiumProduct, ` + SELECT + COALESCE(uas.product_id, '') AS product_id, + COALESCE(uas.store, '') AS store, + COALESCE(to_timestamp((uss.payload->>'current_period_start')::bigint),uas.created_at) AS start, + COALESCE(to_timestamp((uss.payload->>'current_period_end')::bigint),uas.expires_at) AS end + FROM users_app_subscriptions uas + LEFT JOIN users_stripe_subscriptions uss ON uss.subscription_id = uas.subscription_id + WHERE uas.user_id = $1 AND uas.active = true AND product_id IN ('orca.yearly', 'orca', 'dolphin.yearly', 'dolphin', 'guppy.yearly', 'guppy', 'whale', 'goldfish', 'plankton') + ORDER BY CASE uas.product_id + WHEN 'orca.yearly' THEN 1 + WHEN 'orca' THEN 2 + WHEN 'dolphin.yearly' THEN 3 + WHEN 'dolphin' THEN 4 + WHEN 'guppy.yearly' THEN 5 + WHEN 'guppy' THEN 6 + WHEN 'whale' THEN 7 + WHEN 'goldfish' THEN 8 + WHEN 'plankton' THEN 9 + ELSE 10 -- For any other product_id values + END, uas.id DESC + LIMIT 1`, userId) + if err != nil { + if err != sql.ErrNoRows { + return nil, fmt.Errorf("error getting premiumProduct for userId %v: %w", userId, err) + } + premiumProduct.ProductId = "premium_free" + premiumProduct.Store = "" + } + + foundProduct := false + for _, p := range productSummary.PremiumProducts { + effectiveProductId := premiumProduct.ProductId + productName := p.ProductName + switch premiumProduct.ProductId { + case "whale": + effectiveProductId = "dolphin" + productName = "Whale" + case "goldfish": + effectiveProductId = "guppy" + productName = "Goldfish" + case "plankton": + effectiveProductId = "guppy" + productName = "Plankton" + } + if 
p.ProductIdMonthly == effectiveProductId || p.ProductIdYearly == effectiveProductId { + userInfo.PremiumPerks = p.PremiumPerks + foundProduct = true + + store := t.ProductStoreStripe + switch premiumProduct.Store { + case "ios-appstore": + store = t.ProductStoreIosAppstore + case "android-playstore": + store = t.ProductStoreAndroidPlaystore + case "ethpool": + store = t.ProductStoreEthpool + case "manuall": + store = t.ProductStoreCustom + } + + if effectiveProductId != "premium_free" { + userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ + ProductId: premiumProduct.ProductId, + ProductName: productName, + ProductCategory: t.ProductCategoryPremium, + ProductStore: store, + Start: premiumProduct.Start.Unix(), + End: premiumProduct.End.Unix(), + }) + } + break + } + } + if !foundProduct { + return nil, fmt.Errorf("product %s not found", premiumProduct.ProductId) + } + + premiumAddons := []struct { + PriceId string `db:"price_id"` + Start time.Time `db:"start"` + End time.Time `db:"end"` + Quantity int `db:"quantity"` + }{} + err = userDbReader.SelectContext(ctx, &premiumAddons, ` + SELECT + price_id, + to_timestamp((uss.payload->>'current_period_start')::bigint) AS start, + to_timestamp((uss.payload->>'current_period_end')::bigint) AS end, + COALESCE((uss.payload->>'quantity')::int,1) AS quantity + FROM users_stripe_subscriptions uss + INNER JOIN users u ON u.stripe_customer_id = uss.customer_id + WHERE u.id = $1 AND uss.active = true AND uss.purchase_group = 'addon'`, userId) + if err != nil { + return nil, fmt.Errorf("error getting premiumAddons for userId %v: %w", userId, err) + } + for _, addon := range premiumAddons { + foundAddon := false + for _, p := range productSummary.ExtraDashboardValidatorsPremiumAddon { + if p.StripePriceIdMonthly == addon.PriceId || p.StripePriceIdYearly == addon.PriceId { + foundAddon = true + for i := 0; i < addon.Quantity; i++ { + userInfo.PremiumPerks.ValidatorsPerDashboard += p.ExtraDashboardValidators + userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ + ProductId: utils.PriceIdToProductId(addon.PriceId), + ProductName: p.ProductName, + ProductCategory: t.ProductCategoryPremiumAddon, + ProductStore: t.ProductStoreStripe, + Start: addon.Start.Unix(), + End: addon.End.Unix(), + }) + } + } + } + if !foundAddon { + return nil, fmt.Errorf("addon not found: %v", addon.PriceId) + } + } + + if productSummary.ValidatorsPerDashboardLimit < userInfo.PremiumPerks.ValidatorsPerDashboard { + userInfo.PremiumPerks.ValidatorsPerDashboard = productSummary.ValidatorsPerDashboardLimit + } + + if userInfo.UserGroup == t.UserGroupAdmin { + userInfo.PremiumPerks = adminPerks + } + + return userInfo, nil +} + +func GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { // TODO @patrick post-beta put into db instead of hardcoding here and make it configurable + return &t.ProductSummary{ + ValidatorsPerDashboardLimit: 102_000, + StripePublicKey: utils.Config.Frontend.Stripe.PublicKey, + ApiProducts: []t.ApiProduct{ // TODO @patrick post-beta this data is not final yet + { + ProductId: "api_free", + ProductName: "Free", + PricePerMonthEur: 0, + PricePerYearEur: 0 * 12, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 10, + UnitsPerMonth: 10_000_000, + ApiKeys: 2, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "iron", + ProductName: "Iron", + PricePerMonthEur: 1.99, + PricePerYearEur: math.Floor(1.99*12*0.9*100) / 100, + ApiPerks: 
t.ApiPerks{ + UnitsPerSecond: 20, + UnitsPerMonth: 20_000_000, + ApiKeys: 10, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "silver", + ProductName: "Silver", + PricePerMonthEur: 2.99, + PricePerYearEur: math.Floor(2.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 30, + UnitsPerMonth: 100_000_000, + ApiKeys: 20, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "gold", + ProductName: "Gold", + PricePerMonthEur: 3.99, + PricePerYearEur: math.Floor(3.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 40, + UnitsPerMonth: 200_000_000, + ApiKeys: 40, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + }, + PremiumProducts: []t.PremiumProduct{ + freeTierProduct, + { + ProductName: "Guppy", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 1, + ValidatorsPerDashboard: 100, + ValidatorGroupsPerDashboard: 3, + ShareCustomDashboards: true, + ManageDashboardViaApi: false, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: day, + Hourly: 7 * day, + Daily: month, + Weekly: 0, + }, + EmailNotificationsPerDay: 15, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 3, + WebhookEndpoints: 3, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 2, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardRealTimeMode: true, + NotificationsValidatorDashboardGroupOffline: true, + }, + PricePerMonthEur: 9.99, + PricePerYearEur: 107.88, + ProductIdMonthly: "guppy", + ProductIdYearly: "guppy.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Guppy, + StripePriceIdYearly: utils.Config.Frontend.Stripe.GuppyYearly, + }, + { + ProductName: "Dolphin", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 2, + ValidatorsPerDashboard: 300, + ValidatorGroupsPerDashboard: 10, + ShareCustomDashboards: true, + ManageDashboardViaApi: false, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 5 * day, + Hourly: month, + Daily: 2 * month, + Weekly: 8 * week, + }, + EmailNotificationsPerDay: 20, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 10, + WebhookEndpoints: 10, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 10, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardRealTimeMode: true, + NotificationsValidatorDashboardGroupOffline: true, + }, + PricePerMonthEur: 29.99, + PricePerYearEur: 311.88, + ProductIdMonthly: "dolphin", + ProductIdYearly: "dolphin.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Dolphin, + StripePriceIdYearly: utils.Config.Frontend.Stripe.DolphinYearly, + }, + { + ProductName: "Orca", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 2, + ValidatorsPerDashboard: 1000, + ValidatorGroupsPerDashboard: 30, + ShareCustomDashboards: true, + ManageDashboardViaApi: true, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 3 * week, + Hourly: 6 * month, + Daily: 12 * month, + Weekly: maxJsInt, + }, + EmailNotificationsPerDay: 50, + ConfigureNotificationsViaApi: true, + ValidatorGroupNotifications: 60, + WebhookEndpoints: 30, + 
MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 10, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardRealTimeMode: true, + NotificationsValidatorDashboardGroupOffline: true, + }, + PricePerMonthEur: 49.99, + PricePerYearEur: 479.88, + ProductIdMonthly: "orca", + ProductIdYearly: "orca.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Orca, + StripePriceIdYearly: utils.Config.Frontend.Stripe.OrcaYearly, + IsPopular: true, + }, + }, + ExtraDashboardValidatorsPremiumAddon: []t.ExtraDashboardValidatorsPremiumAddon{ + { + ProductName: "1k extra valis per dashboard", + ExtraDashboardValidators: 1000, + PricePerMonthEur: 74.99, + PricePerYearEur: 719.88, + ProductIdMonthly: "vdb_addon_1k", + ProductIdYearly: "vdb_addon_1k.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon1k, + StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon1kYearly, + }, + { + ProductName: "10k extra valis per dashboard", + ExtraDashboardValidators: 10000, + PricePerMonthEur: 449.99, + PricePerYearEur: 4319.88, + ProductIdMonthly: "vdb_addon_10k", + ProductIdYearly: "vdb_addon_10k.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon10k, + StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon10kYearly, + }, + }, + }, nil +} + +func GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { + return &freeTierProduct.PremiumPerks, nil +} diff --git a/backend/pkg/commons/mail/mail.go b/backend/pkg/commons/mail/mail.go index e2e9d08e2..60f27a14c 100644 --- a/backend/pkg/commons/mail/mail.go +++ b/backend/pkg/commons/mail/mail.go @@ -73,35 +73,46 @@ func createTextMessage(msg types.Email) string { // SendMailRateLimited sends an email to a given address with the given message. // It will return a ratelimit-error if the configured ratelimit is exceeded. func SendMailRateLimited(content types.TransitEmailContent) error { - if utils.Config.Frontend.MaxMailsPerEmailPerDay > 0 { - now := time.Now() - count, err := db.CountSentMessage("n_mails", content.UserId) - if err != nil { - return err - } - timeLeft := now.Add(utils.Day).Truncate(utils.Day).Sub(now) - if count > int64(utils.Config.Frontend.MaxMailsPerEmailPerDay) { - return &types.RateLimitError{TimeLeft: timeLeft} - } else if count == int64(utils.Config.Frontend.MaxMailsPerEmailPerDay) { - // send an email if this was the last email for today - err := SendHTMLMail(content.Address, - "beaconcha.in - Email notification threshold limit reached", - types.Email{ - Title: "Email notification threshold limit reached", - //nolint: gosec - Body: template.HTML(fmt.Sprintf("You have reached the email notification threshold limit of %d emails per day. 
Further notification emails will be suppressed for %.1f hours.", utils.Config.Frontend.MaxMailsPerEmailPerDay, timeLeft.Hours())), - }, - []types.EmailAttachment{}) - if err != nil { - return err - } - } + sendThresholdReachedMail := false + maxEmailsPerDay := int64(0) + userInfo, err := db.GetUserInfo(context.Background(), uint64(content.UserId), db.FrontendReaderDB) + if err != nil { + return err } - - err := SendHTMLMail(content.Address, content.Subject, content.Email, content.Attachments) + maxEmailsPerDay = int64(userInfo.PremiumPerks.EmailNotificationsPerDay) + count, err := db.CountSentMessage("n_mails", content.UserId) if err != nil { return err } + timeLeft := time.Until(time.Now().Add(utils.Day).Truncate(utils.Day)) + + log.Debugf("user %d has sent %d of %d emails today, time left is %v", content.UserId, count, maxEmailsPerDay, timeLeft) + if count > maxEmailsPerDay { + return &types.RateLimitError{TimeLeft: timeLeft} + } else if count == maxEmailsPerDay { + sendThresholdReachedMail = true + } + + err = SendHTMLMail(content.Address, content.Subject, content.Email, content.Attachments) + if err != nil { + log.Error(err, "error sending email", 0) + } + + // make sure the threshold reached email arrives last + if sendThresholdReachedMail { + // send an email if this was the last email for today + err := SendHTMLMail(content.Address, + "beaconcha.in - Email notification threshold limit reached", + types.Email{ + Title: "Email notification threshold limit reached", + //nolint: gosec + Body: template.HTML(fmt.Sprintf("You have reached the email notification threshold limit of %d emails per day. Further notification emails will be suppressed for %.1f hours.", maxEmailsPerDay, timeLeft.Hours())), + }, + []types.EmailAttachment{}) + if err != nil { + return err + } + } return nil } diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index 64b71cb09..0ef4bde69 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -68,9 +68,8 @@ const ( ValidatorMissedProposalEventName EventName = "validator_proposal_missed" ValidatorExecutedProposalEventName EventName = "validator_proposal_submitted" - ValidatorDidSlashEventName EventName = "validator_did_slash" - ValidatorGroupIsOfflineEventName EventName = "validator_group_is_offline" - ValidatorBalanceDecreasedEventName EventName = "validator_balance_decreased" + ValidatorDidSlashEventName EventName = "validator_did_slash" + ValidatorGroupIsOfflineEventName EventName = "validator_group_is_offline" ValidatorReceivedDepositEventName EventName = "validator_received_deposit" NetworkSlashingEventName EventName = "network_slashing" @@ -80,25 +79,22 @@ const ( NetworkValidatorExitQueueNotFullEventName EventName = "network_validator_exit_queue_not_full" NetworkLivenessIncreasedEventName EventName = "network_liveness_increased" TaxReportEventName EventName = "user_tax_report" - //nolint:misspell - RocketpoolCollateralMinReachedEventName EventName = "rocketpool_colleteral_min" - //nolint:misspell - RocketpoolCollateralMaxReachedEventName EventName = "rocketpool_colleteral_max" - SyncCommitteeSoonEventName EventName = "validator_synccommittee_soon" + SyncCommitteeSoonEventName EventName = "validator_synccommittee_soon" //nolint:misspell RocketpoolCommissionThresholdEventName EventName = "rocketpool_commision_threshold" // Validator dashboard events - ValidatorIsOfflineEventName EventName = "validator_is_offline" - GroupIsOfflineEventName EventName = "group_is_offline" - 
ValidatorMissedAttestationEventName EventName = "validator_attestation_missed" - ValidatorProposalEventName EventName = "validator_proposal" - ValidatorUpcomingProposalEventName EventName = "validator_proposal_upcoming" - SyncCommitteeSoon EventName = "validator_synccommittee_soon" - ValidatorReceivedWithdrawalEventName EventName = "validator_withdrawal" - ValidatorGotSlashedEventName EventName = "validator_got_slashed" - RocketpoolCollateralMinReached EventName = "rocketpool_colleteral_min" //nolint:misspell - RocketpoolCollateralMaxReached EventName = "rocketpool_colleteral_max" //nolint:misspell + ValidatorIsOfflineEventName EventName = "validator_is_offline" + ValidatorIsOnlineEventName EventName = "validator_is_online" + GroupIsOfflineEventName EventName = "group_is_offline" + ValidatorMissedAttestationEventName EventName = "validator_attestation_missed" + ValidatorProposalEventName EventName = "validator_proposal" + ValidatorUpcomingProposalEventName EventName = "validator_proposal_upcoming" + SyncCommitteeSoon EventName = "validator_synccommittee_soon" + ValidatorReceivedWithdrawalEventName EventName = "validator_withdrawal" + ValidatorGotSlashedEventName EventName = "validator_got_slashed" + RocketpoolCollateralMinReachedEventName EventName = "rocketpool_colleteral_min" //nolint:misspell + RocketpoolCollateralMaxReachedEventName EventName = "rocketpool_colleteral_max" //nolint:misspell // Account dashboard events IncomingTransactionEventName EventName = "incoming_transaction" @@ -134,6 +130,7 @@ var EventSortOrder = []EventName{ MonitoringMachineMemoryUsageEventName, SyncCommitteeSoonEventName, ValidatorIsOfflineEventName, + ValidatorIsOnlineEventName, ValidatorReceivedWithdrawalEventName, NetworkLivenessIncreasedEventName, EthClientUpdateEventName, @@ -181,7 +178,8 @@ var LegacyEventLabel map[EventName]string = map[EventName]string{ ValidatorMissedAttestationEventName: "Your validator(s) missed an attestation", ValidatorGotSlashedEventName: "Your validator(s) got slashed", ValidatorDidSlashEventName: "Your validator(s) slashed another validator", - ValidatorIsOfflineEventName: "Your validator(s) state changed", + ValidatorIsOfflineEventName: "Your validator(s) went offline", + ValidatorIsOnlineEventName: "Your validator(s) came back online", ValidatorReceivedWithdrawalEventName: "A withdrawal was initiated for your validators", NetworkLivenessIncreasedEventName: "The network is experiencing liveness issues", EthClientUpdateEventName: "An Ethereum client has a new update available", @@ -203,8 +201,9 @@ var EventLabel map[EventName]string = map[EventName]string{ ValidatorMissedAttestationEventName: "Attestation missed", ValidatorGotSlashedEventName: "Validator slashed", ValidatorDidSlashEventName: "Validator has slashed", - ValidatorIsOfflineEventName: "Validator online / offline", - ValidatorReceivedWithdrawalEventName: "Validator withdrawal initiated", + ValidatorIsOfflineEventName: "Validator offline", + ValidatorIsOnlineEventName: "Validator back online", + ValidatorReceivedWithdrawalEventName: "Withdrawal processed", NetworkLivenessIncreasedEventName: "The network is experiencing liveness issues", EthClientUpdateEventName: "An Ethereum client has a new update available", MonitoringMachineOfflineEventName: "Machine offline", @@ -236,6 +235,7 @@ var EventNames = []EventName{ ValidatorGotSlashedEventName, ValidatorDidSlashEventName, ValidatorIsOfflineEventName, + ValidatorIsOnlineEventName, ValidatorReceivedWithdrawalEventName, NetworkLivenessIncreasedEventName, 
EthClientUpdateEventName, diff --git a/backend/pkg/commons/utils/utils.go b/backend/pkg/commons/utils/utils.go index c84aca8d3..4a9e1538a 100644 --- a/backend/pkg/commons/utils/utils.go +++ b/backend/pkg/commons/utils/utils.go @@ -396,3 +396,10 @@ func Deduplicate(slice []uint64) []uint64 { } return list } + +func FirstN(input string, n int) string { + if len(input) <= n { + return input + } + return input[:n] +} diff --git a/backend/pkg/exporter/modules/execution_deposits_exporter.go b/backend/pkg/exporter/modules/execution_deposits_exporter.go index 41fab0f25..ef7d2129f 100644 --- a/backend/pkg/exporter/modules/execution_deposits_exporter.go +++ b/backend/pkg/exporter/modules/execution_deposits_exporter.go @@ -8,7 +8,7 @@ import ( "encoding/hex" "fmt" "math/big" - "sync" + "sync/atomic" "time" "github.com/attestantio/go-eth2-client/spec/phase0" @@ -19,13 +19,16 @@ import ( gethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" gethrpc "github.com/ethereum/go-ethereum/rpc" + "github.com/go-redis/redis/v8" "golang.org/x/exp/maps" + "golang.org/x/sync/errgroup" "github.com/gobitfly/beaconchain/pkg/commons/contracts/deposit_contract" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/metrics" "github.com/gobitfly/beaconchain/pkg/commons/rpc" + "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" @@ -38,37 +41,34 @@ import ( type executionDepositsExporter struct { ModuleContext - Client rpc.Client - ErigonClient *gethrpc.Client - GethClient *gethrpc.Client - LogClient *ethclient.Client - LogFilterer *deposit_contract.DepositContractFilterer - DepositContractAddress common.Address - LastExportedBlock uint64 - ExportMutex *sync.Mutex - StopEarlyMutex *sync.Mutex - StopEarly context.CancelFunc - Signer gethtypes.Signer - DepositMethod abi.Method + Client rpc.Client + ErigonClient *gethrpc.Client + GethClient *gethrpc.Client + LogClient *ethclient.Client + LogFilterer *deposit_contract.DepositContractFilterer + DepositContractAddress common.Address + LastExportedBlock uint64 + LastExportedFinalizedBlock uint64 + LastExportedFinalizedBlockRedisKey string + CurrentHeadBlock atomic.Uint64 + Signer gethtypes.Signer + DepositMethod abi.Method } func NewExecutionDepositsExporter(moduleContext ModuleContext) ModuleInterface { return &executionDepositsExporter{ - ModuleContext: moduleContext, - Client: moduleContext.ConsClient, - DepositContractAddress: common.HexToAddress(utils.Config.Chain.ClConfig.DepositContractAddress), - LastExportedBlock: 0, - ExportMutex: &sync.Mutex{}, - StopEarlyMutex: &sync.Mutex{}, + ModuleContext: moduleContext, + Client: moduleContext.ConsClient, + DepositContractAddress: common.HexToAddress(utils.Config.Chain.ClConfig.DepositContractAddress), + LastExportedBlock: 0, + LastExportedFinalizedBlock: 0, } } -func (d *executionDepositsExporter) OnHead(event *constypes.StandardEventHeadResponse) (err error) { - return nil // nop -} - func (d *executionDepositsExporter) Init() error { - d.Signer = gethtypes.NewCancunSigner(big.NewInt(int64(utils.Config.Chain.ClConfig.DepositChainID))) + d.Signer = gethtypes.NewCancunSigner(big.NewInt(0).SetUint64(utils.Config.Chain.ClConfig.DepositChainID)) + + d.LastExportedFinalizedBlockRedisKey = 
fmt.Sprintf("%d:execution_deposits_exporter:last_exported_finalized_block", utils.Config.Chain.ClConfig.DepositChainID) rpcClient, err := gethrpc.Dial(utils.Config.Eth1GethEndpoint) if err != nil { @@ -124,14 +124,29 @@ func (d *executionDepositsExporter) Init() error { d.LastExportedBlock = utils.Config.Indexer.ELDepositContractFirstBlock } - log.Infof("initialized execution deposits exporter with last exported block: %v", d.LastExportedBlock) + val, err := db.PersistentRedisDbClient.Get(context.Background(), d.LastExportedFinalizedBlockRedisKey).Uint64() + switch { + case err == redis.Nil: + log.Warnf("%v missing in redis, exporting from beginning", d.LastExportedFinalizedBlockRedisKey) + case err != nil: + log.Fatal(err, "error getting last exported finalized block from redis", 0) + } + + d.LastExportedFinalizedBlock = val + // avoid fetching old blocks on a chain without deposits + if d.LastExportedFinalizedBlock > d.LastExportedBlock { + d.LastExportedBlock = d.LastExportedFinalizedBlock + } + + log.Infof("initialized execution deposits exporter with last exported block/finalizedBlock: %v/%v", d.LastExportedBlock, d.LastExportedFinalizedBlock) - // quick kick-start go func() { - err := d.OnFinalizedCheckpoint(nil) + // quick kick-start + err = d.OnHead(nil) if err != nil { - log.Error(err, "error during kick-start", 0) + log.Error(err, "error kick-starting executionDepositsExporter", 0) } + d.exportLoop() }() return nil @@ -145,90 +160,119 @@ func (d *executionDepositsExporter) OnChainReorg(event *constypes.StandardEventC return nil // nop } -// can take however long it wants to run, is run in a separate goroutine, so no need to worry about blocking func (d *executionDepositsExporter) OnFinalizedCheckpoint(event *constypes.StandardFinalizedCheckpointResponse) (err error) { - // important: have to fetch the actual finalized epoch because even tho its called on finalized checkpoint it actually emits for each justified epoch - // so we have to do an extra request to get the actual latest finalized epoch - res, err := d.CL.GetFinalityCheckpoints("head") - if err != nil { - return err - } + return nil // nop } - var nearestELBlock sql.NullInt64 - err = db.ReaderDb.Get(&nearestELBlock, "select exec_block_number from blocks where slot <= $1 and exec_block_number > 0 order by slot desc limit 1", res.Data.Finalized.Epoch*utils.Config.Chain.ClConfig.SlotsPerEpoch) - if err != nil { - return err - } - if !nearestELBlock.Valid { - return fmt.Errorf("no block found for finalized epoch %v", res.Data.Finalized.Epoch) +func (d *executionDepositsExporter) OnHead(event *constypes.StandardEventHeadResponse) (err error) { + return nil // nop +} + +func (d *executionDepositsExporter) exportLoop() { + ticker := time.NewTicker(time.Second * 10) + defer ticker.Stop() + for ; true; <-ticker.C { + err := d.export() + if err != nil { + log.Error(err, "error during export", 0) + services.ReportStatus("execution_deposits_exporter", err.Error(), nil) + } else { + services.ReportStatus("execution_deposits_exporter", "Running", nil) + } } - log.Debugf("exporting execution layer deposits till block %v", nearestELBlock.Int64) +} - err = d.exportTillBlock(uint64(nearestELBlock.Int64)) +func (d *executionDepositsExporter) export() (err error) { + var headBlock, finBlock uint64 + var g errgroup.Group + g.Go(func() error { + headSlot, err := d.CL.GetSlot("head") + if err != nil { + return fmt.Errorf("error getting head-slot: %w", err) + } + headBlock = headSlot.Data.Message.Body.ExecutionPayload.BlockNumber + return nil +
}) + g.Go(func() error { + finSlot, err := d.CL.GetSlot("finalized") + if err != nil { + return fmt.Errorf("error getting finalized-slot: %w", err) + } + finBlock = finSlot.Data.Message.Body.ExecutionPayload.BlockNumber + return nil + }) + err = g.Wait() if err != nil { return err } - return nil -} - -// this is basically synchronous, each time it gets called it will kill the previous export and replace it with itself -func (d *executionDepositsExporter) exportTillBlock(block uint64) (err error) { - // following blocks if a previous function call is still waiting for an export to stop early - d.StopEarlyMutex.Lock() - if d.StopEarly != nil { - // this will run even if the previous export has already finished - // preventing this would require an overly complex solution - log.Debugf("asking potentially running export to stop early") - d.StopEarly() + if d.LastExportedBlock >= headBlock && d.LastExportedFinalizedBlock >= finBlock { + log.Debugf("skip exporting execution layer deposits: last exported block/finalizedBlock: %v/%v, headBlock/finalizedBlock: %v/%v", d.LastExportedBlock, d.LastExportedFinalizedBlock, headBlock, finBlock) + return nil } - // following blocks as long as the running export hasn't finished yet - d.ExportMutex.Lock() - ctx, cancel := context.WithCancel(context.Background()) - d.StopEarly = cancel - // we have over taken and allow potentially newer function calls to signal us to stop early - d.StopEarlyMutex.Unlock() + nextFinalizedBlock := finBlock blockOffset := d.LastExportedBlock + 1 - blockTarget := block - - defer d.ExportMutex.Unlock() - - log.Infof("exporting execution layer deposits from %v to %v", blockOffset, blockTarget) - - depositsToSave := make([]*types.ELDeposit, 0) - + // make sure to re-export every block since the last exported finalized block to handle reorgs + if blockOffset > d.LastExportedFinalizedBlock { + blockOffset = d.LastExportedFinalizedBlock + 1 + } + blockTarget := headBlock + + log.InfoWithFields(log.Fields{ + "nextHeadBlock": headBlock, + "nextFinBlock": nextFinalizedBlock, + "lastHeadBlock": d.LastExportedBlock, + "lastFinBlock": d.LastExportedFinalizedBlock, + }, fmt.Sprintf("exporting execution layer deposits from %d to %d", blockOffset, blockTarget)) + + depositsToSaveBatchSize := 10_000 // limit how many deposits we save in one go + blockBatchSize := uint64(10_000) // limit how many blocks we fetch before updating the redis-key + depositsToSave := make([]*types.ELDeposit, 0, depositsToSaveBatchSize) for blockOffset < blockTarget { - tmpBlockTarget := blockOffset + 1000 - if tmpBlockTarget > blockTarget { - tmpBlockTarget = blockTarget + depositsToSave = depositsToSave[:0] + blockBatchStart := blockOffset + for blockOffset < blockTarget && len(depositsToSave) <= depositsToSaveBatchSize && blockOffset < blockBatchStart+blockBatchSize { + tmpBlockTarget := blockOffset + 1000 + if tmpBlockTarget > blockTarget { + tmpBlockTarget = blockTarget + } + log.Debugf("fetching deposits from %v to %v", blockOffset, tmpBlockTarget) + tmp, err := d.fetchDeposits(blockOffset, tmpBlockTarget) + if err != nil { + return err + } + depositsToSave = append(depositsToSave, tmp...) + blockOffset = tmpBlockTarget } + + log.Debugf("saving %v deposits", len(depositsToSave)) + err = d.saveDeposits(depositsToSave) + if err != nil { + return err + } - depositsToSave = append(depositsToSave, tmp...) 
- blockOffset = tmpBlockTarget - - select { - case <-ctx.Done(): // a newer function call has asked us to stop early - log.Warnf("stop early signal received, stopping export early") - blockTarget = tmpBlockTarget - default: - continue + d.LastExportedBlock = blockOffset + + prevLastExportedFinalizedBlock := d.LastExportedFinalizedBlock + if nextFinalizedBlock > d.LastExportedBlock && d.LastExportedBlock > d.LastExportedFinalizedBlock { + d.LastExportedFinalizedBlock = d.LastExportedBlock + } else if nextFinalizedBlock > d.LastExportedFinalizedBlock { + d.LastExportedFinalizedBlock = nextFinalizedBlock } - } - log.Debugf("saving %v deposits", len(depositsToSave)) - err = d.saveDeposits(depositsToSave) - if err != nil { - return err + // update redis to keep track of last exported finalized block persistently + if prevLastExportedFinalizedBlock != d.LastExportedFinalizedBlock { + log.Infof("updating %v: %v", d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + err := db.PersistentRedisDbClient.Set(ctx, d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock, 0).Err() + if err != nil { + log.Error(err, fmt.Sprintf("error setting redis %v = %v", d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock), 0) + } + } } - d.LastExportedBlock = blockTarget - start := time.Now() // update cached view err = d.updateCachedView() @@ -271,8 +315,8 @@ func (d *executionDepositsExporter) fetchDeposits(fromBlock, toBlock uint64) (de return nil, fmt.Errorf("nil deposit-log") } - depositLog := - depositLogIterator.Event + depositLog := depositLogIterator.Event + err = utils.VerifyDepositSignature(&phase0.DepositData{ PublicKey: phase0.BLSPubKey(depositLog.Pubkey), WithdrawalCredentials: depositLog.WithdrawalCredentials, @@ -387,6 +431,10 @@ func (d *executionDepositsExporter) fetchDeposits(fromBlock, toBlock uint64) (de } func (d *executionDepositsExporter) saveDeposits(depositsToSave []*types.ELDeposit) error { + if len(depositsToSave) == 0 { + return nil + } + tx, err := db.WriterDb.Beginx() if err != nil { return err diff --git a/backend/pkg/exporter/modules/relays.go b/backend/pkg/exporter/modules/relays.go index 205586792..78d068f93 100644 --- a/backend/pkg/exporter/modules/relays.go +++ b/backend/pkg/exporter/modules/relays.go @@ -107,21 +107,21 @@ func fetchDeliveredPayloads(r types.Relay, offset uint64) ([]BidTrace, error) { if offset != 0 { url += fmt.Sprintf("&cursor=%v", offset) } - - //nolint:gosec - resp, err := http.Get(url) + client := &http.Client{ + Timeout: time.Second * 30, + } + resp, err := client.Get(url) if err != nil { - log.Error(err, "error retrieving delivered payloads", 0, map[string]interface{}{"relay": r.ID}) - return nil, err + log.Error(err, "error retrieving delivered payloads", 0, map[string]interface{}{"relay": r.ID, "offset": offset, "url": url}) + return nil, fmt.Errorf("error retrieving delivered payloads for relay: %v, offset: %v, url: %v: %w", r.ID, offset, url, err) } defer resp.Body.Close() err = json.NewDecoder(resp.Body).Decode(&payloads) - if err != nil { - return nil, err + return nil, fmt.Errorf("error decoding json for delivered payloads for relay: %v, offset: %v, url: %v: %w", r.ID, offset, url, err) } return payloads, nil @@ -175,7 +175,7 @@ func retrieveAndInsertPayloadsFromRelay(r types.Relay, low_bound uint64, high_bo for { resp, err := fetchDeliveredPayloads(r, offset) if err != nil { - return err + return fmt.Errorf("error 
calling fetchDeliveredPayloads with offset: %v for relay: %v: %w", offset, r.ID, err) } if resp == nil { diff --git a/backend/pkg/notification/collection.go b/backend/pkg/notification/collection.go index 8c6918aac..b35414bf1 100644 --- a/backend/pkg/notification/collection.go +++ b/backend/pkg/notification/collection.go @@ -47,6 +47,7 @@ func notificationCollector() { gob.Register(&ValidatorProposalNotification{}) gob.Register(&ValidatorAttestationNotification{}) gob.Register(&ValidatorIsOfflineNotification{}) + gob.Register(&ValidatorIsOnlineNotification{}) gob.Register(&ValidatorGotSlashedNotification{}) gob.Register(&ValidatorWithdrawalNotification{}) gob.Register(&NetworkNotification{}) @@ -731,7 +732,6 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty DashboardGroupName: sub.DashboardGroupName, }, ValidatorIndex: validator.Index, - IsOffline: true, } notificationsByUserID.AddNotification(n) @@ -749,12 +749,12 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty log.Infof("new event: validator %v detected as online again at epoch %v", validator.Index, epoch) - n := &ValidatorIsOfflineNotification{ + n := &ValidatorIsOnlineNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ SubscriptionID: *sub.ID, UserID: *sub.UserID, Epoch: epoch, - EventName: sub.EventName, + EventName: types.ValidatorIsOnlineEventName, EventFilter: hex.EncodeToString(validator.Pubkey), LatestState: "-", DashboardId: sub.DashboardId, @@ -763,7 +763,6 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty DashboardGroupName: sub.DashboardGroupName, }, ValidatorIndex: validator.Index, - IsOffline: false, } notificationsByUserID.AddNotification(n) @@ -906,7 +905,7 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer return fmt.Errorf("error getting withdrawals from database, err: %w", err) } - // log.Infof("retrieved %v events", len(events)) + log.Infof("retrieved %v events", len(events)) for _, event := range events { subscribers, ok := subMap[hex.EncodeToString(event.Pubkey)] if ok { @@ -920,7 +919,7 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer continue } } - // log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorReceivedWithdrawalEventName, event.ValidatorIndex, epoch) + log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorReceivedWithdrawalEventName, event.ValidatorIndex, epoch) n := &ValidatorWithdrawalNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ SubscriptionID: *sub.ID, @@ -931,6 +930,7 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer DashboardName: sub.DashboardName, DashboardGroupId: sub.DashboardGroupId, DashboardGroupName: sub.DashboardGroupName, + Epoch: epoch, }, ValidatorIndex: event.ValidatorIndex, Epoch: epoch, diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index 81b58bf16..a6da48a4c 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -53,7 +53,9 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las goqu.C("event_name"), ).Join(goqu.T("users"), goqu.On(goqu.T("users").Col("id").Eq(goqu.T("users_subscriptions").Col("user_id")))). Where(goqu.L("(event_name = ? AND user_id <> 0)", eventNameForQuery)). 
- Where(goqu.L("(users.notifications_do_not_disturb_ts IS NULL OR users.notifications_do_not_disturb_ts < NOW())")) + Where(goqu.L("(users.notifications_do_not_disturb_ts IS NULL OR users.notifications_do_not_disturb_ts < NOW())")). + // filter out users that have all notification channels disabled + Where(goqu.L("(select bool_or(active) from users_notification_channels where users_notification_channels.user_id = users_subscriptions.user_id)")) if lastSentFilter != "" { if len(lastSentFilterArgs) > 0 { @@ -78,8 +80,10 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las } log.Infof("found %d subscriptions for event %s", len(subs), eventName) - + // zero := uint64(0) for _, sub := range subs { + // sub.LastEpoch = &zero + // sub.LastSent = &time.Time{} sub.EventName = types.EventName(strings.Replace(string(sub.EventName), utils.GetNetwork()+":", "", 1)) // remove the network name from the event name if strings.HasPrefix(sub.EventFilter, "vdb:") { dashboardData := strings.Split(sub.EventFilter, ":") diff --git a/backend/pkg/notification/queuing.go b/backend/pkg/notification/queuing.go index 3367b82a9..90cb78bca 100644 --- a/backend/pkg/notification/queuing.go +++ b/backend/pkg/notification/queuing.go @@ -296,6 +296,8 @@ func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.Notific bodyDetails := template.HTML("") + totalBlockReward := float64(0) + for _, event := range types.EventSortOrder { for _, notificationsPerGroup := range notificationsPerDashboard { for _, userNotifications := range notificationsPerGroup { @@ -303,10 +305,6 @@ func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.Notific if !ok { // nothing to do for this event type continue } - - if len(bodyDetails) > 0 { - bodyDetails += "
" - } //nolint:gosec // this is a static string bodyDetails += template.HTML(fmt.Sprintf("%s
", types.EventLabel[event])) i := 0 @@ -329,6 +327,15 @@ func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.Notific } } + if event == types.ValidatorExecutedProposalEventName { + proposalNotification, ok := n.(*ValidatorProposalNotification) + if !ok { + log.Error(fmt.Errorf("error casting proposal notification"), "", 0) + continue + } + totalBlockReward += proposalNotification.Reward + } + metrics.NotificationsQueued.WithLabelValues("email", string(event)).Inc() i++ @@ -344,20 +351,18 @@ func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.Notific //nolint:gosec // this is a static string bodyDetails += template.HTML(fmt.Sprintf("%s
", eventInfo)) } + bodyDetails += "
" } } } //nolint:gosec // this is a static string - bodySummary := template.HTML(fmt.Sprintf("
Summary for epoch %d:
", epoch)) + bodySummary := template.HTML(fmt.Sprintf("

Summary for epoch %d:

", epoch)) for _, event := range types.EventSortOrder { count, ok := notificationTypesMap[event] if !ok { continue } - if len(bodySummary) > 0 { - bodySummary += "
" - } plural := "" if count > 1 { plural = "s" @@ -372,13 +377,17 @@ func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.Notific case types.EthClientUpdateEventName: //nolint:gosec // this is a static string bodySummary += template.HTML(fmt.Sprintf("%s: %d client%s", types.EventLabel[event], count, plural)) + case types.ValidatorExecutedProposalEventName: + //nolint:gosec // this is a static string + bodySummary += template.HTML(fmt.Sprintf("%s: %d validator%s, Reward: %.3f ETH", types.EventLabel[event], count, plural, totalBlockReward)) default: //nolint:gosec // this is a static string bodySummary += template.HTML(fmt.Sprintf("%s: %d Validator%s", types.EventLabel[event], count, plural)) } + bodySummary += "
" } msg.Body += bodySummary - msg.Body += template.HTML("

Details:
") + msg.Body += template.HTML("

Details:

") msg.Body += bodyDetails if len(uniqueNotificationTypes) > 2 { @@ -412,6 +421,9 @@ func QueueEmailNotifications(epoch uint64, notificationsByUserID types.Notificat // now batch insert the emails in one go log.Infof("queueing %v email notifications", len(emails)) + if len(emails) == 0 { + return nil + } type insertData struct { Content types.TransitEmailContent `db:"content"` } @@ -452,6 +464,7 @@ func RenderPushMessagesForUserEvents(epoch uint64, notificationsByUserID types.N notificationTypesMap := make(map[types.EventName][]string) + totalBlockReward := float64(0) for _, event := range types.EventSortOrder { ns, ok := userNotifications[event] if !ok { // nothing to do for this event type @@ -462,6 +475,15 @@ func RenderPushMessagesForUserEvents(epoch uint64, notificationsByUserID types.N } for _, n := range ns { notificationTypesMap[event] = append(notificationTypesMap[event], n.GetEntitiyId()) + + if event == types.ValidatorExecutedProposalEventName { + proposalNotification, ok := n.(*ValidatorProposalNotification) + if !ok { + log.Error(fmt.Errorf("error casting proposal notification"), "", 0) + continue + } + totalBlockReward += proposalNotification.Reward + } } metrics.NotificationsQueued.WithLabelValues("push", string(event)).Inc() } @@ -489,15 +511,14 @@ func RenderPushMessagesForUserEvents(epoch uint64, notificationsByUserID types.N bodySummary += fmt.Sprintf("%s: %d client%s", types.EventLabel[event], count, plural) case types.MonitoringMachineCpuLoadEventName, types.MonitoringMachineMemoryUsageEventName, types.MonitoringMachineDiskAlmostFullEventName, types.MonitoringMachineOfflineEventName: bodySummary += fmt.Sprintf("%s: %d machine%s", types.EventLabel[event], count, plural) + case types.ValidatorExecutedProposalEventName: + bodySummary += fmt.Sprintf("%s: %d validator%s, Reward: %.3f ETH", types.EventLabel[event], count, plural, totalBlockReward) default: bodySummary += fmt.Sprintf("%s: %d validator%s", types.EventLabel[event], count, plural) } - truncated := "" - if len(events) > 3 { - truncated = ",..." 
- events = events[:3] + if len(events) < 3 { + bodySummary += fmt.Sprintf(" (%s)", strings.Join(events, ",")) } - bodySummary += fmt.Sprintf(" (%s%s)", strings.Join(events, ","), truncated) } if len(bodySummary) > 1000 { // cap the notification body to 1000 characters (firebase limit) @@ -540,6 +561,9 @@ func QueuePushNotification(epoch uint64, notificationsByUserID types.Notificatio // now batch insert the push messages in one go log.Infof("queueing %v push notifications", len(pushMessages)) + if len(pushMessages) == 0 { + return nil + } type insertData struct { Content types.TransitPushContent `db:"content"` } diff --git a/backend/pkg/notification/sending.go b/backend/pkg/notification/sending.go index fe148ca53..699f4c3f2 100644 --- a/backend/pkg/notification/sending.go +++ b/backend/pkg/notification/sending.go @@ -17,6 +17,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/metrics" "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" ) @@ -268,7 +269,8 @@ func sendWebhookNotifications() error { } resp, err := client.Post(n.Content.Webhook.Url, "application/json", reqBody) if err != nil { - log.Error(err, "error sending webhook request", 0) + log.Warnf("error sending webhook request: %v", err) + metrics.NotificationsSent.WithLabelValues("webhook", "error").Inc() return } else { metrics.NotificationsSent.WithLabelValues("webhook", resp.Status).Inc() @@ -393,7 +395,8 @@ func sendDiscordNotifications() error { resp, err := client.Post(webhook.Url, "application/json", reqBody) if err != nil { - log.Error(err, "error sending discord webhook request", 0) + log.Warnf("failed sending discord webhook request %v: %v", webhook.ID, err) + metrics.NotificationsSent.WithLabelValues("webhook_discord", "error").Inc() } else { metrics.NotificationsSent.WithLabelValues("webhook_discord", resp.Status).Inc() } @@ -413,10 +416,8 @@ func sendDiscordNotifications() error { errResp.Status = resp.Status resp.Body.Close() - if resp.StatusCode == http.StatusTooManyRequests { - log.Warnf("could not push to discord webhook due to rate limit. 
%v url: %v", errResp.Body, webhook.Url) - } else { - log.Error(nil, "error pushing discord webhook", 0, map[string]interface{}{"errResp.Body": errResp.Body, "webhook.Url": webhook.Url}) + if resp.StatusCode != http.StatusOK { + log.WarnWithFields(map[string]interface{}{"errResp.Body": utils.FirstN(errResp.Body, 1000), "webhook.Url": webhook.Url}, "error pushing discord webhook") } _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET request = $2, response = $3 WHERE id = $1;`, webhook.ID, reqs[i].Content.DiscordRequest, errResp) if err != nil { diff --git a/backend/pkg/notification/types.go b/backend/pkg/notification/types.go index 9028ffa7e..b0d2f4961 100644 --- a/backend/pkg/notification/types.go +++ b/backend/pkg/notification/types.go @@ -139,7 +139,6 @@ type ValidatorIsOfflineNotification struct { types.NotificationBaseImpl ValidatorIndex uint64 - IsOffline bool } func (n *ValidatorIsOfflineNotification) GetEntitiyId() string { @@ -149,19 +148,9 @@ func (n *ValidatorIsOfflineNotification) GetEntitiyId() string { // Overwrite specific methods func (n *ValidatorIsOfflineNotification) GetInfo(format types.NotificationFormat) string { vali := formatValidatorLink(format, n.ValidatorIndex) - epoch := "" - if n.IsOffline { - epoch = formatEpochLink(format, n.LatestState) - } else { - epoch = formatEpochLink(format, n.Epoch) - } + epoch := formatEpochLink(format, n.LatestState) dashboardAndGroupInfo := formatDashboardAndGroupLink(format, n) - - if n.IsOffline { - return fmt.Sprintf(`Validator %v%v is offline since epoch %s.`, vali, dashboardAndGroupInfo, epoch) - } else { - return fmt.Sprintf(`Validator %v%v is back online since epoch %v.`, vali, dashboardAndGroupInfo, epoch) - } + return fmt.Sprintf(`Validator %v%v is offline since epoch %s.`, vali, dashboardAndGroupInfo, epoch) } func (n *ValidatorIsOfflineNotification) GetTitle() string { @@ -169,19 +158,41 @@ func (n *ValidatorIsOfflineNotification) GetTitle() string { } func (n *ValidatorIsOfflineNotification) GetLegacyInfo() string { - if n.IsOffline { - return fmt.Sprintf(`Validator %v is offline since epoch %s.`, n.ValidatorIndex, n.LatestState) - } else { - return fmt.Sprintf(`Validator %v is back online since epoch %v.`, n.ValidatorIndex, n.Epoch) - } + return fmt.Sprintf(`Validator %v is offline since epoch %d.`, n.ValidatorIndex, n.Epoch) } func (n *ValidatorIsOfflineNotification) GetLegacyTitle() string { - if n.IsOffline { - return "Validator is Offline" - } else { - return "Validator Back Online" - } + return "Validator is Offline" +} + +type ValidatorIsOnlineNotification struct { + types.NotificationBaseImpl + + ValidatorIndex uint64 +} + +func (n *ValidatorIsOnlineNotification) GetEntitiyId() string { + return fmt.Sprintf("%d", n.ValidatorIndex) +} + +// Overwrite specific methods +func (n *ValidatorIsOnlineNotification) GetInfo(format types.NotificationFormat) string { + vali := formatValidatorLink(format, n.ValidatorIndex) + epoch := formatEpochLink(format, n.Epoch) + dashboardAndGroupInfo := formatDashboardAndGroupLink(format, n) + return fmt.Sprintf(`Validator %v%v is back online since epoch %v.`, vali, dashboardAndGroupInfo, epoch) +} + +func (n *ValidatorIsOnlineNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *ValidatorIsOnlineNotification) GetLegacyInfo() string { + return fmt.Sprintf(`Validator %v is back online since epoch %v.`, n.ValidatorIndex, n.Epoch) +} + +func (n *ValidatorIsOnlineNotification) GetLegacyTitle() string { + return "Validator Back Online" } // type 
validatorGroupIsOfflineNotification struct { diff --git a/frontend/.env-example b/frontend/.env-example index 8e5b8de1b..ffb100613 100644 --- a/frontend/.env-example +++ b/frontend/.env-example @@ -1,15 +1,16 @@ -NUXT_PUBLIC_API_CLIENT: "" -NUXT_PUBLIC_LEGACY_API_CLIENT: "" -NUXT_PRIVATE_API_SERVER: "" -NUXT_PRIVATE_LEGACY_API_SERVER: "" -NUXT_PUBLIC_API_KEY: "" -NUXT_PUBLIC_DOMAIN: "" -NUXT_PUBLIC_STRIPE_BASE_URL: "" -NUXT_PUBLIC_LOG_IP: "" -NUXT_PUBLIC_SHOW_IN_DEVELOPMENT: "" -NUXT_PUBLIC_V1_DOMAIN: "" -NUXT_PUBLIC_LOG_FILE: "" -NUXT_PUBLIC_CHAIN_ID_BY_DEFAULT: "" -NUXT_PUBLIC_MAINTENANCE_TS: "1717700652" -NUXT_PUBLIC_DEPLOYMENT_TYPE: "development" -PRIVATE_SSR_SECRET: "" +NUXT_IS_API_MOCKED= +NUXT_PRIVATE_API_SERVER= +NUXT_PRIVATE_LEGACY_API_SERVER= +NUXT_PUBLIC_API_CLIENT= +NUXT_PUBLIC_API_KEY= +NUXT_PUBLIC_CHAIN_ID_BY_DEFAULT= +NUXT_PUBLIC_DEPLOYMENT_TYPE=development +NUXT_PUBLIC_DOMAIN= +NUXT_PUBLIC_LEGACY_API_CLIENT= +NUXT_PUBLIC_LOG_FILE= +NUXT_PUBLIC_LOG_IP= +NUXT_PUBLIC_MAINTENANCE_TS=1717700652 +NUXT_PUBLIC_SHOW_IN_DEVELOPMENT= +NUXT_PUBLIC_STRIPE_BASE_URL= +NUXT_PUBLIC_V1_DOMAIN= +PRIVATE_SSR_SECRET= diff --git a/frontend/.vscode/settings.json b/frontend/.vscode/settings.json index fac845230..4e60da420 100644 --- a/frontend/.vscode/settings.json +++ b/frontend/.vscode/settings.json @@ -8,10 +8,13 @@ "DashboardGroupManagementModal", "DashboardValidatorManagmentModal", "NotificationsClientsTable", + "NotificationsDashboardDialogEntity", + "NotificationsDashboardTable", + "NotificationsManagementModalWebhook", + "NotificationsManagementSubscriptionDialog", "NotificationsManagmentMachines", "NotificationsNetworkTable", "NotificationsOverview", - "NotificationsRocketPoolTable", "a11y", "checkout", "ci", diff --git a/frontend/README.md b/frontend/README.md index 678f9fb89..7f1a5afb3 100644 --- a/frontend/README.md +++ b/frontend/README.md @@ -122,6 +122,17 @@ bun run preview Check out the [deployment documentation](https://nuxt.com/docs/getting-started/deployment) for more information. +## Get mocked API data + +If your user was added to the `ADMIN` or `DEV` group by the API team, you can get +mocked data from the API for certain endpoints by adding `?is_mocked=true` as a +query parameter. + +You can turn on mocked data globally for all configured endpoints +- by setting `NUXT_PUBLIC_IS_API_MOCKED=true` +in your [.env](.env) or +- by running `npm run dev:mock:api` (See: [package.json](package.json)) + ## Descision Record We documented our decisions in the [decisions](decisions.md) file.
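To illustrate the mocked-data switch described in the README section above: a minimal sketch of a helper that appends the `is_mocked=true` query parameter to a request path. The helper name and the endpoint in the usage comment are hypothetical and not part of this change set; how the flag is actually wired into the app's fetch layer is not shown in this excerpt.

```ts
// Hypothetical sketch: append the is_mocked flag to an API request path.
// It only illustrates the query-parameter convention from the README above.
export function withMockedData(path: string, isApiMocked: boolean): string {
  if (!isApiMocked) {
    return path
  }
  // Parse relative paths against a dummy origin so the URL API can be used.
  const url = new URL(path, 'http://localhost')
  url.searchParams.set('is_mocked', 'true')
  return url.pathname + url.search
}

// Usage (hypothetical endpoint):
// withMockedData('/api/i/users/me/notifications', true)
// -> '/api/i/users/me/notifications?is_mocked=true'
```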
diff --git a/frontend/assets/css/main.scss b/frontend/assets/css/main.scss index cb6d26ba6..77fce977f 100644 --- a/frontend/assets/css/main.scss +++ b/frontend/assets/css/main.scss @@ -3,9 +3,14 @@ @import "~/assets/css/fonts.scss"; @import "~/assets/css/utils.scss"; -html { +html, +h1,h2,h3,h4,h5,h6, +ul, +li + { margin: 0; padding: 0; + font: inherit; } ul { padding-inline-start: 1.5rem; diff --git a/frontend/components/bc/BcAccordion.vue b/frontend/components/bc/BcAccordion.vue new file mode 100644 index 000000000..b375bf80c --- /dev/null +++ b/frontend/components/bc/BcAccordion.vue @@ -0,0 +1,144 @@ + + + + + diff --git a/frontend/components/bc/BcCard.vue b/frontend/components/bc/BcCard.vue new file mode 100644 index 000000000..f2eed9a5a --- /dev/null +++ b/frontend/components/bc/BcCard.vue @@ -0,0 +1,17 @@ + + + + + diff --git a/frontend/components/bc/BcContentFilter.vue b/frontend/components/bc/BcContentFilter.vue index 53e3a67cb..bf3619eea 100644 --- a/frontend/components/bc/BcContentFilter.vue +++ b/frontend/components/bc/BcContentFilter.vue @@ -1,15 +1,14 @@ diff --git a/frontend/components/bc/BcSlider.vue b/frontend/components/bc/BcSlider.vue index b3dcaf9c0..d965a55df 100644 --- a/frontend/components/bc/BcSlider.vue +++ b/frontend/components/bc/BcSlider.vue @@ -15,7 +15,6 @@ defineProps<{ :step class="bc-slider" type="range" - v-bind="$attrs" > diff --git a/frontend/components/bc/BcText.vue b/frontend/components/bc/BcText.vue index 4829e1583..588198ecf 100644 --- a/frontend/components/bc/BcText.vue +++ b/frontend/components/bc/BcText.vue @@ -1,8 +1,9 @@ diff --git a/frontend/components/notifications/DashboardsTable.vue b/frontend/components/notifications/NotificationsDashboardsTable.vue similarity index 90% rename from frontend/components/notifications/DashboardsTable.vue rename to frontend/components/notifications/NotificationsDashboardsTable.vue index f221486a7..b4325b1d8 100644 --- a/frontend/components/notifications/DashboardsTable.vue +++ b/frontend/components/notifications/NotificationsDashboardsTable.vue @@ -5,9 +5,9 @@ import IconValidator from '../icon/IconValidator.vue' import IconAccount from '../icon/IconAccount.vue' import type { Cursor } from '~/types/datatable' import type { DashboardType } from '~/types/dashboard' -import { useUserDashboardStore } from '~/stores/dashboard/useUserDashboardStore' import type { ChainIDs } from '~/types/network' import type { NotificationDashboardsTableRow } from '~/types/api/notifications' +import { NotificationsDashboardDialogEntity } from '#components' defineEmits<{ (e: 'openDialog'): void }>() @@ -29,8 +29,6 @@ const { setSearch, } = useNotificationsDashboardStore(networkId) -const { getDashboardLabel } = useUserDashboardStore() - const { width } = useWindowSize() const colsVisible = computed(() => { return { @@ -40,11 +38,6 @@ const colsVisible = computed(() => { } }) -const openDialog = () => { - // TODO: implement dialog - alert('not implemented yet 😪') -} - const getDashboardType = (isAccount: boolean): DashboardType => isAccount ? 
'account' : 'validator' const { overview } = useNotificationsDashboardOverviewStore() const mapEventtypeToText = (eventType: NotificationDashboardsTableRow['event_types'][number]) => { @@ -95,6 +88,20 @@ const mapEventtypeToText = (eventType: NotificationDashboardsTableRow['event_typ const textDashboardNotifications = (event_types: NotificationDashboardsTableRow['event_types']) => { return event_types.map(mapEventtypeToText).join(', ') } + +const dialog = useDialog() + +const showDialog = (row: { identifier: string } & NotificationDashboardsTableRow) => { + dialog.open(NotificationsDashboardDialogEntity, { + data: { + dashboard_id: row.dashboard_id, + epoch: row.epoch, + group_id: row.group_id, + group_name: row.group_name, + identifier: row.identifier, + }, + }) +} - + + + @@ -245,10 +250,7 @@ const textDashboardNotifications = (event_types: NotificationDashboardsTableRow[
{{ $t("notifications.dashboards.expansion.label_group") }} @@ -295,14 +297,18 @@ const textDashboardNotifications = (event_types: NotificationDashboardsTableRow[ }}
-
- {{ - $t( - "notifications.dashboards.footer.subscriptions.accounts", - { count: overview?.adb_subscriptions_count }) + +
+ {{ + $t( + "notifications.dashboards.footer.subscriptions.accounts", + { count: overview?.adb_subscriptions_count }) - }} -
+ }} +
+ @@ -373,17 +379,6 @@ $breakpoint-lg: 1024px; @include utils.truncate-text; } -:deep(.bc-table-header) { - .h1 { - display: none; - } - - @media (min-width: $breakpoint-lg) { - .h1 { - display: block; - } - } -} :deep(.right-info) { flex-direction: column; justify-content: center; diff --git a/frontend/components/notifications/NotificationsOverview.vue b/frontend/components/notifications/NotificationsOverview.vue index fc17661c5..1db754293 100644 --- a/frontend/components/notifications/NotificationsOverview.vue +++ b/frontend/components/notifications/NotificationsOverview.vue @@ -44,7 +44,7 @@ const emit = defineEmits<{
-

+

{{ $t('notifications.overview.headers.email_notifications') }}

- {{ $t('notifications.overview.headers.most_notifications_24h') }} + {{ $t('notifications.overview.headers.notifications_24h') }}

{{ notificationsTotal }} @@ -152,9 +152,6 @@ const emit = defineEmits<{ padding: 17px 20px; position: relative; } -.overwrite-h3 { - margin-block: unset; -} .info-section, .action-section { display: flex; align-items: center; diff --git a/frontend/components/notifications/NotificationsRocketPoolTable.vue b/frontend/components/notifications/NotificationsRocketPoolTable.vue deleted file mode 100644 index 74e654823..000000000 --- a/frontend/components/notifications/NotificationsRocketPoolTable.vue +++ /dev/null @@ -1,225 +0,0 @@ - - - - - diff --git a/frontend/components/notifications/management/NotificationsManagementDashboards.vue b/frontend/components/notifications/management/NotificationsManagementDashboards.vue index ca853f2bb..0af763679 100644 --- a/frontend/components/notifications/management/NotificationsManagementDashboards.vue +++ b/frontend/components/notifications/management/NotificationsManagementDashboards.vue @@ -4,7 +4,6 @@ import { } from '@fortawesome/pro-solid-svg-icons' import { FontAwesomeIcon } from '@fortawesome/vue-fontawesome' -import { getGroupLabel } from '~/utils/dashboard/group' import type { ApiPagingResponse } from '~/types/api/common' import type { NotificationSettingsAccountDashboard, @@ -13,7 +12,6 @@ import type { } from '~/types/api/notifications' import type { DashboardType } from '~/types/dashboard' import { useNotificationsManagementDashboards } from '~/composables/notifications/useNotificationsManagementDashboards' -import { useUserDashboardStore } from '~/stores/dashboard/useUserDashboardStore' import { NotificationsManagementModalDashboardsDelete, NotificationsManagementModalWebhook, @@ -43,8 +41,6 @@ const { setPageSize, setSearch, } = useNotificationsManagementDashboards() -const { getDashboardLabel } = useUserDashboardStore() -const { groups } = useValidatorDashboardGroups() const { width } = useWindowSize() const colsVisible = computed(() => { @@ -55,10 +51,6 @@ const colsVisible = computed(() => { } }) -const groupNameLabel = (groupId?: number) => { - return getGroupLabel($t, groupId, groups.value, 'Σ') -} - const wrappedDashboards: ComputedRef< ApiPagingResponse | undefined > = computed(() => { @@ -66,15 +58,11 @@ const wrappedDashboards: ComputedRef< return } return { - data: dashboards.value.data.map(d => ({ - ...d, - dashboard_name: getDashboardLabel( - String(d.dashboard_id), - dashboardType(d), - ), - dashboard_type: dashboardType(d), - identifier: `${dashboardType(d)}-${d.dashboard_id}-${d.group_id}`, - subscriptions: getSubscriptions(d), + data: dashboards.value.data.map(dashboard => ({ + ...dashboard, + dashboard_type: dashboardType(dashboard), + identifier: `${dashboardType(dashboard)}-${dashboard.dashboard_id}-${dashboard.group_id}`, + subscriptions: getSubscriptions(dashboard), })), paging: dashboards.value.paging, } @@ -284,9 +272,7 @@ const handleDelete = (payload: Parameters[0 :header="$t('notifications.col.group')" > { ), }) }) - +const { + refreshOverview, +} = useNotificationsDashboardOverviewStore() watchDebounced(() => notificationsManagementStore.settings.general_settings, async () => { await notificationsManagementStore.saveSettings() + // this is a quickfix and should not be needed, + // reactive data should be updated automatically and `user actions` should be atomic -> error handling + await refreshOverview() }, { deep: true, }) diff --git a/frontend/components/notifications/management/NotificationsManagementMachines.vue b/frontend/components/notifications/management/NotificationsManagementMachines.vue index 
67841ce52..b966c91bc 100644 --- a/frontend/components/notifications/management/NotificationsManagementMachines.vue +++ b/frontend/components/notifications/management/NotificationsManagementMachines.vue @@ -21,6 +21,8 @@ watchDebounced(() => notificationsManagementStore.settings.general_settings, asy deep: true, maxWait: waitExtraLongForSliders, }) + +const hasMachines = computed(() => notificationsManagementStore.settings.has_machines) - - - { has-unit :label="$t('notifications.subscriptions.validators.max_collateral_reached.label')" /> - -