From e1ff24949f50eb2225fb24aa74e1d0b161e7fc7f Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 15 Oct 2024 07:33:05 +0000 Subject: [PATCH 01/73] feat(notifications): respect per user email limits --- backend/pkg/api/data_access/user.go | 452 +------------------------- backend/pkg/commons/db/user.go | 470 ++++++++++++++++++++++++++++ backend/pkg/commons/mail/mail.go | 45 +-- 3 files changed, 498 insertions(+), 469 deletions(-) create mode 100644 backend/pkg/commons/db/user.go diff --git a/backend/pkg/api/data_access/user.go b/backend/pkg/api/data_access/user.go index 906742ecf..0c7a64884 100644 --- a/backend/pkg/api/data_access/user.go +++ b/backend/pkg/api/data_access/user.go @@ -4,10 +4,10 @@ import ( "context" "database/sql" "fmt" - "math" "time" t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -258,460 +258,16 @@ func (d *DataAccessService) GetUserIdByResetHash(ctx context.Context, hash strin return result, err } -var adminPerks = t.PremiumPerks{ - AdFree: false, // admins want to see ads to check ad configuration - ValidatorDashboards: maxJsInt, - ValidatorsPerDashboard: maxJsInt, - ValidatorGroupsPerDashboard: maxJsInt, - ShareCustomDashboards: true, - ManageDashboardViaApi: true, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: maxJsInt, - Hourly: maxJsInt, - Daily: maxJsInt, - Weekly: maxJsInt, - }, - EmailNotificationsPerDay: maxJsInt, - ConfigureNotificationsViaApi: true, - ValidatorGroupNotifications: maxJsInt, - WebhookEndpoints: maxJsInt, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: maxJsInt, - MachineMonitoringHistorySeconds: maxJsInt, - NotificationsMachineCustomThreshold: true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, -} - func 
(d *DataAccessService) GetUserInfo(ctx context.Context, userId uint64) (*t.UserInfo, error) { - // TODO @patrick post-beta improve and unmock - userInfo := &t.UserInfo{ - Id: userId, - ApiKeys: []string{}, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 10, - UnitsPerMonth: 10, - ApiKeys: 4, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - Subscriptions: []t.UserSubscription{}, - } - - productSummary, err := d.GetProductSummary(ctx) - if err != nil { - return nil, fmt.Errorf("error getting productSummary: %w", err) - } - - result := struct { - Email string `db:"email"` - UserGroup string `db:"user_group"` - }{} - err = d.userReader.GetContext(ctx, &result, `SELECT email, COALESCE(user_group, '') as user_group FROM users WHERE id = $1`, userId) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, fmt.Errorf("%w: user not found", ErrNotFound) - } - return nil, err - } - userInfo.Email = result.Email - userInfo.UserGroup = result.UserGroup - - userInfo.Email = utils.CensorEmail(userInfo.Email) - - err = d.userReader.SelectContext(ctx, &userInfo.ApiKeys, `SELECT api_key FROM api_keys WHERE user_id = $1`, userId) - if err != nil && err != sql.ErrNoRows { - return nil, fmt.Errorf("error getting userApiKeys for user %v: %w", userId, err) - } - - premiumProduct := struct { - ProductId string `db:"product_id"` - Store string `db:"store"` - Start time.Time `db:"start"` - End time.Time `db:"end"` - }{} - err = d.userReader.GetContext(ctx, &premiumProduct, ` - SELECT - COALESCE(uas.product_id, '') AS product_id, - COALESCE(uas.store, '') AS store, - COALESCE(to_timestamp((uss.payload->>'current_period_start')::bigint),uas.created_at) AS start, - COALESCE(to_timestamp((uss.payload->>'current_period_end')::bigint),uas.expires_at) AS end - FROM users_app_subscriptions uas - LEFT JOIN users_stripe_subscriptions uss ON uss.subscription_id = uas.subscription_id - WHERE uas.user_id = $1 AND uas.active = 
true AND product_id IN ('orca.yearly', 'orca', 'dolphin.yearly', 'dolphin', 'guppy.yearly', 'guppy', 'whale', 'goldfish', 'plankton') - ORDER BY CASE uas.product_id - WHEN 'orca.yearly' THEN 1 - WHEN 'orca' THEN 2 - WHEN 'dolphin.yearly' THEN 3 - WHEN 'dolphin' THEN 4 - WHEN 'guppy.yearly' THEN 5 - WHEN 'guppy' THEN 6 - WHEN 'whale' THEN 7 - WHEN 'goldfish' THEN 8 - WHEN 'plankton' THEN 9 - ELSE 10 -- For any other product_id values - END, uas.id DESC - LIMIT 1`, userId) - if err != nil { - if err != sql.ErrNoRows { - return nil, fmt.Errorf("error getting premiumProduct for userId %v: %w", userId, err) - } - premiumProduct.ProductId = "premium_free" - premiumProduct.Store = "" - } - - foundProduct := false - for _, p := range productSummary.PremiumProducts { - effectiveProductId := premiumProduct.ProductId - productName := p.ProductName - switch premiumProduct.ProductId { - case "whale": - effectiveProductId = "dolphin" - productName = "Whale" - case "goldfish": - effectiveProductId = "guppy" - productName = "Goldfish" - case "plankton": - effectiveProductId = "guppy" - productName = "Plankton" - } - if p.ProductIdMonthly == effectiveProductId || p.ProductIdYearly == effectiveProductId { - userInfo.PremiumPerks = p.PremiumPerks - foundProduct = true - - store := t.ProductStoreStripe - switch premiumProduct.Store { - case "ios-appstore": - store = t.ProductStoreIosAppstore - case "android-playstore": - store = t.ProductStoreAndroidPlaystore - case "ethpool": - store = t.ProductStoreEthpool - case "manuall": - store = t.ProductStoreCustom - } - - if effectiveProductId != "premium_free" { - userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ - ProductId: premiumProduct.ProductId, - ProductName: productName, - ProductCategory: t.ProductCategoryPremium, - ProductStore: store, - Start: premiumProduct.Start.Unix(), - End: premiumProduct.End.Unix(), - }) - } - break - } - } - if !foundProduct { - return nil, fmt.Errorf("product %s not found", 
premiumProduct.ProductId) - } - - premiumAddons := []struct { - PriceId string `db:"price_id"` - Start time.Time `db:"start"` - End time.Time `db:"end"` - Quantity int `db:"quantity"` - }{} - err = d.userReader.SelectContext(ctx, &premiumAddons, ` - SELECT - price_id, - to_timestamp((uss.payload->>'current_period_start')::bigint) AS start, - to_timestamp((uss.payload->>'current_period_end')::bigint) AS end, - COALESCE((uss.payload->>'quantity')::int,1) AS quantity - FROM users_stripe_subscriptions uss - INNER JOIN users u ON u.stripe_customer_id = uss.customer_id - WHERE u.id = $1 AND uss.active = true AND uss.purchase_group = 'addon'`, userId) - if err != nil { - return nil, fmt.Errorf("error getting premiumAddons for userId %v: %w", userId, err) - } - for _, addon := range premiumAddons { - foundAddon := false - for _, p := range productSummary.ExtraDashboardValidatorsPremiumAddon { - if p.StripePriceIdMonthly == addon.PriceId || p.StripePriceIdYearly == addon.PriceId { - foundAddon = true - for i := 0; i < addon.Quantity; i++ { - userInfo.PremiumPerks.ValidatorsPerDashboard += p.ExtraDashboardValidators - userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ - ProductId: utils.PriceIdToProductId(addon.PriceId), - ProductName: p.ProductName, - ProductCategory: t.ProductCategoryPremiumAddon, - ProductStore: t.ProductStoreStripe, - Start: addon.Start.Unix(), - End: addon.End.Unix(), - }) - } - } - } - if !foundAddon { - return nil, fmt.Errorf("addon not found: %v", addon.PriceId) - } - } - - if productSummary.ValidatorsPerDashboardLimit < userInfo.PremiumPerks.ValidatorsPerDashboard { - userInfo.PremiumPerks.ValidatorsPerDashboard = productSummary.ValidatorsPerDashboardLimit - } - - if userInfo.UserGroup == t.UserGroupAdmin { - userInfo.PremiumPerks = adminPerks - } - - return userInfo, nil -} - -const hour uint64 = 3600 -const day = 24 * hour -const week = 7 * day -const month = 30 * day -const maxJsInt uint64 = 9007199254740991 // 2^53-1 
(max safe int in JS) - -var freeTierProduct t.PremiumProduct = t.PremiumProduct{ - ProductName: "Free", - PremiumPerks: t.PremiumPerks{ - AdFree: false, - ValidatorDashboards: 1, - ValidatorsPerDashboard: 20, - ValidatorGroupsPerDashboard: 1, - ShareCustomDashboards: false, - ManageDashboardViaApi: false, - BulkAdding: false, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 0, - Hourly: 12 * hour, - Daily: 0, - Weekly: 0, - }, - EmailNotificationsPerDay: 5, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 1, - WebhookEndpoints: 1, - MobileAppCustomThemes: false, - MobileAppWidget: false, - MonitorMachines: 1, - MachineMonitoringHistorySeconds: 3600 * 3, - NotificationsMachineCustomThreshold: false, - NotificationsValidatorDashboardRealTimeMode: false, - NotificationsValidatorDashboardGroupOffline: false, - }, - PricePerMonthEur: 0, - PricePerYearEur: 0, - ProductIdMonthly: "premium_free", - ProductIdYearly: "premium_free.yearly", + return db.GetUserInfo(ctx, userId) } func (d *DataAccessService) GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { - // TODO @patrick post-beta put into db instead of hardcoding here and make it configurable - return &t.ProductSummary{ - ValidatorsPerDashboardLimit: 102_000, - StripePublicKey: utils.Config.Frontend.Stripe.PublicKey, - ApiProducts: []t.ApiProduct{ // TODO @patrick post-beta this data is not final yet - { - ProductId: "api_free", - ProductName: "Free", - PricePerMonthEur: 0, - PricePerYearEur: 0 * 12, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 10, - UnitsPerMonth: 10_000_000, - ApiKeys: 2, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "iron", - ProductName: "Iron", - PricePerMonthEur: 1.99, - PricePerYearEur: math.Floor(1.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 20, - UnitsPerMonth: 20_000_000, - ApiKeys: 10, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - 
Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "silver", - ProductName: "Silver", - PricePerMonthEur: 2.99, - PricePerYearEur: math.Floor(2.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 30, - UnitsPerMonth: 100_000_000, - ApiKeys: 20, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "gold", - ProductName: "Gold", - PricePerMonthEur: 3.99, - PricePerYearEur: math.Floor(3.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 40, - UnitsPerMonth: 200_000_000, - ApiKeys: 40, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - }, - PremiumProducts: []t.PremiumProduct{ - freeTierProduct, - { - ProductName: "Guppy", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 1, - ValidatorsPerDashboard: 100, - ValidatorGroupsPerDashboard: 3, - ShareCustomDashboards: true, - ManageDashboardViaApi: false, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: day, - Hourly: 7 * day, - Daily: month, - Weekly: 0, - }, - EmailNotificationsPerDay: 15, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 3, - WebhookEndpoints: 3, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 2, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - NotificationsMachineCustomThreshold: true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, - }, - PricePerMonthEur: 9.99, - PricePerYearEur: 107.88, - ProductIdMonthly: "guppy", - ProductIdYearly: "guppy.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Guppy, - StripePriceIdYearly: utils.Config.Frontend.Stripe.GuppyYearly, - }, - { - ProductName: "Dolphin", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 2, - ValidatorsPerDashboard: 300, - 
ValidatorGroupsPerDashboard: 10, - ShareCustomDashboards: true, - ManageDashboardViaApi: false, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 5 * day, - Hourly: month, - Daily: 2 * month, - Weekly: 8 * week, - }, - EmailNotificationsPerDay: 20, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 10, - WebhookEndpoints: 10, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 10, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - NotificationsMachineCustomThreshold: true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, - }, - PricePerMonthEur: 29.99, - PricePerYearEur: 311.88, - ProductIdMonthly: "dolphin", - ProductIdYearly: "dolphin.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Dolphin, - StripePriceIdYearly: utils.Config.Frontend.Stripe.DolphinYearly, - }, - { - ProductName: "Orca", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 2, - ValidatorsPerDashboard: 1000, - ValidatorGroupsPerDashboard: 30, - ShareCustomDashboards: true, - ManageDashboardViaApi: true, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 3 * week, - Hourly: 6 * month, - Daily: 12 * month, - Weekly: maxJsInt, - }, - EmailNotificationsPerDay: 50, - ConfigureNotificationsViaApi: true, - ValidatorGroupNotifications: 60, - WebhookEndpoints: 30, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 10, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - NotificationsMachineCustomThreshold: true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, - }, - PricePerMonthEur: 49.99, - PricePerYearEur: 479.88, - ProductIdMonthly: "orca", - ProductIdYearly: "orca.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Orca, - StripePriceIdYearly: utils.Config.Frontend.Stripe.OrcaYearly, - IsPopular: true, - }, - }, - 
ExtraDashboardValidatorsPremiumAddon: []t.ExtraDashboardValidatorsPremiumAddon{ - { - ProductName: "1k extra valis per dashboard", - ExtraDashboardValidators: 1000, - PricePerMonthEur: 74.99, - PricePerYearEur: 719.88, - ProductIdMonthly: "vdb_addon_1k", - ProductIdYearly: "vdb_addon_1k.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon1k, - StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon1kYearly, - }, - { - ProductName: "10k extra valis per dashboard", - ExtraDashboardValidators: 10000, - PricePerMonthEur: 449.99, - PricePerYearEur: 4319.88, - ProductIdMonthly: "vdb_addon_10k", - ProductIdYearly: "vdb_addon_10k.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon10k, - StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon10kYearly, - }, - }, - }, nil + return db.GetProductSummary(ctx) } func (d *DataAccessService) GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { - return &freeTierProduct.PremiumPerks, nil + return db.GetFreeTierPerks(ctx) } func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64) (*t.UserDashboardsData, error) { diff --git a/backend/pkg/commons/db/user.go b/backend/pkg/commons/db/user.go new file mode 100644 index 000000000..c877a1379 --- /dev/null +++ b/backend/pkg/commons/db/user.go @@ -0,0 +1,470 @@ +package db + +import ( + "context" + "database/sql" + "errors" + "fmt" + "math" + "time" + + t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" +) + +var ErrNotFound = errors.New("not found") + +const hour uint64 = 3600 +const day = 24 * hour +const week = 7 * day +const month = 30 * day +const maxJsInt uint64 = 9007199254740991 // 2^53-1 (max safe int in JS) + +var freeTierProduct t.PremiumProduct = t.PremiumProduct{ + ProductName: "Free", + PremiumPerks: t.PremiumPerks{ + AdFree: false, + ValidatorDashboards: 1, + ValidatorsPerDashboard: 20, + ValidatorGroupsPerDashboard: 1, + ShareCustomDashboards: 
false, + ManageDashboardViaApi: false, + BulkAdding: false, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 0, + Hourly: 12 * hour, + Daily: 0, + Weekly: 0, + }, + EmailNotificationsPerDay: 5, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 1, + WebhookEndpoints: 1, + MobileAppCustomThemes: false, + MobileAppWidget: false, + MonitorMachines: 1, + MachineMonitoringHistorySeconds: 3600 * 3, + NotificationsMachineCustomThreshold: false, + NotificationsValidatorDashboardRealTimeMode: false, + NotificationsValidatorDashboardGroupOffline: false, + }, + PricePerMonthEur: 0, + PricePerYearEur: 0, + ProductIdMonthly: "premium_free", + ProductIdYearly: "premium_free.yearly", +} + +var adminPerks = t.PremiumPerks{ + AdFree: false, // admins want to see ads to check ad configuration + ValidatorDashboards: maxJsInt, + ValidatorsPerDashboard: maxJsInt, + ValidatorGroupsPerDashboard: maxJsInt, + ShareCustomDashboards: true, + ManageDashboardViaApi: true, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: maxJsInt, + Hourly: maxJsInt, + Daily: maxJsInt, + Weekly: maxJsInt, + }, + EmailNotificationsPerDay: maxJsInt, + ConfigureNotificationsViaApi: true, + ValidatorGroupNotifications: maxJsInt, + WebhookEndpoints: maxJsInt, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: maxJsInt, + MachineMonitoringHistorySeconds: maxJsInt, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardRealTimeMode: true, + NotificationsValidatorDashboardGroupOffline: true, +} + +func GetUserInfo(ctx context.Context, userId uint64) (*t.UserInfo, error) { + // TODO @patrick post-beta improve and unmock + userInfo := &t.UserInfo{ + Id: userId, + ApiKeys: []string{}, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 10, + UnitsPerMonth: 10, + ApiKeys: 4, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + Subscriptions: []t.UserSubscription{}, + } + 
+ productSummary, err := GetProductSummary(ctx) + if err != nil { + return nil, fmt.Errorf("error getting productSummary: %w", err) + } + + result := struct { + Email string `db:"email"` + UserGroup string `db:"user_group"` + }{} + err = FrontendReaderDB.GetContext(ctx, &result, `SELECT email, COALESCE(user_group, '') as user_group FROM users WHERE id = $1`, userId) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("%w: user not found", ErrNotFound) + } + return nil, err + } + userInfo.Email = result.Email + userInfo.UserGroup = result.UserGroup + + userInfo.Email = utils.CensorEmail(userInfo.Email) + + err = FrontendReaderDB.SelectContext(ctx, &userInfo.ApiKeys, `SELECT api_key FROM api_keys WHERE user_id = $1`, userId) + if err != nil && err != sql.ErrNoRows { + return nil, fmt.Errorf("error getting userApiKeys for user %v: %w", userId, err) + } + + premiumProduct := struct { + ProductId string `db:"product_id"` + Store string `db:"store"` + Start time.Time `db:"start"` + End time.Time `db:"end"` + }{} + err = FrontendReaderDB.GetContext(ctx, &premiumProduct, ` + SELECT + COALESCE(uas.product_id, '') AS product_id, + COALESCE(uas.store, '') AS store, + COALESCE(to_timestamp((uss.payload->>'current_period_start')::bigint),uas.created_at) AS start, + COALESCE(to_timestamp((uss.payload->>'current_period_end')::bigint),uas.expires_at) AS end + FROM users_app_subscriptions uas + LEFT JOIN users_stripe_subscriptions uss ON uss.subscription_id = uas.subscription_id + WHERE uas.user_id = $1 AND uas.active = true AND product_id IN ('orca.yearly', 'orca', 'dolphin.yearly', 'dolphin', 'guppy.yearly', 'guppy', 'whale', 'goldfish', 'plankton') + ORDER BY CASE uas.product_id + WHEN 'orca.yearly' THEN 1 + WHEN 'orca' THEN 2 + WHEN 'dolphin.yearly' THEN 3 + WHEN 'dolphin' THEN 4 + WHEN 'guppy.yearly' THEN 5 + WHEN 'guppy' THEN 6 + WHEN 'whale' THEN 7 + WHEN 'goldfish' THEN 8 + WHEN 'plankton' THEN 9 + ELSE 10 -- For any other product_id values + 
END, uas.id DESC + LIMIT 1`, userId) + if err != nil { + if err != sql.ErrNoRows { + return nil, fmt.Errorf("error getting premiumProduct for userId %v: %w", userId, err) + } + premiumProduct.ProductId = "premium_free" + premiumProduct.Store = "" + } + + foundProduct := false + for _, p := range productSummary.PremiumProducts { + effectiveProductId := premiumProduct.ProductId + productName := p.ProductName + switch premiumProduct.ProductId { + case "whale": + effectiveProductId = "dolphin" + productName = "Whale" + case "goldfish": + effectiveProductId = "guppy" + productName = "Goldfish" + case "plankton": + effectiveProductId = "guppy" + productName = "Plankton" + } + if p.ProductIdMonthly == effectiveProductId || p.ProductIdYearly == effectiveProductId { + userInfo.PremiumPerks = p.PremiumPerks + foundProduct = true + + store := t.ProductStoreStripe + switch premiumProduct.Store { + case "ios-appstore": + store = t.ProductStoreIosAppstore + case "android-playstore": + store = t.ProductStoreAndroidPlaystore + case "ethpool": + store = t.ProductStoreEthpool + case "manuall": + store = t.ProductStoreCustom + } + + if effectiveProductId != "premium_free" { + userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ + ProductId: premiumProduct.ProductId, + ProductName: productName, + ProductCategory: t.ProductCategoryPremium, + ProductStore: store, + Start: premiumProduct.Start.Unix(), + End: premiumProduct.End.Unix(), + }) + } + break + } + } + if !foundProduct { + return nil, fmt.Errorf("product %s not found", premiumProduct.ProductId) + } + + premiumAddons := []struct { + PriceId string `db:"price_id"` + Start time.Time `db:"start"` + End time.Time `db:"end"` + Quantity int `db:"quantity"` + }{} + err = FrontendReaderDB.SelectContext(ctx, &premiumAddons, ` + SELECT + price_id, + to_timestamp((uss.payload->>'current_period_start')::bigint) AS start, + to_timestamp((uss.payload->>'current_period_end')::bigint) AS end, + 
COALESCE((uss.payload->>'quantity')::int,1) AS quantity + FROM users_stripe_subscriptions uss + INNER JOIN users u ON u.stripe_customer_id = uss.customer_id + WHERE u.id = $1 AND uss.active = true AND uss.purchase_group = 'addon'`, userId) + if err != nil { + return nil, fmt.Errorf("error getting premiumAddons for userId %v: %w", userId, err) + } + for _, addon := range premiumAddons { + foundAddon := false + for _, p := range productSummary.ExtraDashboardValidatorsPremiumAddon { + if p.StripePriceIdMonthly == addon.PriceId || p.StripePriceIdYearly == addon.PriceId { + foundAddon = true + for i := 0; i < addon.Quantity; i++ { + userInfo.PremiumPerks.ValidatorsPerDashboard += p.ExtraDashboardValidators + userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ + ProductId: utils.PriceIdToProductId(addon.PriceId), + ProductName: p.ProductName, + ProductCategory: t.ProductCategoryPremiumAddon, + ProductStore: t.ProductStoreStripe, + Start: addon.Start.Unix(), + End: addon.End.Unix(), + }) + } + } + } + if !foundAddon { + return nil, fmt.Errorf("addon not found: %v", addon.PriceId) + } + } + + if productSummary.ValidatorsPerDashboardLimit < userInfo.PremiumPerks.ValidatorsPerDashboard { + userInfo.PremiumPerks.ValidatorsPerDashboard = productSummary.ValidatorsPerDashboardLimit + } + + if userInfo.UserGroup == t.UserGroupAdmin { + userInfo.PremiumPerks = adminPerks + } + + return userInfo, nil +} + +func GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { // TODO @patrick post-beta put into db instead of hardcoding here and make it configurable + return &t.ProductSummary{ + ValidatorsPerDashboardLimit: 102_000, + StripePublicKey: utils.Config.Frontend.Stripe.PublicKey, + ApiProducts: []t.ApiProduct{ // TODO @patrick post-beta this data is not final yet + { + ProductId: "api_free", + ProductName: "Free", + PricePerMonthEur: 0, + PricePerYearEur: 0 * 12, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 10, + UnitsPerMonth: 10_000_000, + 
ApiKeys: 2, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "iron", + ProductName: "Iron", + PricePerMonthEur: 1.99, + PricePerYearEur: math.Floor(1.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 20, + UnitsPerMonth: 20_000_000, + ApiKeys: 10, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "silver", + ProductName: "Silver", + PricePerMonthEur: 2.99, + PricePerYearEur: math.Floor(2.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 30, + UnitsPerMonth: 100_000_000, + ApiKeys: 20, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "gold", + ProductName: "Gold", + PricePerMonthEur: 3.99, + PricePerYearEur: math.Floor(3.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 40, + UnitsPerMonth: 200_000_000, + ApiKeys: 40, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + }, + PremiumProducts: []t.PremiumProduct{ + freeTierProduct, + { + ProductName: "Guppy", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 1, + ValidatorsPerDashboard: 100, + ValidatorGroupsPerDashboard: 3, + ShareCustomDashboards: true, + ManageDashboardViaApi: false, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: day, + Hourly: 7 * day, + Daily: month, + Weekly: 0, + }, + EmailNotificationsPerDay: 15, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 3, + WebhookEndpoints: 3, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 2, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardRealTimeMode: true, + NotificationsValidatorDashboardGroupOffline: true, + 
}, + PricePerMonthEur: 9.99, + PricePerYearEur: 107.88, + ProductIdMonthly: "guppy", + ProductIdYearly: "guppy.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Guppy, + StripePriceIdYearly: utils.Config.Frontend.Stripe.GuppyYearly, + }, + { + ProductName: "Dolphin", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 2, + ValidatorsPerDashboard: 300, + ValidatorGroupsPerDashboard: 10, + ShareCustomDashboards: true, + ManageDashboardViaApi: false, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 5 * day, + Hourly: month, + Daily: 2 * month, + Weekly: 8 * week, + }, + EmailNotificationsPerDay: 20, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 10, + WebhookEndpoints: 10, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 10, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardRealTimeMode: true, + NotificationsValidatorDashboardGroupOffline: true, + }, + PricePerMonthEur: 29.99, + PricePerYearEur: 311.88, + ProductIdMonthly: "dolphin", + ProductIdYearly: "dolphin.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Dolphin, + StripePriceIdYearly: utils.Config.Frontend.Stripe.DolphinYearly, + }, + { + ProductName: "Orca", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 2, + ValidatorsPerDashboard: 1000, + ValidatorGroupsPerDashboard: 30, + ShareCustomDashboards: true, + ManageDashboardViaApi: true, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 3 * week, + Hourly: 6 * month, + Daily: 12 * month, + Weekly: maxJsInt, + }, + EmailNotificationsPerDay: 50, + ConfigureNotificationsViaApi: true, + ValidatorGroupNotifications: 60, + WebhookEndpoints: 30, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 10, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + 
NotificationsValidatorDashboardRealTimeMode: true, + NotificationsValidatorDashboardGroupOffline: true, + }, + PricePerMonthEur: 49.99, + PricePerYearEur: 479.88, + ProductIdMonthly: "orca", + ProductIdYearly: "orca.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Orca, + StripePriceIdYearly: utils.Config.Frontend.Stripe.OrcaYearly, + IsPopular: true, + }, + }, + ExtraDashboardValidatorsPremiumAddon: []t.ExtraDashboardValidatorsPremiumAddon{ + { + ProductName: "1k extra valis per dashboard", + ExtraDashboardValidators: 1000, + PricePerMonthEur: 74.99, + PricePerYearEur: 719.88, + ProductIdMonthly: "vdb_addon_1k", + ProductIdYearly: "vdb_addon_1k.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon1k, + StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon1kYearly, + }, + { + ProductName: "10k extra valis per dashboard", + ExtraDashboardValidators: 10000, + PricePerMonthEur: 449.99, + PricePerYearEur: 4319.88, + ProductIdMonthly: "vdb_addon_10k", + ProductIdYearly: "vdb_addon_10k.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon10k, + StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon10kYearly, + }, + }, + }, nil +} + +func GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { + return &freeTierProduct.PremiumPerks, nil +} diff --git a/backend/pkg/commons/mail/mail.go b/backend/pkg/commons/mail/mail.go index e2e9d08e2..966e29e52 100644 --- a/backend/pkg/commons/mail/mail.go +++ b/backend/pkg/commons/mail/mail.go @@ -73,32 +73,35 @@ func createTextMessage(msg types.Email) string { // SendMailRateLimited sends an email to a given address with the given message. // It will return a ratelimit-error if the configured ratelimit is exceeded. 
func SendMailRateLimited(content types.TransitEmailContent) error { - if utils.Config.Frontend.MaxMailsPerEmailPerDay > 0 { - now := time.Now() - count, err := db.CountSentMessage("n_mails", content.UserId) + now := time.Now() + count, err := db.CountSentMessage("n_mails", content.UserId) + if err != nil { + return err + } + + userInfo, err := db.GetUserInfo(context.Background(), uint64(content.UserId)) + if err != nil { + return err + } + timeLeft := now.Add(utils.Day).Truncate(utils.Day).Sub(now) + if count > int64(userInfo.PremiumPerks.EmailNotificationsPerDay) { + return &types.RateLimitError{TimeLeft: timeLeft} + } else if count == int64(userInfo.PremiumPerks.EmailNotificationsPerDay) { + // send an email if this was the last email for today + err := SendHTMLMail(content.Address, + "beaconcha.in - Email notification threshold limit reached", + types.Email{ + Title: "Email notification threshold limit reached", + //nolint: gosec + Body: template.HTML(fmt.Sprintf("You have reached the email notification threshold limit of %d emails per day. Further notification emails will be suppressed for %.1f hours.", utils.Config.Frontend.MaxMailsPerEmailPerDay, timeLeft.Hours())), + }, + []types.EmailAttachment{}) if err != nil { return err } - timeLeft := now.Add(utils.Day).Truncate(utils.Day).Sub(now) - if count > int64(utils.Config.Frontend.MaxMailsPerEmailPerDay) { - return &types.RateLimitError{TimeLeft: timeLeft} - } else if count == int64(utils.Config.Frontend.MaxMailsPerEmailPerDay) { - // send an email if this was the last email for today - err := SendHTMLMail(content.Address, - "beaconcha.in - Email notification threshold limit reached", - types.Email{ - Title: "Email notification threshold limit reached", - //nolint: gosec - Body: template.HTML(fmt.Sprintf("You have reached the email notification threshold limit of %d emails per day. 
Further notification emails will be suppressed for %.1f hours.", utils.Config.Frontend.MaxMailsPerEmailPerDay, timeLeft.Hours())), - }, - []types.EmailAttachment{}) - if err != nil { - return err - } - } } - err := SendHTMLMail(content.Address, content.Subject, content.Email, content.Attachments) + err = SendHTMLMail(content.Address, content.Subject, content.Email, content.Attachments) if err != nil { return err } From 793816904fb4e3aa9f8770d6d412918d4240dd5c Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Tue, 15 Oct 2024 11:19:34 +0200 Subject: [PATCH 02/73] added dashboard name, simplified return struct --- backend/pkg/api/data_access/notifications.go | 27 +++++++++++++------- backend/pkg/api/types/common.go | 5 ++++ backend/pkg/api/types/notifications.go | 18 +++++-------- frontend/types/api/common.ts | 4 +++ frontend/types/api/notifications.ts | 16 +++++------- 5 files changed, 40 insertions(+), 30 deletions(-) diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index c08825fb3..2b297f83e 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -253,7 +253,7 @@ func (d *DataAccessService) GetNotificationOverview(ctx context.Context, userId func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userId uint64, chainIds []uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) { // dev hack; TODO remove - if userId == 127504 { + if userId == 127504 || userId == 127227 { return d.dummy.GetDashboardNotifications(ctx, userId, chainIds, cursor, colSort, search, limit) } response := []t.NotificationDashboardsTableRow{} @@ -395,16 +395,16 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context } notificationDetails := t.NotificationValidatorDashboardDetail{ 
ValidatorOffline: []uint64{}, - GroupOffline: []t.NotificationEventGroup{}, - ProposalMissed: []t.IndexBlocks{}, + GroupOffline: []string{}, + ProposalMissed: []t.IndexSlots{}, ProposalDone: []t.IndexBlocks{}, - UpcomingProposals: []t.IndexBlocks{}, + UpcomingProposals: []t.IndexSlots{}, Slashed: []uint64{}, SyncCommittee: []uint64{}, AttestationMissed: []t.IndexEpoch{}, Withdrawal: []t.IndexBlocks{}, ValidatorOfflineReminder: []uint64{}, - GroupOfflineReminder: []t.NotificationEventGroup{}, + GroupOfflineReminder: []string{}, ValidatorBackOnline: []t.NotificationEventValidatorBackOnline{}, GroupBackOnline: []t.NotificationEventGroupBackOnline{}, MinimumCollateralReached: []t.Address{}, @@ -427,9 +427,18 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context searchIndexSet[searchIndex] = true } + query := `SELECT name FROM users_val_dashboards WHERE id = $1` + err := d.alloyReader.GetContext(ctx, ¬ificationDetails.DashboardName, query, dashboardId) + if err != nil { + if err == sql.ErrNoRows { + return ¬ificationDetails, nil + } + return nil, err + } + result := []byte{} - query := `SELECT details FROM users_val_dashboards_notifications_history WHERE dashboard_id = $1 AND group_id = $2 AND epoch = $3` - err := d.alloyReader.GetContext(ctx, &result, query, dashboardId, groupId, epoch) + query = `SELECT details FROM users_val_dashboards_notifications_history WHERE dashboard_id = $1 AND group_id = $2 AND epoch = $3` + err = d.alloyReader.GetContext(ctx, &result, query, dashboardId, groupId, epoch) if err != nil { if err == sql.ErrNoRows { return ¬ificationDetails, nil @@ -596,10 +605,10 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context notificationDetails.ProposalDone = append(notificationDetails.ProposalDone, t.IndexBlocks{Index: validatorIndex, Blocks: proposalInfo.Proposed}) } if len(proposalInfo.Scheduled) > 0 { - notificationDetails.UpcomingProposals = append(notificationDetails.UpcomingProposals, 
t.IndexBlocks{Index: validatorIndex, Blocks: proposalInfo.Scheduled}) + notificationDetails.UpcomingProposals = append(notificationDetails.UpcomingProposals, t.IndexSlots{Index: validatorIndex, Slots: proposalInfo.Scheduled}) } if len(proposalInfo.Missed) > 0 { - notificationDetails.ProposalMissed = append(notificationDetails.ProposalMissed, t.IndexBlocks{Index: validatorIndex, Blocks: proposalInfo.Missed}) + notificationDetails.ProposalMissed = append(notificationDetails.ProposalMissed, t.IndexSlots{Index: validatorIndex, Slots: proposalInfo.Missed}) } } diff --git a/backend/pkg/api/types/common.go b/backend/pkg/api/types/common.go index a57ebf982..5e5ccf919 100644 --- a/backend/pkg/api/types/common.go +++ b/backend/pkg/api/types/common.go @@ -151,6 +151,11 @@ type IndexBlocks struct { Blocks []uint64 `json:"blocks"` } +type IndexSlots struct { + Index uint64 `json:"index"` + Slots []uint64 `json:"slots"` +} + type ValidatorStateCounts struct { Online uint64 `json:"online"` Offline uint64 `json:"offline"` diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index f9eb491f9..8d59074dd 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -48,14 +48,9 @@ type InternalGetUserNotificationDashboardsResponse ApiPagingResponse[Notificatio // ------------------------------------------------------------ // Validator Dashboard Notification Detail -type NotificationEventGroup struct { - GroupName string `json:"group_name"` - DashboardID uint64 `json:"dashboard_id"` -} type NotificationEventGroupBackOnline struct { - GroupName string `json:"group_name"` - DashboardID uint64 `json:"dashboard_id"` - EpochCount uint64 `json:"epoch_count"` + GroupName string `json:"group_name"` + EpochCount uint64 `json:"epoch_count"` } type NotificationEventValidatorBackOnline struct { @@ -64,17 +59,18 @@ type NotificationEventValidatorBackOnline struct { } type NotificationValidatorDashboardDetail struct { + 
DashboardName string `json:"dashboard_name"` ValidatorOffline []uint64 `json:"validator_offline"` // validator indices - GroupOffline []NotificationEventGroup `json:"group_offline"` // TODO not filled yet - ProposalMissed []IndexBlocks `json:"proposal_missed"` + GroupOffline []string `json:"group_offline"` // TODO not filled yet + ProposalMissed []IndexSlots `json:"proposal_missed"` ProposalDone []IndexBlocks `json:"proposal_done"` - UpcomingProposals []IndexBlocks `json:"upcoming_proposals"` + UpcomingProposals []IndexSlots `json:"upcoming_proposals"` Slashed []uint64 `json:"slashed"` // validator indices SyncCommittee []uint64 `json:"sync_committee"` // validator indices AttestationMissed []IndexEpoch `json:"attestation_missed"` // index (epoch) Withdrawal []IndexBlocks `json:"withdrawal"` ValidatorOfflineReminder []uint64 `json:"validator_offline_reminder"` // validator indices; TODO not filled yet - GroupOfflineReminder []NotificationEventGroup `json:"group_offline_reminder"` // TODO not filled yet + GroupOfflineReminder []string `json:"group_offline_reminder"` // TODO not filled yet ValidatorBackOnline []NotificationEventValidatorBackOnline `json:"validator_back_online"` GroupBackOnline []NotificationEventGroupBackOnline `json:"group_back_online"` // TODO not filled yet MinimumCollateralReached []Address `json:"min_collateral_reached"` // node addresses diff --git a/frontend/types/api/common.ts b/frontend/types/api/common.ts index ff4849a95..d5fd6e4ab 100644 --- a/frontend/types/api/common.ts +++ b/frontend/types/api/common.ts @@ -121,6 +121,10 @@ export interface IndexBlocks { index: number /* uint64 */; blocks: number /* uint64 */[]; } +export interface IndexSlots { + index: number /* uint64 */; + slots: number /* uint64 */[]; +} export interface ValidatorStateCounts { online: number /* uint64 */; offline: number /* uint64 */; diff --git a/frontend/types/api/notifications.ts b/frontend/types/api/notifications.ts index febf3b190..90f524fd6 100644 --- 
a/frontend/types/api/notifications.ts +++ b/frontend/types/api/notifications.ts @@ -1,6 +1,6 @@ // Code generated by tygo. DO NOT EDIT. /* eslint-disable */ -import type { ApiDataResponse, ApiPagingResponse, IndexBlocks, IndexEpoch, Address, Hash } from './common' +import type { ApiDataResponse, ApiPagingResponse, IndexSlots, IndexBlocks, IndexEpoch, Address, Hash } from './common' ////////// // source: notifications.go @@ -45,13 +45,8 @@ export interface NotificationDashboardsTableRow { event_types: ('validator_online' | 'validator_offline' | 'group_online' | 'group_offline' | 'attestation_missed' | 'proposal_success' | 'proposal_missed' | 'proposal_upcoming' | 'max_collateral' | 'min_collateral' | 'sync' | 'withdrawal' | 'validator_got_slashed' | 'validator_has_slashed' | 'incoming_tx' | 'outgoing_tx' | 'transfer_erc20' | 'transfer_erc721' | 'transfer_erc1155')[]; } export type InternalGetUserNotificationDashboardsResponse = ApiPagingResponse; -export interface NotificationEventGroup { - group_name: string; - dashboard_id: number /* uint64 */; -} export interface NotificationEventGroupBackOnline { group_name: string; - dashboard_id: number /* uint64 */; epoch_count: number /* uint64 */; } export interface NotificationEventValidatorBackOnline { @@ -59,17 +54,18 @@ export interface NotificationEventValidatorBackOnline { epoch_count: number /* uint64 */; } export interface NotificationValidatorDashboardDetail { + dashboard_name: string; validator_offline: number /* uint64 */[]; // validator indices - group_offline: NotificationEventGroup[]; // TODO not filled yet - proposal_missed: IndexBlocks[]; + group_offline: string[]; // TODO not filled yet + proposal_missed: IndexSlots[]; proposal_done: IndexBlocks[]; - upcoming_proposals: IndexBlocks[]; + upcoming_proposals: IndexSlots[]; slashed: number /* uint64 */[]; // validator indices sync_committee: number /* uint64 */[]; // validator indices attestation_missed: IndexEpoch[]; // index (epoch) withdrawal: IndexBlocks[]; 
validator_offline_reminder: number /* uint64 */[]; // validator indices; TODO not filled yet - group_offline_reminder: NotificationEventGroup[]; // TODO not filled yet + group_offline_reminder: string[]; // TODO not filled yet validator_back_online: NotificationEventValidatorBackOnline[]; group_back_online: NotificationEventGroupBackOnline[]; // TODO not filled yet min_collateral_reached: Address[]; // node addresses From 8e1f3e2c5500f7e37f7c01cf031b4f023b8e9d88 Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Tue, 15 Oct 2024 12:31:54 +0200 Subject: [PATCH 03/73] remove group name --- backend/pkg/api/data_access/notifications.go | 22 ++++++++++++-------- backend/pkg/api/types/notifications.go | 14 +++++-------- frontend/types/api/notifications.ts | 11 ++++------ 3 files changed, 22 insertions(+), 25 deletions(-) diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index 2b297f83e..ee683619e 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -389,13 +389,8 @@ func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userI } func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64, search string) (*t.NotificationValidatorDashboardDetail, error) { - // dev hack; TODO remove - if dashboardId == 5426 || dashboardId == 5334 { - return d.dummy.GetValidatorDashboardNotificationDetails(ctx, dashboardId, groupId, epoch, search) - } notificationDetails := t.NotificationValidatorDashboardDetail{ ValidatorOffline: []uint64{}, - GroupOffline: []string{}, ProposalMissed: []t.IndexSlots{}, ProposalDone: []t.IndexBlocks{}, UpcomingProposals: []t.IndexSlots{}, @@ -404,9 +399,7 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context AttestationMissed: []t.IndexEpoch{}, Withdrawal: 
[]t.IndexBlocks{}, ValidatorOfflineReminder: []uint64{}, - GroupOfflineReminder: []string{}, ValidatorBackOnline: []t.NotificationEventValidatorBackOnline{}, - GroupBackOnline: []t.NotificationEventGroupBackOnline{}, MinimumCollateralReached: []t.Address{}, MaximumCollateralReached: []t.Address{}, } @@ -427,8 +420,15 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context searchIndexSet[searchIndex] = true } - query := `SELECT name FROM users_val_dashboards WHERE id = $1` - err := d.alloyReader.GetContext(ctx, ¬ificationDetails.DashboardName, query, dashboardId) + query := `SELECT + uvd.name AS dashboard_name, + uvdg.name AS group_name + FROM + users_val_dashboards uvd + INNER JOIN + users_val_dashboards_groups uvdg ON uvdg.dashboard_id = uvd.id + WHERE uvd.id = $1 AND uvdg.id = $2` + err := d.alloyReader.GetContext(ctx, ¬ificationDetails, query, dashboardId, groupId) if err != nil { if err == sql.ErrNoRows { return ¬ificationDetails, nil @@ -436,6 +436,10 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context return nil, err } + if notificationDetails.GroupName == "" { + notificationDetails.GroupName = t.DefaultGroupName + } + result := []byte{} query = `SELECT details FROM users_val_dashboards_notifications_history WHERE dashboard_id = $1 AND group_id = $2 AND epoch = $3` err = d.alloyReader.GetContext(ctx, &result, query, dashboardId, groupId, epoch) diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index 8d59074dd..f61d868d0 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -48,20 +48,16 @@ type InternalGetUserNotificationDashboardsResponse ApiPagingResponse[Notificatio // ------------------------------------------------------------ // Validator Dashboard Notification Detail -type NotificationEventGroupBackOnline struct { - GroupName string `json:"group_name"` - EpochCount uint64 `json:"epoch_count"` -} - type 
NotificationEventValidatorBackOnline struct { Index uint64 `json:"index"` EpochCount uint64 `json:"epoch_count"` } type NotificationValidatorDashboardDetail struct { - DashboardName string `json:"dashboard_name"` + DashboardName string `db:"dashboard_name" json:"dashboard_name"` + GroupName string `db:"group_name" json:"group_name"` ValidatorOffline []uint64 `json:"validator_offline"` // validator indices - GroupOffline []string `json:"group_offline"` // TODO not filled yet + GroupOffline bool `json:"group_offline"` // TODO not filled yet ProposalMissed []IndexSlots `json:"proposal_missed"` ProposalDone []IndexBlocks `json:"proposal_done"` UpcomingProposals []IndexSlots `json:"upcoming_proposals"` @@ -70,9 +66,9 @@ type NotificationValidatorDashboardDetail struct { AttestationMissed []IndexEpoch `json:"attestation_missed"` // index (epoch) Withdrawal []IndexBlocks `json:"withdrawal"` ValidatorOfflineReminder []uint64 `json:"validator_offline_reminder"` // validator indices; TODO not filled yet - GroupOfflineReminder []string `json:"group_offline_reminder"` // TODO not filled yet + GroupOfflineReminder bool `json:"group_offline_reminder"` // TODO not filled yet ValidatorBackOnline []NotificationEventValidatorBackOnline `json:"validator_back_online"` - GroupBackOnline []NotificationEventGroupBackOnline `json:"group_back_online"` // TODO not filled yet + GroupBackOnline uint64 `json:"group_back_online"` // TODO not filled yet MinimumCollateralReached []Address `json:"min_collateral_reached"` // node addresses MaximumCollateralReached []Address `json:"max_collateral_reached"` // node addresses } diff --git a/frontend/types/api/notifications.ts b/frontend/types/api/notifications.ts index 90f524fd6..71017acf5 100644 --- a/frontend/types/api/notifications.ts +++ b/frontend/types/api/notifications.ts @@ -45,18 +45,15 @@ export interface NotificationDashboardsTableRow { event_types: ('validator_online' | 'validator_offline' | 'group_online' | 'group_offline' | 
'attestation_missed' | 'proposal_success' | 'proposal_missed' | 'proposal_upcoming' | 'max_collateral' | 'min_collateral' | 'sync' | 'withdrawal' | 'validator_got_slashed' | 'validator_has_slashed' | 'incoming_tx' | 'outgoing_tx' | 'transfer_erc20' | 'transfer_erc721' | 'transfer_erc1155')[]; } export type InternalGetUserNotificationDashboardsResponse = ApiPagingResponse; -export interface NotificationEventGroupBackOnline { - group_name: string; - epoch_count: number /* uint64 */; -} export interface NotificationEventValidatorBackOnline { index: number /* uint64 */; epoch_count: number /* uint64 */; } export interface NotificationValidatorDashboardDetail { dashboard_name: string; + group_name: string; validator_offline: number /* uint64 */[]; // validator indices - group_offline: string[]; // TODO not filled yet + group_offline: boolean; // TODO not filled yet proposal_missed: IndexSlots[]; proposal_done: IndexBlocks[]; upcoming_proposals: IndexSlots[]; @@ -65,9 +62,9 @@ export interface NotificationValidatorDashboardDetail { attestation_missed: IndexEpoch[]; // index (epoch) withdrawal: IndexBlocks[]; validator_offline_reminder: number /* uint64 */[]; // validator indices; TODO not filled yet - group_offline_reminder: string[]; // TODO not filled yet + group_offline_reminder: boolean; // TODO not filled yet validator_back_online: NotificationEventValidatorBackOnline[]; - group_back_online: NotificationEventGroupBackOnline[]; // TODO not filled yet + group_back_online: number /* uint64 */; // TODO not filled yet min_collateral_reached: Address[]; // node addresses max_collateral_reached: Address[]; // node addresses } From c36bebcb5b3136e85fd2aa9bbdf9b692b6a3e27f Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Tue, 15 Oct 2024 12:55:21 +0200 Subject: [PATCH 04/73] changed withdrawals type --- backend/pkg/api/data_access/notifications.go | 4 ++-- backend/pkg/api/types/notifications.go | 2 +- 
frontend/types/api/notifications.ts | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index 0d018fa31..02730d186 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -393,7 +393,7 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context Slashed: []uint64{}, SyncCommittee: []uint64{}, AttestationMissed: []t.IndexEpoch{}, - Withdrawal: []t.IndexBlocks{}, + Withdrawal: []t.IndexSlots{}, ValidatorOfflineReminder: []uint64{}, ValidatorBackOnline: []t.NotificationEventValidatorBackOnline{}, MinimumCollateralReached: []t.Address{}, @@ -560,7 +560,7 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context continue } // TODO might need to take care of automatic + exit withdrawal happening in the same epoch ? - notificationDetails.Withdrawal = append(notificationDetails.Withdrawal, t.IndexBlocks{Index: curNotification.ValidatorIndex, Blocks: []uint64{curNotification.Slot}}) + notificationDetails.Withdrawal = append(notificationDetails.Withdrawal, t.IndexSlots{Index: curNotification.ValidatorIndex, Slots: []uint64{curNotification.Slot}}) case types.NetworkLivenessIncreasedEventName, types.EthClientUpdateEventName, types.MonitoringMachineOfflineEventName, diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index f61d868d0..a0b2241dd 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -64,7 +64,7 @@ type NotificationValidatorDashboardDetail struct { Slashed []uint64 `json:"slashed"` // validator indices SyncCommittee []uint64 `json:"sync_committee"` // validator indices AttestationMissed []IndexEpoch `json:"attestation_missed"` // index (epoch) - Withdrawal []IndexBlocks `json:"withdrawal"` + Withdrawal []IndexSlots `json:"withdrawal"` ValidatorOfflineReminder []uint64 
`json:"validator_offline_reminder"` // validator indices; TODO not filled yet GroupOfflineReminder bool `json:"group_offline_reminder"` // TODO not filled yet ValidatorBackOnline []NotificationEventValidatorBackOnline `json:"validator_back_online"` diff --git a/frontend/types/api/notifications.ts b/frontend/types/api/notifications.ts index 71017acf5..bab50455b 100644 --- a/frontend/types/api/notifications.ts +++ b/frontend/types/api/notifications.ts @@ -60,7 +60,7 @@ export interface NotificationValidatorDashboardDetail { slashed: number /* uint64 */[]; // validator indices sync_committee: number /* uint64 */[]; // validator indices attestation_missed: IndexEpoch[]; // index (epoch) - withdrawal: IndexBlocks[]; + withdrawal: IndexSlots[]; validator_offline_reminder: number /* uint64 */[]; // validator indices; TODO not filled yet group_offline_reminder: boolean; // TODO not filled yet validator_back_online: NotificationEventValidatorBackOnline[]; From a78fbf22f16c163f9a66daeab68c8c0a83bd3309 Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Tue, 15 Oct 2024 13:04:24 +0200 Subject: [PATCH 05/73] added struct tags --- backend/pkg/api/types/common.go | 4 ++-- frontend/types/api/common.ts | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/backend/pkg/api/types/common.go b/backend/pkg/api/types/common.go index 5e5ccf919..24e2b9522 100644 --- a/backend/pkg/api/types/common.go +++ b/backend/pkg/api/types/common.go @@ -142,8 +142,8 @@ type ChartHistorySeconds struct { } type IndexEpoch struct { - Index uint64 - Epoch uint64 + Index uint64 `json:"index"` + Epoch uint64 `json:"epoch"` } type IndexBlocks struct { diff --git a/frontend/types/api/common.ts b/frontend/types/api/common.ts index d5fd6e4ab..4f1d50872 100644 --- a/frontend/types/api/common.ts +++ b/frontend/types/api/common.ts @@ -114,8 +114,8 @@ export interface ChartHistorySeconds { weekly: number /* uint64 */; } export interface IndexEpoch { - 
Index: number /* uint64 */; - Epoch: number /* uint64 */; + index: number /* uint64 */; + epoch: number /* uint64 */; } export interface IndexBlocks { index: number /* uint64 */; From 61a39b02313ebaf3839d6ef6efb5525aaaa62f7c Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Tue, 15 Oct 2024 17:52:05 +0200 Subject: [PATCH 06/73] fixed details list, adjusted withdrawal field --- backend/go.mod | 2 +- backend/go.sum | 2 + backend/pkg/api/data_access/header.go | 43 +++ backend/pkg/api/data_access/notifications.go | 349 +++++++++++-------- backend/pkg/api/types/data_access.go | 1 + backend/pkg/api/types/notifications.go | 8 +- frontend/types/api/notifications.ts | 9 +- 7 files changed, 255 insertions(+), 159 deletions(-) diff --git a/backend/go.mod b/backend/go.mod index c9eda313c..ff2204794 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -64,7 +64,7 @@ require ( github.com/prysmaticlabs/go-ssz v0.0.0-20210121151755-f6208871c388 github.com/rocket-pool/rocketpool-go v1.8.3-0.20240618173422-783b8668f5b4 github.com/rocket-pool/smartnode v1.13.6 - github.com/shopspring/decimal v1.3.1 + github.com/shopspring/decimal v1.4.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d diff --git a/backend/go.sum b/backend/go.sum index 0ab17372b..481c0f1c4 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -873,6 +873,8 @@ github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9Nz github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= diff --git a/backend/pkg/api/data_access/header.go b/backend/pkg/api/data_access/header.go index 5658c9157..1a91ad832 100644 --- a/backend/pkg/api/data_access/header.go +++ b/backend/pkg/api/data_access/header.go @@ -1,9 +1,15 @@ package dataaccess import ( + "context" + "database/sql" + "fmt" + t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/cache" + "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/price" + "github.com/gobitfly/beaconchain/pkg/commons/utils" ) func (d *DataAccessService) GetLatestSlot() (uint64, error) { @@ -26,6 +32,43 @@ func (d *DataAccessService) GetBlockHeightAt(slot uint64) (uint64, error) { return d.dummy.GetBlockHeightAt(slot) } +// returns the block number of the latest existing block at or before the given slot +func (d *DataAccessService) GetLatestBlockHeightForSlot(ctx context.Context, slot uint64) (uint64, error) { + query := `SELECT MAX(exec_block_number) FROM blocks WHERE slot <= $1` + res := uint64(0) + err := d.alloyReader.GetContext(ctx, &res, query, slot) + if err != nil { + if err == sql.ErrNoRows { + log.Warnf("no EL block found at or before slot %d", slot) + return 0, nil + } + return 0, fmt.Errorf("failed to get latest existing block height at or before slot %d: %w", slot, err) + } + return res, nil +} + +func (d *DataAccessService) GetLatestBlockHeightsForEpoch(ctx context.Context, epoch uint64) ([]uint64, error) { + // use 2 epochs as safety margin + query := ` + WITH recent_blocks AS ( + SELECT slot, exec_block_number + FROM blocks + WHERE slot < $1 + ORDER BY slot DESC + LIMIT $2 * 2 + ) + SELECT MAX(exec_block_number) OVER (ORDER BY slot) AS block + 
FROM recent_blocks + ORDER BY slot DESC + LIMIT $2` + res := []uint64{} + err := d.alloyReader.SelectContext(ctx, &res, query, (epoch+1)*utils.Config.Chain.ClConfig.SlotsPerEpoch, utils.Config.Chain.ClConfig.SlotsPerEpoch) + if err != nil { + return nil, fmt.Errorf("failed to get latest existing block heights for slots in epoch %d: %w", epoch, err) + } + return res, nil +} + func (d *DataAccessService) GetLatestExchangeRates() ([]t.EthConversionRate, error) { result := []t.EthConversionRate{} diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index 02730d186..d161a4998 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -19,6 +19,7 @@ import ( "github.com/doug-martin/goqu/v9" "github.com/doug-martin/goqu/v9/exp" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/params" "github.com/go-redis/redis/v8" "github.com/gobitfly/beaconchain/pkg/api/enums" @@ -27,7 +28,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" - "github.com/gobitfly/beaconchain/pkg/notification" + n "github.com/gobitfly/beaconchain/pkg/notification" "github.com/lib/pq" "github.com/shopspring/decimal" "golang.org/x/sync/errgroup" @@ -61,17 +62,17 @@ type NotificationsRepository interface { func (*DataAccessService) registerNotificationInterfaceTypes() { var once sync.Once once.Do(func() { - gob.Register(¬ification.ValidatorProposalNotification{}) - gob.Register(¬ification.ValidatorAttestationNotification{}) - gob.Register(¬ification.ValidatorIsOfflineNotification{}) - gob.Register(¬ification.ValidatorGotSlashedNotification{}) - gob.Register(¬ification.ValidatorWithdrawalNotification{}) - gob.Register(¬ification.NetworkNotification{}) - gob.Register(¬ification.RocketpoolNotification{}) - gob.Register(¬ification.MonitorMachineNotification{}) 
- gob.Register(¬ification.TaxReportNotification{}) - gob.Register(¬ification.EthClientNotification{}) - gob.Register(¬ification.SyncCommitteeSoonNotification{}) + gob.Register(&n.ValidatorProposalNotification{}) + gob.Register(&n.ValidatorAttestationNotification{}) + gob.Register(&n.ValidatorIsOfflineNotification{}) + gob.Register(&n.ValidatorGotSlashedNotification{}) + gob.Register(&n.ValidatorWithdrawalNotification{}) + gob.Register(&n.NetworkNotification{}) + gob.Register(&n.RocketpoolNotification{}) + gob.Register(&n.MonitorMachineNotification{}) + gob.Register(&n.TaxReportNotification{}) + gob.Register(&n.EthClientNotification{}) + gob.Register(&n.SyncCommitteeSoonNotification{}) }) } @@ -393,7 +394,7 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context Slashed: []uint64{}, SyncCommittee: []uint64{}, AttestationMissed: []t.IndexEpoch{}, - Withdrawal: []t.IndexSlots{}, + Withdrawal: []t.NotificationEventWithdrawal{}, ValidatorOfflineReminder: []uint64{}, ValidatorBackOnline: []t.NotificationEventValidatorBackOnline{}, MinimumCollateralReached: []t.Address{}, @@ -416,6 +417,8 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context searchIndexSet[searchIndex] = true } + // ------------------------------------- + // dashboard and group name query := `SELECT uvd.name AS dashboard_name, uvdg.name AS group_name @@ -431,43 +434,28 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context } return nil, err } - if notificationDetails.GroupName == "" { notificationDetails.GroupName = t.DefaultGroupName } + if notificationDetails.DashboardName == "" { + notificationDetails.DashboardName = t.DefaultDashboardName + } - result := []byte{} + // ------------------------------------- + // retrieve notification events + eventTypesEncodedList := [][]byte{} query = `SELECT details FROM users_val_dashboards_notifications_history WHERE dashboard_id = $1 AND group_id = $2 AND epoch = $3` - err = 
d.alloyReader.GetContext(ctx, &result, query, dashboardId, groupId, epoch) + err = d.alloyReader.SelectContext(ctx, &eventTypesEncodedList, query, dashboardId, groupId, epoch) if err != nil { - if err == sql.ErrNoRows { - return ¬ificationDetails, nil - } return nil, err } - if len(result) == 0 { + if len(eventTypesEncodedList) == 0 { return ¬ificationDetails, nil } - buf := bytes.NewBuffer(result) - gz, err := gzip.NewReader(buf) - if err != nil { - return nil, err - } - defer gz.Close() - - // might need to loop if we get memory issues with large dashboards and can't ReadAll - decompressedData, err := io.ReadAll(gz) - if err != nil { - return nil, err - } - - decoder := gob.NewDecoder(bytes.NewReader(decompressedData)) - - notifications := []types.Notification{} - err = decoder.Decode(¬ifications) + latestBlocks, err := d.GetLatestBlockHeightsForEpoch(ctx, epoch) if err != nil { - return nil, err + return nil, fmt.Errorf("error getting latest block height: %w", err) } type ProposalInfo struct { @@ -478,124 +466,160 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context proposalsInfo := make(map[t.VDBValidator]*ProposalInfo) addressMapping := make(map[string]*t.Address) - for _, not := range notifications { - switch not.GetEventName() { - case types.ValidatorMissedProposalEventName, types.ValidatorExecutedProposalEventName /*, types.ValidatorScheduledProposalEventName*/ : - // aggregate proposals - curNotification, ok := not.(*notification.ValidatorProposalNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorProposalNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - if _, ok := proposalsInfo[curNotification.ValidatorIndex]; !ok { - proposalsInfo[curNotification.ValidatorIndex] = &ProposalInfo{} - } - prop := proposalsInfo[curNotification.ValidatorIndex] - switch curNotification.Status { - case 0: - prop.Scheduled = append(prop.Scheduled, 
curNotification.Slot) - case 1: - prop.Proposed = append(prop.Proposed, curNotification.Block) - case 2: - prop.Missed = append(prop.Missed, curNotification.Slot) - } - case types.ValidatorMissedAttestationEventName: - curNotification, ok := not.(*notification.ValidatorAttestationNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorAttestationNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - if curNotification.Status != 0 { - continue - } - notificationDetails.AttestationMissed = append(notificationDetails.AttestationMissed, t.IndexEpoch{Index: curNotification.ValidatorIndex, Epoch: curNotification.Epoch}) - case types.ValidatorGotSlashedEventName: - curNotification, ok := not.(*notification.ValidatorGotSlashedNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorGotSlashedNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - notificationDetails.Slashed = append(notificationDetails.Slashed, curNotification.ValidatorIndex) - case types.ValidatorIsOfflineEventName: - curNotification, ok := not.(*notification.ValidatorIsOfflineNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorIsOfflineNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - if curNotification.IsOffline { - notificationDetails.ValidatorOffline = append(notificationDetails.ValidatorOffline, curNotification.ValidatorIndex) - } else { - // TODO EpochCount is not correct, missing / cumbersome to retrieve from backend - using "back online since" instead atm - notificationDetails.ValidatorBackOnline = append(notificationDetails.ValidatorBackOnline, t.NotificationEventValidatorBackOnline{Index: curNotification.ValidatorIndex, EpochCount: curNotification.Epoch}) - } - // TODO not present in backend yet - 
//notificationDetails.ValidatorOfflineReminder = ... - case types.ValidatorGroupIsOfflineEventName: - // TODO type / collection not present yet, skipping - /*curNotification, ok := not.(*notification.validatorGroupIsOfflineNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to validatorGroupIsOfflineNotification") - } - if curNotification.Status == 0 { - notificationDetails.GroupOffline = ... - notificationDetails.GroupOfflineReminder = ... - } else { - notificationDetails.GroupBackOnline = ... - } - */ - case types.ValidatorReceivedWithdrawalEventName: - curNotification, ok := not.(*notification.ValidatorWithdrawalNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorWithdrawalNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - // TODO might need to take care of automatic + exit withdrawal happening in the same epoch ? - notificationDetails.Withdrawal = append(notificationDetails.Withdrawal, t.IndexSlots{Index: curNotification.ValidatorIndex, Slots: []uint64{curNotification.Slot}}) - case types.NetworkLivenessIncreasedEventName, - types.EthClientUpdateEventName, - types.MonitoringMachineOfflineEventName, - types.MonitoringMachineDiskAlmostFullEventName, - types.MonitoringMachineCpuLoadEventName, - types.MonitoringMachineMemoryUsageEventName, - types.TaxReportEventName: - // not vdb notifications, skip - case types.ValidatorDidSlashEventName: - case types.RocketpoolCommissionThresholdEventName, - types.RocketpoolNewClaimRoundStartedEventName: - // these could maybe returned later (?) 
- case types.RocketpoolCollateralMinReachedEventName, types.RocketpoolCollateralMaxReachedEventName: - _, ok := not.(*notification.RocketpoolNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to RocketpoolNotification") - } - addr := t.Address{Hash: t.Hash(not.GetEventFilter()), IsContract: true} - addressMapping[not.GetEventFilter()] = &addr - if not.GetEventName() == types.RocketpoolCollateralMinReachedEventName { - notificationDetails.MinimumCollateralReached = append(notificationDetails.MinimumCollateralReached, addr) - } else { - notificationDetails.MaximumCollateralReached = append(notificationDetails.MaximumCollateralReached, addr) - } - case types.SyncCommitteeSoonEventName: - curNotification, ok := not.(*notification.SyncCommitteeSoonNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to SyncCommitteeSoonNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue + contractStatusRequests := make([]db.ContractInteractionAtRequest, 0) + for _, eventTypesEncoded := range eventTypesEncodedList { + buf := bytes.NewBuffer(eventTypesEncoded) + gz, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + defer gz.Close() + + // might need to loop if we get memory issues + eventTypes, err := io.ReadAll(gz) + if err != nil { + return nil, err + } + + decoder := gob.NewDecoder(bytes.NewReader(eventTypes)) + + notifications := []types.Notification{} + err = decoder.Decode(&notifications) + if err != nil { + return nil, err + } + + for _, notification := range notifications { + switch notification.GetEventName() { + case types.ValidatorMissedProposalEventName, types.ValidatorExecutedProposalEventName /*, types.ValidatorScheduledProposalEventName*/ : + // aggregate proposals + curNotification, ok := notification.(*n.ValidatorProposalNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorProposalNotification") + } + if 
searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + if _, ok := proposalsInfo[curNotification.ValidatorIndex]; !ok { + proposalsInfo[curNotification.ValidatorIndex] = &ProposalInfo{} + } + prop := proposalsInfo[curNotification.ValidatorIndex] + switch curNotification.Status { + case 0: + prop.Scheduled = append(prop.Scheduled, curNotification.Slot) + case 1: + prop.Proposed = append(prop.Proposed, curNotification.Block) + case 2: + prop.Missed = append(prop.Missed, curNotification.Slot) + } + case types.ValidatorMissedAttestationEventName: + curNotification, ok := notification.(*n.ValidatorAttestationNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorAttestationNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + if curNotification.Status != 0 { + continue + } + notificationDetails.AttestationMissed = append(notificationDetails.AttestationMissed, t.IndexEpoch{Index: curNotification.ValidatorIndex, Epoch: curNotification.Epoch}) + case types.ValidatorGotSlashedEventName: + curNotification, ok := notification.(*n.ValidatorGotSlashedNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorGotSlashedNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.Slashed = append(notificationDetails.Slashed, curNotification.ValidatorIndex) + case types.ValidatorIsOfflineEventName: + curNotification, ok := notification.(*n.ValidatorIsOfflineNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorIsOfflineNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + if curNotification.IsOffline { + notificationDetails.ValidatorOffline = append(notificationDetails.ValidatorOffline, curNotification.ValidatorIndex) + } else { + // TODO EpochCount is not correct, missing / 
cumbersome to retrieve from backend - using "back online since" instead atm + notificationDetails.ValidatorBackOnline = append(notificationDetails.ValidatorBackOnline, t.NotificationEventValidatorBackOnline{Index: curNotification.ValidatorIndex, EpochCount: curNotification.Epoch}) + } + // TODO not present in backend yet + //notificationDetails.ValidatorOfflineReminder = ... + case types.ValidatorGroupIsOfflineEventName: + // TODO type / collection not present yet, skipping + /*curNotification, ok := not.(*notification.validatorGroupIsOfflineNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to validatorGroupIsOfflineNotification") + } + if curNotification.Status == 0 { + notificationDetails.GroupOffline = ... + notificationDetails.GroupOfflineReminder = ... + } else { + notificationDetails.GroupBackOnline = ... + } + */ + case types.ValidatorReceivedWithdrawalEventName: + curNotification, ok := notification.(*n.ValidatorWithdrawalNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorWithdrawalNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + // incorrect formatting TODO rework the Address and ContractInteractionAtRequest types to use clear string formatting (or prob go-ethereum common.Address) + contractStatusRequests = append(contractStatusRequests, db.ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", curNotification.Address), + Block: int64(latestBlocks[curNotification.Slot%utils.Config.Chain.ClConfig.SlotsPerEpoch]), + TxIdx: -1, + TraceIdx: -1, + }) + addr := t.Address{Hash: t.Hash(hexutil.Encode(curNotification.Address))} + addressMapping[hexutil.Encode(curNotification.Address)] = &addr + notificationDetails.Withdrawal = append(notificationDetails.Withdrawal, t.NotificationEventWithdrawal{ + Index: curNotification.ValidatorIndex, + Amount: decimal.NewFromUint64(curNotification.Amount), + Address: addr, + }) + case 
types.NetworkLivenessIncreasedEventName, + types.EthClientUpdateEventName, + types.MonitoringMachineOfflineEventName, + types.MonitoringMachineDiskAlmostFullEventName, + types.MonitoringMachineCpuLoadEventName, + types.MonitoringMachineMemoryUsageEventName, + types.TaxReportEventName: + // not vdb notifications, skip + case types.ValidatorDidSlashEventName: + case types.RocketpoolCommissionThresholdEventName, + types.RocketpoolNewClaimRoundStartedEventName: + // these could maybe returned later (?) + case types.RocketpoolCollateralMinReachedEventName, types.RocketpoolCollateralMaxReachedEventName: + _, ok := notification.(*n.RocketpoolNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to RocketpoolNotification") + } + addr := t.Address{Hash: t.Hash(notification.GetEventFilter()), IsContract: true} + addressMapping[notification.GetEventFilter()] = &addr + if notification.GetEventName() == types.RocketpoolCollateralMinReachedEventName { + notificationDetails.MinimumCollateralReached = append(notificationDetails.MinimumCollateralReached, addr) + } else { + notificationDetails.MaximumCollateralReached = append(notificationDetails.MaximumCollateralReached, addr) + } + case types.SyncCommitteeSoonEventName: + curNotification, ok := notification.(*n.SyncCommitteeSoonNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to SyncCommitteeSoonNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.SyncCommittee = append(notificationDetails.SyncCommittee, curNotification.ValidatorIndex) + default: + log.Debugf("Unhandled notification type: %s", notification.GetEventName()) } - notificationDetails.SyncCommittee = append(notificationDetails.SyncCommittee, curNotification.ValidatorIndex) - default: - log.Debugf("Unhandled notification type: %s", not.GetEventName()) } } @@ -616,6 +640,14 @@ func (d *DataAccessService) 
GetValidatorDashboardNotificationDetails(ctx context if err := d.GetNamesAndEnsForAddresses(ctx, addressMapping); err != nil { return nil, err } + contractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusRequests) + if err != nil { + return nil, err + } + contractStatusPerAddress := make(map[string]int) + for i, contractStatus := range contractStatusRequests { + contractStatusPerAddress["0x"+contractStatus.Address] = i + } for i := range notificationDetails.MinimumCollateralReached { if address, ok := addressMapping[string(notificationDetails.MinimumCollateralReached[i].Hash)]; ok { notificationDetails.MinimumCollateralReached[i] = *address @@ -626,6 +658,13 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context notificationDetails.MaximumCollateralReached[i] = *address } } + for i := range notificationDetails.Withdrawal { + if address, ok := addressMapping[string(notificationDetails.Withdrawal[i].Address.Hash)]; ok { + notificationDetails.Withdrawal[i].Address = *address + } + contractStatus := contractStatuses[contractStatusPerAddress[string(notificationDetails.Withdrawal[i].Address.Hash)]] + notificationDetails.Withdrawal[i].Address.IsContract = contractStatus == types.CONTRACT_CREATION || contractStatus == types.CONTRACT_PRESENT + } return &notificationDetails, nil } diff --git a/backend/pkg/api/types/data_access.go b/backend/pkg/api/types/data_access.go index db35871fb..229677a9b 100644 --- a/backend/pkg/api/types/data_access.go +++ b/backend/pkg/api/types/data_access.go @@ -16,6 +16,7 @@ const DefaultGroupId = 0 const AllGroups = -1 const NetworkAverage = -2 const DefaultGroupName = "default" +const DefaultDashboardName = DefaultGroupName type Sort[T enums.Enum] struct { Column T diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index a0b2241dd..7a2fcaac4 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -53,6 +53,12 
@@ type NotificationEventValidatorBackOnline struct { EpochCount uint64 `json:"epoch_count"` } +type NotificationEventWithdrawal struct { + Index uint64 `json:"index"` + Amount decimal.Decimal `json:"amount"` + Address Address `json:"address"` +} + type NotificationValidatorDashboardDetail struct { DashboardName string `db:"dashboard_name" json:"dashboard_name"` GroupName string `db:"group_name" json:"group_name"` @@ -64,7 +70,7 @@ type NotificationValidatorDashboardDetail struct { Slashed []uint64 `json:"slashed"` // validator indices SyncCommittee []uint64 `json:"sync_committee"` // validator indices AttestationMissed []IndexEpoch `json:"attestation_missed"` // index (epoch) - Withdrawal []IndexSlots `json:"withdrawal"` + Withdrawal []NotificationEventWithdrawal `json:"withdrawal"` ValidatorOfflineReminder []uint64 `json:"validator_offline_reminder"` // validator indices; TODO not filled yet GroupOfflineReminder bool `json:"group_offline_reminder"` // TODO not filled yet ValidatorBackOnline []NotificationEventValidatorBackOnline `json:"validator_back_online"` diff --git a/frontend/types/api/notifications.ts b/frontend/types/api/notifications.ts index bab50455b..3857db5c3 100644 --- a/frontend/types/api/notifications.ts +++ b/frontend/types/api/notifications.ts @@ -1,6 +1,6 @@ // Code generated by tygo. DO NOT EDIT. 
/* eslint-disable */ -import type { ApiDataResponse, ApiPagingResponse, IndexSlots, IndexBlocks, IndexEpoch, Address, Hash } from './common' +import type { ApiDataResponse, ApiPagingResponse, Address, IndexSlots, IndexBlocks, IndexEpoch, Hash } from './common' ////////// // source: notifications.go @@ -49,6 +49,11 @@ export interface NotificationEventValidatorBackOnline { index: number /* uint64 */; epoch_count: number /* uint64 */; } +export interface NotificationEventWithdrawal { + index: number /* uint64 */; + amount: string /* decimal.Decimal */; + address: Address; +} export interface NotificationValidatorDashboardDetail { dashboard_name: string; group_name: string; @@ -60,7 +65,7 @@ export interface NotificationValidatorDashboardDetail { slashed: number /* uint64 */[]; // validator indices sync_committee: number /* uint64 */[]; // validator indices attestation_missed: IndexEpoch[]; // index (epoch) - withdrawal: IndexSlots[]; + withdrawal: NotificationEventWithdrawal[]; validator_offline_reminder: number /* uint64 */[]; // validator indices; TODO not filled yet group_offline_reminder: boolean; // TODO not filled yet validator_back_online: NotificationEventValidatorBackOnline[]; From 10152e640b70cc37feb5ad77b7cf9015b5151626 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 16 Oct 2024 06:27:56 +0000 Subject: [PATCH 07/73] chore(notification): bump daily emails for non-paying users to 10 --- backend/pkg/commons/db/user.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/pkg/commons/db/user.go b/backend/pkg/commons/db/user.go index c877a1379..1c25ff4d6 100644 --- a/backend/pkg/commons/db/user.go +++ b/backend/pkg/commons/db/user.go @@ -36,7 +36,7 @@ var freeTierProduct t.PremiumProduct = t.PremiumProduct{ Daily: 0, Weekly: 0, }, - EmailNotificationsPerDay: 5, + EmailNotificationsPerDay: 10, ConfigureNotificationsViaApi: false, ValidatorGroupNotifications: 1, WebhookEndpoints: 1, From 
222064c78a13e81079efde5b015da9145824d083 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 16 Oct 2024 11:34:19 +0000 Subject: [PATCH 08/73] fix(dashboard): use max_ts tables for retrieving last exported chart ts --- backend/pkg/api/data_access/vdb_summary.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/backend/pkg/api/data_access/vdb_summary.go b/backend/pkg/api/data_access/vdb_summary.go index 58e29ad2a..a649e7377 100644 --- a/backend/pkg/api/data_access/vdb_summary.go +++ b/backend/pkg/api/data_access/vdb_summary.go @@ -1093,17 +1093,17 @@ func (d *DataAccessService) GetLatestExportedChartTs(ctx context.Context, aggreg var dateColumn string switch aggregation { case enums.IntervalEpoch: - table = "validator_dashboard_data_epoch" - dateColumn = "epoch_timestamp" + table = "view_validator_dashboard_data_epoch_max_ts" + dateColumn = "t" case enums.IntervalHourly: - table = "validator_dashboard_data_hourly" - dateColumn = "hour" + table = "view_validator_dashboard_data_hourly_max_ts" + dateColumn = "t" case enums.IntervalDaily: - table = "validator_dashboard_data_daily" - dateColumn = "day" + table = "view_validator_dashboard_data_daily_max_ts" + dateColumn = "t" case enums.IntervalWeekly: - table = "validator_dashboard_data_weekly" - dateColumn = "week" + table = "view_validator_dashboard_data_weekly_max_ts" + dateColumn = "t" default: return 0, fmt.Errorf("unexpected aggregation type: %v", aggregation) } From 5549964a3e0f063e41e6c2036b07907e32dde193 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 16 Oct 2024 14:30:55 +0200 Subject: [PATCH 09/73] refactor(execution_deposits_exporter): export deposits up to head (#939) * export deposits up to head * handle reorgs by reexporting blocks since lastExportedFinalizedBlock * persist lastExportedFinalizedBlock in redis * refactor export-logic from multiple event-based goroutines with canceling to single loop * aggregated deposits and 
cached view are only updated when export is on head * avoid fetching old blocks if no deposits are on the chain BEDS-585 --- .../modules/execution_deposits_exporter.go | 234 +++++++++++------- 1 file changed, 141 insertions(+), 93 deletions(-) diff --git a/backend/pkg/exporter/modules/execution_deposits_exporter.go b/backend/pkg/exporter/modules/execution_deposits_exporter.go index 41fab0f25..ef7d2129f 100644 --- a/backend/pkg/exporter/modules/execution_deposits_exporter.go +++ b/backend/pkg/exporter/modules/execution_deposits_exporter.go @@ -8,7 +8,7 @@ import ( "encoding/hex" "fmt" "math/big" - "sync" + "sync/atomic" "time" "github.com/attestantio/go-eth2-client/spec/phase0" @@ -19,13 +19,16 @@ import ( gethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" gethrpc "github.com/ethereum/go-ethereum/rpc" + "github.com/go-redis/redis/v8" "golang.org/x/exp/maps" + "golang.org/x/sync/errgroup" "github.com/gobitfly/beaconchain/pkg/commons/contracts/deposit_contract" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/metrics" "github.com/gobitfly/beaconchain/pkg/commons/rpc" + "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" @@ -38,37 +41,34 @@ import ( type executionDepositsExporter struct { ModuleContext - Client rpc.Client - ErigonClient *gethrpc.Client - GethClient *gethrpc.Client - LogClient *ethclient.Client - LogFilterer *deposit_contract.DepositContractFilterer - DepositContractAddress common.Address - LastExportedBlock uint64 - ExportMutex *sync.Mutex - StopEarlyMutex *sync.Mutex - StopEarly context.CancelFunc - Signer gethtypes.Signer - DepositMethod abi.Method + Client rpc.Client + ErigonClient *gethrpc.Client + GethClient *gethrpc.Client + LogClient 
*ethclient.Client + LogFilterer *deposit_contract.DepositContractFilterer + DepositContractAddress common.Address + LastExportedBlock uint64 + LastExportedFinalizedBlock uint64 + LastExportedFinalizedBlockRedisKey string + CurrentHeadBlock atomic.Uint64 + Signer gethtypes.Signer + DepositMethod abi.Method } func NewExecutionDepositsExporter(moduleContext ModuleContext) ModuleInterface { return &executionDepositsExporter{ - ModuleContext: moduleContext, - Client: moduleContext.ConsClient, - DepositContractAddress: common.HexToAddress(utils.Config.Chain.ClConfig.DepositContractAddress), - LastExportedBlock: 0, - ExportMutex: &sync.Mutex{}, - StopEarlyMutex: &sync.Mutex{}, + ModuleContext: moduleContext, + Client: moduleContext.ConsClient, + DepositContractAddress: common.HexToAddress(utils.Config.Chain.ClConfig.DepositContractAddress), + LastExportedBlock: 0, + LastExportedFinalizedBlock: 0, } } -func (d *executionDepositsExporter) OnHead(event *constypes.StandardEventHeadResponse) (err error) { - return nil // nop -} - func (d *executionDepositsExporter) Init() error { - d.Signer = gethtypes.NewCancunSigner(big.NewInt(int64(utils.Config.Chain.ClConfig.DepositChainID))) + d.Signer = gethtypes.NewCancunSigner(big.NewInt(0).SetUint64(utils.Config.Chain.ClConfig.DepositChainID)) + + d.LastExportedFinalizedBlockRedisKey = fmt.Sprintf("%d:execution_deposits_exporter:last_exported_finalized_block", utils.Config.Chain.ClConfig.DepositChainID) rpcClient, err := gethrpc.Dial(utils.Config.Eth1GethEndpoint) if err != nil { @@ -124,14 +124,29 @@ func (d *executionDepositsExporter) Init() error { d.LastExportedBlock = utils.Config.Indexer.ELDepositContractFirstBlock } - log.Infof("initialized execution deposits exporter with last exported block: %v", d.LastExportedBlock) + val, err := db.PersistentRedisDbClient.Get(context.Background(), d.LastExportedFinalizedBlockRedisKey).Uint64() + switch { + case err == redis.Nil: + log.Warnf("%v missing in redis, exporting from beginning", 
d.LastExportedFinalizedBlockRedisKey) + case err != nil: + log.Fatal(err, "error getting last exported finalized block from redis", 0) + } + + d.LastExportedFinalizedBlock = val + // avoid fetching old bocks on a chain without deposits + if d.LastExportedFinalizedBlock > d.LastExportedBlock { + d.LastExportedBlock = d.LastExportedFinalizedBlock + } + + log.Infof("initialized execution deposits exporter with last exported block/finalizedBlock: %v/%v", d.LastExportedBlock, d.LastExportedFinalizedBlock) - // quick kick-start go func() { - err := d.OnFinalizedCheckpoint(nil) + // quick kick-start + err = d.OnHead(nil) if err != nil { - log.Error(err, "error during kick-start", 0) + log.Error(err, "error kick-starting executionDepositsExporter", 0) } + d.exportLoop() }() return nil @@ -145,90 +160,119 @@ func (d *executionDepositsExporter) OnChainReorg(event *constypes.StandardEventC return nil // nop } -// can take however long it wants to run, is run in a separate goroutine, so no need to worry about blocking func (d *executionDepositsExporter) OnFinalizedCheckpoint(event *constypes.StandardFinalizedCheckpointResponse) (err error) { - // important: have to fetch the actual finalized epoch because even tho its called on finalized checkpoint it actually emits for each justified epoch - // so we have to do an extra request to get the actual latest finalized epoch - res, err := d.CL.GetFinalityCheckpoints("head") - if err != nil { - return err - } + return nil // nop +} - var nearestELBlock sql.NullInt64 - err = db.ReaderDb.Get(&nearestELBlock, "select exec_block_number from blocks where slot <= $1 and exec_block_number > 0 order by slot desc limit 1", res.Data.Finalized.Epoch*utils.Config.Chain.ClConfig.SlotsPerEpoch) - if err != nil { - return err - } - if !nearestELBlock.Valid { - return fmt.Errorf("no block found for finalized epoch %v", res.Data.Finalized.Epoch) +func (d *executionDepositsExporter) OnHead(event *constypes.StandardEventHeadResponse) (err error) { + 
return nil // nop +} + +func (d *executionDepositsExporter) exportLoop() { + ticker := time.NewTicker(time.Second * 10) + defer ticker.Stop() + for ; true; <-ticker.C { + err := d.export() + if err != nil { + log.Error(err, "error during export", 0) + services.ReportStatus("execution_deposits_exporter", err.Error(), nil) + } else { + services.ReportStatus("execution_deposits_exporter", "Running", nil) + } } - log.Debugf("exporting execution layer deposits till block %v", nearestELBlock.Int64) +} - err = d.exportTillBlock(uint64(nearestELBlock.Int64)) +func (d *executionDepositsExporter) export() (err error) { + var headBlock, finBlock uint64 + var g errgroup.Group + g.Go(func() error { + headSlot, err := d.CL.GetSlot("head") + if err != nil { + return fmt.Errorf("error getting head-slot: %w", err) + } + headBlock = headSlot.Data.Message.Body.ExecutionPayload.BlockNumber + return nil + }) + g.Go(func() error { + finSlot, err := d.CL.GetSlot("finalized") + if err != nil { + return fmt.Errorf("error getting finalized-slot: %w", err) + } + finBlock = finSlot.Data.Message.Body.ExecutionPayload.BlockNumber + return nil + }) + err = g.Wait() if err != nil { return err } - return nil -} - -// this is basically synchronous, each time it gets called it will kill the previous export and replace it with itself -func (d *executionDepositsExporter) exportTillBlock(block uint64) (err error) { - // following blocks if a previous function call is still waiting for an export to stop early - d.StopEarlyMutex.Lock() - if d.StopEarly != nil { - // this will run even if the previous export has already finished - // preventing this would require an overly complex solution - log.Debugf("asking potentially running export to stop early") - d.StopEarly() + if d.LastExportedBlock >= headBlock && d.LastExportedFinalizedBlock >= finBlock { + log.Debugf("skip exporting execution layer deposits: last exported block/finalizedBlock: %v/%v, headBlock/finalizedBlock: %v/%v", d.LastExportedBlock, 
d.LastExportedFinalizedBlock, headBlock, finBlock) + return nil } - // following blocks as long as the running export hasn't finished yet - d.ExportMutex.Lock() - ctx, cancel := context.WithCancel(context.Background()) - d.StopEarly = cancel - // we have over taken and allow potentially newer function calls to signal us to stop early - d.StopEarlyMutex.Unlock() + nextFinalizedBlock := finBlock blockOffset := d.LastExportedBlock + 1 - blockTarget := block - - defer d.ExportMutex.Unlock() - - log.Infof("exporting execution layer deposits from %v to %v", blockOffset, blockTarget) - - depositsToSave := make([]*types.ELDeposit, 0) - + // make sure to reexport every block since last exported finalized blocks to handle reorgs + if blockOffset > d.LastExportedFinalizedBlock { + blockOffset = d.LastExportedFinalizedBlock + 1 + } + blockTarget := headBlock + + log.InfoWithFields(log.Fields{ + "nextHeadBlock": headBlock, + "nextFinBlock": nextFinalizedBlock, + "lastHeadBlock": d.LastExportedBlock, + "lastFinBlock": d.LastExportedFinalizedBlock, + }, fmt.Sprintf("exporting execution layer deposits from %d to %d", blockOffset, blockTarget)) + + depositsToSaveBatchSize := 10_000 // limit how much deposits we save in one go + blockBatchSize := uint64(10_000) // limit how much blocks we fetch until updating the redis-key + depositsToSave := make([]*types.ELDeposit, 0, depositsToSaveBatchSize) for blockOffset < blockTarget { - tmpBlockTarget := blockOffset + 1000 - if tmpBlockTarget > blockTarget { - tmpBlockTarget = blockTarget + depositsToSave = depositsToSave[:0] + blockBatchStart := blockOffset + for blockOffset < blockTarget && len(depositsToSave) <= depositsToSaveBatchSize && blockOffset < blockBatchStart+blockBatchSize { + tmpBlockTarget := blockOffset + 1000 + if tmpBlockTarget > blockTarget { + tmpBlockTarget = blockTarget + } + log.Debugf("fetching deposits from %v to %v", blockOffset, tmpBlockTarget) + tmp, err := d.fetchDeposits(blockOffset, tmpBlockTarget) + if err != 
nil { + return err + } + depositsToSave = append(depositsToSave, tmp...) + blockOffset = tmpBlockTarget } - log.Debugf("fetching deposits from %v to %v", blockOffset, tmpBlockTarget) - tmp, err := d.fetchDeposits(blockOffset, tmpBlockTarget) + + log.Debugf("saving %v deposits", len(depositsToSave)) + err = d.saveDeposits(depositsToSave) if err != nil { return err } - depositsToSave = append(depositsToSave, tmp...) - blockOffset = tmpBlockTarget - - select { - case <-ctx.Done(): // a newer function call has asked us to stop early - log.Warnf("stop early signal received, stopping export early") - blockTarget = tmpBlockTarget - default: - continue + d.LastExportedBlock = blockOffset + + prevLastExportedFinalizedBlock := d.LastExportedFinalizedBlock + if nextFinalizedBlock > d.LastExportedBlock && d.LastExportedBlock > d.LastExportedFinalizedBlock { + d.LastExportedFinalizedBlock = d.LastExportedBlock + } else if nextFinalizedBlock > d.LastExportedFinalizedBlock { + d.LastExportedFinalizedBlock = nextFinalizedBlock } - } - log.Debugf("saving %v deposits", len(depositsToSave)) - err = d.saveDeposits(depositsToSave) - if err != nil { - return err + // update redis to keep track of last exported finalized block persistently + if prevLastExportedFinalizedBlock != d.LastExportedFinalizedBlock { + log.Infof("updating %v: %v", d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + err := db.PersistentRedisDbClient.Set(ctx, d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock, 0).Err() + if err != nil { + log.Error(err, fmt.Sprintf("error setting redis %v = %v", d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock), 0) + } + } } - d.LastExportedBlock = blockTarget - start := time.Now() // update cached view err = d.updateCachedView() @@ -271,8 +315,8 @@ func (d *executionDepositsExporter) fetchDeposits(fromBlock, toBlock uint64) (de return nil, 
fmt.Errorf("nil deposit-log") } - depositLog := - depositLogIterator.Event + depositLog := depositLogIterator.Event + err = utils.VerifyDepositSignature(&phase0.DepositData{ PublicKey: phase0.BLSPubKey(depositLog.Pubkey), WithdrawalCredentials: depositLog.WithdrawalCredentials, @@ -387,6 +431,10 @@ func (d *executionDepositsExporter) fetchDeposits(fromBlock, toBlock uint64) (de } func (d *executionDepositsExporter) saveDeposits(depositsToSave []*types.ELDeposit) error { + if len(depositsToSave) == 0 { + return nil + } + tx, err := db.WriterDb.Beginx() if err != nil { return err From 23b07d6f13611b6488869142b2583e3bb473b500 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 16 Oct 2024 16:16:30 +0200 Subject: [PATCH 10/73] chore(notifications): warn on noncritical errors and cap body (#965) BEDS-90 --- backend/pkg/commons/utils/utils.go | 7 +++++++ backend/pkg/notification/sending.go | 13 +++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/backend/pkg/commons/utils/utils.go b/backend/pkg/commons/utils/utils.go index c84aca8d3..4a9e1538a 100644 --- a/backend/pkg/commons/utils/utils.go +++ b/backend/pkg/commons/utils/utils.go @@ -396,3 +396,10 @@ func Deduplicate(slice []uint64) []uint64 { } return list } + +func FirstN(input string, n int) string { + if len(input) <= n { + return input + } + return input[:n] +} diff --git a/backend/pkg/notification/sending.go b/backend/pkg/notification/sending.go index fe148ca53..699f4c3f2 100644 --- a/backend/pkg/notification/sending.go +++ b/backend/pkg/notification/sending.go @@ -17,6 +17,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/metrics" "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" ) @@ -268,7 +269,8 @@ func sendWebhookNotifications() error { } resp, err := client.Post(n.Content.Webhook.Url, "application/json", reqBody) if err != nil { - log.Error(err, 
"error sending webhook request", 0) + log.Warnf("error sending webhook request: %v", err) + metrics.NotificationsSent.WithLabelValues("webhook", "error").Inc() return } else { metrics.NotificationsSent.WithLabelValues("webhook", resp.Status).Inc() @@ -393,7 +395,8 @@ func sendDiscordNotifications() error { resp, err := client.Post(webhook.Url, "application/json", reqBody) if err != nil { - log.Error(err, "error sending discord webhook request", 0) + log.Warnf("failed sending discord webhook request %v: %v", webhook.ID, err) + metrics.NotificationsSent.WithLabelValues("webhook_discord", "error").Inc() } else { metrics.NotificationsSent.WithLabelValues("webhook_discord", resp.Status).Inc() } @@ -413,10 +416,8 @@ func sendDiscordNotifications() error { errResp.Status = resp.Status resp.Body.Close() - if resp.StatusCode == http.StatusTooManyRequests { - log.Warnf("could not push to discord webhook due to rate limit. %v url: %v", errResp.Body, webhook.Url) - } else { - log.Error(nil, "error pushing discord webhook", 0, map[string]interface{}{"errResp.Body": errResp.Body, "webhook.Url": webhook.Url}) + if resp.StatusCode != http.StatusOK { + log.WarnWithFields(map[string]interface{}{"errResp.Body": utils.FirstN(errResp.Body, 1000), "webhook.Url": webhook.Url}, "error pushing discord webhook") } _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET request = $2, response = $3 WHERE id = $1;`, webhook.ID, reqs[i].Content.DiscordRequest, errResp) if err != nil { From c7b831b048e762b6049deb2d403c15fa9bfe47ff Mon Sep 17 00:00:00 2001 From: marcel-bitfly <174338434+marcel-bitfly@users.noreply.github.com> Date: Fri, 4 Oct 2024 16:56:06 +0200 Subject: [PATCH 11/73] refactor: rename `component` It is easier to work with components if their name reflect the file path in nuxt. 
--- .../{DashboardsTable.vue => NotificationsDashboardsTable.vue} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename frontend/components/notifications/{DashboardsTable.vue => NotificationsDashboardsTable.vue} (100%) diff --git a/frontend/components/notifications/DashboardsTable.vue b/frontend/components/notifications/NotificationsDashboardsTable.vue similarity index 100% rename from frontend/components/notifications/DashboardsTable.vue rename to frontend/components/notifications/NotificationsDashboardsTable.vue From 0ce9ae593c43bd935d344b29d610f100dd85e54e Mon Sep 17 00:00:00 2001 From: marcel-bitfly <174338434+marcel-bitfly@users.noreply.github.com> Date: Mon, 7 Oct 2024 09:26:40 +0200 Subject: [PATCH 12/73] refactor: overwrite `user-agent-styles` globally This will remove `margin` e.g. from `

` --- frontend/assets/css/main.scss | 7 ++++++- frontend/components/bc/BcText.vue | 4 ---- .../components/notifications/NotificationsOverview.vue | 5 +---- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/frontend/assets/css/main.scss b/frontend/assets/css/main.scss index cb6d26ba6..77fce977f 100644 --- a/frontend/assets/css/main.scss +++ b/frontend/assets/css/main.scss @@ -3,9 +3,14 @@ @import "~/assets/css/fonts.scss"; @import "~/assets/css/utils.scss"; -html { +html, +h1,h2,h3,h4,h5,h6, +ul, +li + { margin: 0; padding: 0; + font: inherit; } ul { padding-inline-start: 1.5rem; diff --git a/frontend/components/bc/BcText.vue b/frontend/components/bc/BcText.vue index 4829e1583..57d343cf6 100644 --- a/frontend/components/bc/BcText.vue +++ b/frontend/components/bc/BcText.vue @@ -13,7 +13,6 @@ withDefaults( - - - - - - - () > {{ proposal.index }} - - - - - - () > {{ upcomingProposal.index }} - - - - { has-unit :label="$t('notifications.subscriptions.validators.max_collateral_reached.label')" /> - - Date: Fri, 18 Oct 2024 14:18:35 +0200 Subject: [PATCH 61/73] refactor(NotificationsManagementSubscriptionDialog): rename `component` It is easier to work with components if their name reflect the file path in nuxt. 
--- frontend/.vscode/settings.json | 2 +- ...Dialog.vue => NotificationsManagementSubscriptionDialog.vue} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename frontend/components/notifications/management/{SubscriptionDialog.vue => NotificationsManagementSubscriptionDialog.vue} (100%) diff --git a/frontend/.vscode/settings.json b/frontend/.vscode/settings.json index 22971fa5c..4e60da420 100644 --- a/frontend/.vscode/settings.json +++ b/frontend/.vscode/settings.json @@ -11,10 +11,10 @@ "NotificationsDashboardDialogEntity", "NotificationsDashboardTable", "NotificationsManagementModalWebhook", + "NotificationsManagementSubscriptionDialog", "NotificationsManagmentMachines", "NotificationsNetworkTable", "NotificationsOverview", - "SubscriptionDialog", "a11y", "checkout", "ci", diff --git a/frontend/components/notifications/management/SubscriptionDialog.vue b/frontend/components/notifications/management/NotificationsManagementSubscriptionDialog.vue similarity index 100% rename from frontend/components/notifications/management/SubscriptionDialog.vue rename to frontend/components/notifications/management/NotificationsManagementSubscriptionDialog.vue From 123d0c0f9ea2997fafeb6a9081866d0cbbc4ff0a Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 18 Oct 2024 15:49:06 +0200 Subject: [PATCH 62/73] chore(exporter): improve logging when fetching relays-data (#989) see: BEDS-90 --- backend/pkg/exporter/modules/relays.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/pkg/exporter/modules/relays.go b/backend/pkg/exporter/modules/relays.go index 8c0049e98..1b2573c01 100644 --- a/backend/pkg/exporter/modules/relays.go +++ b/backend/pkg/exporter/modules/relays.go @@ -114,7 +114,7 @@ func fetchDeliveredPayloads(r types.Relay, offset uint64) ([]BidTrace, error) { if err != nil { log.Error(err, "error retrieving delivered payloads", 0, map[string]interface{}{"relay": r.ID}) - return nil, err + return nil, fmt.Errorf("error retrieving delivered 
payloads for cursor: %v, url: %v: %v", offset, url, err) } defer resp.Body.Close() @@ -122,7 +122,7 @@ func fetchDeliveredPayloads(r types.Relay, offset uint64) ([]BidTrace, error) { err = json.NewDecoder(resp.Body).Decode(&payloads) if err != nil { - return nil, err + return nil, fmt.Errorf("error decoding json for delivered payloads for cursor: %v, url: %v: %v", offset, url, err) } return payloads, nil @@ -176,7 +176,7 @@ func retrieveAndInsertPayloadsFromRelay(r types.Relay, low_bound uint64, high_bo for { resp, err := fetchDeliveredPayloads(r, offset) if err != nil { - return err + return fmt.Errorf("error calling fetchDeliveredPayloads with offset: %v for relay: %v", r.ID, err) } if resp == nil { From ea224ae571d41ab25811650656f66c1063a13ec2 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 18 Oct 2024 16:03:07 +0200 Subject: [PATCH 63/73] chore(exporter): improve logging when fetching relays-data (#990) BEDS-90 --- backend/pkg/exporter/modules/relays.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/backend/pkg/exporter/modules/relays.go b/backend/pkg/exporter/modules/relays.go index 1b2573c01..78d068f93 100644 --- a/backend/pkg/exporter/modules/relays.go +++ b/backend/pkg/exporter/modules/relays.go @@ -113,16 +113,15 @@ func fetchDeliveredPayloads(r types.Relay, offset uint64) ([]BidTrace, error) { resp, err := client.Get(url) if err != nil { - log.Error(err, "error retrieving delivered payloads", 0, map[string]interface{}{"relay": r.ID}) - return nil, fmt.Errorf("error retrieving delivered payloads for cursor: %v, url: %v: %v", offset, url, err) + log.Error(err, "error retrieving delivered payloads", 0, map[string]interface{}{"relay": r.ID, "offset": offset, "url": url}) + return nil, fmt.Errorf("error retrieving delivered payloads for relay: %v, offset: %v, url: %v: %w", r.ID, offset, url, err) } defer resp.Body.Close() err = json.NewDecoder(resp.Body).Decode(&payloads) - if err != nil { - return nil, fmt.Errorf("error decoding 
json for delivered payloads for cursor: %v, url: %v: %v", offset, url, err) + return nil, fmt.Errorf("error decoding json for delivered payloads for relay: %v, offset: %v, url: %v: %w", r.ID, offset, url, err) } return payloads, nil @@ -176,7 +175,7 @@ func retrieveAndInsertPayloadsFromRelay(r types.Relay, low_bound uint64, high_bo for { resp, err := fetchDeliveredPayloads(r, offset) if err != nil { - return fmt.Errorf("error calling fetchDeliveredPayloads with offset: %v for relay: %v", r.ID, err) + return fmt.Errorf("error calling fetchDeliveredPayloads with offset: %v for relay: %v: %w", offset, r.ID, err) } if resp == nil { From a899c3e36b2aaf7de4bfc8d47b70367cde850911 Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Fri, 18 Oct 2024 15:58:56 +0200 Subject: [PATCH 64/73] refactor: fix `translation` for `attestation missed` See: BEDS-607 --- frontend/locales/en.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/locales/en.json b/frontend/locales/en.json index da75288c4..66a5f0ec2 100644 --- a/frontend/locales/en.json +++ b/frontend/locales/en.json @@ -837,7 +837,7 @@ "label": "All events" }, "attestation_missed": { - "info": "We will trigger every epoch ({count} minute) during downtime. | We will trigger every epoch ({count} minutes) during downtime.", + "info": "We will trigger a notification every epoch ({count} minute) during downtime. 
| We will trigger a notification every epoch ({count} minutes) during downtime.", "label": "Attestations missed" }, "block_proposal": { From f253ce867085b41e7fc0be0c2eb146473f7c166f Mon Sep 17 00:00:00 2001 From: marcel-bitfly <174338434+marcel-bitfly@users.noreply.github.com> Date: Fri, 18 Oct 2024 11:49:05 +0200 Subject: [PATCH 65/73] chore: add `command` to `mock api responses` --- frontend/README.md | 5 ++++- frontend/package.json | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/frontend/README.md b/frontend/README.md index 190640f2a..7f1a5afb3 100644 --- a/frontend/README.md +++ b/frontend/README.md @@ -128,7 +128,10 @@ If your `user` was added to the `ADMIN` or `DEV` group by the `api team`, you ca `mocked data` from the `api` for certain `endpoints` by adding `?is_mocked=true` as a `query parameter`. -You can `turn on` mocked data `globally` for all `configured enpoints` by setting `NUXT_PUBLIC_IS_API_MOCKED=true`. +You can `turn on` mocked data `globally` for all `configured enpoints` +- by setting `NUXT_PUBLIC_IS_API_MOCKED=true` +in your [.env](.env) or +- running `npm run dev:mock:api` (See: [package.json](package.json)) ## Descision Record diff --git a/frontend/package.json b/frontend/package.json index ab4b347d7..dce992e57 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -53,6 +53,7 @@ "scripts": { "build": "nuxt build", "dev": "NODE_TLS_REJECT_UNAUTHORIZED=0 nuxt dev", + "dev:mock:api": "NUXT_IS_API_MOCKED=true npm run dev", "dev:mock:production": "NUXT_DEPLOYMENT_TYPE=production npm run dev", "dev:mock:staging": "NUXT_DEPLOYMENT_TYPE=staging npm run dev", "generate": "nuxt generate", From 6a354cc894fe2b1a55986dbe84d2c0f9ef420241 Mon Sep 17 00:00:00 2001 From: marcel-bitfly <174338434+marcel-bitfly@users.noreply.github.com> Date: Fri, 18 Oct 2024 16:56:17 +0200 Subject: [PATCH 66/73] refactor(notifications): disable `machines tab ui` when `user has no machines configured` See: BEDS-574 --- 
frontend/components/bc/BcSlider.vue | 1 - .../NotificationsManagementMachines.vue | 43 +++++++++++++------ 2 files changed, 30 insertions(+), 14 deletions(-) diff --git a/frontend/components/bc/BcSlider.vue b/frontend/components/bc/BcSlider.vue index b3dcaf9c0..d965a55df 100644 --- a/frontend/components/bc/BcSlider.vue +++ b/frontend/components/bc/BcSlider.vue @@ -15,7 +15,6 @@ defineProps<{ :step class="bc-slider" type="range" - v-bind="$attrs" > diff --git a/frontend/components/notifications/management/NotificationsManagementMachines.vue b/frontend/components/notifications/management/NotificationsManagementMachines.vue index 67841ce52..b966c91bc 100644 --- a/frontend/components/notifications/management/NotificationsManagementMachines.vue +++ b/frontend/components/notifications/management/NotificationsManagementMachines.vue @@ -21,6 +21,8 @@ watchDebounced(() => notificationsManagementStore.settings.general_settings, asy deep: true, maxWait: waitExtraLongForSliders, }) + +const hasMachines = computed(() => notificationsManagementStore.settings.has_machines)