diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 7e6614089..23196270b 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -4,6 +4,8 @@ # in order to work execute once: # git config blame.ignoreRevsFile .git-blame-ignore-revs +# chore(eslint): add `natural sorting` for `json` files +f3da99c5c685eae1914aad8513d3b4b2f1cdcaa2 # style(eslint): add `typescript delimiter` rule d6b42edb3f687cad3082a9044bcb71fc39a46176 # style(eslint): apply `max-len` rule diff --git a/.github/workflows/backend-converted-types-check.yml b/.github/workflows/backend-converted-types-check.yml new file mode 100644 index 000000000..b502e296e --- /dev/null +++ b/.github/workflows/backend-converted-types-check.yml @@ -0,0 +1,46 @@ +name: Backend-Converted-Types-Check +on: + push: + paths: + - 'backend/pkg/api/types/**' + - 'frontend/types/api/**' + branches: + - main + - staging + pull_request: + paths: + - 'backend/pkg/api/types/**' + - 'frontend/types/api/**' + branches: + - '*' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: read + checks: write + +jobs: + build: + name: converted-types-check + runs-on: self-hosted + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version-file: 'backend/go.mod' + cache-dependency-path: 'backend/go.sum' + - name: Check if all backend-types have been converted to frontend-types + working-directory: backend + run: | + currHash=$(find ../frontend/types/api -type f -print0 | sort -z | xargs -0 sha1sum | sha256sum | head -c 64) + make frontend-types + newHash=$(find ../frontend/types/api -type f -print0 | sort -z | xargs -0 sha1sum | sha256sum | head -c 64) + if [ "$currHash" != "$newHash" ]; then + echo "frontend-types have changed, please commit the changes" + exit 1 + fi + diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 48243480e..c95d43c70 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -33,7 +33,9 @@ jobs: cache-dependency-path: 'backend/go.sum' - name: Test with the Go CLI working-directory: backend - run: go test -failfast ./pkg/api/... -config "${{ secrets.CI_CONFIG_PATH }}" + run: | + go install github.com/swaggo/swag/cmd/swag@latest && swag init --ot json -o ./pkg/api/docs -d ./pkg/api/ -g ./handlers/public.go + go test -failfast ./pkg/api/... 
-config "${{ secrets.CI_CONFIG_PATH }}" diff --git a/backend/.gitignore b/backend/.gitignore index 7006633d8..b5f10c4da 100644 --- a/backend/.gitignore +++ b/backend/.gitignore @@ -5,4 +5,5 @@ local_deployment/config.yml local_deployment/elconfig.json local_deployment/.env __gitignore -cmd/playground \ No newline at end of file +cmd/playground +pkg/api/docs/swagger.json diff --git a/backend/Makefile b/backend/Makefile index 01c3705ba..dd099490d 100644 --- a/backend/Makefile +++ b/backend/Makefile @@ -10,8 +10,8 @@ CGO_CFLAGS_ALLOW="-O -D__BLST_PORTABLE__" all: mkdir -p bin + go install github.com/swaggo/swag/cmd/swag@latest && swag init --ot json -o ./pkg/api/docs -d ./pkg/api/ -g ./handlers/public.go CGO_CFLAGS=${CGO_CFLAGS} CGO_CFLAGS_ALLOW=${CGO_CFLAGS_ALLOW} go build --ldflags=${LDFLAGS} -o ./bin/bc ./cmd/main.go - clean: rm -rf bin diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go index 90ce205f3..04b995a8d 100644 --- a/backend/cmd/api/main.go +++ b/backend/cmd/api/main.go @@ -58,12 +58,15 @@ func Run() { if dummyApi { dataAccessor = dataaccess.NewDummyService() } else { - dataAccessor = dataaccess.NewDataAccessService(cfg) - dataAccessor.StartDataAccessServices() + service := dataaccess.NewDataAccessService(cfg) + service.StartDataAccessServices() + dataAccessor = service } defer dataAccessor.Close() - router := api.NewApiRouter(dataAccessor, cfg) + dummy := dataaccess.NewDummyService() + + router := api.NewApiRouter(dataAccessor, dummy, cfg) router.Use(api.GetCorsMiddleware(cfg.CorsAllowedHosts)) if utils.Config.Metrics.Enabled { diff --git a/backend/cmd/blobindexer/main.go b/backend/cmd/blobindexer/main.go index 9f7d37b4a..5be4f6572 100644 --- a/backend/cmd/blobindexer/main.go +++ b/backend/cmd/blobindexer/main.go @@ -16,7 +16,7 @@ import ( func Run() { fs := flag.NewFlagSet("fs", flag.ExitOnError) - configFlag := fs.String("config", "config.yml", "path to config") + configFlag := fs.String("config", "", "path to config") versionFlag := fs.Bool("version", false, "print version and exit") _ = fs.Parse(os.Args[2:]) if *versionFlag { diff --git a/backend/cmd/eth1indexer/main.go b/backend/cmd/eth1indexer/main.go index badcc7745..d6198b22a 100644 --- a/backend/cmd/eth1indexer/main.go +++ b/backend/cmd/eth1indexer/main.go @@ -189,6 +189,10 @@ func Run() { }() } + if *enableEnsUpdater { + go ImportEnsUpdatesLoop(bt, client, *ensBatchSize) + } + if *enableFullBalanceUpdater { ProcessMetadataUpdates(bt, client, balanceUpdaterPrefix, *balanceUpdaterBatchSize, -1) return @@ -375,14 +379,6 @@ func Run() { ProcessMetadataUpdates(bt, client, balanceUpdaterPrefix, *balanceUpdaterBatchSize, 10) } - if *enableEnsUpdater { - err := bt.ImportEnsUpdates(client.GetNativeClient(), *ensBatchSize) - if err != nil { - log.Error(err, "error importing ens updates", 0, nil) - continue - } - } - log.Infof("index run completed") services.ReportStatus("eth1indexer", "Running", nil) } @@ -390,6 +386,19 @@ func Run() { // utils.WaitForCtrlC() } +func ImportEnsUpdatesLoop(bt *db.Bigtable, client *rpc.ErigonClient, batchSize int64) { + time.Sleep(time.Second * 5) + for { + err := bt.ImportEnsUpdates(client.GetNativeClient(), batchSize) + if err != nil { + log.Error(err, "error importing ens updates", 0, nil) + } else { + services.ReportStatus("ensIndexer", "Running", nil) + } + time.Sleep(time.Second * 5) + } +} + func UpdateTokenPrices(bt *db.Bigtable, client *rpc.ErigonClient, tokenListPath string) error { tokenListContent, err := os.ReadFile(tokenListPath) if err != nil { diff --git 
a/backend/cmd/evm_node_indexer/main.go b/backend/cmd/evm_node_indexer/main.go new file mode 100644 index 000000000..4975bb3d0 --- /dev/null +++ b/backend/cmd/evm_node_indexer/main.go @@ -0,0 +1,1610 @@ +package evm_node_indexer + +// imports +import ( + "bytes" + "compress/gzip" + "context" + "database/sql" + "encoding/binary" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "net/http" + "os" + "regexp" + "strings" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/hexutil" + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" + "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/gobitfly/beaconchain/pkg/commons/version" + "github.com/gtuk/discordwebhook" + "github.com/lib/pq" + "golang.org/x/sync/errgroup" + "google.golang.org/api/option" + + gcp_bigtable "cloud.google.com/go/bigtable" +) + +// defines +const MAX_EL_BLOCK_NUMBER = int64(1_000_000_000_000 - 1) + +const BT_COLUMNFAMILY_BLOCK = "b" +const BT_COLUMN_BLOCK = "b" +const BT_COLUMNFAMILY_RECEIPTS = "r" +const BT_COLUMN_RECEIPTS = "r" +const BT_COLUMNFAMILY_TRACES = "t" +const BT_COLUMN_TRACES = "t" +const BT_COLUMNFAMILY_UNCLES = "u" +const BT_COLUMN_UNCLES = "u" + +const MAINNET_CHAINID = 1 +const GOERLI_CHAINID = 5 +const OPTIMISM_CHAINID = 10 +const GNOSIS_CHAINID = 100 +const HOLESKY_CHAINID = 17000 +const ARBITRUM_CHAINID = 42161 +const ARBITRUM_NITRO_BLOCKNUMBER = 22207815 +const SEPOLIA_CHAINID = 11155111 + +const HTTP_TIMEOUT_IN_SECONDS = 120 +const MAX_REORG_DEPTH = 256 // maximum value for reorg (that number of blocks we are looking 'back'), includes latest block +const MAX_NODE_REQUESTS_AT_ONCE = 1024 // maximum node requests allowed +const OUTPUT_CYCLE_IN_SECONDS = 8 // duration between 2 outputs / updates, just a visual thing +const TRY_TO_RECOVER_ON_ERROR_COUNT = 8 // total retries, so with a value of 4, it is 1 try + 4 retries + +// structs +type jsonRpcReturnId struct { + Id int64 `json:"id"` +} +type fullBlockRawData struct { + blockNumber int64 + blockHash hexutil.Bytes + blockUnclesCount int + blockTxs []string + + blockCompressed hexutil.Bytes + receiptsCompressed hexutil.Bytes + tracesCompressed hexutil.Bytes + unclesCompressed hexutil.Bytes +} +type intRange struct { + start int64 + end int64 +} + +// local globals +var currentNodeBlockNumber atomic.Int64 +var elClient *ethclient.Client +var reorgDepth *int64 +var httpClient *http.Client +var errorIdentifier *regexp.Regexp +var eth1RpcEndpoint string + +// init +func init() { + httpClient = &http.Client{Timeout: time.Second * HTTP_TIMEOUT_IN_SECONDS} + + var err error + errorIdentifier, err = regexp.Compile(`\"error":\{\"code\":\-[0-9]+\,\"message\":\"([^\"]*)`) + if err != nil { + log.Fatal(err, "fatal, compiling regex", 0) + } +} + +// main
func Run() { + fs := flag.NewFlagSet("fs", flag.ExitOnError) + + // read / set parameters + configPath := fs.String("config", "config/default.config.yml", "Path to the config file") + versionFlag := fs.Bool("version", false, "print version and exit") + startBlockNumber := fs.Int64("start-block-number", -1, "trigger a REEXPORT, only working in combination with end-block-number, defined block is included, will be the first action done and will quit afterwards, ignoring every other action") + endBlockNumber := fs.Int64("end-block-number", -1, "trigger a REEXPORT, only working in combination with start-block-number, defined block is included, will be the first action done and will quit afterwards, ignoring every other action") + reorgDepth = fs.Int64("reorg.depth", 32, fmt.Sprintf("lookback to check and handle chain reorgs (MAX %s), you should NEVER reduce this after the first start, otherwise there will be unchecked areas", _formatInt64(MAX_REORG_DEPTH))) + concurrency := fs.Int64("concurrency", 8, "maximum threads used (running on maximum whenever possible)") + nodeRequestsAtOnce := fs.Int64("node-requests-at-once", 16, fmt.Sprintf("bulk size per node = bt = db request (MAX %s)", _formatInt64(MAX_NODE_REQUESTS_AT_ONCE))) + skipHoleCheck := fs.Bool("skip-hole-check", false, "skips the initial check for holes, doesn't go very well with only-hole-check") + onlyHoleCheck := fs.Bool("only-hole-check", false, "just check for holes and quit, can be used for a reexport running simulation to a normal setup, just remove entries in postgres and start with this flag, doesn't go very well with skip-hole-check") + noNewBlocks := fs.Bool("ignore-new-blocks", false, "there are no new blocks, at all") + noNewBlocksThresholdSeconds := fs.Int("fatal-if-no-new-block-for-x-seconds", 600, "will fatal if there is no new block for x seconds (MIN 30), will start throwing errors at 2/3 of the time, will start throwing warnings at 1/3 of the time, doesn't go very well with ignore-new-blocks") + discordWebhookBlockThreshold := fs.Int64("discord-block-threshold", 100000, "every x blocks an update is sent to Discord") + discordWebhookReportUrl := fs.String("discord-url", "", "report progress to discord url") + discordWebhookUser := fs.String("discord-user", "", "report progress to discord user") + discordWebhookAddTextFatal := fs.String("discord-fatal-text", "", "this text will be added to the discord message in the case of a fatal") + err := fs.Parse(os.Args[2:]) + if err != nil { + log.Fatal(err, "error parsing flags", 0) + } + if *versionFlag { + log.Info(version.Version) + return + } + + // tell the user about all parameters + { + log.Infof("config set to '%s'", *configPath) + if *startBlockNumber >= 0 { + log.Infof("start-block-number set to '%s'", _formatInt64(*startBlockNumber)) + } + if *endBlockNumber >= 0 { + log.Infof("end-block-number set to '%s'", _formatInt64(*endBlockNumber)) + } + log.Infof("reorg.depth set to '%s'", _formatInt64(*reorgDepth)) + log.Infof("concurrency set to '%s'", _formatInt64(*concurrency)) + log.Infof("node-requests-at-once set to '%s'", _formatInt64(*nodeRequestsAtOnce)) + if *skipHoleCheck { + log.Infof("skip-hole-check set true") + } + if *onlyHoleCheck { + log.Infof("only-hole-check set true") + } + if *noNewBlocks { + log.Infof("ignore-new-blocks set true") + } + log.Infof("fatal-if-no-new-block-for-x-seconds set to '%d' seconds", *noNewBlocksThresholdSeconds) + } + + // check config + { + log.InfoWithFields(log.Fields{"config": *configPath, "version": version.Version, "commit": version.GitCommit, "chainName": utils.Config.Chain.ClConfig.ConfigName}, "starting") + cfg := &types.Config{} + err := utils.ReadConfig(cfg, *configPath) + if err != nil { + log.Fatal(err, "error reading config file", 0) // fatal, as there is no point without a config + } else { + log.Info("reading config completed") + } + utils.Config = cfg + + if len(utils.Config.Eth1ErigonEndpoint) > 0 { + eth1RpcEndpoint = utils.Config.Eth1ErigonEndpoint + } else { + eth1RpcEndpoint = 
utils.Config.Eth1GethEndpoint + } + + if utils.Config.Metrics.Enabled { + go func() { + log.Infof("serving metrics on %v", utils.Config.Metrics.Address) + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { + log.Fatal(err, "error serving metrics", 0) + } + }() + } + } + + // check parameters + if *nodeRequestsAtOnce < 1 { + log.Warnf("node-requests-at-once set to %s, corrected to 1", _formatInt64(*nodeRequestsAtOnce)) + *nodeRequestsAtOnce = 1 + } + if *nodeRequestsAtOnce > MAX_NODE_REQUESTS_AT_ONCE { + log.Warnf("node-requests-at-once set to %s, corrected to %s", _formatInt64(*nodeRequestsAtOnce), _formatInt64(MAX_NODE_REQUESTS_AT_ONCE)) + *nodeRequestsAtOnce = MAX_NODE_REQUESTS_AT_ONCE + } + if *reorgDepth < 0 || *reorgDepth > MAX_REORG_DEPTH { + log.Warnf("reorg.depth parameter set to %s, corrected to %s", _formatInt64(*reorgDepth), _formatInt64(MAX_REORG_DEPTH)) + *reorgDepth = MAX_REORG_DEPTH + } + if *concurrency < 1 { + log.Warnf("concurrency parameter set to %s, corrected to 1", _formatInt64(*concurrency)) + *concurrency = 1 + } + if *noNewBlocksThresholdSeconds < 30 { + log.Warnf("fatal-if-no-new-block-for-x-seconds set to %d, corrected to 30", *noNewBlocksThresholdSeconds) + *noNewBlocksThresholdSeconds = 30 + } + + // init postgres + { + db.WriterDb, db.ReaderDb = db.MustInitDB(&types.DatabaseConfig{ + Username: utils.Config.WriterDatabase.Username, + Password: utils.Config.WriterDatabase.Password, + Name: utils.Config.WriterDatabase.Name, + Host: utils.Config.WriterDatabase.Host, + Port: utils.Config.WriterDatabase.Port, + MaxOpenConns: utils.Config.WriterDatabase.MaxOpenConns, + MaxIdleConns: utils.Config.WriterDatabase.MaxIdleConns, + SSL: utils.Config.WriterDatabase.SSL, + }, &types.DatabaseConfig{ + Username: utils.Config.ReaderDatabase.Username, + Password: utils.Config.ReaderDatabase.Password, + Name: utils.Config.ReaderDatabase.Name, + Host: utils.Config.ReaderDatabase.Host, + Port: utils.Config.ReaderDatabase.Port, + MaxOpenConns: utils.Config.ReaderDatabase.MaxOpenConns, + MaxIdleConns: utils.Config.ReaderDatabase.MaxIdleConns, + SSL: utils.Config.ReaderDatabase.SSL, + }, "pgx", "postgres") + defer db.ReaderDb.Close() + defer db.WriterDb.Close() + log.Info("starting postgres completed") + } + + // init bigtable + log.Info("init BT...") + btClient, err := gcp_bigtable.NewClient(context.Background(), utils.Config.Bigtable.Project, utils.Config.Bigtable.Instance, option.WithGRPCConnectionPool(1)) + if err != nil { + log.Fatal(err, "creating new client for Bigtable", 0) // fatal, no point to continue without BT + } + tableBlocksRaw := btClient.Open("blocks-raw") + if tableBlocksRaw == nil { + log.Fatal(err, "open blocks-raw table", 0) // fatal, no point to continue without BT + } + defer btClient.Close() + log.Info("...init BT done.") + + // init el client + log.Info("init el client endpoint...") + // #RECY IMPROVE split http / ws endpoint, http is mandatory, ws optional - So add an http/ws config entry, where ws is optional (to use subscribe) + elClient, err = ethclient.Dial(eth1RpcEndpoint) + if err != nil { + log.Fatal(err, "error dialing eth url", 0) // fatal, no point to continue without node connection + } + log.Info("...init el client endpoint done.") + + // check chain id + { + log.Info("check chain id...") + chainID, err := rpciGetChainId() + if chainID == ARBITRUM_CHAINID { // #RECY REMOVE currently necessary as there is no default config / setting in utils for Arbitrum + 
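// A minimal, self-contained sketch (not part of this diff) of what the chain-id
// check in this block boils down to; verifyChainID and its parameters are
// illustrative names, assuming an already-dialed ethclient connection:
//
//	func verifyChainID(ctx context.Context, client *ethclient.Client, want uint64) error {
//		id, err := client.ChainID(ctx) // asks the node via eth_chainId
//		if err != nil {
//			return fmt.Errorf("error retrieving chain id from node: %w", err)
//		}
//		if id.Uint64() != want {
//			return fmt.Errorf("node chain id (%d) differs from configured chain id (%d)", id.Uint64(), want)
//		}
//		return nil
//	}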
utils.Config.Chain.Id = ARBITRUM_CHAINID + } + if chainID == OPTIMISM_CHAINID { // #RECY REMOVE currently necessary as there is no default config / setting in utils for Optimism + utils.Config.Chain.Id = OPTIMISM_CHAINID + } + if err != nil { + log.Fatal(err, "error get chain id", 0) // fatal, no point to continue without chain id + } + if chainID != utils.Config.Chain.Id { // if the chain id is removed from the config, just remove this if, there is no point, except checking consistency + log.Fatal(err, "node chain different from config chain", 0) // fatal, config doesn't match node + } + log.Info("...check chain id done.") + } + + // get latest block (as it's global, so we have an initial value) + log.Info("get latest block from node...") + updateBlockNumber(true, *noNewBlocks, time.Duration(*noNewBlocksThresholdSeconds)*time.Second, discordWebhookReportUrl, discordWebhookUser, discordWebhookAddTextFatal) + log.Infof("...get latest block (%s) from node done.", _formatInt64(currentNodeBlockNumber.Load())) + + // ////////////////////////////////////////// + // Config done, now actually "doing" stuff // + // ////////////////////////////////////////// + + // check if reexport requested + if *startBlockNumber >= 0 && *endBlockNumber >= 0 && *startBlockNumber <= *endBlockNumber { + log.Infof("Found REEXPORT for block %s to %s...", _formatInt64(*startBlockNumber), _formatInt64(*endBlockNumber)) + err := bulkExportBlocksRange(tableBlocksRaw, []intRange{{start: *startBlockNumber, end: *endBlockNumber}}, *concurrency, *nodeRequestsAtOnce, discordWebhookBlockThreshold, discordWebhookReportUrl, discordWebhookUser) + if err != nil { + sendMessage(fmt.Sprintf("%s NODE EXPORT: Fatal, reexport not completed, check logs %s", getChainNamePretty(), *discordWebhookAddTextFatal), discordWebhookReportUrl, discordWebhookUser) + log.Fatal(err, "error while reexporting blocks for bigtable (reexport range)", 0) // fatal, as there is nothing more to do anyway + } + log.Info("Job done, have a nice day :)") + return + } + + // find holes in our previous runs / sanity check + if *skipHoleCheck { + log.Warn("Skipping hole check!") + } else { + log.Info("Checking for holes...") + startTime := time.Now() + missingBlocks, err := psqlFindGaps() // find the holes + findHolesTook := time.Since(startTime) + if err != nil { + log.Fatal(err, "error checking for holes", 0) // fatal, as we highly depend on postgres, if this is not working, we can quit + } + l := len(missingBlocks) + if l > 0 { // some holes found + log.Warnf("Found %s missing block ranges in %v, fixing them now...", _formatInt(l), findHolesTook) + if l <= 10 { + log.Warnf("%v", missingBlocks) + } else { + log.Warnf("%v<...>", missingBlocks[:10]) + } + startTime = time.Now() + err := bulkExportBlocksRange(tableBlocksRaw, missingBlocks, *concurrency, *nodeRequestsAtOnce, discordWebhookBlockThreshold, discordWebhookReportUrl, discordWebhookUser) // reexport the holes + if err != nil { + log.Fatal(err, "error while reexporting blocks for bigtable (fixing holes)", 0) // fatal, as if we want to start with holes, we should set the skip-hole-check parameter + } + log.Warnf("...fixed them in %v", time.Since(startTime)) + } else { + log.Infof("...no missing blocks found in %v", findHolesTook) + } + } + if *onlyHoleCheck { + log.Info("only-hole-check set, job done, have a nice day :)") + return + } + + // waiting for new blocks and exporting them, while checking for reorgs before every new block + latestPGBlock, err := psqlGetLatestBlock(false) + if err != nil { + log.Fatal(err, "error while using psqlGetLatestBlock (start / read)", 0) // fatal, as if there is no initial value, we have nothing to start from + } + var consecutiveErrorCount int + consecutiveErrorCountThreshold := 0 // after threshold + 1 errors it will be fatal instead #TODO not working correctly when syncing big amounts of data; set to 0 for now, as an error would result in a full retry (which is wrong) + for { + currentNodeBN := currentNodeBlockNumber.Load() + if currentNodeBN < latestPGBlock { + // fatal, as this is an impossible error + log.Fatal(err, "impossible error currentNodeBN < latestPGBlock", 0, map[string]interface{}{"currentNodeBN": currentNodeBN, "latestPGBlock": latestPGBlock}) + } else if currentNodeBN == latestPGBlock { + time.Sleep(time.Second) + continue // still the same block + } else { + // checking for reorg + if *reorgDepth > 0 && latestPGBlock >= 0 { + // define length to check + l := *reorgDepth + if l > latestPGBlock+1 { + l = latestPGBlock + 1 + } + + // fill array with block numbers to check + blockRawData := make([]fullBlockRawData, l) + for i := int64(0); i < l; i++ { + blockRawData[i].blockNumber = latestPGBlock + i - l + 1 + } + + // get all hashes from node + err = rpciGetBulkBlockRawHash(blockRawData, *nodeRequestsAtOnce) + if err != nil { + consecutiveErrorCount++ + if consecutiveErrorCount <= consecutiveErrorCountThreshold { + log.Error(err, "error when bulk getting raw block hashes", 0, map[string]interface{}{"reorgErrorCount": consecutiveErrorCount, "latestPGBlock": latestPGBlock, "reorgDepth": *reorgDepth}) + } else { + log.Fatal(err, "error when bulk getting raw block hashes", 0, map[string]interface{}{"reorgErrorCount": consecutiveErrorCount, "latestPGBlock": latestPGBlock, "reorgDepth": *reorgDepth}) + } + continue + } + + // get a list of all block_ids where the hashes are fine + var matchingHashesBlockIdList []int64 + matchingHashesBlockIdList, err = psqlGetHashHitsIdList(blockRawData) + if err != nil { + consecutiveErrorCount++ + if consecutiveErrorCount <= consecutiveErrorCountThreshold { + log.Error(err, "error when getting hash hits id list", 0, map[string]interface{}{"reorgErrorCount": consecutiveErrorCount, "latestPGBlock": latestPGBlock, "reorgDepth": *reorgDepth}) + } else { + log.Fatal(err, "error when getting hash hits id list", 0, map[string]interface{}{"reorgErrorCount": consecutiveErrorCount, "latestPGBlock": latestPGBlock, "reorgDepth": *reorgDepth}) + } + continue + } + + matchingLength := len(matchingHashesBlockIdList) + if len(blockRawData) != matchingLength { // nothing to do if all elements are fine, but if not... 
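// A compact sketch (illustrative, not part of this diff) of the inversion done just
// below: given the ascending candidate block numbers and the ascending list of
// block numbers whose hashes matched, collapse all non-matching numbers into
// contiguous [start,end] ranges for re-export. It reuses the file's own intRange type.
//
//	func invertToRanges(candidates, matched []int64) []intRange {
//		ranges := []intRange{}
//		i := 0
//		for _, bn := range candidates {
//			for i < len(matched) && matched[i] < bn {
//				i++
//			}
//			if i < len(matched) && matched[i] == bn {
//				continue // hash matched, nothing to re-export
//			}
//			if l := len(ranges); l > 0 && ranges[l-1].end+1 == bn {
//				ranges[l-1].end = bn // extend the current range
//			} else {
//				ranges = append(ranges, intRange{start: bn, end: bn})
//			}
//		}
//		return ranges
//	}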
+ if len(blockRawData) < matchingLength { + // fatal, as this is an impossible error + log.Fatal(err, "impossible error len(blockRawData) < matchingLength", 0, map[string]interface{}{"latestPGBlock": latestPGBlock, "matchingLength": matchingLength}) + } + + // reverse the "fine" list, so we have a "not fine" list + wrongHashRanges := []intRange{{start: -1}} + wrongHashRangesIndex := 0 + var i int + var failCounter int + for _, v := range blockRawData { + for i < matchingLength && v.blockNumber > matchingHashesBlockIdList[i] { + i++ + } + if i >= matchingLength || v.blockNumber != matchingHashesBlockIdList[i] { + failCounter++ + if wrongHashRanges[wrongHashRangesIndex].start < 0 { + wrongHashRanges[wrongHashRangesIndex].start = v.blockNumber + wrongHashRanges[wrongHashRangesIndex].end = v.blockNumber + } else if wrongHashRanges[wrongHashRangesIndex].end+1 == v.blockNumber { + wrongHashRanges[wrongHashRangesIndex].end = v.blockNumber + } else { + wrongHashRangesIndex++ + wrongHashRanges[wrongHashRangesIndex].start = v.blockNumber + wrongHashRanges[wrongHashRangesIndex].end = v.blockNumber + } + } + } + if failCounter != len(blockRawData)-matchingLength { + // fatal, as this is an impossible error + log.Fatal(err, "impossible error failCounter != len(blockRawData)-matchingLength", 0, map[string]interface{}{"failCounter": failCounter, "len(blockRawData)-matchingLength": len(blockRawData) - matchingLength}) + } + log.Infof("found %s wrong hashes when checking for reorgs, reexporting them now...", _formatInt(failCounter)) + log.Infof("%v", wrongHashRanges) + + // export the hits again + err = bulkExportBlocksRange(tableBlocksRaw, wrongHashRanges, *concurrency, *nodeRequestsAtOnce, discordWebhookBlockThreshold, discordWebhookReportUrl, discordWebhookUser) + // we will retry again, but it's important to skip the export of new blocks in the case of an error + if err != nil { + consecutiveErrorCount++ + if consecutiveErrorCount <= consecutiveErrorCountThreshold { + log.Error(err, "error exporting hits on reorg", 0, map[string]interface{}{"reorgErrorCount": consecutiveErrorCount, "len(blockRawData)": len(blockRawData), "reorgDepth": *reorgDepth, "matchingHashesBlockIdList": matchingHashesBlockIdList, "wrongHashRanges": wrongHashRanges}) + } else { + log.Fatal(err, "error exporting hits on reorg", 0, map[string]interface{}{"reorgErrorCount": consecutiveErrorCount, "len(blockRawData)": len(blockRawData), "reorgDepth": *reorgDepth, "matchingHashesBlockIdList": matchingHashesBlockIdList, "wrongHashRanges": wrongHashRanges}) + } + continue + } else { + log.Info("...done. Everything fine with reorgs again.") + } + } + } + + // export all new blocks + newerNodeBN := currentNodeBlockNumber.Load() // just in case it took a while doing the reorg stuff, no problem if range > reorg limit, as the exported blocks will be newest also + if newerNodeBN < currentNodeBN { + // fatal, as this is an impossible error + log.Fatal(err, "impossible error newerNodeBN < currentNodeBN", 0, map[string]interface{}{"newerNodeBN": newerNodeBN, "currentNodeBN": currentNodeBN}) + } + err = bulkExportBlocksRange(tableBlocksRaw, []intRange{{start: latestPGBlock + 1, end: newerNodeBN}}, *concurrency, *nodeRequestsAtOnce, discordWebhookBlockThreshold, discordWebhookReportUrl, discordWebhookUser) + // we can try again, as throwing a fatal would just result in a retry anyway + if err != nil { + consecutiveErrorCount++ + if consecutiveErrorCount <= consecutiveErrorCountThreshold { + log.Error(err, "error while reexporting blocks for bigtable (newest blocks)", 0, map[string]interface{}{"reorgErrorCount": consecutiveErrorCount, "latestPGBlock+1": latestPGBlock + 1, "newerNodeBN": newerNodeBN}) + } else { + log.Fatal(err, "error while reexporting blocks for bigtable (newest blocks)", 0, map[string]interface{}{"reorgErrorCount": consecutiveErrorCount, "latestPGBlock+1": latestPGBlock + 1, "newerNodeBN": newerNodeBN}) + } + continue + } else { + latestPGBlock, err = psqlGetLatestBlock(true) + if err != nil { + consecutiveErrorCount++ + if consecutiveErrorCount <= consecutiveErrorCountThreshold { + log.Error(err, "error while using psqlGetLatestBlock (ongoing / write)", 0, map[string]interface{}{"reorgErrorCount": consecutiveErrorCount}) + } else { + log.Fatal(err, "error while using psqlGetLatestBlock (ongoing / write)", 0, map[string]interface{}{"reorgErrorCount": consecutiveErrorCount}) + } + continue + } else if latestPGBlock != newerNodeBN { + // fatal, as this is a nearly impossible error + log.Fatal(err, "impossible error latestPGBlock != newerNodeBN", 0, map[string]interface{}{"latestPGBlock": latestPGBlock, "newerNodeBN": newerNodeBN}) + } + } + + // reset consecutive error count if there was no error during this run + if consecutiveErrorCount > 0 { + log.Infof("reset consecutive error count to 0, as no error in this run (was %d)", consecutiveErrorCount) + consecutiveErrorCount = 0 + } + } + } +} + +// improve the behaviour in case of an error +func _bulkExportBlocksHandler(tableBlocksRaw *gcp_bigtable.Table, blockRawData []fullBlockRawData, nodeRequestsAtOnce int64, deep int) error { + err := _bulkExportBlocksImpl(tableBlocksRaw, blockRawData, nodeRequestsAtOnce) + if err != nil { + if deep < TRY_TO_RECOVER_ON_ERROR_COUNT { + elementCount := len(blockRawData) + + // output the error + { + s := errorIdentifier.FindStringSubmatch(err.Error()) + if len(s) >= 2 { // if we have a valid json error available, should be the case if it's a node issue + log.WarnWithFields(log.Fields{"deep": deep, "cause": s[1], "0block": blockRawData[0].blockNumber, "elements": elementCount}, "got an error and will try to fix it (sub)") + } else { // if we have no json error available, should be the case if it's a BT or Postgres issue + log.WarnWithFields(log.Fields{"deep": deep, "cause": err, "0block": blockRawData[0].blockNumber, "elements": elementCount}, "got an error and will try to fix it (err)") + } + } + + if deep > TRY_TO_RECOVER_ON_ERROR_COUNT/2 { + duration := deep - TRY_TO_RECOVER_ON_ERROR_COUNT/2 + if duration > 8 { + duration = 8 + } + time.Sleep(time.Second * time.Duration(duration)) + } + + // try to recover + if 
elementCount == 1 { // if there is only 1 element, no split possible + err = _bulkExportBlocksHandler(tableBlocksRaw, blockRawData, nodeRequestsAtOnce, deep+1) + } else if elementCount > 1 { // split the elements in half and try again to put less strain on the node + err = _bulkExportBlocksHandler(tableBlocksRaw, blockRawData[:elementCount/2], nodeRequestsAtOnce, deep+1) + if err == nil { + err = _bulkExportBlocksHandler(tableBlocksRaw, blockRawData[elementCount/2:], nodeRequestsAtOnce, deep+1) + } + } + } + } + if err != nil { + return fmt.Errorf("_bulkExportBlocksHandler with deep (%d): %w", deep, err) + } + return nil +} + +// export all blocks, heavy use of bulk & concurrency, providing a block raw data array (used by the other bulkExportBlocks+ functions) +func _bulkExportBlocksImpl(tableBlocksRaw *gcp_bigtable.Table, blockRawData []fullBlockRawData, nodeRequestsAtOnce int64) error { + // check values + { + if tableBlocksRaw == nil { + return fmt.Errorf("tableBlocksRaw == nil") + } + + l := int64(len(blockRawData)) + if l < 1 || l > nodeRequestsAtOnce { + return fmt.Errorf("blockRawData length (%d) is 0 or greater than 'node requests at once' (%d)", l, nodeRequestsAtOnce) + } + } + + // get block_hash, block_unclesCount, block_compressed & block_txs + err := rpciGetBulkBlockRawData(blockRawData, nodeRequestsAtOnce) + if err != nil { + return fmt.Errorf("rpciGetBulkBlockRawData: %w", err) + } + err = rpciGetBulkRawUncles(blockRawData, nodeRequestsAtOnce) + if err != nil { + return fmt.Errorf("rpciGetBulkRawUncles: %w", err) + } + err = rpciGetBulkRawReceipts(blockRawData, nodeRequestsAtOnce) + if err != nil { + return fmt.Errorf("rpciGetBulkRawReceipts: %w", err) + } + err = rpciGetBulkRawTraces(blockRawData, nodeRequestsAtOnce) + if err != nil { + return fmt.Errorf("rpciGetBulkRawTraces: %w", err) + } + + // write to bigtable + { + // prepare array + muts := []*gcp_bigtable.Mutation{} + keys := []string{} + for _, v := range blockRawData { + if len(v.blockCompressed) == 0 || len(v.tracesCompressed) == 0 { + log.Fatal(nil, "tried writing empty data to BT", 0, map[string]interface{}{"len(v.blockCompressed)": len(v.blockCompressed), "len(v.receiptsCompressed)": len(v.receiptsCompressed), "len(v.tracesCompressed)": len(v.tracesCompressed)}) // fatal, as if this is not working in the first place, it will never work + } + mut := gcp_bigtable.NewMutation() + mut.Set(BT_COLUMNFAMILY_BLOCK, BT_COLUMN_BLOCK, gcp_bigtable.Timestamp(0), v.blockCompressed) + if len(v.receiptsCompressed) < 1 { + log.Warnf("empty receipts at block %d lRec %d lTxs %d", v.blockNumber, len(v.receiptsCompressed), len(v.blockTxs)) + } + mut.Set(BT_COLUMNFAMILY_RECEIPTS, BT_COLUMN_RECEIPTS, gcp_bigtable.Timestamp(0), v.receiptsCompressed) + mut.Set(BT_COLUMNFAMILY_TRACES, BT_COLUMN_TRACES, gcp_bigtable.Timestamp(0), v.tracesCompressed) + if v.blockUnclesCount > 0 { + mut.Set(BT_COLUMNFAMILY_UNCLES, BT_COLUMN_UNCLES, gcp_bigtable.Timestamp(0), v.unclesCompressed) + } + muts = append(muts, mut) + keys = append(keys, fmt.Sprintf("%d:%12d", utils.Config.Chain.Id, MAX_EL_BLOCK_NUMBER-v.blockNumber)) + } + + // write + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + var errs []error + errs, err = tableBlocksRaw.ApplyBulk(ctx, keys, muts) + if err != nil { + return fmt.Errorf("tableBlocksRaw.ApplyBulk err: %w", err) + } + for i, e := range errs { + if e != nil { + return fmt.Errorf("tableBlocksRaw.ApplyBulk errs(%d): %w", i, e) + } + } + } + + // write to SQL + err = psqlAddElements(blockRawData) + if err != 
nil { + return fmt.Errorf("psqlAddElements: %w", err) + } + + return nil +} + +// export all blocks, heavy use of bulk & concurrency, providing a range array +func bulkExportBlocksRange(tableBlocksRaw *gcp_bigtable.Table, blockRanges []intRange, concurrency int64, nodeRequestsAtOnce int64, discordWebhookBlockThreshold *int64, discordWebhookReportUrl *string, discordWebhookUser *string) error { + { + var blocksTotalCount int64 + l := len(blockRanges) + if l <= 0 { + return fmt.Errorf("got empty blockRanges array") + } + for i, v := range blockRanges { + if v.start <= v.end { + blocksTotalCount += v.end - v.start + 1 + } else { + return fmt.Errorf("blockRanges at index %d has wrong start (%s) > end (%s) combination", i, _formatInt64(v.start), _formatInt64(v.end)) + } + } + + if l == 1 { + log.Infof("Only 1 range found, started export of blocks %s to %s, total block amount %s, using an updater every %d seconds for more details.", _formatInt64(blockRanges[0].start), _formatInt64(blockRanges[0].end), _formatInt64(blocksTotalCount), OUTPUT_CYCLE_IN_SECONDS) + } else { + log.Infof("%d ranges found, total block amount %d, using an updater every %d seconds for more details.", l, blocksTotalCount, OUTPUT_CYCLE_IN_SECONDS) + } + } + + gOuterMustStop := atomic.Bool{} + gOuter := &errgroup.Group{} + gOuter.SetLimit(int(concurrency)) + + totalStart := time.Now() + exportStart := totalStart + var lastDiscordReportAtBlocksProcessedTotal int64 + blocksProcessedTotal := atomic.Int64{} + blocksProcessedIntv := atomic.Int64{} + + go func() { + for { + time.Sleep(time.Second * OUTPUT_CYCLE_IN_SECONDS) + if gOuterMustStop.Load() { + break + } + + bpi := blocksProcessedIntv.Swap(0) + newStart := time.Now() + blocksProcessedTotal.Add(bpi) + bpt := blocksProcessedTotal.Load() + + var totalBlocks int64 + latestNodeBlock := currentNodeBlockNumber.Load() + for _, v := range blockRanges { + if v.end > latestNodeBlock { + totalBlocks += latestNodeBlock - v.start + 1 + } else { + totalBlocks += v.end - v.start + 1 + } + } + blocksPerSecond := float64(bpi) / time.Since(exportStart).Seconds() + blocksPerSecondTotal := float64(bpt) / time.Since(totalStart).Seconds() + durationRemainingTotal := time.Second * time.Duration(float64(totalBlocks-bpt)/float64(blocksPerSecondTotal)) + + log.Infof("current speed: %0.1f b/s %0.1f t/s %s remain %s total %0.2fh (=%0.2fd to go)", blocksPerSecond, blocksPerSecondTotal, _formatInt64(totalBlocks-bpt), _formatInt64(totalBlocks), durationRemainingTotal.Hours(), durationRemainingTotal.Hours()/24) + exportStart = newStart + if lastDiscordReportAtBlocksProcessedTotal+(*discordWebhookBlockThreshold) <= bpt { + lastDiscordReportAtBlocksProcessedTotal += (*discordWebhookBlockThreshold) + sendMessage(fmt.Sprintf("%s NODE EXPORT: %0.1f block/s %s remaining (%0.1f day/s to go)", getChainNamePretty(), blocksPerSecondTotal, _formatInt64(totalBlocks-bpt), durationRemainingTotal.Hours()/24), discordWebhookReportUrl, discordWebhookUser) + } + } + }() + defer gOuterMustStop.Store(true) // kill the updater + + blockRawData := make([]fullBlockRawData, 0, nodeRequestsAtOnce) + blockRawDataLen := int64(0) +Loop: + for _, blockRange := range blockRanges { + current := blockRange.start + for blockRange.end-current+1 > 0 { + if gOuterMustStop.Load() { + break Loop + } + + currentNodeBlockNumberLocalCopy := currentNodeBlockNumber.Load() + for blockRawDataLen < nodeRequestsAtOnce && current <= blockRange.end { + if currentNodeBlockNumberLocalCopy >= current { + blockRawData = append(blockRawData, 
fullBlockRawData{blockNumber: current}) + blockRawDataLen++ + current++ + } else { + log.Warnf("tried to export block %d, but latest block on node is %d, so stopping all further export till %d", current, currentNodeBlockNumberLocalCopy, blockRange.end) + current = blockRange.end + 1 + } + } + if blockRawDataLen == nodeRequestsAtOnce { + brd := blockRawData + gOuter.Go(func() error { + err := _bulkExportBlocksHandler(tableBlocksRaw, brd, nodeRequestsAtOnce, 0) + if err != nil { + gOuterMustStop.Store(true) + return err + } + blocksProcessedIntv.Add(int64(len(brd))) + return nil + }) + blockRawData = make([]fullBlockRawData, 0, nodeRequestsAtOnce) + blockRawDataLen = 0 + } + } + } + + // write the rest + if !gOuterMustStop.Load() && blockRawDataLen > 0 { + brd := blockRawData + gOuter.Go(func() error { + err := _bulkExportBlocksHandler(tableBlocksRaw, brd, nodeRequestsAtOnce, 0) + if err != nil { + gOuterMustStop.Store(true) + return err + } + blocksProcessedIntv.Add(int64(len(brd))) + return nil + }) + } + + return gOuter.Wait() +} + +// ////////// +// HELPERs // +// ////////// +// Send message to discord +func sendMessage(content string, webhookUrl *string, username *string) { + if len(*webhookUrl) > 0 { + err := discordwebhook.SendMessage(*webhookUrl, discordwebhook.Message{Username: username, Content: &content}) + if err != nil { + log.Error(err, "error sending message to discord", 0, map[string]interface{}{"content": content, "webhookUrl": *webhookUrl, "username": *username}) + } + } +} + +// Get pretty name for chain +func getChainNamePretty() string { + switch utils.Config.Chain.Id { + case MAINNET_CHAINID: + return "<:eth:1184470363967598623> ETHEREUM mainnet" + case GOERLI_CHAINID: + return "GOERLI testnet" + case OPTIMISM_CHAINID: + return "<:op:1184470125458489354> OPTIMISM mainnet" + case GNOSIS_CHAINID: + return "<:gnosis:1184470353947398155> GNOSIS mainnet" + case HOLESKY_CHAINID: + return "HOLESKY testnet" + case ARBITRUM_CHAINID: + return "<:arbitrum:1184470344506036334> ARBITRUM mainnet" + case SEPOLIA_CHAINID: + return "SEPOLIA testnet" + } + return fmt.Sprintf("%d", utils.Config.Chain.Id) +} + +// format int for pretty output +func _formatInt(value int) string { + return _formatInt64(int64(value)) +} + +// format int64 for pretty output +func _formatInt64(value int64) string { + result := "" + for value >= 1000 { + lastPart := value % 1000 + value /= 1000 + if len(result) > 0 { + result = fmt.Sprintf("%03d,%s", lastPart, result) + } else { + result = fmt.Sprintf("%03d", lastPart) + } + } + if len(result) > 0 { + return fmt.Sprintf("%d,%s", value, result) + } + return fmt.Sprintf("%d", value) +} + +// compress given byte slice +func compress(src []byte) []byte { + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + if _, err := zw.Write(src); err != nil { + log.Fatal(err, "error writing to gzip writer", 0) // fatal, as if this is not working in the first place, it will never work + } + if err := zw.Close(); err != nil { + log.Fatal(err, "error closing gzip writer", 0) // fatal, as if this is not working in the first place, it will never work + } + return buf.Bytes() +} + +// decompress given byte slice +/* func decompress(src []byte) []byte { + zr, err := gzip.NewReader(bytes.NewReader(src)) + if err != nil { + log.Fatal(err, "error creating gzip reader", 0) // fatal, as if this is not working in the first place, it will never work + } + data, err := io.ReadAll(zr) + if err != nil { + log.Fatal(err, "error reading from gzip reader", 0) // fatal, as if this is not working in 
the first place, it will never work + } + return data +} */ + +// used by splitAndVerifyJsonArray to add an element to the list depending on its Id +func _splitAndVerifyJsonArrayAddElement(r *[][]byte, element []byte, lastId int64) (int64, error) { + // adding empty elements will cause issues, so we don't allow it + if len(element) <= 0 { + return -1, fmt.Errorf("error, tried to add empty element, lastId (%d)", lastId) + } + + // unmarshal + data := &jsonRpcReturnId{} + err := json.Unmarshal(element, data) + if err != nil { + return -1, fmt.Errorf("error decoding '%s': %w", element, err) + } + + // negative ids signal an issue + if data.Id < 0 { + return -1, fmt.Errorf("error, provided Id (%d) < 0", data.Id) + } + // id must be ascending or equal + if data.Id < lastId { + return -1, fmt.Errorf("error, provided Id (%d) < lastId (%d)", data.Id, lastId) + } + + // new element + if data.Id != lastId { + *r = append(*r, element) + } else { // append element (same id) + i := len(*r) - 1 + if (*r)[i][0] == byte('[') { + (*r)[i] = (*r)[i][1 : len((*r)[i])-1] + } + (*r)[i] = append(append(append(append([]byte("["), (*r)[i]...), byte(',')), element...), byte(']')) + } + + return data.Id, nil +} + +// split a bulk json request into single requests +func _splitAndVerifyJsonArray(jArray []byte, providedElementCount int64) ([][]byte, error) { + endDigit := byte('}') + searchValue := []byte(`{"jsonrpc":"`) + searchLen := len(searchValue) + foundElementCount := int64(0) + + // remove everything before the first hit + i := bytes.Index(jArray, searchValue) + if i < 0 { + return nil, fmt.Errorf("no element found") + } + jArray = jArray[i:] + + // find all elements + var err error + lastId := int64(-1) + r := make([][]byte, 0) + for { + if len(jArray) < searchLen { // weird corner case, shouldn't happen at all + i = -1 + } else { // get next hit / ignore current (at index 0) + i = bytes.Index(jArray[searchLen:], searchValue) + } + // handle last element + if i < 0 { + for l := len(jArray) - 1; l >= 0 && jArray[l] != endDigit; l-- { + jArray = jArray[:l] + } + foundElementCount++ + _, err = _splitAndVerifyJsonArrayAddElement(&r, jArray, lastId) + if err != nil { + return nil, fmt.Errorf("error calling split and verify json array add element - last element: %w", err) + } + break + } + // handle normal element + foundElementCount++ + lastId, err = _splitAndVerifyJsonArrayAddElement(&r, jArray[:i+searchLen-1], lastId) + if err != nil { + return nil, fmt.Errorf("error calling split and verify json array add element: %w", err) + } + // set cursor to new start + jArray = jArray[i+searchLen:] + } + if foundElementCount != providedElementCount { + return r, fmt.Errorf("provided element count %d doesn't match found %d", providedElementCount, foundElementCount) + } + return r, nil +} + +// get newest block number from node, should always be called with TRUE +func updateBlockNumber(firstCall bool, noNewBlocks bool, noNewBlocksThresholdDuration time.Duration, discordWebhookReportUrl *string, discordWebhookUser *string, discordWebhookAddTextFatal *string) { + if firstCall { + blockNumber, err := rpciGetLatestBlock() + if err != nil { + sendMessage(fmt.Sprintf("%s NODE EXPORT: Fatal, failed to get newest block from node, on first try %s", getChainNamePretty(), *discordWebhookAddTextFatal), discordWebhookReportUrl, discordWebhookUser) + log.Fatal(err, "fatal, failed to get newest block from node, on first try", 0) + } + currentNodeBlockNumber.Store(blockNumber) + if !noNewBlocks { + go updateBlockNumber(false, false, 
noNewBlocksThresholdDuration, discordWebhookReportUrl, discordWebhookUser, discordWebhookAddTextFatal) + } + return + } + + var errorText string + gotNewBlockAt := time.Now() + timePerBlock := time.Second * time.Duration(utils.Config.Chain.ClConfig.SecondsPerSlot) + if strings.HasPrefix(eth1RpcEndpoint, "ws") { + log.Infof("ws node endpoint found, will use subscribe") + var timer *time.Timer + previousBlock := int64(-1) + newestBlock := int64(-1) + for { + headers := make(chan *gethtypes.Header) + sub, err := rpciSubscribeNewHead(headers) + if err != nil { + errorText = "error, init subscribe for new head" + } else { + Loop: + for { + if timer != nil && !timer.Stop() { + <-timer.C + } + timer = time.NewTimer(noNewBlocksThresholdDuration / 3) + + select { + case err = <-sub.Err(): + errorText = "error, subscribe new head was canceled" + break Loop + case <-timer.C: + errorText = "error, timer triggered for subscribe of new head" + break Loop + case header := <-headers: + previousBlock = currentNodeBlockNumber.Load() + newestBlock = header.Number.Int64() + if newestBlock <= previousBlock { + log.Fatal(nil, "impossible error, newest block <= previous block", 0, map[string]interface{}{"previousBlock": previousBlock, "newestBlock": newestBlock}) + } + currentNodeBlockNumber.Store(newestBlock) + gotNewBlockAt = time.Now() + } + } + } + + durationSinceLastBlockReceived := time.Since(gotNewBlockAt) + if durationSinceLastBlockReceived < noNewBlocksThresholdDuration/3*2 { + log.WarnWithFields(log.Fields{"durationSinceLastBlockReceived": durationSinceLastBlockReceived, "error": err}, errorText) + } else if durationSinceLastBlockReceived < noNewBlocksThresholdDuration { + log.Error(err, errorText, 0, map[string]interface{}{"durationSinceLastBlockReceived": durationSinceLastBlockReceived, "previousBlock": previousBlock, "newestBlock": newestBlock}) + } else { + sendMessage(fmt.Sprintf("%s NODE EXPORT: Fatal, %s, %v, %v %s", getChainNamePretty(), errorText, err, durationSinceLastBlockReceived, *discordWebhookAddTextFatal), discordWebhookReportUrl, discordWebhookUser) + log.Fatal(err, errorText, 0, map[string]interface{}{"durationSinceLastBlockReceived": durationSinceLastBlockReceived, "previousBlock": previousBlock, "newestBlock": newestBlock}) + } + + close(headers) + sub.Unsubscribe() + time.Sleep(timePerBlock) // Sleep for 1 block in case of an error + } + } else { // no ws node endpoint available + log.Infof("no ws node endpoint found, can't use subscribe") + errorText := "error, no new block for a longer time" + for { + time.Sleep(timePerBlock / 2) // wait half a block + previousBlock := currentNodeBlockNumber.Load() + newestBlock, err := rpciGetLatestBlock() + if err == nil { + if previousBlock > newestBlock { + log.Fatal(nil, "impossible error, newest block <= previous block", 0, map[string]interface{}{"previousBlock": previousBlock, "newestBlock": newestBlock}) + } else if previousBlock < newestBlock { + currentNodeBlockNumber.Store(newestBlock) + gotNewBlockAt = time.Now() + continue + } + } + + durationSinceLastBlockReceived := time.Since(gotNewBlockAt) + if durationSinceLastBlockReceived >= noNewBlocksThresholdDuration { + sendMessage(fmt.Sprintf("%s NODE EXPORT: Fatal, %s, %d, %d, %v, %v %s", getChainNamePretty(), errorText, previousBlock, newestBlock, err, durationSinceLastBlockReceived, *discordWebhookAddTextFatal), discordWebhookReportUrl, discordWebhookUser) + log.Fatal(err, errorText, 0, map[string]interface{}{"durationSinceLastBlockReceived": durationSinceLastBlockReceived, 
"previousBlock": previousBlock, "newestBlock": newestBlock}) + } else if durationSinceLastBlockReceived >= noNewBlocksThresholdDuration/3*2 { + log.Error(err, errorText, 0, map[string]interface{}{"durationSinceLastBlockReceived": durationSinceLastBlockReceived, "previousBlock": previousBlock, "newestBlock": newestBlock}) + } else if durationSinceLastBlockReceived >= noNewBlocksThresholdDuration/3 { + log.WarnWithFields(log.Fields{"durationSinceLastBlockReceived": durationSinceLastBlockReceived, "error": err, "previousBlock": previousBlock, "newestBlock": newestBlock}, errorText) + } + } + } +} + +// ///////////////////// +// Postgres interface // +// ///////////////////// +// find gaps (missing ids) in raw_block_status +func psqlFindGaps() ([]intRange, error) { + gaps := []intRange{} + + // check for a gap at the beginning + { + var firstBlock int64 + err := db.ReaderDb.Get(&firstBlock, `SELECT block_id FROM raw_block_status WHERE chain_id = $1 ORDER BY block_id LIMIT 1;`, utils.Config.Chain.Id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { // no entries = no gaps + return []intRange{}, nil + } + return []intRange{}, fmt.Errorf("error reading first block from postgres: %w", err) + } + if firstBlock != 0 { + gaps = append(gaps, intRange{start: 0, end: firstBlock - 1}) + } + } + + // check for gaps everywhere else + rows, err := db.ReaderDb.Query(` + SELECT + block_id + 1 as gapStart, + nextNumber - 1 as gapEnd + FROM + ( + SELECT + block_id, LEAD(block_id) OVER (ORDER BY block_id) as nextNumber + FROM + raw_block_status + WHERE + chain_id = $1 + ) number + WHERE + block_id + 1 <> nextNumber + ORDER BY + gapStart;`, utils.Config.Chain.Id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return gaps, nil + } + return []intRange{}, err + } + defer rows.Close() + + for rows.Next() { + var gap intRange + err := rows.Scan(&gap.start, &gap.end) + if err != nil { + return []intRange{}, err + } + gaps = append(gaps, gap) + } + + return gaps, nil +} + +// get latest block in postgres db +func psqlGetLatestBlock(useWriterDb bool) (int64, error) { + var err error + var latestBlock int64 + query := `SELECT block_id FROM raw_block_status WHERE chain_id = $1 ORDER BY block_id DESC LIMIT 1;` + if useWriterDb { + err = db.WriterDb.Get(&latestBlock, query, utils.Config.Chain.Id) + } else { + err = db.ReaderDb.Get(&latestBlock, query, utils.Config.Chain.Id) + } + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return -1, nil + } + return -1, fmt.Errorf("error reading latest block in postgres: %w", err) + } + return latestBlock, nil +} + +// will add elements to sql, based on blockRawData +// on conflict, it will only overwrite / change current entry if hash is different +func psqlAddElements(blockRawData []fullBlockRawData) error { + l := len(blockRawData) + if l <= 0 { + return fmt.Errorf("error, got empty blockRawData array (%d)", l) + } + + block_number := make([]int64, l) + block_hash := make(pq.ByteaArray, l) + for i, v := range blockRawData { + block_number[i] = v.blockNumber + block_hash[i] = v.blockHash + } + + _, err := db.WriterDb.Exec(` + INSERT INTO raw_block_status + (chain_id, block_id, block_hash) + SELECT + $1, + UNNEST($2::int[]), + UNNEST($3::bytea[][]) + ON CONFLICT (chain_id, block_id) DO + UPDATE SET + block_hash = excluded.block_hash, + indexed_bt = FALSE + WHERE + raw_block_status.block_hash != excluded.block_hash;`, + utils.Config.Chain.Id, pq.Array(block_number), block_hash) + return err +} + +// will return a list of all provided block_ids where the hash in 
the database matches the provided list +func psqlGetHashHitsIdList(blockRawData []fullBlockRawData) ([]int64, error) { + l := len(blockRawData) + if l <= 0 { + return nil, fmt.Errorf("error, got empty blockRawData array (%d)", l) + } + + block_number := make([]int64, l) + block_hash := make(pq.ByteaArray, l) + for i, v := range blockRawData { + block_number[i] = v.blockNumber + block_hash[i] = v.blockHash + } + + // as there are corner cases, to be on the safe side, we will use WriterDb here + rows, err := db.WriterDb.Query(` + SELECT + raw_block_status.block_id + FROM + raw_block_status, + (SELECT UNNEST($1::int[]) as block_id, UNNEST($2::bytea[][]) as block_hash) as node_block_status + WHERE + chain_id = $3 + AND + raw_block_status.block_id = node_block_status.block_id + AND + raw_block_status.block_hash = node_block_status.block_hash + ORDER + by raw_block_status.block_id;`, + pq.Array(block_number), block_hash, utils.Config.Chain.Id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return []int64{}, nil + } + return nil, err + } + defer rows.Close() + + result := []int64{} + for rows.Next() { + var block_id int64 + err := rows.Scan(&block_id) + if err != nil { + return nil, err + } + result = append(result, block_id) + } + + return result, nil +} + +// //////////////// +// RPC interface // +// //////////////// +// get chain id from node +func rpciGetChainId() (uint64, error) { + chainId, err := elClient.ChainID(context.Background()) + if err != nil { + return 0, fmt.Errorf("error retrieving chain id from node: %w", err) + } + return chainId.Uint64(), nil +} + +// get latest block number from node +func rpciGetLatestBlock() (int64, error) { + latestBlockNumber, err := elClient.BlockNumber(context.Background()) + if err != nil { + return 0, fmt.Errorf("error retrieving latest block number: %w", err) + } + return int64(latestBlockNumber), nil +} + +// subscribe for latest block +func rpciSubscribeNewHead(ch chan<- *gethtypes.Header) (ethereum.Subscription, error) { + return elClient.SubscribeNewHead(context.Background(), ch) +} + +// do all the http stuff +func _rpciGetHttpResult(body []byte, nodeRequestsAtOnce int64, count int64) ([][]byte, error) { + /* + funny thing: EL call can crash with 'batch response exceeded limit of 10000000 bytes' even if there is only 1 element. + so to avoid a deadlock, we will remove the "batch" and make a single request, which doesn't have this error at all. + imho this is an EL bug and should be fixed there, but meanwhile this "workaround" will be fine as well. 
+ */ + _ = nodeRequestsAtOnce + if count == 1 && len(body) > 2 && body[0] == byte('[') && body[len(body)-1] == byte(']') { + body = body[1 : len(body)-1] + } + + r, err := http.NewRequest(http.MethodPost, eth1RpcEndpoint, bytes.NewBuffer(body)) + if err != nil { + return nil, fmt.Errorf("error creating post request: %w", err) + } + + r.Header.Add("Content-Type", "application/json") + res, err := httpClient.Do(r) + if err != nil { + return nil, fmt.Errorf("error executing post request: %w", err) + } + + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("error unexpected status code: %d", res.StatusCode) + } + + defer res.Body.Close() + resByte, err := io.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("error reading request body: %w", err) + } + + errorToCheck := []byte(`"error":{"code"`) + if bytes.Contains(resByte, errorToCheck) { + const keepDigitsTotal = 1000 + const keepDigitsFront = 100 + if len(resByte) > keepDigitsTotal { + i := bytes.Index(resByte, errorToCheck) + if i >= keepDigitsFront { + resByte = append([]byte(`<...>`), resByte[i-keepDigitsFront:]...) + } + if len(resByte) > keepDigitsTotal { + resByte = append(resByte[:keepDigitsTotal-5], []byte(`<...>`)...) + } + } + return nil, fmt.Errorf("rpc error: %s", resByte) + } + + return _splitAndVerifyJsonArray(resByte, count) +} + +// will fill only receipts_compressed based on block, used by rpciGetBulkRawReceipts function +func _rpciGetBulkRawBlockReceipts(blockRawData []fullBlockRawData, nodeRequestsAtOnce int64) error { + // check + { + l := int64(len(blockRawData)) + if l < 1 { + return fmt.Errorf("empty blockRawData array received") + } + if l > nodeRequestsAtOnce { + return fmt.Errorf("blockRawData array received with more elements (%d) than allowed (%d)", l, nodeRequestsAtOnce) + } + } + + // get array + var rawData [][]byte + { + bodyStr := "[" + for i, v := range blockRawData { + if i != 0 { + bodyStr += "," + } + bodyStr += fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getBlockReceipts","params":["0x%x"],"id":%d}`, v.blockNumber, i) + } + bodyStr += "]" + var err error + rawData, err = _rpciGetHttpResult([]byte(bodyStr), nodeRequestsAtOnce, int64(len(blockRawData))) + if err != nil { + return fmt.Errorf("error (_rpciGetBulkRawBlockReceipts) split and verify json array: %w", err) + } + } + + // get data + for i, v := range rawData { + blockRawData[i].receiptsCompressed = compress(v) + } + + return nil +} + +// will fill only receipts_compressed based on transaction, used by rpciGetBulkRawReceipts function +func _rpciGetBulkRawTransactionReceipts(blockRawData []fullBlockRawData, nodeRequestsAtOnce int64) error { + // check + { + l := int64(len(blockRawData)) + if l < 1 { + return fmt.Errorf("empty blockRawData array received") + } + if l > nodeRequestsAtOnce { + return fmt.Errorf("blockRawData array received with more elements (%d) than allowed (%d)", l, nodeRequestsAtOnce) + } + } + + // iterate through array and get data when threshold reached + var blockRawDataWriteIndex int + var currentElementCount int64 + var dataAvailable bool + bodyStr := "[" + for i, v := range blockRawData { + l := int64(len(v.blockTxs)) + if l < 1 { + continue // skip empty + } + + // threshold reached, getting data... 
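// One convention in the receipt fetching above and below is worth spelling out
// (an illustrative sketch, not part of this diff): every request in a JSON-RPC
// batch carries an "id", so responses can be matched back to their originating
// block even when several requests belong to the same block or the node reorders
// them. buildBatchBody is a hypothetical helper showing the batch layout:
//
//	func buildBatchBody(method string, blockNumbers []int64) []byte {
//		var sb strings.Builder
//		sb.WriteByte('[')
//		for i, bn := range blockNumbers {
//			if i != 0 {
//				sb.WriteByte(',')
//			}
//			// "id" is the index into blockNumbers, used to reassemble responses
//			fmt.Fprintf(&sb, `{"jsonrpc":"2.0","method":"%s","params":["0x%x"],"id":%d}`, method, bn, i)
//		}
//		sb.WriteByte(']')
//		return []byte(sb.String())
//	}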
+ if dataAvailable { + if currentElementCount+l > nodeRequestsAtOnce { + bodyStr += "]" + rawData, err := _rpciGetHttpResult([]byte(bodyStr), nodeRequestsAtOnce, currentElementCount) + if err != nil { + return fmt.Errorf("error (_rpciGetBulkRawTransactionReceipts) split and verify json array: %w", err) + } + + for _, vv := range rawData { + for len(blockRawData[blockRawDataWriteIndex].blockTxs) < 1 { + blockRawDataWriteIndex++ + } + blockRawData[blockRawDataWriteIndex].receiptsCompressed = compress(vv) + blockRawDataWriteIndex++ + } + + currentElementCount = 0 + bodyStr = "[" + } else { + bodyStr += "," + } + } + + // adding txs of current block + dataAvailable = true + currentElementCount += l + for txIndex, txValue := range v.blockTxs { + if txIndex != 0 { + bodyStr += "," + } + bodyStr += fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getTransactionReceipt","params":["%s"],"id":%d}`, txValue, i) + } + } + + // getting data for the rest... + { + bodyStr += "]" + if len(bodyStr) > 2 { + rawData, err := _rpciGetHttpResult([]byte(bodyStr), nodeRequestsAtOnce, currentElementCount) + if err != nil { + return fmt.Errorf("error (_rpciGetBulkRawTransactionReceipts) split and verify json array: %w", err) + } + + for _, vv := range rawData { + for len(blockRawData[blockRawDataWriteIndex].blockTxs) < 1 { + blockRawDataWriteIndex++ + } + blockRawData[blockRawDataWriteIndex].receiptsCompressed = compress(vv) + blockRawDataWriteIndex++ + } + } + } + + return nil +} + +// will fill only block_hash, block_unclesCount, block_compressed & block_txs +func rpciGetBulkBlockRawData(blockRawData []fullBlockRawData, nodeRequestsAtOnce int64) error { + // check + { + l := int64(len(blockRawData)) + if l < 1 { + return fmt.Errorf("empty blockRawData array received") + } + if l > nodeRequestsAtOnce { + return fmt.Errorf("blockRawData array received with more elements (%d) than allowed (%d)", l, nodeRequestsAtOnce) + } + } + + // get array + var rawData [][]byte + { + bodyStr := "[" + for i, v := range blockRawData { + if i != 0 { + bodyStr += "," + } + bodyStr += fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x%x", true],"id":%d}`, v.blockNumber, i) + } + bodyStr += "]" + var err error + rawData, err = _rpciGetHttpResult([]byte(bodyStr), nodeRequestsAtOnce, int64(len(blockRawData))) + if err != nil { + return fmt.Errorf("error (rpciGetBulkBlockRawData) split and verify json array: %w", err) + } + } + + // get data + blockParsed := &types.Eth1RpcGetBlockResponse{} + for i, v := range rawData { + // block + { + blockRawData[i].blockCompressed = compress(v) + err := json.Unmarshal(v, blockParsed) + if err != nil { + return fmt.Errorf("error decoding block '%d' response: %w", blockRawData[i].blockNumber, err) + } + } + + // id + if i != blockParsed.Id { + return fmt.Errorf("impossible error, i '%d' doesn't match blockParsed.Id '%d'", i, blockParsed.Id) + } + + // number + { + blockParsedResultNumber := int64(binary.BigEndian.Uint64(append(make([]byte, 8-len(blockParsed.Result.Number)), blockParsed.Result.Number...))) + if blockRawData[i].blockNumber != blockParsedResultNumber { + log.Error(nil, "Doesn't match", 0, map[string]interface{}{"blockRawData[i].blockNumber": blockRawData[i].blockNumber, "blockParsedResultNumber": blockParsedResultNumber}) + } + } + + // hash + if blockParsed.Result.Hash == nil { + return fmt.Errorf("blockParsed.Result.Hash is nil at block '%d'", blockRawData[i].blockNumber) + } + blockRawData[i].blockHash = blockParsed.Result.Hash + + // transaction + if 
blockParsed.Result.Transactions == nil {
+            return fmt.Errorf("blockParsed.Result.Transactions is nil at block '%d'", blockRawData[i].blockNumber)
+        }
+        blockRawData[i].blockTxs = make([]string, len(blockParsed.Result.Transactions))
+        for ii, tx := range blockParsed.Result.Transactions {
+            blockRawData[i].blockTxs[ii] = tx.Hash.String()
+        }
+
+        // uncle count
+        if blockParsed.Result.Uncles != nil {
+            blockRawData[i].blockUnclesCount = len(blockParsed.Result.Uncles)
+        }
+    }
+
+    return nil
+}
+
+// will fill only block_hash
+func rpciGetBulkBlockRawHash(blockRawData []fullBlockRawData, nodeRequestsAtOnce int64) error {
+    // check
+    {
+        l := int64(len(blockRawData))
+        if l < 1 {
+            return fmt.Errorf("empty blockRawData array received")
+        }
+        if l > 1 && l > nodeRequestsAtOnce {
+            err := rpciGetBulkBlockRawHash(blockRawData[:l/2], nodeRequestsAtOnce)
+            if err == nil {
+                err = rpciGetBulkBlockRawHash(blockRawData[l/2:], nodeRequestsAtOnce)
+            }
+            return err
+        }
+    }
+
+    // get array
+    var rawData [][]byte
+    {
+        bodyStr := "["
+        for i, v := range blockRawData {
+            if i != 0 {
+                bodyStr += ","
+            }
+            bodyStr += fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x%x", true],"id":%d}`, v.blockNumber, i)
+        }
+        bodyStr += "]"
+        var err error
+        rawData, err = _rpciGetHttpResult([]byte(bodyStr), nodeRequestsAtOnce, int64(len(blockRawData)))
+        if err != nil {
+            return fmt.Errorf("error (rpciGetBulkBlockRawHash) split and verify json array: %w", err)
+        }
+    }
+
+    // get data
+    blockParsed := &types.Eth1RpcGetBlockResponse{}
+    for i, v := range rawData {
+        err := json.Unmarshal(v, blockParsed)
+        if err != nil {
+            return fmt.Errorf("error decoding block '%d' response: %w", blockRawData[i].blockNumber, err)
+        }
+        if i != blockParsed.Id {
+            return fmt.Errorf("impossible error, i '%d' doesn't match blockParsed.Id '%d'", i, blockParsed.Id)
+        }
+        {
+            blockParsedResultNumber := int64(binary.BigEndian.Uint64(append(make([]byte, 8-len(blockParsed.Result.Number)), blockParsed.Result.Number...)))
+            if blockRawData[i].blockNumber != blockParsedResultNumber {
+                log.Error(nil, "Doesn't match", 0, map[string]interface{}{"blockRawData[i].blockNumber": blockRawData[i].blockNumber, "blockParsedResultNumber": blockParsedResultNumber})
+            }
+        }
+        if blockParsed.Result.Hash == nil {
+            return fmt.Errorf("blockParsed.Result.Hash is nil at block '%d'", blockRawData[i].blockNumber)
+        }
+        blockRawData[i].blockHash = blockParsed.Result.Hash
+    }
+
+    return nil
+}
+
+// will fill only uncles (if available)
+func rpciGetBulkRawUncles(blockRawData []fullBlockRawData, nodeRequestsAtOnce int64) error {
+    // check
+    {
+        l := int64(len(blockRawData))
+        if l < 1 {
+            return fmt.Errorf("empty blockRawData array received")
+        }
+        if l > nodeRequestsAtOnce {
+            // I know, in the case of uncles it's very unlikely that we need all slots, but handling this separately would be way too much, so whatever
+            return fmt.Errorf("blockRawData array received with more elements (%d) than allowed (%d)", l, nodeRequestsAtOnce)
+        }
+    }
+
+    // get array
+    var rawData [][]byte
+    {
+        requestedCount := int64(0)
+        firstElement := true
+        bodyStr := "["
+        for _, v := range blockRawData {
+            if v.blockUnclesCount > 2 || v.blockUnclesCount < 0 {
+                // fatal, as this is an impossible error
+                log.Fatal(nil, "impossible error, found uncle count other than 0, 1 or 2", 0, map[string]interface{}{"block_unclesCount": v.blockUnclesCount, "block_number": v.blockNumber})
+            } else if v.blockUnclesCount == 0 {
+                continue
+            } else {
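+                // every block with uncles contributes one request per uncle; e.g. a
+                // block 0x10 with two uncles adds (illustrative values):
+                //   {"jsonrpc":"2.0","method":"eth_getUncleByBlockNumberAndIndex","params":["0x10", "0x0"],"id":16}
+                //   {"jsonrpc":"2.0","method":"eth_getUncleByBlockNumberAndIndex","params":["0x10", "0x1"],"id":16}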
+                if firstElement {
+                    firstElement = false
+                } else {
+                    bodyStr += ","
+                }
+                if v.blockUnclesCount == 1 {
+                    requestedCount++
+                    bodyStr += fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getUncleByBlockNumberAndIndex","params":["0x%x", "0x0"],"id":%d}`, v.blockNumber, v.blockNumber)
+                } else /* if v.blockUnclesCount == 2 */ {
+                    requestedCount++
+                    bodyStr += fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getUncleByBlockNumberAndIndex","params":["0x%x", "0x0"],"id":%d},`, v.blockNumber, v.blockNumber)
+                    requestedCount++
+                    bodyStr += fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getUncleByBlockNumberAndIndex","params":["0x%x", "0x1"],"id":%d}`, v.blockNumber, v.blockNumber)
+                }
+            }
+        }
+        bodyStr += "]"
+        if requestedCount == 0 { // nothing to do, no uncles in this set
+            return nil
+        }
+
+        var err error
+        rawData, err = _rpciGetHttpResult([]byte(bodyStr), nodeRequestsAtOnce, requestedCount)
+        if err != nil {
+            return fmt.Errorf("error (rpciGetBulkRawUncles) split and verify json array: %w", err)
+        }
+    }
+
+    // get data
+    rdIndex := 0
+    for i, v := range blockRawData {
+        if v.blockUnclesCount > 0 { // not the prettiest way, but unmarshalling instead would take much longer for the same result
+            blockRawData[i].unclesCompressed = compress(rawData[rdIndex])
+            rdIndex++
+        }
+    }
+
+    return nil
+}
+
+// will fill only receipts_compressed
+func rpciGetBulkRawReceipts(blockRawData []fullBlockRawData, nodeRequestsAtOnce int64) error {
+    if utils.Config.Chain.Id == ARBITRUM_CHAINID {
+        return _rpciGetBulkRawTransactionReceipts(blockRawData, nodeRequestsAtOnce)
+    }
+    return _rpciGetBulkRawBlockReceipts(blockRawData, nodeRequestsAtOnce)
+}
+
+// will fill only traces_compressed
+func rpciGetBulkRawTraces(blockRawData []fullBlockRawData, nodeRequestsAtOnce int64) error {
+    // check
+    {
+        l := int64(len(blockRawData))
+        if l < 1 {
+            return fmt.Errorf("empty blockRawData array received")
+        }
+        if l > nodeRequestsAtOnce {
+            return fmt.Errorf("blockRawData array received with more elements (%d) than allowed (%d)", l, nodeRequestsAtOnce)
+        }
+    }
+
+    // get array
+    var rawData [][]byte
+    {
+        bodyStr := "["
+        for i, v := range blockRawData {
+            if i != 0 {
+                bodyStr += ","
+            }
+            if utils.Config.Chain.Id == ARBITRUM_CHAINID && v.blockNumber < ARBITRUM_NITRO_BLOCKNUMBER {
+                bodyStr += fmt.Sprintf(`{"jsonrpc":"2.0","method":"arbtrace_block","params":["0x%x"],"id":%d}`, v.blockNumber, i)
+            } else {
+                bodyStr += fmt.Sprintf(`{"jsonrpc":"2.0","method":"debug_traceBlockByNumber","params":["0x%x", {"tracer": "callTracer"}],"id":%d}`, v.blockNumber, i)
+            }
+        }
+        bodyStr += "]"
+        var err error
+        rawData, err = _rpciGetHttpResult([]byte(bodyStr), nodeRequestsAtOnce, int64(len(blockRawData)))
+        if err != nil {
+            return fmt.Errorf("error (rpciGetBulkRawTraces) split and verify json array: %w", err)
+        }
+    }
+
+    // get data
+    for i, v := range rawData {
+        blockRawData[i].tracesCompressed = compress(v)
+    }
+
+    return nil
+}
diff --git a/backend/cmd/exporter/main.go b/backend/cmd/exporter/main.go
index 2bf0a014e..6f3cf7c72 100644
--- a/backend/cmd/exporter/main.go
+++ b/backend/cmd/exporter/main.go
@@ -193,7 +193,19 @@ func Run() {
         go services.StartHistoricPriceService()
     }
 
-    go modules.StartAll(context)
+    usedModules := []modules.ModuleInterface{}
+
+    if cfg.JustV2 {
+        usedModules = append(usedModules, modules.NewDashboardDataModule(context))
+    } else {
+        usedModules = append(usedModules,
+            modules.NewSlotExporter(context),
+            modules.NewExecutionDepositsExporter(context),
+            modules.NewExecutionPayloadsExporter(context),
+        )
+    }
+
+    go modules.StartAll(context, usedModules,
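+        // cfg.JustV2 is handed through to StartAll together with the module list selected above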
cfg.JustV2) // Keep the program alive until Ctrl+C is pressed utils.WaitForCtrlC() diff --git a/backend/cmd/main.go b/backend/cmd/main.go index 813939263..df242be16 100644 --- a/backend/cmd/main.go +++ b/backend/cmd/main.go @@ -9,6 +9,7 @@ import ( "github.com/gobitfly/beaconchain/cmd/blobindexer" "github.com/gobitfly/beaconchain/cmd/eth1indexer" "github.com/gobitfly/beaconchain/cmd/ethstore_exporter" + "github.com/gobitfly/beaconchain/cmd/evm_node_indexer" "github.com/gobitfly/beaconchain/cmd/exporter" "github.com/gobitfly/beaconchain/cmd/misc" "github.com/gobitfly/beaconchain/cmd/monitoring" @@ -63,6 +64,8 @@ func main() { user_service.Run() case "monitoring": monitoring.Run() + case "evm_node_indexer": + evm_node_indexer.Run() default: log.Fatal(nil, fmt.Sprintf("unknown target: %s", target), 0) } diff --git a/backend/cmd/misc/commands/app_bundle.go b/backend/cmd/misc/commands/app_bundle.go new file mode 100644 index 000000000..030931c96 --- /dev/null +++ b/backend/cmd/misc/commands/app_bundle.go @@ -0,0 +1,124 @@ +package commands + +import ( + "flag" + "fmt" + "strings" + + "github.com/gobitfly/beaconchain/cmd/misc/misctypes" + "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/log" + + "github.com/pkg/errors" +) + +type AppBundleCommand struct { + FlagSet *flag.FlagSet + Config appBundleCommandConfig +} + +type appBundleCommandConfig struct { + DryRun bool + Force bool // bypass summary confirm + BundleURL string + BundleVersionCode int64 + NativeVersionCode int64 + TargetInstalls int64 +} + +func (s *AppBundleCommand) ParseCommandOptions() { + s.FlagSet.Int64Var(&s.Config.BundleVersionCode, "version-code", 0, "Version code of that bundle (Default: Next)") + s.FlagSet.Int64Var(&s.Config.NativeVersionCode, "min-native-version", 0, "Minimum required native version (Default: Current)") + s.FlagSet.Int64Var(&s.Config.TargetInstalls, "target-installs", -1, "How many people to roll out to (Default: All)") + s.FlagSet.StringVar(&s.Config.BundleURL, "bundle-url", "", "URL to bundle that contains the update, bundle.zip") + s.FlagSet.BoolVar(&s.Config.Force, "force", false, "Skips summary and confirmation") +} + +func (s *AppBundleCommand) Requires() misctypes.Requires { + return misctypes.Requires{ + UserDBs: true, + } +} + +func (s *AppBundleCommand) Run() error { + if s.Config.BundleURL == "" { + s.showHelp() + return errors.New("Please provide a valid bundle URL via --bundle-url") + } + if s.Config.BundleVersionCode == 0 { + fileName := strings.Split(s.Config.BundleURL, "/") + if len(fileName) == 0 { + return errors.New("Invalid bundle URL") + } + + split := strings.Split(fileName[len(fileName)-1], "_") + if len(split) < 2 { + return errors.New("Invalid bundle URL") + } + + // split[1] is the version code + _, err := fmt.Sscanf(split[1], "%d", &s.Config.BundleVersionCode) + if err != nil { + return errors.Wrap(err, "Error parsing version code") + } + } + if s.Config.NativeVersionCode <= 0 { + err := db.FrontendReaderDB.Get(&s.Config.NativeVersionCode, "SELECT MAX(min_native_version) FROM mobile_app_bundles") + if err != nil { + return errors.Wrap(err, "Error getting max native version") + } + } + + if s.Config.TargetInstalls < 0 { + s.Config.TargetInstalls = -1 + } + + if !s.Config.Force { + // Summary + log.Infof("=== Bundle Summary ===") + log.Infof("Bundle URL: %s", s.Config.BundleURL) + log.Infof("Bundle Version Code: %d", s.Config.BundleVersionCode) + log.Infof("Minimum Native Version: %d", s.Config.NativeVersionCode) + if 
s.Config.TargetInstalls == -1 { + log.Infof("Target Installs: All") + } else { + log.Infof("Target Installs: %d", s.Config.TargetInstalls) + } + log.Infof("======================\n") + + // ask for y/n input + log.Infof("Do you want to add this bundle? (y/n)\n") + var input string + _, err := fmt.Scanln(&input) + if err != nil { + return errors.Wrap(err, "Error reading input") + } + + if input != "y" { + log.Infof("Bundle not added\n") + return nil + } + } + + if s.Config.DryRun { + log.Infof("Dry run, not adding bundle\n") + return nil + } + + _, err := db.FrontendWriterDB.Exec("INSERT INTO mobile_app_bundles (bundle_url, bundle_version, min_native_version, target_count) VALUES ($1, $2, $3, $4)", s.Config.BundleURL, s.Config.BundleVersionCode, s.Config.NativeVersionCode, s.Config.TargetInstalls) + if err != nil { + return errors.Wrap(err, "Error inserting app bundle") + } + + log.Infof("Bundle added successfully") + return nil +} + +func (s *AppBundleCommand) showHelp() { + log.Infof("Usage: app_bundle [options]") + log.Infof("Options:") + log.Infof(" --version-code int\tVersion code of that bundle") + log.Infof(" --min-native-version int\tMinimum required native version (Default: Current)") + log.Infof(" --target-installs int\tHow many people to roll out to (Default: All)") + log.Infof(" --bundle-url string\tURL to bundle that contains the update, bundle.zip") +} diff --git a/backend/cmd/misc/commands/validator_stats_partition.go b/backend/cmd/misc/commands/validator_stats_partition.go index 0222c26d5..ee0088d3c 100644 --- a/backend/cmd/misc/commands/validator_stats_partition.go +++ b/backend/cmd/misc/commands/validator_stats_partition.go @@ -43,17 +43,17 @@ func (s *StatsMigratorCommand) ParseCommandOptions() { s.FlagSet.IntVar(&s.NumberOfPartitions, "partitions", 0, "Number of partitions. Recommended 2 - 128 for PostgreSQL 15") } -func (s *StatsMigratorCommand) StartStatsPartitionCommand() error { +func (s *StatsMigratorCommand) Run() error { if s.CurrentTable == "" { - showHelp() + s.showHelp() return errors.New("Please specify a valid current-table name via --current-table") } if s.DestinationTable == "" { - showHelp() + s.showHelp() return errors.New("Please specify a valid destination-table name via --destination-table") } if s.NumberOfPartitions <= 0 { - showHelp() + s.showHelp() return errors.New("Please specify a valid number of partitions via --partitions. 
Number of partitions must be > 0") } @@ -64,7 +64,7 @@ func (s *StatsMigratorCommand) StartStatsPartitionCommand() error { return nil } -func showHelp() { +func (s *StatsMigratorCommand) showHelp() { log.Infof("Usage: %s --current-table=validator_stats --destination-table=validator_stats_partitioned --partitions=64\n", "validator_stats_partition") log.Infof("Usage: %s --current-table=validator_stats --destination-table=validator_stats_partitioned --partitions=64 --drop-existing\n", "validator_stats_partition") log.Infof("Usage: %s --current-table=validator_stats --destination-table=validator_stats_partitioned --partitions=64 --drop-existing --batch-size=20000 --sleep-between-batches=1s --rename-destination-on-complete=true\n", "validator_stats_partition") diff --git a/backend/cmd/misc/main.go b/backend/cmd/misc/main.go index d7b9b77a1..c567a0371 100644 --- a/backend/cmd/misc/main.go +++ b/backend/cmd/misc/main.go @@ -17,10 +17,14 @@ import ( "sync" "time" + firebase "firebase.google.com/go/v4" + "firebase.google.com/go/v4/messaging" "github.com/coocood/freecache" + "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" "github.com/go-redis/redis/v8" "github.com/gobitfly/beaconchain/cmd/misc/commands" + "github.com/gobitfly/beaconchain/cmd/misc/misctypes" "github.com/gobitfly/beaconchain/pkg/commons/cache" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" @@ -32,11 +36,13 @@ import ( edb "github.com/gobitfly/beaconchain/pkg/exporter/db" "github.com/gobitfly/beaconchain/pkg/exporter/modules" "github.com/gobitfly/beaconchain/pkg/exporter/services" + "github.com/gobitfly/beaconchain/pkg/notification" _ "github.com/jackc/pgx/v5/stdlib" "github.com/pkg/errors" utilMath "github.com/protolambda/zrnt/eth2/util/math" go_ens "github.com/wealdtech/go-ens/v3" "golang.org/x/sync/errgroup" + "google.golang.org/api/option" "flag" @@ -67,6 +73,14 @@ var opts = struct { DryRun bool }{} +/** + * If your command does not require Bigtable, Redis, Node, or DBs, you can remove that requirement for that command here. + * By default, all commands that are not in the REQUIRES_LIST will automatically require everything. 
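+ * For example, a hypothetical command that only needs the user database would
+ * register itself here as (illustrative, mirroring the app-bundle entry below):
+ *
+ *   "my-command": (&commands.MyCommand{}).Requires(), // Requires() returning misctypes.Requires{UserDBs: true}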
+ */ +var REQUIRES_LIST = map[string]misctypes.Requires{ + "app-bundle": (&commands.AppBundleCommand{}).Requires(), +} + func Run() { fs := flag.NewFlagSet("fs", flag.ExitOnError) @@ -74,8 +88,12 @@ func Run() { FlagSet: fs, } + appBundleCommand := commands.AppBundleCommand{ + FlagSet: fs, + } + configPath := fs.String("config", "config/default.config.yml", "Path to the config file") - fs.StringVar(&opts.Command, "command", "", "command to run, available: updateAPIKey, applyDbSchema, initBigtableSchema, epoch-export, debug-rewards, debug-blocks, clear-bigtable, index-old-eth1-blocks, update-aggregation-bits, historic-prices-export, index-missing-blocks, export-epoch-missed-slots, migrate-last-attestation-slot-bigtable, export-genesis-validators, update-block-finalization-sequentially, nameValidatorsByRanges, export-stats-totals, export-sync-committee-periods, export-sync-committee-validator-stats, partition-validator-stats, migrate-app-purchases") + fs.StringVar(&opts.Command, "command", "", "command to run, available: updateAPIKey, applyDbSchema, initBigtableSchema, epoch-export, debug-rewards, debug-blocks, clear-bigtable, index-old-eth1-blocks, update-aggregation-bits, historic-prices-export, index-missing-blocks, export-epoch-missed-slots, migrate-last-attestation-slot-bigtable, export-genesis-validators, update-block-finalization-sequentially, nameValidatorsByRanges, export-stats-totals, export-sync-committee-periods, export-sync-committee-validator-stats, partition-validator-stats, migrate-app-purchases, collect-notifications, collect-user-db-notifications, verify-fcm-tokens, app-bundle") fs.Uint64Var(&opts.StartEpoch, "start-epoch", 0, "start epoch") fs.Uint64Var(&opts.EndEpoch, "end-epoch", 0, "end epoch") fs.Uint64Var(&opts.User, "user", 0, "user id") @@ -99,6 +117,7 @@ func Run() { versionFlag := fs.Bool("version", false, "Show version and exit") statsPartitionCommand.ParseCommandOptions() + appBundleCommand.ParseCommandOptions() _ = fs.Parse(os.Args[2:]) if *versionFlag { @@ -116,69 +135,116 @@ func Run() { } utils.Config = cfg + requires, ok := REQUIRES_LIST[opts.Command] + if !ok { + requires = misctypes.Requires{ + Bigtable: true, + Redis: true, + ClNode: true, + ElNode: true, + UserDBs: true, + NetworkDBs: true, + } + } + chainIdString := strconv.FormatUint(utils.Config.Chain.ClConfig.DepositChainID, 10) - bt, err := db.InitBigtable(utils.Config.Bigtable.Project, utils.Config.Bigtable.Instance, chainIdString, utils.Config.RedisCacheEndpoint) - if err != nil { - log.Fatal(err, "error initializing bigtable", 0) + var bt *db.Bigtable + if requires.Bigtable { + bt, err = db.InitBigtable(utils.Config.Bigtable.Project, utils.Config.Bigtable.Instance, chainIdString, utils.Config.RedisCacheEndpoint) + if err != nil { + log.Fatal(err, "error initializing bigtable", 0) + } } - cl := consapi.NewClient("http://" + cfg.Indexer.Node.Host + ":" + cfg.Indexer.Node.Port) - nodeImpl, ok := cl.ClientInt.(*consapi.NodeClient) - if !ok { - log.Fatal(nil, "lighthouse client can only be used with real node impl", 0) - } - chainIDBig := new(big.Int).SetUint64(utils.Config.Chain.ClConfig.DepositChainID) - rpcClient, err := rpc.NewLighthouseClient(nodeImpl, chainIDBig) - if err != nil { - log.Fatal(err, "lighthouse client error", 0) + var rpcClient *rpc.LighthouseClient + if requires.ClNode { + cl := consapi.NewClient("http://" + cfg.Indexer.Node.Host + ":" + cfg.Indexer.Node.Port) + nodeImpl, ok := cl.ClientInt.(*consapi.NodeClient) + if !ok { + log.Fatal(nil, "lighthouse client can only be used 
with real node impl", 0) + } + chainIDBig := new(big.Int).SetUint64(utils.Config.Chain.ClConfig.DepositChainID) + rpcClient, err = rpc.NewLighthouseClient(nodeImpl, chainIDBig) + if err != nil { + log.Fatal(err, "lighthouse client error", 0) + } } - erigonClient, err := rpc.NewErigonClient(utils.Config.Eth1ErigonEndpoint) - if err != nil { - log.Fatal(err, "error initializing erigon client", 0) - } - - db.WriterDb, db.ReaderDb = db.MustInitDB(&types.DatabaseConfig{ - Username: cfg.WriterDatabase.Username, - Password: cfg.WriterDatabase.Password, - Name: cfg.WriterDatabase.Name, - Host: cfg.WriterDatabase.Host, - Port: cfg.WriterDatabase.Port, - MaxOpenConns: cfg.WriterDatabase.MaxOpenConns, - MaxIdleConns: cfg.WriterDatabase.MaxIdleConns, - SSL: cfg.WriterDatabase.SSL, - }, &types.DatabaseConfig{ - Username: cfg.ReaderDatabase.Username, - Password: cfg.ReaderDatabase.Password, - Name: cfg.ReaderDatabase.Name, - Host: cfg.ReaderDatabase.Host, - Port: cfg.ReaderDatabase.Port, - MaxOpenConns: cfg.ReaderDatabase.MaxOpenConns, - MaxIdleConns: cfg.ReaderDatabase.MaxIdleConns, - SSL: cfg.ReaderDatabase.SSL, - }, "pgx", "postgres") - defer db.ReaderDb.Close() - defer db.WriterDb.Close() - db.FrontendWriterDB, db.FrontendReaderDB = db.MustInitDB(&types.DatabaseConfig{ - Username: cfg.Frontend.WriterDatabase.Username, - Password: cfg.Frontend.WriterDatabase.Password, - Name: cfg.Frontend.WriterDatabase.Name, - Host: cfg.Frontend.WriterDatabase.Host, - Port: cfg.Frontend.WriterDatabase.Port, - MaxOpenConns: cfg.Frontend.WriterDatabase.MaxOpenConns, - MaxIdleConns: cfg.Frontend.WriterDatabase.MaxIdleConns, - }, &types.DatabaseConfig{ - Username: cfg.Frontend.ReaderDatabase.Username, - Password: cfg.Frontend.ReaderDatabase.Password, - Name: cfg.Frontend.ReaderDatabase.Name, - Host: cfg.Frontend.ReaderDatabase.Host, - Port: cfg.Frontend.ReaderDatabase.Port, - MaxOpenConns: cfg.Frontend.ReaderDatabase.MaxOpenConns, - MaxIdleConns: cfg.Frontend.ReaderDatabase.MaxIdleConns, - }, "pgx", "postgres") - defer db.FrontendReaderDB.Close() - defer db.FrontendWriterDB.Close() + var erigonClient *rpc.ErigonClient + if requires.ElNode { + erigonClient, err = rpc.NewErigonClient(utils.Config.Eth1ErigonEndpoint) + if err != nil { + log.Fatal(err, "error initializing erigon client", 0) + } + } + + if requires.NetworkDBs { + db.WriterDb, db.ReaderDb = db.MustInitDB(&types.DatabaseConfig{ + Username: cfg.WriterDatabase.Username, + Password: cfg.WriterDatabase.Password, + Name: cfg.WriterDatabase.Name, + Host: cfg.WriterDatabase.Host, + Port: cfg.WriterDatabase.Port, + MaxOpenConns: cfg.WriterDatabase.MaxOpenConns, + MaxIdleConns: cfg.WriterDatabase.MaxIdleConns, + SSL: cfg.WriterDatabase.SSL, + }, &types.DatabaseConfig{ + Username: cfg.ReaderDatabase.Username, + Password: cfg.ReaderDatabase.Password, + Name: cfg.ReaderDatabase.Name, + Host: cfg.ReaderDatabase.Host, + Port: cfg.ReaderDatabase.Port, + MaxOpenConns: cfg.ReaderDatabase.MaxOpenConns, + MaxIdleConns: cfg.ReaderDatabase.MaxIdleConns, + SSL: cfg.ReaderDatabase.SSL, + }, "pgx", "postgres") + defer db.ReaderDb.Close() + defer db.WriterDb.Close() + + db.AlloyWriter, db.AlloyReader = db.MustInitDB(&types.DatabaseConfig{ + Username: cfg.AlloyWriter.Username, + Password: cfg.AlloyWriter.Password, + Name: cfg.AlloyWriter.Name, + Host: cfg.AlloyWriter.Host, + Port: cfg.AlloyWriter.Port, + MaxOpenConns: cfg.AlloyWriter.MaxOpenConns, + MaxIdleConns: cfg.AlloyWriter.MaxIdleConns, + SSL: cfg.AlloyWriter.SSL, + }, &types.DatabaseConfig{ + Username: cfg.AlloyReader.Username, 
+            Password: cfg.AlloyReader.Password,
+            Name: cfg.AlloyReader.Name,
+            Host: cfg.AlloyReader.Host,
+            Port: cfg.AlloyReader.Port,
+            MaxOpenConns: cfg.AlloyReader.MaxOpenConns,
+            MaxIdleConns: cfg.AlloyReader.MaxIdleConns,
+            SSL: cfg.AlloyReader.SSL,
+        }, "pgx", "postgres")
+        defer db.AlloyReader.Close()
+        defer db.AlloyWriter.Close()
+    }
+    if requires.UserDBs {
+        db.FrontendWriterDB, db.FrontendReaderDB = db.MustInitDB(&types.DatabaseConfig{
+            Username: cfg.Frontend.WriterDatabase.Username,
+            Password: cfg.Frontend.WriterDatabase.Password,
+            Name: cfg.Frontend.WriterDatabase.Name,
+            Host: cfg.Frontend.WriterDatabase.Host,
+            Port: cfg.Frontend.WriterDatabase.Port,
+            MaxOpenConns: cfg.Frontend.WriterDatabase.MaxOpenConns,
+            MaxIdleConns: cfg.Frontend.WriterDatabase.MaxIdleConns,
+        }, &types.DatabaseConfig{
+            Username: cfg.Frontend.ReaderDatabase.Username,
+            Password: cfg.Frontend.ReaderDatabase.Password,
+            Name: cfg.Frontend.ReaderDatabase.Name,
+            Host: cfg.Frontend.ReaderDatabase.Host,
+            Port: cfg.Frontend.ReaderDatabase.Port,
+            MaxOpenConns: cfg.Frontend.ReaderDatabase.MaxOpenConns,
+            MaxIdleConns: cfg.Frontend.ReaderDatabase.MaxIdleConns,
+        }, "pgx", "postgres")
+        defer db.FrontendReaderDB.Close()
+        defer db.FrontendWriterDB.Close()
+    }
 
     // clickhouse
     db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{
@@ -204,17 +270,27 @@ func Run() {
     defer db.ClickHouseWriter.Close()
 
     // Initialize the persistent redis client
-    rdc := redis.NewClient(&redis.Options{
-        Addr: utils.Config.RedisSessionStoreEndpoint,
-        ReadTimeout: time.Second * 20,
-    })
+    if requires.Redis {
+        rdc := redis.NewClient(&redis.Options{
+            Addr: utils.Config.RedisSessionStoreEndpoint,
+            ReadTimeout: time.Second * 20,
+        })
 
-    if err := rdc.Ping(context.Background()).Err(); err != nil {
-        log.Fatal(err, "error connecting to persistent redis store", 0)
-    }
+        if err := rdc.Ping(context.Background()).Err(); err != nil {
+            log.Fatal(err, "error connecting to persistent redis store", 0)
+        }
 
-    db.PersistentRedisDbClient = rdc
-    defer db.PersistentRedisDbClient.Close()
+        db.PersistentRedisDbClient = rdc
+        defer db.PersistentRedisDbClient.Close()
+
+        if utils.Config.TieredCacheProvider != "redis" {
+            log.Fatal(nil, "no cache provider set, please set TieredCacheProvider (redis)", 0)
+        }
+        if utils.Config.TieredCacheProvider == "redis" || len(utils.Config.RedisCacheEndpoint) != 0 {
+            cache.MustInitTieredCache(utils.Config.RedisCacheEndpoint)
+            log.Infof("tiered cache initialized, latest finalized epoch: %v", cache.LatestFinalizedEpoch.Get())
+        }
+    }
 
     switch opts.Command {
     case "nameValidatorsByRanges":
@@ -451,11 +527,20 @@ func Run() {
         err = fixExecTransactionsCount()
     case "partition-validator-stats":
         statsPartitionCommand.Config.DryRun = opts.DryRun
-        err = statsPartitionCommand.StartStatsPartitionCommand()
+        err = statsPartitionCommand.Run()
+    case "app-bundle":
+        appBundleCommand.Config.DryRun = opts.DryRun
+        err = appBundleCommand.Run()
     case "fix-ens":
         err = fixEns(erigonClient)
     case "fix-ens-addresses":
         err = fixEnsAddresses(erigonClient)
+    case "collect-notifications":
+        err = collectNotifications(opts.StartEpoch)
+    case "collect-user-db-notifications":
+        err = collectUserDbNotifications(opts.StartEpoch)
+    case "verify-fcm-tokens":
+        err = verifyFCMTokens()
     default:
         log.Fatal(nil, fmt.Sprintf("unknown command %s", opts.Command), 0)
     }
@@ -467,6 +552,103 @@ func Run() {
     }
 }
 
+func collectNotifications(startEpoch uint64) error {
+    epoch := startEpoch
+
+    notifications, err :=
notification.GetNotificationsForEpoch(utils.Config.Notifications.PubkeyCachePath, epoch) + if err != nil { + return err + } + + log.Infof("found %v notifications for epoch %v with %v notifications for user 0", len(notifications), epoch, len(notifications[0])) + if len(notifications[0]) > 0 { + spew.Dump(notifications[0]) + } + + emails, err := notification.RenderEmailsForUserEvents(0, notifications) + if err != nil { + return err + } + + for _, email := range emails { + // if email.Address == "" { + log.Infof("to: %v", email.Address) + log.Infof("subject: %v", email.Subject) + log.Infof("body: %v", email.Email.Body) + log.Info("-----") + // } + } + + // pushMessages, err := notification.RenderPushMessagesForUserEvents(0, notifications) + // if err != nil { + // return err + // } + + // for _, pushMessage := range pushMessages { + // message := pushMessage.Messages[0] + // log.Infof("title: %v body: %v", message.Notification.Title, message.Notification.Body) + + // if message.Token == "" { + // log.Info("sending test message") + + // err = notification.SendPushBatch(pushMessage.UserId, []*messaging.Message{message}, false) + // if err != nil { + // log.Error(err, "error sending firebase batch job", 0) + // } + // } + // } + + return nil +} + +func collectUserDbNotifications(startEpoch uint64) error { + epoch := startEpoch + + log.Infof("collecting notifications for epoch %v", epoch) + notifications, err := notification.GetUserNotificationsForEpoch(utils.Config.Notifications.PubkeyCachePath, epoch) + if err != nil { + return err + } + + log.Infof("found %v notifications for epoch %v", len(notifications), epoch) + + log.Infof("found %v dashboard notifications for user", len(notifications[3])) + + emails, err := notification.RenderEmailsForUserEvents(0, notifications) + if err != nil { + return err + } + + for _, email := range emails { + // if email.Address == "" { + log.Infof("to: %v", email.Address) + log.Infof("subject: %v", email.Subject) + log.Infof("body: %v", email.Email.Body) + log.Info("-----") + // } + } + + pushMessages, err := notification.RenderPushMessagesForUserEvents(0, notifications) + if err != nil { + return err + } + + for _, pushMessage := range pushMessages { + message := pushMessage.Messages[0] + log.Infof("title: %v body: %v", message.Notification.Title, message.Notification.Body) + + if message.Token == "" { + log.Info("sending test message") + + err = notification.SendPushBatch(pushMessage.UserId, []*messaging.Message{message}, true) + if err != nil { + log.Error(err, "error sending firebase batch job", 0) + } + } + } + return nil +} + func fixEns(erigonClient *rpc.ErigonClient) error { log.Infof("command: fix-ens") addrs := []struct { @@ -1995,3 +2177,56 @@ func reExportSyncCommittee(rpcClient rpc.Client, p uint64, dryRun bool) error { return tx.Commit() } } + +func verifyFCMTokens() error { + type row struct { + RefreshToken string `db:"refresh_token"` + UserId int `db:"user_id"` + NotificationToken string `db:"notification_token"` + } + + var rows []row + err := db.FrontendWriterDB.Select(&rows, `SELECT refresh_token, user_id, COALESCE(notification_token, '') AS notification_token FROM users_devices where length(notification_token) > 20 and id >= 0 order by id`) + + if err != nil { + return err + } + + ctx := context.Background() + opt := option.WithCredentialsJSON([]byte(utils.Config.Notifications.FirebaseCredentialsPath)) + + app, err := firebase.NewApp(context.Background(), nil, opt) + if err != nil { + log.Error(err, "error initializing app", 0) + return err + 
} + + client, err := app.Messaging(ctx) + if err != nil { + log.Error(err, "error initializing messaging", 0) + return err + } + + for _, r := range rows { + log.Infof("checking token %s for user %v", r.NotificationToken, r.UserId) + + _, err := client.SendDryRun(ctx, &messaging.Message{ + Token: r.NotificationToken, + }) + if err != nil { + if err.Error() == "Requested entity was not found." { + log.Infof("token %s for user %v is invalid", r.NotificationToken, r.UserId) + res, err := db.FrontendWriterDB.Exec(`UPDATE users_devices SET notification_token = NULL WHERE notification_token = $1 AND user_id = $2 AND refresh_token = $3`, r.NotificationToken, r.UserId, r.RefreshToken) + if err != nil { + log.Error(err, "error updating token", 0) + } + rowsAffected, _ := res.RowsAffected() + log.Infof("updated %d rows", rowsAffected) + } else { + log.Error(err, "error sending message", 0) + } + } + time.Sleep(time.Millisecond * 250) + } + return nil +} diff --git a/backend/cmd/misc/misctypes/requires.go b/backend/cmd/misc/misctypes/requires.go new file mode 100644 index 000000000..6d2337412 --- /dev/null +++ b/backend/cmd/misc/misctypes/requires.go @@ -0,0 +1,10 @@ +package misctypes + +type Requires struct { + Bigtable bool + Redis bool + ClNode bool + ElNode bool + NetworkDBs bool + UserDBs bool +} diff --git a/backend/cmd/monitoring/main.go b/backend/cmd/monitoring/main.go index a4bef3a60..eef141caf 100644 --- a/backend/cmd/monitoring/main.go +++ b/backend/cmd/monitoring/main.go @@ -6,6 +6,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/gobitfly/beaconchain/pkg/commons/version" @@ -31,6 +32,15 @@ func Run() { } utils.Config = cfg + if utils.Config.Metrics.Enabled { + go func() { + log.Infof("serving metrics on %v", utils.Config.Metrics.Address) + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { + log.Fatal(err, "error serving metrics", 0) + } + }() + } + db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{ Username: cfg.ClickHouse.WriterDatabase.Username, Password: cfg.ClickHouse.WriterDatabase.Password, diff --git a/backend/cmd/notification_collector/main.go b/backend/cmd/notification_collector/main.go index 428bc6588..feeecf98b 100644 --- a/backend/cmd/notification_collector/main.go +++ b/backend/cmd/notification_collector/main.go @@ -104,6 +104,30 @@ func Run() { }, "pgx", "postgres") }() + wg.Add(1) + go func() { + defer wg.Done() + db.AlloyWriter, db.AlloyReader = db.MustInitDB(&types.DatabaseConfig{ + Username: cfg.AlloyWriter.Username, + Password: cfg.AlloyWriter.Password, + Name: cfg.AlloyWriter.Name, + Host: cfg.AlloyWriter.Host, + Port: cfg.AlloyWriter.Port, + MaxOpenConns: cfg.AlloyWriter.MaxOpenConns, + MaxIdleConns: cfg.AlloyWriter.MaxIdleConns, + SSL: cfg.AlloyWriter.SSL, + }, &types.DatabaseConfig{ + Username: cfg.AlloyReader.Username, + Password: cfg.AlloyReader.Password, + Name: cfg.AlloyReader.Name, + Host: cfg.AlloyReader.Host, + Port: cfg.AlloyReader.Port, + MaxOpenConns: cfg.AlloyReader.MaxOpenConns, + MaxIdleConns: cfg.AlloyReader.MaxIdleConns, + SSL: cfg.AlloyReader.SSL, + }, "pgx", "postgres") + }() + wg.Add(1) go func() { defer wg.Done() @@ -126,6 +150,31 @@ func Run() { }, "pgx", "postgres") }() + wg.Add(1) + go func() { + defer 
wg.Done() + // clickhouse + db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{ + Username: cfg.ClickHouse.WriterDatabase.Username, + Password: cfg.ClickHouse.WriterDatabase.Password, + Name: cfg.ClickHouse.WriterDatabase.Name, + Host: cfg.ClickHouse.WriterDatabase.Host, + Port: cfg.ClickHouse.WriterDatabase.Port, + MaxOpenConns: cfg.ClickHouse.WriterDatabase.MaxOpenConns, + SSL: true, + MaxIdleConns: cfg.ClickHouse.WriterDatabase.MaxIdleConns, + }, &types.DatabaseConfig{ + Username: cfg.ClickHouse.ReaderDatabase.Username, + Password: cfg.ClickHouse.ReaderDatabase.Password, + Name: cfg.ClickHouse.ReaderDatabase.Name, + Host: cfg.ClickHouse.ReaderDatabase.Host, + Port: cfg.ClickHouse.ReaderDatabase.Port, + MaxOpenConns: cfg.ClickHouse.ReaderDatabase.MaxOpenConns, + SSL: true, + MaxIdleConns: cfg.ClickHouse.ReaderDatabase.MaxIdleConns, + }, "clickhouse", "clickhouse") + }() + wg.Add(1) go func() { defer wg.Done() @@ -158,6 +207,10 @@ func Run() { defer db.WriterDb.Close() defer db.FrontendReaderDB.Close() defer db.FrontendWriterDB.Close() + defer db.AlloyReader.Close() + defer db.AlloyWriter.Close() + defer db.ClickHouseReader.Close() + defer db.ClickHouseWriter.Close() defer db.BigtableClient.Close() log.Infof("database connection established") diff --git a/backend/cmd/notification_sender/main.go b/backend/cmd/notification_sender/main.go index e78508597..a4840a89e 100644 --- a/backend/cmd/notification_sender/main.go +++ b/backend/cmd/notification_sender/main.go @@ -1,6 +1,7 @@ package notification_sender import ( + "context" "flag" "fmt" "net/http" @@ -8,6 +9,7 @@ import ( "sync" "time" + "github.com/go-redis/redis/v8" "github.com/gobitfly/beaconchain/pkg/commons/cache" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" @@ -147,6 +149,21 @@ func Run() { }() } + // Initialize the persistent redis client + wg.Add(1) + go func() { + defer wg.Done() + rdc := redis.NewClient(&redis.Options{ + Addr: cfg.RedisSessionStoreEndpoint, + ReadTimeout: time.Second * 60, + }) + + if err := rdc.Ping(context.Background()).Err(); err != nil { + log.Fatal(err, "error connecting to persistent redis store", 0) + } + db.PersistentRedisDbClient = rdc + }() + wg.Wait() defer db.ReaderDb.Close() diff --git a/backend/cmd/typescript_converter/main.go b/backend/cmd/typescript_converter/main.go index 360e57188..4b06729b3 100644 --- a/backend/cmd/typescript_converter/main.go +++ b/backend/cmd/typescript_converter/main.go @@ -3,6 +3,7 @@ package typescript_converter import ( "flag" "go/ast" + "iter" "os" "path/filepath" "slices" @@ -18,10 +19,12 @@ const ( fallbackType = "any" commonFileName = "common" lintDisable = "/* eslint-disable */\n" + goFileSuffix = ".go" + tsFileSuffix = ".ts" ) // Files that should not be converted to TypeScript -var ignoredFiles = []string{"data_access", "search_types", "archiver"} +var ignoredFiles = []string{"data_access", "search_types", "archiver", "rocketpool"} var typeMappings = map[string]string{ "decimal.Decimal": "string /* decimal.Decimal */", @@ -31,7 +34,7 @@ var typeMappings = map[string]string{ // Expects the following flags: // -out: Output folder for the generated TypeScript file -// Standard usage (execute in backend folder): go run cmd/typescript_converter/main.go -out ../frontend/types/api +// Standard usage (execute in backend folder): go run cmd/main.go typescript-converter -out ../frontend/types/api func Run() { var out string @@ -65,24 +68,23 @@ func Run() { log.Fatal(nil, "Failed to load 
package", 0) } - // Find all common types + // Find all common types, i.e. types that are used in multiple files and must be imported in ts commonTypes := getCommonTypes(pkgs) - // Find all usages of common types - usage := getCommonUsages(pkgs, commonTypes) - - // Generate Tygo for common.go - tygos := []*tygo.Tygo{tygo.New(getTygoConfig(out, commonFileName, ""))} - // Generate Tygo for each file - for file, typesUsed := range usage { - importStr := "" + // Find imports (usages of common types) for each file + imports := getImports(pkgs, commonTypes) + + var configs []*tygo.Tygo + // Generate Tygo config for each file + for fileName, typesUsed := range imports { + var importStr string if len(typesUsed) > 0 { importStr = "import type { " + strings.Join(typesUsed, ", ") + " } from './" + commonFileName + "'\n" } - tygos = append(tygos, tygo.New(getTygoConfig(out, file, importStr))) + configs = append(configs, tygo.New(getTygoConfig(out, fileName, importStr))) } // Generate TypeScript - for _, tygo := range tygos { + for _, tygo := range configs { err := tygo.Generate() if err != nil { log.Fatal(err, "Failed to generate TypeScript", 0) @@ -93,7 +95,7 @@ func Run() { } func deleteFiles(out string) error { - files, err := filepath.Glob(out + "*.ts") + files, err := filepath.Glob(out + "*" + tsFileSuffix) if err != nil { return err } @@ -106,68 +108,82 @@ func deleteFiles(out string) error { return nil } -func getTygoConfig(out, file, frontmatter string) *tygo.Config { +func getTygoConfig(outDir, fileName, frontmatter string) *tygo.Config { return &tygo.Config{ Packages: []*tygo.PackageConfig{ { Path: packagePath, TypeMappings: typeMappings, FallbackType: fallbackType, - IncludeFiles: []string{file + ".go"}, - OutputPath: out + file + ".ts", + IncludeFiles: []string{fileName + goFileSuffix}, + OutputPath: outDir + fileName + tsFileSuffix, Frontmatter: lintDisable + frontmatter, }, }, } } -// Parse common.go to find all common types -func getCommonTypes(pkgs []*packages.Package) map[string]bool { - commonTypes := make(map[string]bool) - for _, pkg := range pkgs { - for _, file := range pkg.Syntax { - filename := strings.TrimSuffix(filepath.Base(pkg.Fset.File(file.Pos()).Name()), ".go") - if filepath.Base(filename) != commonFileName { - continue - } - ast.Inspect(file, func(n ast.Node) bool { - if typeSpec, ok := n.(*ast.TypeSpec); ok { - commonTypes[typeSpec.Name.Name] = true +// Iterate over all file names and files in the packages +func allFiles(pkgs []*packages.Package) iter.Seq2[string, *ast.File] { + return func(yield func(string, *ast.File) bool) { + for _, pkg := range pkgs { + for _, file := range pkg.Syntax { + fileName := filepath.Base(pkg.Fset.File(file.Pos()).Name()) + if !yield(fileName, file) { + return } - return true - }) - return commonTypes + } } } - return nil +} + +// Parse common.go to find all common types +func getCommonTypes(pkgs []*packages.Package) map[string]struct{} { + var commonFile *ast.File + // find common file + for fileName, file := range allFiles(pkgs) { + fileName = strings.TrimSuffix(fileName, goFileSuffix) + if filepath.Base(fileName) == commonFileName { + commonFile = file + break + } + } + if commonFile == nil { + log.Fatal(nil, "common.go not found", 0) + } + commonTypes := make(map[string]struct{}) + // iterate over all types in common file and add them to the map + for node := range ast.Preorder(commonFile) { + if typeSpec, ok := node.(*ast.TypeSpec); ok { + commonTypes[typeSpec.Name.Name] = struct{}{} + } + } + return commonTypes } // Parse all files 
to find used common types for each file -func getCommonUsages(pkgs []*packages.Package, commonTypes map[string]bool) map[string][]string { - usage := make(map[string][]string) // Map from file to list of commonTypes used - for _, pkg := range pkgs { - for _, file := range pkg.Syntax { - filename := strings.TrimSuffix(filepath.Base(pkg.Fset.File(file.Pos()).Name()), ".go") - if filepath.Base(filename) == commonFileName || slices.Contains(ignoredFiles, filename) { +// Returns a map with file name as key and a set of common types used in the file as value +func getImports(pkgs []*packages.Package, commonTypes map[string]struct{}) map[string][]string { + imports := make(map[string][]string) // Map from file to set of commonTypes used + imports[commonFileName] = []string{} // Add common file to map with empty set + for fileName, file := range allFiles(pkgs) { + fileName = strings.TrimSuffix(fileName, goFileSuffix) + if filepath.Base(fileName) == commonFileName || slices.Contains(ignoredFiles, fileName) { + continue + } + var currentFileImports []string + // iterate over all struct fields in the file + for node := range ast.Preorder(file) { + ident, ok := node.(*ast.Ident) + if !ok { continue } - if _, exists := usage[filename]; !exists { - usage[filename] = make([]string, 0) + _, isCommonType := commonTypes[ident.Name] + if isCommonType && !slices.Contains(currentFileImports, ident.Name) { + currentFileImports = append(currentFileImports, ident.Name) } - ast.Inspect(file, func(n ast.Node) bool { - ident, ok := n.(*ast.Ident) - if !ok { - return true - } - if !commonTypes[ident.Name] { - return true - } - if !slices.Contains(usage[filename], ident.Name) { - usage[filename] = append(usage[filename], ident.Name) - } - return true - }) } + imports[fileName] = currentFileImports } - return usage + return imports } diff --git a/backend/cmd/user_service/main.go b/backend/cmd/user_service/main.go index 79ba47060..76e634dda 100644 --- a/backend/cmd/user_service/main.go +++ b/backend/cmd/user_service/main.go @@ -93,6 +93,30 @@ func Run() { }, "pgx", "postgres") }() + wg.Add(1) + go func() { + defer wg.Done() + db.WriterDb, db.ReaderDb = db.MustInitDB(&types.DatabaseConfig{ + Username: utils.Config.WriterDatabase.Username, + Password: utils.Config.WriterDatabase.Password, + Name: utils.Config.WriterDatabase.Name, + Host: utils.Config.WriterDatabase.Host, + Port: utils.Config.WriterDatabase.Port, + MaxOpenConns: utils.Config.WriterDatabase.MaxOpenConns, + MaxIdleConns: utils.Config.WriterDatabase.MaxIdleConns, + SSL: utils.Config.WriterDatabase.SSL, + }, &types.DatabaseConfig{ + Username: utils.Config.ReaderDatabase.Username, + Password: utils.Config.ReaderDatabase.Password, + Name: utils.Config.ReaderDatabase.Name, + Host: utils.Config.ReaderDatabase.Host, + Port: utils.Config.ReaderDatabase.Port, + MaxOpenConns: utils.Config.ReaderDatabase.MaxOpenConns, + MaxIdleConns: utils.Config.ReaderDatabase.MaxIdleConns, + SSL: utils.Config.ReaderDatabase.SSL, + }, "pgx", "postgres") + }() + // if needed, init the database, cache or bigtable wg.Wait() diff --git a/backend/go.mod b/backend/go.mod index b53006df9..ff2204794 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -5,7 +5,7 @@ go 1.23 require ( cloud.google.com/go/bigtable v1.21.0 cloud.google.com/go/secretmanager v1.11.5 - firebase.google.com/go v3.13.0+incompatible + firebase.google.com/go/v4 v4.14.1 github.com/ClickHouse/clickhouse-go/v2 v2.17.1 github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21 github.com/alexedwards/scs/redisstore 
v0.0.0-20240316134038-7e11d57e8885 @@ -13,6 +13,7 @@ require ( github.com/attestantio/go-eth2-client v0.19.10 github.com/awa/go-iap v1.26.5 github.com/aws/aws-sdk-go-v2 v1.25.0 + github.com/aws/aws-sdk-go-v2/config v1.18.45 github.com/aws/aws-sdk-go-v2/credentials v1.13.43 github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0 github.com/bwmarrin/snowflake v0.3.0 @@ -24,12 +25,13 @@ require ( github.com/fergusstrange/embedded-postgres v1.29.0 github.com/gavv/httpexpect/v2 v2.16.0 github.com/go-faker/faker/v4 v4.3.0 + github.com/go-openapi/spec v0.20.14 github.com/go-redis/redis/v8 v8.11.5 github.com/gobitfly/eth-rewards v0.1.2-0.20230403064929-411ddc40a5f7 github.com/gobitfly/eth.store v0.0.0-20240312111708-b43f13990280 github.com/golang-jwt/jwt v3.2.2+incompatible github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/golang/protobuf v1.5.3 + github.com/golang/protobuf v1.5.4 github.com/gomodule/redigo v1.9.2 github.com/google/uuid v1.6.0 github.com/gorilla/csrf v1.7.2 @@ -40,6 +42,7 @@ require ( github.com/gzuidhof/tygo v0.2.13 github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/golang-lru v1.0.2 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/invopop/jsonschema v0.12.0 github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530 github.com/jackc/pgtype v1.14.2 @@ -61,7 +64,7 @@ require ( github.com/prysmaticlabs/go-ssz v0.0.0-20210121151755-f6208871c388 github.com/rocket-pool/rocketpool-go v1.8.3-0.20240618173422-783b8668f5b4 github.com/rocket-pool/smartnode v1.13.6 - github.com/shopspring/decimal v1.3.1 + github.com/shopspring/decimal v1.4.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d @@ -69,26 +72,28 @@ require ( github.com/wealdtech/go-eth2-types/v2 v2.8.2 github.com/wealdtech/go-eth2-util v1.8.0 github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/crypto v0.19.0 + go.uber.org/atomic v1.11.0 + golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20240213143201-ec583247a57a golang.org/x/sync v0.6.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.18.0 - google.golang.org/api v0.164.0 - google.golang.org/protobuf v1.32.0 + google.golang.org/api v0.170.0 + google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v2 v2.4.0 ) require ( - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go v0.112.1 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/firestore v1.14.0 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - cloud.google.com/go/longrunning v0.5.4 // indirect - cloud.google.com/go/storage v1.36.0 // indirect + cloud.google.com/go/firestore v1.15.0 // indirect + cloud.google.com/go/iam v1.1.7 // indirect + cloud.google.com/go/longrunning v0.5.5 // indirect + cloud.google.com/go/storage v1.40.0 // indirect github.com/ClickHouse/ch-go v0.58.2 // indirect + github.com/MicahParks/keyfunc v1.9.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 // indirect github.com/ajg/form v1.5.1 // indirect @@ -96,13 +101,18 @@ require ( github.com/alessio/shellescape v1.4.1 // indirect github.com/andybalholm/brotli v1.0.6 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 // indirect 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect github.com/aws/smithy-go v1.20.0 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -139,6 +149,9 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/swag v0.22.9 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/goccy/go-yaml v1.9.5 // indirect @@ -149,7 +162,7 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect github.com/herumi/bls-eth-go-binary v1.31.0 // indirect @@ -178,6 +191,7 @@ require ( github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/jbenet/goprocess v0.1.4 // indirect + github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect @@ -241,29 +255,30 @@ require ( github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect - go.opentelemetry.io/otel v1.23.0 // indirect - go.opentelemetry.io/otel/metric v1.23.0 // indirect - go.opentelemetry.io/otel/trace v1.23.0 // indirect - go.uber.org/atomic v1.11.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/oauth2 v0.17.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/appengine v1.6.8 // indirect - 
google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/grpc v1.62.0 // indirect + google.golang.org/appengine/v2 v2.0.2 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 // indirect + google.golang.org/grpc v1.62.1 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect moul.io/http2curl/v2 v2.3.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) replace github.com/wealdtech/go-merkletree v1.0.1-0.20190605192610-2bb163c2ea2a => github.com/rocket-pool/go-merkletree v1.0.1-0.20220406020931-c262d9b976dd diff --git a/backend/go.sum b/backend/go.sum index 7d41b5693..481c0f1c4 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -1,26 +1,26 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= cloud.google.com/go/bigtable v1.21.0 h1:BFN4jhkA9ULYYV2Ug7AeOtetVLnN2jKuIq5TcRc5C38= cloud.google.com/go/bigtable v1.21.0/go.mod h1:V0sYNRtk0dgAKjyRr/MyBpHpSXqh+9P39euf820EZ74= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw= -cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= +cloud.google.com/go/firestore v1.15.0 h1:/k8ppuWOtNuDHt2tsRV42yI21uaGnKDEQnRFeBpbFF8= +cloud.google.com/go/firestore v1.15.0/go.mod h1:GWOxFXcv8GZUtYpWHw/w6IuYNux/BtmeVTMmjrm4yhk= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= +cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= cloud.google.com/go/secretmanager v1.11.5 h1:82fpF5vBBvu9XW4qj0FU2C6qVMtj1RM/XHwKXUEAfYY= cloud.google.com/go/secretmanager v1.11.5/go.mod 
h1:eAGv+DaCHkeVyQi0BeXgAHOU0RdrMeZIASKc+S7VqH4= -cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= -cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= -firebase.google.com/go v3.13.0+incompatible h1:3TdYC3DDi6aHn20qoRkxwGqNgdjtblwVAyRLQwGn/+4= -firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= +firebase.google.com/go/v4 v4.14.1 h1:4qiUETaFRWoFGE1XP5VbcEdtPX93Qs+8B/7KvP2825g= +firebase.google.com/go/v4 v4.14.1/go.mod h1:fgk2XshgNDEKaioKco+AouiegSI9oTWVqRaBdTTGBoM= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -37,6 +37,8 @@ github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21 h1:HcdvlzaQ4CJfH7xbfJZ3ZHN//BTEpId46iKEMuP3wHE= github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21/go.mod h1:7PODFS++oNZ6khojmPBvkrDeFO/hrc3jmvWvQAOXorw= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/MicahParks/keyfunc v1.9.0 h1:lhKd5xrFHLNOWrDc4Tyb/Q1AJ4LCzQ48GVJyVIID3+o= +github.com/MicahParks/keyfunc v1.9.0/go.mod h1:IdnCilugA0O/99dW+/MkvlyrsX8+L8+x95xuVNtM5jw= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -69,8 +71,11 @@ github.com/aws/aws-sdk-go-v2 v1.25.0 h1:sv7+1JVJxOu/dD/sz/csHX7jFqmP001TIY7aytBW github.com/aws/aws-sdk-go-v2 v1.25.0/go.mod h1:G104G1Aho5WqF+SR3mDIobTABQzpYV0WxMsKxlMggOA= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 h1:2UO6/nT1lCZq1LqM67Oa4tdgP1CvL1sLSxvuD+VrOeE= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0/go.mod h1:5zGj2eA85ClyedTDK+Whsu+w9yimnVIZvhvBKrDquM8= +github.com/aws/aws-sdk-go-v2/config v1.18.45 h1:Aka9bI7n8ysuwPeFdm77nfbyHCAKQ3z9ghB3S/38zes= +github.com/aws/aws-sdk-go-v2/config v1.18.45/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE= github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8= github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 h1:NPs/EqVO+ajwOoq56EfcGKa3L3ruWuazkIw1BqxwOPw= @@ -78,6 +83,8 @@ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0/go.mod h1:D+duLy2ylga 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 h1:ks7KGMVUMoDzcxNWUlEdI+/lokMFD136EL6DWmUOV80= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0/go.mod h1:hL6BWM/d/qz113fVitZjbXR0E+RCTU1+x+1Idyn5NgE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0 h1:TkbRExyKSVHELwG9gz2+gql37jjec2R5vus9faTomwE= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0/go.mod h1:T3/9xMKudHhnj8it5EqIrhvv11tVZqWYkKcot+BFStc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 h1:a33HuFlO0KsveiP90IUJh8Xr/cx9US2PqkSroaLc+o8= @@ -91,8 +98,11 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 h1:l5puwOHr7IxECu github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0/go.mod h1:Oov79flWa/n7Ni+lQC3z+VM7PoRM47omRqbJU9B5Y7E= github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0 h1:VfU15izXQjz4m9y1DkbY79iylIiuPwWtrram4cSpWEI= github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0/go.mod h1:1o/W6JFUuREj2ExoQ21vHJgO7wakvjhol91M9eknFgs= +github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k= github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg= +github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU= github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ= github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.20.0 h1:6+kZsCXZwKxZS9RfISnPc4EXlHoyAkm2hPuM8X2BrrQ= @@ -285,6 +295,14 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= +github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= @@ -323,6 +341,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf 
v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -334,6 +353,7 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -345,8 +365,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -390,8 +410,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -419,6 +439,8 @@ 
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/herumi/bls-eth-go-binary v1.31.0 h1:9eeW3EA4epCb7FIHt2luENpAW69MvKGL5jieHlBiP+w= github.com/herumi/bls-eth-go-binary v1.31.0/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f h1:7LYC+Yfkj3CTRcShK0KOL/w6iTiKyqqBA9a41Wnggw8= @@ -562,6 +584,7 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -850,6 +873,8 @@ github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9Nz github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -979,18 +1004,18 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= -go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= -go.opentelemetry.io/otel/metric v1.23.0 
h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= -go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= -go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1032,8 +1057,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -1070,12 +1095,13 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1127,13 +1153,13 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1175,28 +1201,30 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.164.0 h1:of5G3oE2WRMVb2yoWKME4ZP8y8zpUKC6bMhxDr8ifyk= -google.golang.org/api v0.164.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o= +google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= +google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= google.golang.org/appengine 
v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/appengine/v2 v2.0.2 h1:MSqyWy2shDLwG7chbwBJ5uMyw6SNqJzhJHNDwYB0Akk= +google.golang.org/appengine/v2 v2.0.2/go.mod h1:PkgRUWz4o1XOvbqtWTkBtCitEJ5Tp4HoVEdMMYQR/8E= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe h1:0poefMBYvYbs7g5UkjS6HcxBPaTRAmznle9jnxYoAI8= -google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 h1:9IZDv+/GcI6u+a4jRFRLxQs0RUCfavGfoOgEW6jpkI0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= -google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1209,8 +1237,8 @@ google.golang.org/protobuf v1.25.0/go.mod 
h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1284,5 +1312,5 @@ rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/backend/pkg/api/api_test.go b/backend/pkg/api/api_test.go index c422bf17f..8d5423fa5 100644 --- a/backend/pkg/api/api_test.go +++ b/backend/pkg/api/api_test.go @@ -9,12 +9,14 @@ import ( "net/http/httptest" "os" "os/exec" + "slices" "sort" "testing" "time" embeddedpostgres "github.com/fergusstrange/embedded-postgres" "github.com/gavv/httpexpect/v2" + "github.com/go-openapi/spec" "github.com/gobitfly/beaconchain/pkg/api" dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" api_types "github.com/gobitfly/beaconchain/pkg/api/types" @@ -25,6 +27,7 @@ import ( "github.com/jmoiron/sqlx" "github.com/pressly/goose/v3" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/crypto/bcrypt" ) @@ -111,6 +114,16 @@ func setup() error { return fmt.Errorf("error inserting user 2: %w", err) } + // insert dummy api weight for testing + _, err = tempDb.Exec(` + INSERT INTO api_weights (bucket, endpoint, method, params, weight, valid_from) + VALUES ($1, $2, $3, $4, $5, TO_TIMESTAMP($6))`, + "default", "/api/v2/test-ratelimit", "GET", "", 2, time.Now().Unix(), + ) + if err != nil { + return fmt.Errorf("error inserting api weight: %w", err) + } + cfg := &types.Config{} err = utils.ReadConfig(cfg, *configPath) if err != nil { @@ -135,11 +148,14 @@ func setup() error { log.InfoWithFields(log.Fields{"config": *configPath, "version": version.Version, "commit": version.GitCommit, "chainName": utils.Config.Chain.ClConfig.ConfigName}, "starting") log.Info("initializing data access service") - dataAccessor = dataaccess.NewDataAccessService(cfg) - dataAccessor.StartDataAccessServices() + dataAccessService := dataaccess.NewDataAccessService(cfg) + dataAccessService.StartDataAccessServices() + dataAccessor = dataAccessService + + dummy := dataaccess.NewDummyService() log.Info("initializing api router") - router := api.NewApiRouter(dataAccessor, cfg) + router := 
api.NewApiRouter(dataAccessor, dummy, cfg) ts = httptest.NewTLSServer(router) jar, err := cookiejar.New(nil) @@ -389,6 +405,7 @@ func TestPublicAndSharedDashboards(t *testing.T) { numValidators := resp.Data.Validators.Exited + resp.Data.Validators.Offline + resp.Data.Validators.Pending + resp.Data.Validators.Online + resp.Data.Validators.Slashed assert.Greater(t, numValidators, uint64(0), "dashboard should contain at least one validator") assert.Greater(t, len(resp.Data.Groups), 0, "dashboard should contain at least one group") + assert.Greater(t, resp.Data.Network, uint64(0), "dashboard should contain a network id greater than 0") }) t.Run(fmt.Sprintf("[%s]: test group summary", dashboardId.id), func(t *testing.T) { @@ -469,3 +486,33 @@ func TestPublicAndSharedDashboards(t *testing.T) { }) } } + +func TestApiDoc(t *testing.T) { + e := httpexpect.WithConfig(getExpectConfig(t, ts)) + + t.Run("test api doc json", func(t *testing.T) { + resp := spec.Swagger{} + e.GET("/api/v2/docs/swagger.json"). + Expect(). + Status(http.StatusOK).JSON().Decode(&resp) + + assert.Equal(t, "/api/v2", resp.BasePath, "swagger base path should be '/api/v2'") + require.NotNil(t, resp.Paths, "swagger paths should not be nil") + assert.NotEqual(t, 0, len(resp.Paths.Paths), "swagger paths should not be empty") + assert.NotEqual(t, 0, len(resp.Definitions), "swagger definitions should not be empty") + assert.NotEqual(t, 0, len(resp.Host), "swagger host should not be empty") + }) + + t.Run("test api ratelimit weights endpoint", func(t *testing.T) { + resp := api_types.InternalGetRatelimitWeightsResponse{} + e.GET("/api/i/ratelimit-weights"). + Expect(). + Status(http.StatusOK).JSON().Decode(&resp) + + assert.GreaterOrEqual(t, len(resp.Data), 1, "ratelimit weights should contain at least one entry") + testEndpointIndex := slices.IndexFunc(resp.Data, func(item api_types.ApiWeightItem) bool { + return item.Endpoint == "/api/v2/test-ratelimit" + }) + assert.GreaterOrEqual(t, testEndpointIndex, 0, "ratelimit weights should contain an entry for /api/v2/test-ratelimit") + }) +} diff --git a/backend/pkg/api/auth.go b/backend/pkg/api/auth.go index dc8a62814..13c4d860d 100644 --- a/backend/pkg/api/auth.go +++ b/backend/pkg/api/auth.go @@ -13,8 +13,10 @@ import ( "github.com/gorilla/csrf" ) +var day time.Duration = time.Hour * 24 +var sessionDuration time.Duration = day * 365 + func newSessionManager(cfg *types.Config) *scs.SessionManager { - // TODO: replace redis with user db down the line (or replace sessions with oauth2) pool := &redis.Pool{ MaxIdle: 10, Dial: func() (redis.Conn, error) { @@ -23,7 +25,7 @@ func newSessionManager(cfg *types.Config) *scs.SessionManager { } scs := scs.New() - scs.Lifetime = time.Hour * 24 * 7 + scs.Lifetime = sessionDuration scs.Cookie.Name = "session_id" scs.Cookie.HttpOnly = true scs.Cookie.Persist = true @@ -42,6 +44,19 @@ func newSessionManager(cfg *types.Config) *scs.SessionManager { return scs } +// returns a middleware that extends the session expiration if the session is older than 1 day +func getSlidingSessionExpirationMiddleware(scs *scs.SessionManager) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + deadline := scs.Deadline(r.Context()) // unauthenticated requests have deadline set to now+sessionDuration + if time.Until(deadline) < sessionDuration-day { + scs.SetDeadline(r.Context(), time.Now().Add(sessionDuration).UTC()) // setting to utc because library also does that internally + next.ServeHTTP(w, r) + }) + } +}
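The sliding-expiration middleware above only refreshes a session's deadline once more than a day of its lifetime has been consumed, so the session store is not rewritten on every request. A minimal, self-contained sketch of how such a middleware can be mounted with the scs v2 library used here (the mux, route, and port are illustrative, not part of this changeset):

package main

import (
	"net/http"
	"time"

	"github.com/alexedwards/scs/v2"
)

const day = 24 * time.Hour
const sessionDuration = 365 * day

// slidingExpiration pushes the deadline back out to the full lifetime, but
// only after at least one day of it has been used up, avoiding a store write
// on every single request.
func slidingExpiration(sm *scs.SessionManager) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if time.Until(sm.Deadline(r.Context())) < sessionDuration-day {
				sm.SetDeadline(r.Context(), time.Now().Add(sessionDuration).UTC())
			}
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	sm := scs.New()
	sm.Lifetime = sessionDuration
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte("ok")) })
	// LoadAndSave must be the outer wrapper: it loads session data into the
	// request context before the sliding middleware reads the deadline, and it
	// persists the refreshed deadline when writing the response.
	_ = http.ListenAndServe(":8080", sm.LoadAndSave(slidingExpiration(sm)(mux)))
}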
+ // returns gorilla/csrf middleware with the given config settings func getCsrfProtectionMiddleware(cfg *types.Config) func(http.Handler) http.Handler { csrfBytes, err := hex.DecodeString(cfg.Frontend.CsrfAuthKey) diff --git a/backend/pkg/api/data_access/app.go b/backend/pkg/api/data_access/app.go deleted file mode 100644 index 812549b1a..000000000 --- a/backend/pkg/api/data_access/app.go +++ /dev/null @@ -1,120 +0,0 @@ -package dataaccess - -import ( - "context" - "database/sql" - "fmt" - "time" - - t "github.com/gobitfly/beaconchain/pkg/api/types" - "github.com/gobitfly/beaconchain/pkg/commons/utils" - "github.com/gobitfly/beaconchain/pkg/userservice" - "github.com/pkg/errors" -) - -type AppRepository interface { - GetUserIdByRefreshToken(claimUserID, claimAppID, claimDeviceID uint64, hashedRefreshToken string) (uint64, error) - MigrateMobileSession(oldHashedRefreshToken, newHashedRefreshToken, deviceID, deviceName string) error - AddUserDevice(userID uint64, hashedRefreshToken string, deviceID, deviceName string, appID uint64) error - GetAppDataFromRedirectUri(callback string) (*t.OAuthAppData, error) - AddMobileNotificationToken(userID uint64, deviceID, notifyToken string) error - GetAppSubscriptionCount(userID uint64) (uint64, error) - AddMobilePurchase(tx *sql.Tx, userID uint64, paymentDetails t.MobileSubscription, verifyResponse *userservice.VerifyResponse, extSubscriptionId string) error - GetLatestBundleForNativeVersion(ctx context.Context, nativeVersion uint64) (*t.MobileAppBundleStats, error) - IncrementBundleDeliveryCount(ctx context.Context, bundleVerison uint64) error -} - -// GetUserIdByRefreshToken basically used to confirm the claimed user id with the refresh token. Returns the userId if successful -func (d *DataAccessService) GetUserIdByRefreshToken(claimUserID, claimAppID, claimDeviceID uint64, hashedRefreshToken string) (uint64, error) { - if hashedRefreshToken == "" { // sanity - return 0, errors.New("empty refresh token") - } - var userID uint64 - err := d.userWriter.Get(&userID, - `SELECT user_id FROM users_devices WHERE user_id = $1 AND - refresh_token = $2 AND app_id = $3 AND id = $4 AND active = true`, claimUserID, hashedRefreshToken, claimAppID, claimDeviceID) - if errors.Is(err, sql.ErrNoRows) { - return userID, fmt.Errorf("%w: user not found via refresh token", ErrNotFound) - } - return userID, err -} - -func (d *DataAccessService) MigrateMobileSession(oldHashedRefreshToken, newHashedRefreshToken, deviceID, deviceName string) error { - result, err := d.userWriter.Exec("UPDATE users_devices SET refresh_token = $2, device_identifier = $3, device_name = $4 WHERE refresh_token = $1", oldHashedRefreshToken, newHashedRefreshToken, deviceID, deviceName) - if err != nil { - return errors.Wrap(err, "Error updating refresh token") - } - - rowsAffected, err := result.RowsAffected() - if err != nil { - return errors.Wrap(err, "Error getting rows affected") - } - - if rowsAffected != 1 { - return errors.New(fmt.Sprintf("illegal number of rows affected, expected 1 got %d", rowsAffected)) - } - - return err -} - -func (d *DataAccessService) GetAppDataFromRedirectUri(callback string) (*t.OAuthAppData, error) { - data := t.OAuthAppData{} - err := d.userWriter.Get(&data, "SELECT id, app_name, redirect_uri, active, owner_id FROM oauth_apps WHERE active = true AND redirect_uri = $1", callback) - return &data, err -} - -func (d *DataAccessService) AddUserDevice(userID uint64, hashedRefreshToken string, deviceID, deviceName string,
appID uint64) error { - _, err := d.userWriter.Exec("INSERT INTO users_devices (user_id, refresh_token, device_identifier, device_name, app_id, created_ts) VALUES($1, $2, $3, $4, $5, 'NOW()') ON CONFLICT DO NOTHING", - userID, hashedRefreshToken, deviceID, deviceName, appID, - ) - return err -} - -func (d *DataAccessService) AddMobileNotificationToken(userID uint64, deviceID, notifyToken string) error { - _, err := d.userWriter.Exec("UPDATE users_devices SET notification_token = $1 WHERE user_id = $2 AND device_identifier = $3;", - notifyToken, userID, deviceID, - ) - if errors.Is(err, sql.ErrNoRows) { - return fmt.Errorf("%w: user mobile device not found", ErrNotFound) - } - return err -} - -func (d *DataAccessService) GetAppSubscriptionCount(userID uint64) (uint64, error) { - var count uint64 - err := d.userReader.Get(&count, "SELECT COUNT(receipt) FROM users_app_subscriptions WHERE user_id = $1", userID) - return count, err -} - -func (d *DataAccessService) AddMobilePurchase(tx *sql.Tx, userID uint64, paymentDetails t.MobileSubscription, verifyResponse *userservice.VerifyResponse, extSubscriptionId string) error { - now := time.Now() - nowTs := now.Unix() - receiptHash := utils.HashAndEncode(verifyResponse.Receipt) - - query := `INSERT INTO users_app_subscriptions - (user_id, product_id, price_micros, currency, created_at, updated_at, validate_remotely, active, store, receipt, expires_at, reject_reason, receipt_hash, subscription_id) - VALUES($1, $2, $3, $4, TO_TIMESTAMP($5), TO_TIMESTAMP($6), $7, $8, $9, $10, TO_TIMESTAMP($11), $12, $13, $14) - ON CONFLICT(receipt_hash) DO UPDATE SET product_id = $2, active = $7, updated_at = TO_TIMESTAMP($5);` - var err error - if tx == nil { - _, err = d.userWriter.Exec(query, - userID, verifyResponse.ProductID, paymentDetails.PriceMicros, paymentDetails.Currency, nowTs, nowTs, verifyResponse.Valid, verifyResponse.Valid, paymentDetails.Transaction.Type, verifyResponse.Receipt, verifyResponse.ExpirationDate, verifyResponse.RejectReason, receiptHash, extSubscriptionId, - ) - } else { - _, err = tx.Exec(query, - userID, verifyResponse.ProductID, paymentDetails.PriceMicros, paymentDetails.Currency, nowTs, nowTs, verifyResponse.Valid, verifyResponse.Valid, paymentDetails.Transaction.Type, verifyResponse.Receipt, verifyResponse.ExpirationDate, verifyResponse.RejectReason, receiptHash, extSubscriptionId, - ) - } - - return err -} - -func (d *DataAccessService) GetLatestBundleForNativeVersion(ctx context.Context, nativeVersion uint64) (*t.MobileAppBundleStats, error) { - // @TODO data access - return d.dummy.GetLatestBundleForNativeVersion(ctx, nativeVersion) -} - -func (d *DataAccessService) IncrementBundleDeliveryCount(ctx context.Context, bundleVerison uint64) error { - // @TODO data access - return d.dummy.IncrementBundleDeliveryCount(ctx, bundleVerison) -} diff --git a/backend/pkg/api/data_access/archiver.go b/backend/pkg/api/data_access/archiver.go index 09fd605a3..5d6d80aa1 100644 --- a/backend/pkg/api/data_access/archiver.go +++ b/backend/pkg/api/data_access/archiver.go @@ -27,7 +27,7 @@ func (d *DataAccessService) GetValidatorDashboardsCountInfo(ctx context.Context) } var dbReturn []DashboardInfo - err := d.readerDb.Select(&dbReturn, ` + err := d.readerDb.SelectContext(ctx, &dbReturn, ` WITH dashboards_groups AS (SELECT dashboard_id,
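Switching from Select to SelectContext here (and threading ctx through GetBlockHeightAt below) lets a query be cancelled together with the request that issued it, instead of running to completion against the reader database. A small sketch of the pattern with sqlx, under the assumption of a reachable Postgres DSN; the table name is illustrative, not from this changeset:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	db := sqlx.MustConnect("postgres", "postgres://localhost/example?sslmode=disable") // illustrative DSN
	// The deadline (or a client disconnect, when ctx comes from an *http.Request)
	// propagates into the driver, which cancels the running statement.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	var ids []uint64
	// SelectContext instead of Select: the query is abandoned as soon as ctx is done.
	err := db.SelectContext(ctx, &ids, `SELECT dashboard_id FROM dashboards_example`)
	fmt.Println(len(ids), err)
}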
diff --git a/backend/pkg/api/data_access/block.go b/backend/pkg/api/data_access/block.go index 65e63b408..7410c765f 100644 --- a/backend/pkg/api/data_access/block.go +++ b/backend/pkg/api/data_access/block.go @@ -74,7 +74,7 @@ func (d *DataAccessService) GetBlockBlobs(ctx context.Context, chainId, block ui } func (d *DataAccessService) GetSlot(ctx context.Context, chainId, slot uint64) (*t.BlockSummary, error) { - block, err := d.GetBlockHeightAt(slot) + block, err := d.GetBlockHeightAt(ctx, slot) if err != nil { return nil, err } @@ -82,7 +82,7 @@ func (d *DataAccessService) GetSlot(ctx context.Context, chainId, slot uint64) ( } func (d *DataAccessService) GetSlotOverview(ctx context.Context, chainId, slot uint64) (*t.BlockOverview, error) { - block, err := d.GetBlockHeightAt(slot) + block, err := d.GetBlockHeightAt(ctx, slot) if err != nil { return nil, err } @@ -90,7 +90,7 @@ func (d *DataAccessService) GetSlotOverview(ctx context.Context, chainId, slot u } func (d *DataAccessService) GetSlotTransactions(ctx context.Context, chainId, slot uint64) ([]t.BlockTransactionTableRow, error) { - block, err := d.GetBlockHeightAt(slot) + block, err := d.GetBlockHeightAt(ctx, slot) if err != nil { return nil, err } @@ -98,7 +98,7 @@ func (d *DataAccessService) GetSlotTransactions(ctx context.Context, chainId, sl } func (d *DataAccessService) GetSlotVotes(ctx context.Context, chainId, slot uint64) ([]t.BlockVoteTableRow, error) { - block, err := d.GetBlockHeightAt(slot) + block, err := d.GetBlockHeightAt(ctx, slot) if err != nil { return nil, err } @@ -106,7 +106,7 @@ func (d *DataAccessService) GetSlotVotes(ctx context.Context, chainId, slot uint } func (d *DataAccessService) GetSlotAttestations(ctx context.Context, chainId, slot uint64) ([]t.BlockAttestationTableRow, error) { - block, err := d.GetBlockHeightAt(slot) + block, err := d.GetBlockHeightAt(ctx, slot) if err != nil { return nil, err } @@ -114,7 +114,7 @@ func (d *DataAccessService) GetSlotAttestations(ctx context.Context, chainId, sl } func (d *DataAccessService) GetSlotWithdrawals(ctx context.Context, chainId, slot uint64) ([]t.BlockWithdrawalTableRow, error) { - block, err := d.GetBlockHeightAt(slot) + block, err := d.GetBlockHeightAt(ctx, slot) if err != nil { return nil, err } @@ -122,7 +122,7 @@ func (d *DataAccessService) GetSlotWithdrawals(ctx context.Context, chainId, slo } func (d *DataAccessService) GetSlotBlsChanges(ctx context.Context, chainId, slot uint64) ([]t.BlockBlsChangeTableRow, error) { - block, err := d.GetBlockHeightAt(slot) + block, err := d.GetBlockHeightAt(ctx, slot) if err != nil { return nil, err } @@ -130,7 +130,7 @@ func (d *DataAccessService) GetSlotBlsChanges(ctx context.Context, chainId, slot } func (d *DataAccessService) GetSlotVoluntaryExits(ctx context.Context, chainId, slot uint64) ([]t.BlockVoluntaryExitTableRow, error) { - block, err := d.GetBlockHeightAt(slot) + block, err := d.GetBlockHeightAt(ctx, slot) if err != nil { return nil, err } @@ -138,7 +138,7 @@ func (d *DataAccessService) GetSlotVoluntaryExits(ctx context.Context, chainId, } func (d *DataAccessService) GetSlotBlobs(ctx context.Context, chainId, slot uint64) ([]t.BlockBlobTableRow, error) { - block, err := d.GetBlockHeightAt(slot) + block, err := d.GetBlockHeightAt(ctx, slot) if err != nil { return nil, err } diff --git a/backend/pkg/api/data_access/clients.go b/backend/pkg/api/data_access/clients.go new file mode 100644 index 000000000..c3fa5d79b --- /dev/null +++ b/backend/pkg/api/data_access/clients.go @@ -0,0 +1,90 @@ +package dataaccess + +import "github.com/gobitfly/beaconchain/pkg/api/types" + +type ClientRepository interface { + GetAllClients() ([]types.ClientInfo, error) +} + +func (d *DataAccessService) GetAllClients() ([]types.ClientInfo,
error) { + // TODO @recy21 + // probably should load the clients into mem from some config when the service is created + + return []types.ClientInfo{ + // execution_layer + { + Id: 0, + Name: "Geth", + DbName: "geth", + Category: "execution_layer", + }, + { + Id: 1, + Name: "Nethermind", + DbName: "nethermind", + Category: "execution_layer", + }, + { + Id: 2, + Name: "Besu", + DbName: "besu", + Category: "execution_layer", + }, + { + Id: 3, + Name: "Erigon", + DbName: "erigon", + Category: "execution_layer", + }, + { + Id: 4, + Name: "Reth", + DbName: "reth", + Category: "execution_layer", + }, + // consensus_layer + { + Id: 5, + Name: "Teku", + DbName: "teku", + Category: "consensus_layer", + }, + { + Id: 6, + Name: "Prysm", + DbName: "prysm", + Category: "consensus_layer", + }, + { + Id: 7, + Name: "Nimbus", + DbName: "nimbus", + Category: "consensus_layer", + }, + { + Id: 8, + Name: "Lighthouse", + DbName: "lighthouse", + Category: "consensus_layer", + }, + { + Id: 9, + Name: "Lodestar", + DbName: "lodestar", + Category: "consensus_layer", + }, + // other + { + Id: 10, + Name: "Rocketpool Smart Node", + DbName: "rocketpool", + Category: "other", + }, + { + Id: 11, + Name: "MEV-Boost", + DbName: "mev-boost", + Category: "other", + }, + }, nil +} diff --git a/backend/pkg/api/data_access/data_access.go b/backend/pkg/api/data_access/data_access.go index c4b3b8338..4f45f3fab 100644 --- a/backend/pkg/api/data_access/data_access.go +++ b/backend/pkg/api/data_access/data_access.go @@ -13,7 +13,6 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/types" - "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/jmoiron/sqlx" "github.com/pkg/errors" ) @@ -22,6 +21,7 @@ type DataAccessor interface { ValidatorDashboardRepository SearchRepository NetworkRepository + ClientRepository UserRepository AppRepository NotificationsRepository @@ -29,21 +29,21 @@ type DataAccessor interface { BlockRepository ArchiverRepository ProtocolRepository + RatelimitRepository HealthzRepository + MachineRepository - StartDataAccessServices() Close() - GetLatestFinalizedEpoch() (uint64, error) - GetLatestSlot() (uint64, error) - GetLatestBlock() (uint64, error) - GetBlockHeightAt(slot uint64) (uint64, error) - GetLatestExchangeRates() ([]t.EthConversionRate, error) + GetLatestFinalizedEpoch(ctx context.Context) (uint64, error) + GetLatestSlot(ctx context.Context) (uint64, error) + GetLatestBlock(ctx context.Context) (uint64, error) + GetLatestExchangeRates(ctx context.Context) ([]t.EthConversionRate, error) GetProductSummary(ctx context.Context) (*t.ProductSummary, error) GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) - GetValidatorsFromSlices(indices []uint64, publicKeys []string) ([]t.VDBValidator, error) + GetValidatorsFromSlices(ctx context.Context, indices []uint64, publicKeys []string) ([]t.VDBValidator, error) } type DataAccessService struct { @@ -60,6 +60,8 @@ type DataAccessService struct { persistentRedisDbClient *redis.Client services *services.Services + + skipServiceInitWait bool } // ensure DataAccessService pointer implements DataAccessor @@ -87,7 +89,9 @@ func NewDataAccessService(cfg *types.Config) *DataAccessService { func createDataAccessService(cfg *types.Config) *DataAccessService { dataAccessService := DataAccessService{ - dummy: NewDummyService()} + dummy: NewDummyService(), + skipServiceInitWait: cfg.SkipDataAccessServiceInitWait, + } // Initialize the database wg := 
&sync.WaitGroup{} @@ -203,7 +207,7 @@ func createDataAccessService(cfg *types.Config) *DataAccessService { wg.Add(1) go func() { defer wg.Done() - bt, err := db.InitBigtable(utils.Config.Bigtable.Project, utils.Config.Bigtable.Instance, fmt.Sprintf("%d", utils.Config.Chain.ClConfig.DepositChainID), utils.Config.RedisCacheEndpoint) + bt, err := db.InitBigtable(cfg.Bigtable.Project, cfg.Bigtable.Instance, fmt.Sprintf("%d", cfg.Chain.ClConfig.DepositChainID), cfg.RedisCacheEndpoint) if err != nil { log.Fatal(err, "error connecting to bigtable", 0) } @@ -211,11 +215,11 @@ func createDataAccessService(cfg *types.Config) *DataAccessService { }() // Initialize the tiered cache (redis) - if utils.Config.TieredCacheProvider == "redis" || len(utils.Config.RedisCacheEndpoint) != 0 { + if cfg.TieredCacheProvider == "redis" || len(cfg.RedisCacheEndpoint) != 0 { wg.Add(1) go func() { defer wg.Done() - cache.MustInitTieredCache(utils.Config.RedisCacheEndpoint) + cache.MustInitTieredCache(cfg.RedisCacheEndpoint) log.Infof("tiered Cache initialized, latest finalized epoch: %v", cache.LatestFinalizedEpoch.Get()) }() } @@ -225,7 +229,7 @@ func createDataAccessService(cfg *types.Config) *DataAccessService { go func() { defer wg.Done() rdc := redis.NewClient(&redis.Options{ - Addr: utils.Config.RedisSessionStoreEndpoint, + Addr: cfg.RedisSessionStoreEndpoint, ReadTimeout: time.Second * 60, }) @@ -237,7 +241,7 @@ func createDataAccessService(cfg *types.Config) *DataAccessService { wg.Wait() - if utils.Config.TieredCacheProvider != "redis" { + if cfg.TieredCacheProvider != "redis" { log.Fatal(fmt.Errorf("no cache provider set, please set TieredCacheProvider (example redis)"), "", 0) } @@ -249,8 +253,15 @@ func (d *DataAccessService) StartDataAccessServices() { // Create the services d.services = services.NewServices(d.readerDb, d.writerDb, d.alloyReader, d.alloyWriter, d.clickhouseReader, d.bigtable, d.persistentRedisDbClient) + // Initialize repositories + d.registerNotificationInterfaceTypes() // Initialize the services - d.services.InitServices() + + if d.skipServiceInitWait { + go d.services.InitServices() + } else { + d.services.InitServices() + } } func (d *DataAccessService) Close() { diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index 36247ea76..f1063084a 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -6,12 +6,18 @@ import ( "fmt" "math/rand/v2" "reflect" + "slices" + "sync" "time" + mathrand "math/rand" + "github.com/go-faker/faker/v4" + "github.com/go-faker/faker/v4/pkg/interfaces" "github.com/go-faker/faker/v4/pkg/options" "github.com/gobitfly/beaconchain/pkg/api/enums" t "github.com/gobitfly/beaconchain/pkg/api/types" + commontypes "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/userservice" "github.com/shopspring/decimal" ) @@ -43,16 +49,23 @@ func NewDummyService() *DummyService { return &DummyService{} } -// generate random decimal.Decimal, should result in somewhere around 0.001 ETH (+/- a few decimal places) in Wei +// generate random decimal.Decimal, result is between 0.001 and 1000 GWei (returned in Wei) func randomEthDecimal() decimal.Decimal { - decimal, _ := decimal.NewFromString(fmt.Sprintf("%d00000000000", rand.Int64N(10000000))) //nolint:gosec + decimal, _ := decimal.NewFromString(fmt.Sprintf("%d000000", rand.Int64N(1000000)+1)) //nolint:gosec return decimal } +var mockLock sync.Mutex = sync.Mutex{} + // must pass a pointer to the data -func commonFakeData(a interface{}) error { - // TODO fake decimal.Decimal - return faker.FakeData(a, options.WithRandomMapAndSliceMaxSize(5)) +func populateWithFakeData(ctx context.Context, a interface{}) error { + if seed, ok := ctx.Value(t.CtxMockSeedKey).(int64); ok { + mockLock.Lock() + defer mockLock.Unlock() + faker.SetRandomSource(mathrand.NewSource(seed)) + } + + return faker.FakeData(a, options.WithRandomMapAndSliceMaxSize(10), options.WithRandomFloatBoundaries(interfaces.RandomFloatBoundary{Start: 0, End: 1})) }
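Because populateWithFakeData re-seeds faker from a value carried on the context (guarded by mockLock, since the faker source is global), two calls that share a seed produce identical fake payloads, which makes mock-mode responses reproducible. A minimal sketch of the calling side, using the CtxMockSeedKey context key from this changeset:

package main

import (
	"context"
	"fmt"

	dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access"
	t "github.com/gobitfly/beaconchain/pkg/api/types"
)

func main() {
	dummy := dataaccess.NewDummyService()

	// Same seed on the context => the faker source is reset to the same state
	// before each call, so both draws should return the same value.
	ctx := context.WithValue(context.Background(), t.CtxMockSeedKey, int64(42))
	a, _ := dummy.GetLatestSlot(ctx)
	b, _ := dummy.GetLatestSlot(ctx)
	fmt.Println(a == b) // expected: true

	// Without a seed, the shared global source is used and results vary.
	c, _ := dummy.GetLatestSlot(context.Background())
	fmt.Println(c)
}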
func (d *DummyService) StartDataAccessServices() { @@ -60,25 +73,25 @@ } // used for any non-pointer data, e.g. all primitive types or slices -func getDummyData[T any]() (T, error) { +func getDummyData[T any](ctx context.Context) (T, error) { var r T - err := commonFakeData(&r) + err := populateWithFakeData(ctx, &r) return r, err } // used for any struct data that should be returned as a pointer -func getDummyStruct[T any]() (*T, error) { +func getDummyStruct[T any](ctx context.Context) (*T, error) { var r T - err := commonFakeData(&r) + err := populateWithFakeData(ctx, &r) return &r, err } // used for any table data that should be returned with paging -func getDummyWithPaging[T any]() ([]T, *t.Paging, error) { +func getDummyWithPaging[T any](ctx context.Context) ([]T, *t.Paging, error) { r := []T{} p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) + _ = populateWithFakeData(ctx, &r) + err := populateWithFakeData(ctx, &p) return r, &p, err } @@ -86,32 +99,28 @@ func (d *DummyService) Close() { // nothing to close } -func (d *DummyService) GetLatestSlot() (uint64, error) { - return getDummyData[uint64]() -} - -func (d *DummyService) GetLatestFinalizedEpoch() (uint64, error) { - return getDummyData[uint64]() +func (d *DummyService) GetLatestSlot(ctx context.Context) (uint64, error) { + return getDummyData[uint64](ctx) } -func (d *DummyService) GetLatestBlock() (uint64, error) { - return getDummyData[uint64]() +func (d *DummyService) GetLatestFinalizedEpoch(ctx context.Context) (uint64, error) { + return getDummyData[uint64](ctx) } -func (d *DummyService) GetBlockHeightAt(slot uint64) (uint64, error) { - return getDummyData[uint64]() +func (d *DummyService) GetLatestBlock(ctx context.Context) (uint64, error) { + return getDummyData[uint64](ctx) } -func (d *DummyService) GetLatestExchangeRates() ([]t.EthConversionRate, error) { - return getDummyData[[]t.EthConversionRate]() +func (d *DummyService) GetLatestExchangeRates(ctx context.Context) ([]t.EthConversionRate, error) { + return getDummyData[[]t.EthConversionRate](ctx) } func (d *DummyService) GetUserByEmail(ctx context.Context, email string) (uint64, error) { - return getDummyData[uint64]() + return getDummyData[uint64](ctx) } func (d *DummyService) CreateUser(ctx context.Context, email, password string) (uint64, error) { - return getDummyData[uint64]() + return getDummyData[uint64](ctx) } func (d *DummyService) RemoveUser(ctx context.Context, userId uint64) error { @@ -127,11 +136,11 @@ func (d *DummyService) UpdateUserPassword(ctx context.Context, userId uint64, pa } func (d *DummyService) GetEmailConfirmationTime(ctx context.Context, userId uint64) (time.Time, error) { - return getDummyData[time.Time]() + return getDummyData[time.Time](ctx) } func (d *DummyService) GetPasswordResetTime(ctx context.Context, userId uint64) (time.Time, error) { - return getDummyData[time.Time]() + return getDummyData[time.Time](ctx) } func (d *DummyService) UpdateEmailConfirmationTime(ctx
context.Context, userId uint64) error { @@ -155,66 +164,66 @@ func (d *DummyService) UpdatePasswordResetHash(ctx context.Context, userId uint6 } func (d *DummyService) GetUserInfo(ctx context.Context, userId uint64) (*t.UserInfo, error) { - return getDummyStruct[t.UserInfo]() + return getDummyStruct[t.UserInfo](ctx) } func (d *DummyService) GetUserCredentialInfo(ctx context.Context, userId uint64) (*t.UserCredentialInfo, error) { - return getDummyStruct[t.UserCredentialInfo]() + return getDummyStruct[t.UserCredentialInfo](ctx) } func (d *DummyService) GetUserIdByApiKey(ctx context.Context, apiKey string) (uint64, error) { - return getDummyData[uint64]() + return getDummyData[uint64](ctx) } func (d *DummyService) GetUserIdByConfirmationHash(ctx context.Context, hash string) (uint64, error) { - return getDummyData[uint64]() + return getDummyData[uint64](ctx) } func (d *DummyService) GetUserIdByResetHash(ctx context.Context, hash string) (uint64, error) { - return getDummyData[uint64]() + return getDummyData[uint64](ctx) } func (d *DummyService) GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { - return getDummyStruct[t.ProductSummary]() + return getDummyStruct[t.ProductSummary](ctx) } func (d *DummyService) GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { - return getDummyStruct[t.PremiumPerks]() + return getDummyStruct[t.PremiumPerks](ctx) } func (d *DummyService) GetValidatorDashboardUser(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.DashboardUser, error) { - return getDummyStruct[t.DashboardUser]() + return getDummyStruct[t.DashboardUser](ctx) } func (d *DummyService) GetValidatorDashboardIdByPublicId(ctx context.Context, publicDashboardId t.VDBIdPublic) (*t.VDBIdPrimary, error) { - return getDummyStruct[t.VDBIdPrimary]() + return getDummyStruct[t.VDBIdPrimary](ctx) } func (d *DummyService) GetValidatorDashboardInfo(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.ValidatorDashboard, error) { - r, err := getDummyStruct[t.ValidatorDashboard]() + r, err := getDummyStruct[t.ValidatorDashboard](ctx) // return semi-valid data to not break staging r.IsArchived = false return r, err } func (d *DummyService) GetValidatorDashboardName(ctx context.Context, dashboardId t.VDBIdPrimary) (string, error) { - return getDummyData[string]() + return getDummyData[string](ctx) } -func (d *DummyService) GetValidatorsFromSlices(indices []uint64, publicKeys []string) ([]t.VDBValidator, error) { - return getDummyData[[]t.VDBValidator]() +func (d *DummyService) GetValidatorsFromSlices(ctx context.Context, indices []uint64, publicKeys []string) ([]t.VDBValidator, error) { + return getDummyData[[]t.VDBValidator](ctx) } func (d *DummyService) GetUserDashboards(ctx context.Context, userId uint64) (*t.UserDashboardsData, error) { - return getDummyStruct[t.UserDashboardsData]() + return getDummyStruct[t.UserDashboardsData](ctx) } func (d *DummyService) CreateValidatorDashboard(ctx context.Context, userId uint64, name string, network uint64) (*t.VDBPostReturnData, error) { - return getDummyStruct[t.VDBPostReturnData]() + return getDummyStruct[t.VDBPostReturnData](ctx) } func (d *DummyService) GetValidatorDashboardOverview(ctx context.Context, dashboardId t.VDBId, protocolModes t.VDBProtocolModes) (*t.VDBOverviewData, error) { - return getDummyStruct[t.VDBOverviewData]() + return getDummyStruct[t.VDBOverviewData](ctx) } func (d *DummyService) RemoveValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary) error { @@ -226,7 +235,7 @@ func (d *DummyService) 
RemoveValidatorDashboards(ctx context.Context, dashboardI } func (d *DummyService) UpdateValidatorDashboardArchiving(ctx context.Context, dashboardId t.VDBIdPrimary, archivedReason *enums.VDBArchivedReason) (*t.VDBPostArchivingReturnData, error) { - return getDummyStruct[t.VDBPostArchivingReturnData]() + return getDummyStruct[t.VDBPostArchivingReturnData](ctx) } func (d *DummyService) UpdateValidatorDashboardsArchiving(ctx context.Context, dashboards []t.ArchiverDashboardArchiveReason) error { @@ -234,15 +243,15 @@ func (d *DummyService) UpdateValidatorDashboardsArchiving(ctx context.Context, d } func (d *DummyService) UpdateValidatorDashboardName(ctx context.Context, dashboardId t.VDBIdPrimary, name string) (*t.VDBPostReturnData, error) { - return getDummyStruct[t.VDBPostReturnData]() + return getDummyStruct[t.VDBPostReturnData](ctx) } func (d *DummyService) CreateValidatorDashboardGroup(ctx context.Context, dashboardId t.VDBIdPrimary, name string) (*t.VDBPostCreateGroupData, error) { - return getDummyStruct[t.VDBPostCreateGroupData]() + return getDummyStruct[t.VDBPostCreateGroupData](ctx) } func (d *DummyService) UpdateValidatorDashboardGroup(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, name string) (*t.VDBPostCreateGroupData, error) { - return getDummyStruct[t.VDBPostCreateGroupData]() + return getDummyStruct[t.VDBPostCreateGroupData](ctx) } func (d *DummyService) RemoveValidatorDashboardGroup(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64) error { @@ -253,28 +262,24 @@ func (d *DummyService) GetValidatorDashboardGroupExists(ctx context.Context, das return true, nil } -func (d *DummyService) GetValidatorDashboardExistingValidatorCount(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) (uint64, error) { - return getDummyData[uint64]() -} - func (d *DummyService) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) { - return getDummyData[[]t.VDBPostValidatorsData]() + return getDummyData[[]t.VDBPostValidatorsData](ctx) } func (d *DummyService) AddValidatorDashboardValidatorsByDepositAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) { - return getDummyData[[]t.VDBPostValidatorsData]() + return getDummyData[[]t.VDBPostValidatorsData](ctx) } func (d *DummyService) AddValidatorDashboardValidatorsByWithdrawalAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) { - return getDummyData[[]t.VDBPostValidatorsData]() + return getDummyData[[]t.VDBPostValidatorsData](ctx) } func (d *DummyService) AddValidatorDashboardValidatorsByGraffiti(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, graffiti string, limit uint64) ([]t.VDBPostValidatorsData, error) { - return getDummyData[[]t.VDBPostValidatorsData]() + return getDummyData[[]t.VDBPostValidatorsData](ctx) } func (d *DummyService) GetValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBId, groupId int64, cursor string, colSort t.Sort[enums.VDBManageValidatorsColumn], search string, limit uint64) ([]t.VDBManageValidatorsTableRow, *t.Paging, error) { - return getDummyWithPaging[t.VDBManageValidatorsTableRow]() + return getDummyWithPaging[t.VDBManageValidatorsTableRow](ctx) } func (d *DummyService) RemoveValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, 
validators []t.VDBValidator) error { @@ -282,15 +287,15 @@ func (d *DummyService) RemoveValidatorDashboardValidators(ctx context.Context, d } func (d *DummyService) CreateValidatorDashboardPublicId(ctx context.Context, dashboardId t.VDBIdPrimary, name string, shareGroups bool) (*t.VDBPublicId, error) { - return getDummyStruct[t.VDBPublicId]() + return getDummyStruct[t.VDBPublicId](ctx) } func (d *DummyService) GetValidatorDashboardPublicId(ctx context.Context, publicDashboardId t.VDBIdPublic) (*t.VDBPublicId, error) { - return getDummyStruct[t.VDBPublicId]() + return getDummyStruct[t.VDBPublicId](ctx) } func (d *DummyService) UpdateValidatorDashboardPublicId(ctx context.Context, publicDashboardId t.VDBIdPublic, name string, shareGroups bool) (*t.VDBPublicId, error) { - return getDummyStruct[t.VDBPublicId]() + return getDummyStruct[t.VDBPublicId](ctx) } func (d *DummyService) RemoveValidatorDashboardPublicId(ctx context.Context, publicDashboardId t.VDBIdPublic) error { @@ -301,76 +306,76 @@ func (d *DummyService) GetValidatorDashboardSlotViz(ctx context.Context, dashboa r := struct { Epochs []t.SlotVizEpoch `faker:"slice_len=4"` }{} - err := commonFakeData(&r) + err := populateWithFakeData(ctx, &r) return r.Epochs, err } func (d *DummyService) GetValidatorDashboardSummary(ctx context.Context, dashboardId t.VDBId, period enums.TimePeriod, cursor string, colSort t.Sort[enums.VDBSummaryColumn], search string, limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBSummaryTableRow, *t.Paging, error) { - return getDummyWithPaging[t.VDBSummaryTableRow]() + return getDummyWithPaging[t.VDBSummaryTableRow](ctx) } func (d *DummyService) GetValidatorDashboardGroupSummary(ctx context.Context, dashboardId t.VDBId, groupId int64, period enums.TimePeriod, protocolModes t.VDBProtocolModes) (*t.VDBGroupSummaryData, error) { - return getDummyStruct[t.VDBGroupSummaryData]() + return getDummyStruct[t.VDBGroupSummaryData](ctx) } func (d *DummyService) GetValidatorDashboardSummaryChart(ctx context.Context, dashboardId t.VDBId, groupIds []int64, efficiency enums.VDBSummaryChartEfficiencyType, aggregation enums.ChartAggregation, afterTs uint64, beforeTs uint64) (*t.ChartData[int, float64], error) { - return getDummyStruct[t.ChartData[int, float64]]() + return getDummyStruct[t.ChartData[int, float64]](ctx) } func (d *DummyService) GetValidatorDashboardSummaryValidators(ctx context.Context, dashboardId t.VDBId, groupId int64) (*t.VDBGeneralSummaryValidators, error) { - return getDummyStruct[t.VDBGeneralSummaryValidators]() + return getDummyStruct[t.VDBGeneralSummaryValidators](ctx) } func (d *DummyService) GetValidatorDashboardSyncSummaryValidators(ctx context.Context, dashboardId t.VDBId, groupId int64, period enums.TimePeriod) (*t.VDBSyncSummaryValidators, error) { - return getDummyStruct[t.VDBSyncSummaryValidators]() + return getDummyStruct[t.VDBSyncSummaryValidators](ctx) } func (d *DummyService) GetValidatorDashboardSlashingsSummaryValidators(ctx context.Context, dashboardId t.VDBId, groupId int64, period enums.TimePeriod) (*t.VDBSlashingsSummaryValidators, error) { - return getDummyStruct[t.VDBSlashingsSummaryValidators]() + return getDummyStruct[t.VDBSlashingsSummaryValidators](ctx) } func (d *DummyService) GetValidatorDashboardProposalSummaryValidators(ctx context.Context, dashboardId t.VDBId, groupId int64, period enums.TimePeriod) (*t.VDBProposalSummaryValidators, error) { - return getDummyStruct[t.VDBProposalSummaryValidators]() + return getDummyStruct[t.VDBProposalSummaryValidators](ctx) } func (d 
*DummyService) GetValidatorDashboardRewards(ctx context.Context, dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBRewardsColumn], search string, limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBRewardsTableRow, *t.Paging, error) { - return getDummyWithPaging[t.VDBRewardsTableRow]() + return getDummyWithPaging[t.VDBRewardsTableRow](ctx) } func (d *DummyService) GetValidatorDashboardGroupRewards(ctx context.Context, dashboardId t.VDBId, groupId int64, epoch uint64, protocolModes t.VDBProtocolModes) (*t.VDBGroupRewardsData, error) { - return getDummyStruct[t.VDBGroupRewardsData]() + return getDummyStruct[t.VDBGroupRewardsData](ctx) } func (d *DummyService) GetValidatorDashboardRewardsChart(ctx context.Context, dashboardId t.VDBId, protocolModes t.VDBProtocolModes) (*t.ChartData[int, decimal.Decimal], error) { - return getDummyStruct[t.ChartData[int, decimal.Decimal]]() + return getDummyStruct[t.ChartData[int, decimal.Decimal]](ctx) } func (d *DummyService) GetValidatorDashboardDuties(ctx context.Context, dashboardId t.VDBId, epoch uint64, groupId int64, cursor string, colSort t.Sort[enums.VDBDutiesColumn], search string, limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBEpochDutiesTableRow, *t.Paging, error) { - return getDummyWithPaging[t.VDBEpochDutiesTableRow]() + return getDummyWithPaging[t.VDBEpochDutiesTableRow](ctx) } func (d *DummyService) GetValidatorDashboardBlocks(ctx context.Context, dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBBlocksColumn], search string, limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBBlocksTableRow, *t.Paging, error) { - return getDummyWithPaging[t.VDBBlocksTableRow]() + return getDummyWithPaging[t.VDBBlocksTableRow](ctx) } func (d *DummyService) GetValidatorDashboardHeatmap(ctx context.Context, dashboardId t.VDBId, protocolModes t.VDBProtocolModes, aggregation enums.ChartAggregation, afterTs uint64, beforeTs uint64) (*t.VDBHeatmap, error) { - return getDummyStruct[t.VDBHeatmap]() + return getDummyStruct[t.VDBHeatmap](ctx) } func (d *DummyService) GetValidatorDashboardGroupHeatmap(ctx context.Context, dashboardId t.VDBId, groupId uint64, protocolModes t.VDBProtocolModes, aggregation enums.ChartAggregation, timestamp uint64) (*t.VDBHeatmapTooltipData, error) { - return getDummyStruct[t.VDBHeatmapTooltipData]() + return getDummyStruct[t.VDBHeatmapTooltipData](ctx) } func (d *DummyService) GetValidatorDashboardElDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, limit uint64) ([]t.VDBExecutionDepositsTableRow, *t.Paging, error) { - return getDummyWithPaging[t.VDBExecutionDepositsTableRow]() + return getDummyWithPaging[t.VDBExecutionDepositsTableRow](ctx) } func (d *DummyService) GetValidatorDashboardClDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, limit uint64) ([]t.VDBConsensusDepositsTableRow, *t.Paging, error) { - return getDummyWithPaging[t.VDBConsensusDepositsTableRow]() + return getDummyWithPaging[t.VDBConsensusDepositsTableRow](ctx) } func (d *DummyService) GetValidatorDashboardTotalElDeposits(ctx context.Context, dashboardId t.VDBId) (*t.VDBTotalExecutionDepositsData, error) { - return getDummyStruct[t.VDBTotalExecutionDepositsData]() + return getDummyStruct[t.VDBTotalExecutionDepositsData](ctx) } func (d *DummyService) GetValidatorDashboardTotalClDeposits(ctx context.Context, dashboardId t.VDBId) (*t.VDBTotalConsensusDepositsData, error) { - return getDummyStruct[t.VDBTotalConsensusDepositsData]() + return getDummyStruct[t.VDBTotalConsensusDepositsData](ctx) } func (d 
*DummyService) GetValidatorDashboardWithdrawals(ctx context.Context, dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBWithdrawalsColumn], search string, limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBWithdrawalsTableRow, *t.Paging, error) { @@ -378,103 +383,198 @@ func (d *DummyService) GetValidatorDashboardWithdrawals(ctx context.Context, das } func (d *DummyService) GetValidatorDashboardTotalWithdrawals(ctx context.Context, dashboardId t.VDBId, search string, protocolModes t.VDBProtocolModes) (*t.VDBTotalWithdrawalsData, error) { - return getDummyStruct[t.VDBTotalWithdrawalsData]() + return getDummyStruct[t.VDBTotalWithdrawalsData](ctx) } func (d *DummyService) GetValidatorDashboardRocketPool(ctx context.Context, dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBRocketPoolColumn], search string, limit uint64) ([]t.VDBRocketPoolTableRow, *t.Paging, error) { - return getDummyWithPaging[t.VDBRocketPoolTableRow]() + return getDummyWithPaging[t.VDBRocketPoolTableRow](ctx) } func (d *DummyService) GetValidatorDashboardTotalRocketPool(ctx context.Context, dashboardId t.VDBId, search string) (*t.VDBRocketPoolTableRow, error) { - return getDummyStruct[t.VDBRocketPoolTableRow]() -} - -func (d *DummyService) GetValidatorDashboardNodeRocketPool(ctx context.Context, dashboardId t.VDBId, node string) (*t.VDBNodeRocketPoolData, error) { - return getDummyStruct[t.VDBNodeRocketPoolData]() + return getDummyStruct[t.VDBRocketPoolTableRow](ctx) } func (d *DummyService) GetValidatorDashboardRocketPoolMinipools(ctx context.Context, dashboardId t.VDBId, node string, cursor string, colSort t.Sort[enums.VDBRocketPoolMinipoolsColumn], search string, limit uint64) ([]t.VDBRocketPoolMinipoolsTableRow, *t.Paging, error) { - return getDummyWithPaging[t.VDBRocketPoolMinipoolsTableRow]() + return getDummyWithPaging[t.VDBRocketPoolMinipoolsTableRow](ctx) } func (d *DummyService) GetAllNetworks() ([]t.NetworkInfo, error) { - return getDummyData[[]t.NetworkInfo]() + return []t.NetworkInfo{ + { + ChainId: 1, + Name: "ethereum", + NotificationsName: "mainnet", + }, + { + ChainId: 100, + Name: "gnosis", + NotificationsName: "gnosis", + }, + { + ChainId: 17000, + Name: "holesky", + NotificationsName: "holesky", + }, + }, nil +} + +func (d *DummyService) GetAllClients() ([]t.ClientInfo, error) { + return []t.ClientInfo{ + // execution_layer + { + Id: 0, + Name: "Geth", + DbName: "geth", + Category: "execution_layer", + }, + { + Id: 1, + Name: "Nethermind", + DbName: "nethermind", + Category: "execution_layer", + }, + { + Id: 2, + Name: "Besu", + DbName: "besu", + Category: "execution_layer", + }, + { + Id: 3, + Name: "Erigon", + DbName: "erigon", + Category: "execution_layer", + }, + { + Id: 4, + Name: "Reth", + DbName: "reth", + Category: "execution_layer", + }, + // consensus_layer + { + Id: 5, + Name: "Teku", + DbName: "teku", + Category: "consensus_layer", + }, + { + Id: 6, + Name: "Prysm", + DbName: "prysm", + Category: "consensus_layer", + }, + { + Id: 7, + Name: "Nimbus", + DbName: "nimbus", + Category: "consensus_layer", + }, + { + Id: 8, + Name: "Lighthouse", + DbName: "lighthouse", + Category: "consensus_layer", + }, + { + Id: 9, + Name: "Lodestar", + DbName: "lodestar", + Category: "consensus_layer", + }, + // other + { + Id: 10, + Name: "Rocketpool Smart Node", + DbName: "rocketpool", + Category: "other", + }, + { + Id: 11, + Name: "MEV-Boost", + DbName: "mev-boost", + Category: "other", + }, + }, nil } func (d *DummyService) GetSearchValidatorByIndex(ctx context.Context, chainId, 
index uint64) (*t.SearchValidator, error) { - return getDummyStruct[t.SearchValidator]() + return getDummyStruct[t.SearchValidator](ctx) } func (d *DummyService) GetSearchValidatorByPublicKey(ctx context.Context, chainId uint64, publicKey []byte) (*t.SearchValidator, error) { - return getDummyStruct[t.SearchValidator]() + return getDummyStruct[t.SearchValidator](ctx) } func (d *DummyService) GetSearchValidatorsByDepositAddress(ctx context.Context, chainId uint64, address []byte) (*t.SearchValidatorsByDepositAddress, error) { - return getDummyStruct[t.SearchValidatorsByDepositAddress]() + return getDummyStruct[t.SearchValidatorsByDepositAddress](ctx) } func (d *DummyService) GetSearchValidatorsByDepositEnsName(ctx context.Context, chainId uint64, ensName string) (*t.SearchValidatorsByDepositEnsName, error) { - return getDummyStruct[t.SearchValidatorsByDepositEnsName]() + return getDummyStruct[t.SearchValidatorsByDepositEnsName](ctx) } func (d *DummyService) GetSearchValidatorsByWithdrawalCredential(ctx context.Context, chainId uint64, credential []byte) (*t.SearchValidatorsByWithdrwalCredential, error) { - return getDummyStruct[t.SearchValidatorsByWithdrwalCredential]() + return getDummyStruct[t.SearchValidatorsByWithdrwalCredential](ctx) } func (d *DummyService) GetSearchValidatorsByWithdrawalEnsName(ctx context.Context, chainId uint64, ensName string) (*t.SearchValidatorsByWithrawalEnsName, error) { - return getDummyStruct[t.SearchValidatorsByWithrawalEnsName]() + return getDummyStruct[t.SearchValidatorsByWithrawalEnsName](ctx) } func (d *DummyService) GetSearchValidatorsByGraffiti(ctx context.Context, chainId uint64, graffiti string) (*t.SearchValidatorsByGraffiti, error) { - return getDummyStruct[t.SearchValidatorsByGraffiti]() + return getDummyStruct[t.SearchValidatorsByGraffiti](ctx) } func (d *DummyService) GetUserValidatorDashboardCount(ctx context.Context, userId uint64, active bool) (uint64, error) { - return getDummyData[uint64]() + return getDummyData[uint64](ctx) } func (d *DummyService) GetValidatorDashboardGroupCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) { - return getDummyData[uint64]() + return getDummyData[uint64](ctx) } func (d *DummyService) GetValidatorDashboardValidatorsCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) { - return getDummyData[uint64]() + return getDummyData[uint64](ctx) } func (d *DummyService) GetValidatorDashboardPublicIdCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) { - return getDummyData[uint64]() + return getDummyData[uint64](ctx) } func (d *DummyService) GetNotificationOverview(ctx context.Context, userId uint64) (*t.NotificationOverviewData, error) { - return getDummyStruct[t.NotificationOverviewData]() + return getDummyStruct[t.NotificationOverviewData](ctx) } -func (d *DummyService) GetDashboardNotifications(ctx context.Context, userId uint64, chainId uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) { - return getDummyWithPaging[t.NotificationDashboardsTableRow]() +func (d *DummyService) GetDashboardNotifications(ctx context.Context, userId uint64, chainIds []uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) { + return getDummyWithPaging[t.NotificationDashboardsTableRow](ctx) } -func (d *DummyService) GetValidatorDashboardNotificationDetails(ctx 
context.Context, notificationId string) (*t.NotificationValidatorDashboardDetail, error) { - return getDummyStruct[t.NotificationValidatorDashboardDetail]() +func (d *DummyService) GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64, search string) (*t.NotificationValidatorDashboardDetail, error) { + return getDummyStruct[t.NotificationValidatorDashboardDetail](ctx) } -func (d *DummyService) GetAccountDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationAccountDashboardDetail, error) { - return getDummyStruct[t.NotificationAccountDashboardDetail]() +func (d *DummyService) GetAccountDashboardNotificationDetails(ctx context.Context, dashboardId uint64, groupId uint64, epoch uint64, search string) (*t.NotificationAccountDashboardDetail, error) { + return getDummyStruct[t.NotificationAccountDashboardDetail](ctx) } func (d *DummyService) GetMachineNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationMachinesColumn], search string, limit uint64) ([]t.NotificationMachinesTableRow, *t.Paging, error) { - return getDummyWithPaging[t.NotificationMachinesTableRow]() + return getDummyWithPaging[t.NotificationMachinesTableRow](ctx) } func (d *DummyService) GetClientNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationClientsColumn], search string, limit uint64) ([]t.NotificationClientsTableRow, *t.Paging, error) { - return getDummyWithPaging[t.NotificationClientsTableRow]() + return getDummyWithPaging[t.NotificationClientsTableRow](ctx) } func (d *DummyService) GetRocketPoolNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationRocketPoolColumn], search string, limit uint64) ([]t.NotificationRocketPoolTableRow, *t.Paging, error) { - return getDummyWithPaging[t.NotificationRocketPoolTableRow]() + return getDummyWithPaging[t.NotificationRocketPoolTableRow](ctx) } -func (d *DummyService) GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], search string, limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) { - return getDummyWithPaging[t.NotificationNetworksTableRow]() +func (d *DummyService) GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) { + return getDummyWithPaging[t.NotificationNetworksTableRow](ctx) } func (d *DummyService) GetNotificationSettings(ctx context.Context, userId uint64) (*t.NotificationSettings, error) { - return getDummyStruct[t.NotificationSettings]() + return getDummyStruct[t.NotificationSettings](ctx) +} +func (d *DummyService) GetNotificationSettingsDefaultValues(ctx context.Context) (*t.NotificationSettingsDefaultValues, error) { + return getDummyStruct[t.NotificationSettingsDefaultValues](ctx) } func (d *DummyService) UpdateNotificationSettingsGeneral(ctx context.Context, userId uint64, settings t.NotificationSettingsGeneral) error { return nil @@ -482,14 +582,19 @@ func (d *DummyService) UpdateNotificationSettingsGeneral(ctx context.Context, us func (d *DummyService) UpdateNotificationSettingsNetworks(ctx context.Context, userId uint64, chainId uint64, settings t.NotificationSettingsNetwork) error { return nil } -func (d *DummyService) UpdateNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string, name 
string, IsNotificationsEnabled bool) error { +func (d *DummyService) UpdateNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId uint64, name string, IsNotificationsEnabled bool) error { return nil } -func (d *DummyService) DeleteNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string) error { +func (d *DummyService) DeleteNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId uint64) error { return nil } + +func (d *DummyService) UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) { + return getDummyStruct[t.NotificationSettingsClient](ctx) +} + func (d *DummyService) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) { - r, p, err := getDummyWithPaging[t.NotificationSettingsDashboardsTableRow]() + r, p, err := getDummyWithPaging[t.NotificationSettingsDashboardsTableRow](ctx) for i, n := range r { var settings interface{} if n.IsAccountDashboard { @@ -497,15 +602,15 @@ func (d *DummyService) GetNotificationSettingsDashboards(ctx context.Context, us } else { settings = t.NotificationSettingsValidatorDashboard{} } - _ = commonFakeData(&settings) + _ = populateWithFakeData(ctx, &settings) r[i].Settings = settings } return r, p, err } -func (d *DummyService) UpdateNotificationSettingsValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error { +func (d *DummyService) UpdateNotificationSettingsValidatorDashboard(ctx context.Context, userId uint64, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error { return nil } -func (d *DummyService) UpdateNotificationSettingsAccountDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsAccountDashboard) error { +func (d *DummyService) UpdateNotificationSettingsAccountDashboard(ctx context.Context, userId uint64, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsAccountDashboard) error { return nil } func (d *DummyService) CreateAdConfiguration(ctx context.Context, key, jquerySelector string, insertMode enums.AdInsertMode, refreshInterval uint64, forAllUsers bool, bannerId uint64, htmlContent string, enabled bool) error { @@ -513,7 +618,7 @@ func (d *DummyService) CreateAdConfiguration(ctx context.Context, key, jquerySel } func (d *DummyService) GetAdConfigurations(ctx context.Context, keys []string) ([]t.AdConfigurationData, error) { - return getDummyData[[]t.AdConfigurationData]() + return getDummyData[[]t.AdConfigurationData](ctx) } func (d *DummyService) UpdateAdConfiguration(ctx context.Context, key, jquerySelector string, insertMode enums.AdInsertMode, refreshInterval uint64, forAllUsers bool, bannerId uint64, htmlContent string, enabled bool) error { @@ -525,126 +630,173 @@ func (d *DummyService) RemoveAdConfiguration(ctx context.Context, key string) er } func (d *DummyService) GetLatestExportedChartTs(ctx context.Context, aggregation enums.ChartAggregation) (uint64, error) { - return getDummyData[uint64]() + return getDummyData[uint64](ctx) } -func (d *DummyService) GetUserIdByRefreshToken(claimUserID, claimAppID, claimDeviceID uint64, hashedRefreshToken string) (uint64, error) { - return getDummyData[uint64]() +func (d 
*DummyService) GetUserIdByRefreshToken(ctx context.Context, claimUserID, claimAppID, claimDeviceID uint64, hashedRefreshToken string) (uint64, error) { + return getDummyData[uint64](ctx) } -func (d *DummyService) MigrateMobileSession(oldHashedRefreshToken, newHashedRefreshToken, deviceID, deviceName string) error { +func (d *DummyService) MigrateMobileSession(ctx context.Context, oldHashedRefreshToken, newHashedRefreshToken, deviceID, deviceName string) error { return nil } -func (d *DummyService) GetAppDataFromRedirectUri(callback string) (*t.OAuthAppData, error) { - return getDummyStruct[t.OAuthAppData]() +func (d *DummyService) GetAppDataFromRedirectUri(ctx context.Context, callback string) (*t.OAuthAppData, error) { + return getDummyStruct[t.OAuthAppData](ctx) } -func (d *DummyService) AddUserDevice(userID uint64, hashedRefreshToken string, deviceID, deviceName string, appID uint64) error { +func (d *DummyService) AddUserDevice(ctx context.Context, userID uint64, hashedRefreshToken string, deviceID, deviceName string, appID uint64) error { return nil } -func (d *DummyService) AddMobileNotificationToken(userID uint64, deviceID, notifyToken string) error { +func (d *DummyService) AddMobileNotificationToken(ctx context.Context, userID uint64, deviceID, notifyToken string) error { return nil } -func (d *DummyService) GetAppSubscriptionCount(userID uint64) (uint64, error) { - return getDummyData[uint64]() +func (d *DummyService) GetAppSubscriptionCount(ctx context.Context, userID uint64) (uint64, error) { + return getDummyData[uint64](ctx) } -func (d *DummyService) AddMobilePurchase(tx *sql.Tx, userID uint64, paymentDetails t.MobileSubscription, verifyResponse *userservice.VerifyResponse, extSubscriptionId string) error { +func (d *DummyService) AddMobilePurchase(ctx context.Context, tx *sql.Tx, userID uint64, paymentDetails t.MobileSubscription, verifyResponse *userservice.VerifyResponse, extSubscriptionId string) error { return nil } func (d *DummyService) GetBlockOverview(ctx context.Context, chainId, block uint64) (*t.BlockOverview, error) { - return getDummyStruct[t.BlockOverview]() + return getDummyStruct[t.BlockOverview](ctx) } func (d *DummyService) GetBlockTransactions(ctx context.Context, chainId, block uint64) ([]t.BlockTransactionTableRow, error) { - return getDummyData[[]t.BlockTransactionTableRow]() + return getDummyData[[]t.BlockTransactionTableRow](ctx) } func (d *DummyService) GetBlock(ctx context.Context, chainId, block uint64) (*t.BlockSummary, error) { - return getDummyStruct[t.BlockSummary]() + return getDummyStruct[t.BlockSummary](ctx) } func (d *DummyService) GetBlockVotes(ctx context.Context, chainId, block uint64) ([]t.BlockVoteTableRow, error) { - return getDummyData[[]t.BlockVoteTableRow]() + return getDummyData[[]t.BlockVoteTableRow](ctx) } func (d *DummyService) GetBlockAttestations(ctx context.Context, chainId, block uint64) ([]t.BlockAttestationTableRow, error) { - return getDummyData[[]t.BlockAttestationTableRow]() + return getDummyData[[]t.BlockAttestationTableRow](ctx) } func (d *DummyService) GetBlockWithdrawals(ctx context.Context, chainId, block uint64) ([]t.BlockWithdrawalTableRow, error) { - return getDummyData[[]t.BlockWithdrawalTableRow]() + return getDummyData[[]t.BlockWithdrawalTableRow](ctx) } func (d *DummyService) GetBlockBlsChanges(ctx context.Context, chainId, block uint64) ([]t.BlockBlsChangeTableRow, error) { - return getDummyData[[]t.BlockBlsChangeTableRow]() + return getDummyData[[]t.BlockBlsChangeTableRow](ctx) } func (d *DummyService) 
GetBlockVoluntaryExits(ctx context.Context, chainId, block uint64) ([]t.BlockVoluntaryExitTableRow, error) { - return getDummyData[[]t.BlockVoluntaryExitTableRow]() + return getDummyData[[]t.BlockVoluntaryExitTableRow](ctx) } func (d *DummyService) GetBlockBlobs(ctx context.Context, chainId, block uint64) ([]t.BlockBlobTableRow, error) { - return getDummyData[[]t.BlockBlobTableRow]() + return getDummyData[[]t.BlockBlobTableRow](ctx) } func (d *DummyService) GetSlot(ctx context.Context, chainId, block uint64) (*t.BlockSummary, error) { - return getDummyStruct[t.BlockSummary]() + return getDummyStruct[t.BlockSummary](ctx) } func (d *DummyService) GetSlotOverview(ctx context.Context, chainId, block uint64) (*t.BlockOverview, error) { - return getDummyStruct[t.BlockOverview]() + return getDummyStruct[t.BlockOverview](ctx) } func (d *DummyService) GetSlotTransactions(ctx context.Context, chainId, block uint64) ([]t.BlockTransactionTableRow, error) { - return getDummyData[[]t.BlockTransactionTableRow]() + return getDummyData[[]t.BlockTransactionTableRow](ctx) } func (d *DummyService) GetSlotVotes(ctx context.Context, chainId, block uint64) ([]t.BlockVoteTableRow, error) { - return getDummyData[[]t.BlockVoteTableRow]() + return getDummyData[[]t.BlockVoteTableRow](ctx) } func (d *DummyService) GetSlotAttestations(ctx context.Context, chainId, block uint64) ([]t.BlockAttestationTableRow, error) { - return getDummyData[[]t.BlockAttestationTableRow]() + return getDummyData[[]t.BlockAttestationTableRow](ctx) } func (d *DummyService) GetSlotWithdrawals(ctx context.Context, chainId, block uint64) ([]t.BlockWithdrawalTableRow, error) { - return getDummyData[[]t.BlockWithdrawalTableRow]() + return getDummyData[[]t.BlockWithdrawalTableRow](ctx) } func (d *DummyService) GetSlotBlsChanges(ctx context.Context, chainId, block uint64) ([]t.BlockBlsChangeTableRow, error) { - return getDummyData[[]t.BlockBlsChangeTableRow]() + return getDummyData[[]t.BlockBlsChangeTableRow](ctx) } func (d *DummyService) GetSlotVoluntaryExits(ctx context.Context, chainId, block uint64) ([]t.BlockVoluntaryExitTableRow, error) { - return getDummyData[[]t.BlockVoluntaryExitTableRow]() + return getDummyData[[]t.BlockVoluntaryExitTableRow](ctx) } func (d *DummyService) GetSlotBlobs(ctx context.Context, chainId, block uint64) ([]t.BlockBlobTableRow, error) { - return getDummyData[[]t.BlockBlobTableRow]() + return getDummyData[[]t.BlockBlobTableRow](ctx) } func (d *DummyService) GetValidatorDashboardsCountInfo(ctx context.Context) (map[uint64][]t.ArchiverDashboard, error) { - return getDummyData[map[uint64][]t.ArchiverDashboard]() + return getDummyData[map[uint64][]t.ArchiverDashboard](ctx) } func (d *DummyService) GetRocketPoolOverview(ctx context.Context) (*t.RocketPoolData, error) { - return getDummyStruct[t.RocketPoolData]() + return getDummyStruct[t.RocketPoolData](ctx) +} + +func (d *DummyService) GetApiWeights(ctx context.Context) ([]t.ApiWeightItem, error) { + return getDummyData[[]t.ApiWeightItem](ctx) } func (d *DummyService) GetHealthz(ctx context.Context, showAll bool) t.HealthzData { - r, _ := getDummyData[t.HealthzData]() + r, _ := getDummyData[t.HealthzData](ctx) return r } func (d *DummyService) GetLatestBundleForNativeVersion(ctx context.Context, nativeVersion uint64) (*t.MobileAppBundleStats, error) { - return getDummyStruct[t.MobileAppBundleStats]() + return getDummyStruct[t.MobileAppBundleStats](ctx) } func (d *DummyService) IncrementBundleDeliveryCount(ctx context.Context, bundleVerison uint64) error { return nil } + 
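The mechanical change running through this file is that every dummy helper now takes the request context: `getDummyStruct[T]()` becomes `getDummyStruct[T](ctx)`, and `commonFakeData` becomes `populateWithFakeData(ctx, ...)`. A minimal sketch of what the ctx-aware helpers could look like, assuming a go-faker backend; the helper bodies and the `Paging` stand-in are illustrative, not copied from the repo:

```go
package dataaccess

import (
	"context"

	"github.com/go-faker/faker/v4"
)

// Paging stands in for the repo's t.Paging type in this sketch.
type Paging struct{ NextCursor string }

// populateWithFakeData fills v with faker-generated data; taking ctx lets
// the helper honor request cancellation before doing any work (assumption).
func populateWithFakeData(ctx context.Context, v interface{}) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return faker.FakeData(v)
}

// getDummyData fakes a bare value of any type T by wrapping it in a struct.
func getDummyData[T any](ctx context.Context) (T, error) {
	r := struct{ Value T }{}
	err := populateWithFakeData(ctx, &r)
	return r.Value, err
}

// getDummyStruct matches the *t.XYZ return shapes used by the DummyService.
func getDummyStruct[T any](ctx context.Context) (*T, error) {
	var r T
	err := populateWithFakeData(ctx, &r)
	return &r, err
}

// getDummyWithPaging fakes one table page plus its paging metadata.
func getDummyWithPaging[T any](ctx context.Context) ([]T, *Paging, error) {
	r := struct {
		Data   []T `faker:"slice_len=10"`
		Paging Paging
	}{}
	err := populateWithFakeData(ctx, &r)
	return r.Data, &r.Paging, err
}
```

The `faker:"slice_len=4"` tag on `GetValidatorDashboardSlotViz`'s epochs struct above fits this pattern: struct tags steer per-field fake generation.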
+func (d *DummyService) GetValidatorDashboardMobileWidget(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.MobileWidgetData, error) { + return getDummyStruct[t.MobileWidgetData](ctx) +} + +func (d *DummyService) GetUserMachineMetrics(ctx context.Context, userID uint64, limit int, offset int) (*t.MachineMetricsData, error) { + data, err := getDummyStruct[t.MachineMetricsData](ctx) + if err != nil { + return nil, err + } + data.SystemMetrics = slices.SortedFunc(slices.Values(data.SystemMetrics), func(i, j *commontypes.MachineMetricSystem) int { + return int(i.Timestamp) - int(j.Timestamp) + }) + data.ValidatorMetrics = slices.SortedFunc(slices.Values(data.ValidatorMetrics), func(i, j *commontypes.MachineMetricValidator) int { + return int(i.Timestamp) - int(j.Timestamp) + }) + data.NodeMetrics = slices.SortedFunc(slices.Values(data.NodeMetrics), func(i, j *commontypes.MachineMetricNode) int { + return int(i.Timestamp) - int(j.Timestamp) + }) + return data, nil +} + +func (d *DummyService) PostUserMachineMetrics(ctx context.Context, userID uint64, machine, process string, data []byte) error { + return nil +} + +func (d *DummyService) GetValidatorDashboardMobileValidators(ctx context.Context, dashboardId t.VDBId, period enums.TimePeriod, cursor string, colSort t.Sort[enums.VDBMobileValidatorsColumn], search string, limit uint64) ([]t.MobileValidatorDashboardValidatorsTableRow, *t.Paging, error) { + return getDummyWithPaging[t.MobileValidatorDashboardValidatorsTableRow](ctx) +} + +func (d *DummyService) QueueTestEmailNotification(ctx context.Context, userId uint64) error { + return nil +} +func (d *DummyService) QueueTestPushNotification(ctx context.Context, userId uint64) error { + return nil +} +func (d *DummyService) QueueTestWebhookNotification(ctx context.Context, userId uint64, webhookUrl string, isDiscordWebhook bool) error { + return nil +} + +func (d *DummyService) GetPairedDeviceUserId(ctx context.Context, pairedDeviceId uint64) (uint64, error) { + return getDummyData[uint64](ctx) +} diff --git a/backend/pkg/api/data_access/general.go b/backend/pkg/api/data_access/general.go new file mode 100644 index 000000000..9d7ee0d97 --- /dev/null +++ b/backend/pkg/api/data_access/general.go @@ -0,0 +1,111 @@ +package dataaccess + +import ( + "context" + + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/db" +) + +// retrieve (primary) ens name and optional name (=label) maintained by beaconcha.in, if present +func (d *DataAccessService) GetNamesAndEnsForAddresses(ctx context.Context, addressMap map[string]*types.Address) error { + addresses := make([][]byte, 0, len(addressMap)) + ensMapping := make(map[string]string, len(addressMap)) + for address, data := range addressMap { + ensMapping[address] = "" + add, err := hexutil.Decode(address) + if err != nil { + return err + } + addresses = append(addresses, add) + if data == nil { + addressMap[address] = &types.Address{Hash: types.Hash(address)} + } + } + // determine ENS names + if err := db.GetEnsNamesForAddresses(ensMapping); err != nil { + return err + } + for address, ens := range ensMapping { + addressMap[address].Ens = ens + } + + // determine names + names := []struct { + Address []byte `db:"address"` + Name string `db:"name"` + }{} + err := d.alloyReader.SelectContext(ctx, &names, `SELECT address, name FROM address_names WHERE address = ANY($1)`, addresses) + if err 
!= nil { + return err + } + + for _, name := range names { + addressMap[hexutil.Encode(name.Address)].Label = name.Name + } + return nil +} + +// helper function to sort and apply pagination to a query +// 1st param is the list of all columns necessary to sort the table deterministically; it defines their precedence and sort direction +// 2nd param is the requested sort column; it may or may not be part of the default columns (if it is, you don't have to specify the cursor offset again) +func applySortAndPagination(defaultColumns []types.SortColumn, primary types.SortColumn, cursor types.GenericCursor) ([]exp.OrderedExpression, exp.Expression) { + // prepare ordering columns; always need all columns to ensure consistent ordering + queryOrderColumns := make([]types.SortColumn, 0, len(defaultColumns)) + queryOrderColumns = append(queryOrderColumns, primary) + // secondary sorts according to default + for _, column := range defaultColumns { + if column.Column == primary.Column { + if primary.Offset == nil { + queryOrderColumns[0].Offset = column.Offset + } + continue + } + queryOrderColumns = append(queryOrderColumns, column) + } + + // apply ordering + queryOrder := []exp.OrderedExpression{} + for i := range queryOrderColumns { + column := &queryOrderColumns[i] + if cursor.IsReverse() { + column.Desc = !column.Desc + } + colOrder := column.Column.Asc().NullsFirst() + if column.Desc { + colOrder = column.Column.Desc().NullsLast() + } + queryOrder = append(queryOrder, colOrder) + } + + // apply cursor offsets + var queryWhere exp.Expression + if cursor.IsValid() { + // reverse order to nest conditions + for i := len(queryOrderColumns) - 1; i >= 0; i-- { + column := queryOrderColumns[i] + var colWhere exp.Expression + + // current convention is opposite of the psql default (ASC: nulls first, DESC: nulls last) + colWhere = goqu.Or(column.Column.Lt(column.Offset), column.Column.IsNull()) + if !column.Desc { + colWhere = column.Column.Gt(column.Offset) + if column.Offset == nil { + colWhere = goqu.Or(colWhere, column.Column.IsNull()) + } + } + + if queryWhere == nil { + queryWhere = colWhere + } else { + queryWhere = goqu.And(column.Column.Eq(column.Offset), queryWhere) + queryWhere = goqu.Or(colWhere, queryWhere) + } + } + } + + return queryOrder, queryWhere +} diff --git a/backend/pkg/api/data_access/header.go b/backend/pkg/api/data_access/header.go index 5658c9157..d06b41439 100644 --- a/backend/pkg/api/data_access/header.go +++ b/backend/pkg/api/data_access/header.go @@ -1,32 +1,75 @@ package dataaccess import ( + "context" + "database/sql" + "fmt" + t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/cache" + "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/price" + "github.com/gobitfly/beaconchain/pkg/commons/utils" ) -func (d *DataAccessService) GetLatestSlot() (uint64, error) { +func (d *DataAccessService) GetLatestSlot(ctx context.Context) (uint64, error) { latestSlot := cache.LatestSlot.Get() return latestSlot, nil } -func (d *DataAccessService) GetLatestFinalizedEpoch() (uint64, error) { +func (d *DataAccessService) GetLatestFinalizedEpoch(ctx context.Context) (uint64, error) { finalizedEpoch := cache.LatestFinalizedEpoch.Get() return finalizedEpoch, nil } -func (d *DataAccessService) GetLatestBlock() (uint64, error) { +func (d *DataAccessService) GetLatestBlock(ctx context.Context) (uint64, error) { // @DATA-ACCESS implement - return d.dummy.GetLatestBlock() + return d.dummy.GetLatestBlock(ctx) }
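To make the nesting in `applySortAndPagination` (general.go above) concrete: for a two-column sort of `epoch DESC, validator_index ASC` and a cursor at `(100, 42)`, the helper's loop produces a keyset condition equivalent to the hand-rolled goqu query below. Table, column names, and cursor values here are hypothetical; only the shape of the WHERE/ORDER BY clauses mirrors the helper:

```go
package main

import (
	"fmt"

	"github.com/doug-martin/goqu/v9"
	_ "github.com/doug-martin/goqu/v9/dialect/postgres"
)

func main() {
	epoch := goqu.C("epoch")           // primary sort, DESC
	index := goqu.C("validator_index") // tie-breaker, ASC

	// Rows strictly "after" the cursor (epoch=100, validator_index=42).
	// DESC columns treat NULLs as sorting last, matching the helper's
	// "opposite of the psql default" convention.
	where := goqu.Or(
		goqu.Or(epoch.Lt(100), epoch.IsNull()),
		goqu.And(epoch.Eq(100), index.Gt(42)),
	)

	sql, args, _ := goqu.Dialect("postgres").
		From("history").
		Where(where).
		Order(epoch.Desc().NullsLast(), index.Asc().NullsFirst()).
		Limit(10).
		Prepared(true).
		ToSQL()
	fmt.Println(sql, args)
}
```

For reverse paging the helper flips each column's direction when `cursor.IsReverse()`, so the same nesting walks the other way and the caller reverses the fetched page.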
-func (d *DataAccessService) GetBlockHeightAt(slot uint64) (uint64, error) { +func (d *DataAccessService) GetBlockHeightAt(ctx context.Context, slot uint64) (uint64, error) { // @DATA-ACCESS implement; return error if no block at slot - return d.dummy.GetBlockHeightAt(slot) + return getDummyData[uint64](ctx) +} + +// returns the block number of the latest existing block at or before the given slot +func (d *DataAccessService) GetLatestBlockHeightForSlot(ctx context.Context, slot uint64) (uint64, error) { + query := `SELECT MAX(exec_block_number) FROM blocks WHERE slot <= $1` + res := uint64(0) + err := d.alloyReader.GetContext(ctx, &res, query, slot) + if err != nil { + if err == sql.ErrNoRows { + log.Warnf("no EL block found at or before slot %d", slot) + return 0, nil + } + return 0, fmt.Errorf("failed to get latest existing block height at or before slot %d: %w", slot, err) + } + return res, nil +} + +func (d *DataAccessService) GetLatestBlockHeightsForEpoch(ctx context.Context, epoch uint64) ([]uint64, error) { + // use 2 epochs as safety margin + query := ` + WITH recent_blocks AS ( + SELECT slot, exec_block_number + FROM blocks + WHERE slot < $1 + ORDER BY slot DESC + LIMIT $2 * 2 + ) + SELECT MAX(exec_block_number) OVER (ORDER BY slot) AS block + FROM recent_blocks + ORDER BY slot DESC + LIMIT $2` + res := []uint64{} + err := d.alloyReader.SelectContext(ctx, &res, query, (epoch+1)*utils.Config.Chain.ClConfig.SlotsPerEpoch, utils.Config.Chain.ClConfig.SlotsPerEpoch) + if err != nil { + return nil, fmt.Errorf("failed to get latest existing block heights for slots in epoch %d: %w", epoch, err) + } + return res, nil } -func (d *DataAccessService) GetLatestExchangeRates() ([]t.EthConversionRate, error) { +func (d *DataAccessService) GetLatestExchangeRates(ctx context.Context) ([]t.EthConversionRate, error) { result := []t.EthConversionRate{} availableCurrencies := price.GetAvailableCurrencies() diff --git a/backend/pkg/api/data_access/machine_metrics.go b/backend/pkg/api/data_access/machine_metrics.go new file mode 100644 index 000000000..a8c3ca518 --- /dev/null +++ b/backend/pkg/api/data_access/machine_metrics.go @@ -0,0 +1,58 @@ +package dataaccess + +import ( + "context" + "strings" + + apiTypes "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +type MachineRepository interface { + GetUserMachineMetrics(context context.Context, userID uint64, limit int, offset int) (*apiTypes.MachineMetricsData, error) + PostUserMachineMetrics(context context.Context, userID uint64, machine, process string, data []byte) error +} + +func (d *DataAccessService) GetUserMachineMetrics(ctx context.Context, userID uint64, limit int, offset int) (*apiTypes.MachineMetricsData, error) { + data := &apiTypes.MachineMetricsData{} + + g := errgroup.Group{} + + g.Go(func() error { + var err error + data.SystemMetrics, err = d.bigtable.GetMachineMetricsSystem(types.UserId(userID), limit, offset) + return err + }) + + g.Go(func() error { + var err error + data.ValidatorMetrics, err = d.bigtable.GetMachineMetricsValidator(types.UserId(userID), limit, offset) + return err + }) + + g.Go(func() error { + var err error + data.NodeMetrics, err = d.bigtable.GetMachineMetricsNode(types.UserId(userID), limit, offset) + return err + }) + + if err := g.Wait(); err != nil { + return nil, errors.Wrap(err, "could not get stats") + } + + return data, nil +} + +func (d 
*DataAccessService) PostUserMachineMetrics(ctx context.Context, userID uint64, machine, process string, data []byte) error { + err := db.BigtableClient.SaveMachineMetric(process, types.UserId(userID), machine, data) + if err != nil { + if strings.HasPrefix(err.Error(), "rate limit") { + return err + } + return errors.Wrap(err, "could not save stats") + } + return nil +} diff --git a/backend/pkg/api/data_access/mobile.go b/backend/pkg/api/data_access/mobile.go new file mode 100644 index 000000000..373688439 --- /dev/null +++ b/backend/pkg/api/data_access/mobile.go @@ -0,0 +1,368 @@ +package dataaccess + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/doug-martin/goqu/v9" + "github.com/gobitfly/beaconchain/pkg/api/enums" + t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" + "github.com/gobitfly/beaconchain/pkg/userservice" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "golang.org/x/sync/errgroup" +) + +type AppRepository interface { + GetUserIdByRefreshToken(ctx context.Context, claimUserID, claimAppID, claimDeviceID uint64, hashedRefreshToken string) (uint64, error) + MigrateMobileSession(ctx context.Context, oldHashedRefreshToken, newHashedRefreshToken, deviceID, deviceName string) error + AddUserDevice(ctx context.Context, userID uint64, hashedRefreshToken string, deviceID, deviceName string, appID uint64) error + GetAppDataFromRedirectUri(ctx context.Context, callback string) (*t.OAuthAppData, error) + AddMobileNotificationToken(ctx context.Context, userID uint64, deviceID, notifyToken string) error + GetAppSubscriptionCount(ctx context.Context, userID uint64) (uint64, error) + AddMobilePurchase(ctx context.Context, tx *sql.Tx, userID uint64, paymentDetails t.MobileSubscription, verifyResponse *userservice.VerifyResponse, extSubscriptionId string) error + GetLatestBundleForNativeVersion(ctx context.Context, nativeVersion uint64) (*t.MobileAppBundleStats, error) + IncrementBundleDeliveryCount(ctx context.Context, bundleVerison uint64) error + GetValidatorDashboardMobileValidators(ctx context.Context, dashboardId t.VDBId, period enums.TimePeriod, cursor string, colSort t.Sort[enums.VDBMobileValidatorsColumn], search string, limit uint64) ([]t.MobileValidatorDashboardValidatorsTableRow, *t.Paging, error) +} + +// GetUserIdByRefreshToken confirms the claimed user id with the refresh token.
Returns the userId if successful +func (d *DataAccessService) GetUserIdByRefreshToken(ctx context.Context, claimUserID, claimAppID, claimDeviceID uint64, hashedRefreshToken string) (uint64, error) { + if hashedRefreshToken == "" { // sanity + return 0, errors.New("empty refresh token") + } + var userID uint64 + err := d.userWriter.GetContext(ctx, &userID, + `SELECT user_id FROM users_devices WHERE user_id = $1 AND + refresh_token = $2 AND app_id = $3 AND id = $4 AND active = true`, claimUserID, hashedRefreshToken, claimAppID, claimDeviceID) + if errors.Is(err, sql.ErrNoRows) { + return userID, fmt.Errorf("%w: user not found via refresh token", ErrNotFound) + } + return userID, err +} + +func (d *DataAccessService) MigrateMobileSession(ctx context.Context, oldHashedRefreshToken, newHashedRefreshToken, deviceID, deviceName string) error { + result, err := d.userWriter.ExecContext(ctx, "UPDATE users_devices SET refresh_token = $2, device_identifier = $3, device_name = $4 WHERE refresh_token = $1", oldHashedRefreshToken, newHashedRefreshToken, deviceID, deviceName) + if err != nil { + return errors.Wrap(err, "Error updating refresh token") + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return errors.Wrap(err, "Error getting rows affected") + } + + if rowsAffected != 1 { + return fmt.Errorf("illegal number of rows affected, expected 1 got %d", rowsAffected) + } + + return err +} + +func (d *DataAccessService) GetAppDataFromRedirectUri(ctx context.Context, callback string) (*t.OAuthAppData, error) { + data := t.OAuthAppData{} + err := d.userWriter.GetContext(ctx, &data, "SELECT id, app_name, redirect_uri, active, owner_id FROM oauth_apps WHERE active = true AND redirect_uri = $1", callback) + return &data, err +} + +func (d *DataAccessService) AddUserDevice(ctx context.Context, userID uint64, hashedRefreshToken string, deviceID, deviceName string, appID uint64) error { + _, err := d.userWriter.ExecContext(ctx, "INSERT INTO users_devices (user_id, refresh_token, device_identifier, device_name, app_id, created_ts) VALUES($1, $2, $3, $4, $5, 'NOW()') ON CONFLICT DO NOTHING", + userID, hashedRefreshToken, deviceID, deviceName, appID, + ) + return err +} + +func (d *DataAccessService) AddMobileNotificationToken(ctx context.Context, userID uint64, deviceID, notifyToken string) error { + _, err := d.userWriter.ExecContext(ctx, "UPDATE users_devices SET notification_token = $1 WHERE user_id = $2 AND device_identifier = $3;", + notifyToken, userID, deviceID, + ) + if errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("%w: user mobile device not found", ErrNotFound) + } + return err +} + +func (d *DataAccessService) GetAppSubscriptionCount(ctx context.Context, userID uint64) (uint64, error) { + var count uint64 + err := d.userReader.GetContext(ctx, &count, "SELECT COUNT(receipt) FROM users_app_subscriptions WHERE user_id = $1", userID) + return count, err +} + +func (d *DataAccessService) AddMobilePurchase(ctx context.Context, tx *sql.Tx, userID uint64, paymentDetails t.MobileSubscription, verifyResponse *userservice.VerifyResponse, extSubscriptionId string) error { + now := time.Now() + nowTs := now.Unix() + receiptHash := utils.HashAndEncode(verifyResponse.Receipt) + + query := `INSERT INTO users_app_subscriptions + (user_id, product_id, price_micros, currency, created_at, updated_at, validate_remotely, active, store, receipt, expires_at, reject_reason, receipt_hash, subscription_id) + VALUES($1, $2, $3, $4, TO_TIMESTAMP($5), TO_TIMESTAMP($6), $7, $8, $9, $10,
TO_TIMESTAMP($11), $12, $13, $14) + ON CONFLICT(receipt_hash) DO UPDATE SET product_id = $2, active = $7, updated_at = TO_TIMESTAMP($5);` + var err error + if tx == nil { + _, err = d.userWriter.ExecContext(ctx, query, + userID, verifyResponse.ProductID, paymentDetails.PriceMicros, paymentDetails.Currency, nowTs, nowTs, verifyResponse.Valid, verifyResponse.Valid, paymentDetails.Transaction.Type, verifyResponse.Receipt, verifyResponse.ExpirationDate, verifyResponse.RejectReason, receiptHash, extSubscriptionId, + ) + } else { + _, err = tx.ExecContext(ctx, query, + userID, verifyResponse.ProductID, paymentDetails.PriceMicros, paymentDetails.Currency, nowTs, nowTs, verifyResponse.Valid, verifyResponse.Valid, paymentDetails.Transaction.Type, verifyResponse.Receipt, verifyResponse.ExpirationDate, verifyResponse.RejectReason, receiptHash, extSubscriptionId, + ) + } + + return err +} + +func (d *DataAccessService) GetLatestBundleForNativeVersion(ctx context.Context, nativeVersion uint64) (*t.MobileAppBundleStats, error) { + var bundle t.MobileAppBundleStats + err := d.userReader.GetContext(ctx, &bundle, ` + WITH + latest_native AS ( + SELECT max(min_native_version) as max_native_version + FROM mobile_app_bundles + ), + latest_bundle AS ( + SELECT + bundle_version, + bundle_url, + delivered_count, + COALESCE(target_count, -1) as target_count + FROM mobile_app_bundles + WHERE min_native_version <= $1 + ORDER BY bundle_version DESC + LIMIT 1 + ) + SELECT + COALESCE(latest_bundle.bundle_version, 0) as bundle_version, + COALESCE(latest_bundle.bundle_url, '') as bundle_url, + COALESCE(latest_bundle.target_count, -1) as target_count, + COALESCE(latest_bundle.delivered_count, 0) as delivered_count, + latest_native.max_native_version + FROM latest_native + LEFT JOIN latest_bundle ON TRUE;`, + nativeVersion, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrNotFound + } + + return &bundle, err +} + +func (d *DataAccessService) IncrementBundleDeliveryCount(ctx context.Context, bundleVersion uint64) error { + _, err := d.userWriter.ExecContext(ctx, "UPDATE mobile_app_bundles SET delivered_count = COALESCE(delivered_count, 0) + 1 WHERE bundle_version = $1", bundleVersion) + return err +} + +func (d *DataAccessService) GetValidatorDashboardMobileWidget(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.MobileWidgetData, error) { + data := t.MobileWidgetData{} + eg := errgroup.Group{} + var err error + + wrappedDashboardId := t.VDBId{ + Id: dashboardId, + AggregateGroups: true, + } + + // Get the average network efficiency + efficiency, err := d.services.GetCurrentEfficiencyInfo() + if err != nil { + return nil, fmt.Errorf("error retrieving validator dashboard overview data: %w", err) + } + data.NetworkEfficiency = utils.CalculateTotalEfficiency( + efficiency.AttestationEfficiency[enums.AllTime], efficiency.ProposalEfficiency[enums.AllTime], efficiency.SyncEfficiency[enums.AllTime]) + + // Validator status + eg.Go(func() error { + validatorMapping, err := d.services.GetCurrentValidatorMapping() + if err != nil { + return err + } + + validators, err := d.getDashboardValidators(ctx, wrappedDashboardId, nil) + if err != nil { + return fmt.Errorf("error retrieving validators from dashboard id: %w", err) + } + + // Status + for _, validator := range validators { + metadata := validatorMapping.ValidatorMetadata[validator] + + switch constypes.ValidatorDbStatus(metadata.Status) { + case constypes.DbExitingOnline, constypes.DbSlashingOnline, constypes.DbActiveOnline: + data.ValidatorStateCounts.Online++ + 
case constypes.DbExitingOffline, constypes.DbSlashingOffline, constypes.DbActiveOffline: + data.ValidatorStateCounts.Offline++ + case constypes.DbDeposited, constypes.DbPending: + data.ValidatorStateCounts.Pending++ + case constypes.DbSlashed: + data.ValidatorStateCounts.Slashed++ + case constypes.DbExited: + data.ValidatorStateCounts.Exited++ + } + } + + return nil + }) + + // RPL + eg.Go(func() error { + rpNetworkStats, err := d.getInternalRpNetworkStats(ctx) + if err != nil { + return fmt.Errorf("error retrieving rocketpool network stats: %w", err) + } + data.RplPrice = rpNetworkStats.RPLPrice + + // Find rocketpool node effective balance + type RpOperatorInfo struct { + EffectiveRPLStake decimal.Decimal `db:"effective_rpl_stake"` + RPLStake decimal.Decimal `db:"rpl_stake"` + } + var queryResult RpOperatorInfo + + ds := goqu.Dialect("postgres"). + Select( + goqu.COALESCE(goqu.SUM("rpln.effective_rpl_stake"), 0).As("effective_rpl_stake"), + goqu.COALESCE(goqu.SUM("rpln.rpl_stake"), 0).As("rpl_stake")). + From(goqu.L("rocketpool_nodes AS rpln")). + LeftJoin(goqu.L("rocketpool_minipools AS m"), goqu.On(goqu.L("m.node_address = rpln.address"))). + LeftJoin(goqu.L("validators AS v"), goqu.On(goqu.L("m.pubkey = v.pubkey"))). + Where(goqu.L("node_deposit_balance IS NOT NULL")). + Where(goqu.L("user_deposit_balance IS NOT NULL")). + LeftJoin(goqu.L("users_val_dashboards_validators uvdv"), goqu.On(goqu.L("uvdv.validator_index = v.validatorindex"))). + Where(goqu.L("uvdv.dashboard_id = ?", dashboardId)) + + query, args, err := ds.Prepared(true).ToSQL() + if err != nil { + return fmt.Errorf("error preparing query: %w", err) + } + + err = d.alloyReader.GetContext(ctx, &queryResult, query, args...) + if err != nil { + return fmt.Errorf("error retrieving rocketpool validators data: %w", err) + } + + if !rpNetworkStats.EffectiveRPLStaked.IsZero() && !queryResult.EffectiveRPLStake.IsZero() && !rpNetworkStats.NodeOperatorRewards.IsZero() && rpNetworkStats.ClaimIntervalHours > 0 { + share := queryResult.EffectiveRPLStake.Div(rpNetworkStats.EffectiveRPLStaked) + + periodsPerYear := decimal.NewFromFloat(365 / (rpNetworkStats.ClaimIntervalHours / 24)) + data.RplApr = rpNetworkStats.NodeOperatorRewards. + Mul(share). + Div(queryResult.RPLStake). + Mul(periodsPerYear). + Mul(decimal.NewFromInt(100)).InexactFloat64() + } + return nil + }) + + retrieveApr := func(hours int, apr *float64) { + eg.Go(func() error { + _, elApr, _, clApr, err := d.internal_getElClAPR(ctx, wrappedDashboardId, -1, hours) + if err != nil { + return err + } + *apr = elApr + clApr + return nil + }) + } + + retrieveRewards := func(hours int, rewards *decimal.Decimal) { + eg.Go(func() error { + clRewards, _, elRewards, _, err := d.internal_getElClAPR(ctx, wrappedDashboardId, -1, hours) + if err != nil { + return err + } + *rewards = clRewards.Add(elRewards) + return nil + }) + } + + retrieveEfficiency := func(table string, efficiency *float64) { + eg.Go(func() error { + ds := goqu.Dialect("postgres"). + From(goqu.L(fmt.Sprintf(`%s AS r FINAL`, table))). + With("validators", goqu.L("(SELECT dashboard_id, validator_index FROM users_val_dashboards_validators WHERE dashboard_id = ?)", dashboardId)). 
+ Select( + goqu.L("COALESCE(SUM(r.attestations_reward)::decimal, 0) AS attestations_reward"), + goqu.L("COALESCE(SUM(r.attestations_ideal_reward)::decimal, 0) AS attestations_ideal_reward"), + goqu.L("COALESCE(SUM(r.blocks_proposed), 0) AS blocks_proposed"), + goqu.L("COALESCE(SUM(r.blocks_scheduled), 0) AS blocks_scheduled"), + goqu.L("COALESCE(SUM(r.sync_executed), 0) AS sync_executed"), + goqu.L("COALESCE(SUM(r.sync_scheduled), 0) AS sync_scheduled")). + InnerJoin(goqu.L("validators v"), goqu.On(goqu.L("r.validator_index = v.validator_index"))). + Where(goqu.L("r.validator_index IN (SELECT validator_index FROM validators)")) + + var queryResult struct { + AttestationReward decimal.Decimal `db:"attestations_reward"` + AttestationIdealReward decimal.Decimal `db:"attestations_ideal_reward"` + BlocksProposed uint64 `db:"blocks_proposed"` + BlocksScheduled uint64 `db:"blocks_scheduled"` + SyncExecuted uint64 `db:"sync_executed"` + SyncScheduled uint64 `db:"sync_scheduled"` + } + + query, args, err := ds.Prepared(true).ToSQL() + if err != nil { + return fmt.Errorf("error preparing query: %w", err) + } + + err = d.clickhouseReader.GetContext(ctx, &queryResult, query, args...) + if err != nil { + return err + } + + // Calculate efficiency + var attestationEfficiency, proposerEfficiency, syncEfficiency sql.NullFloat64 + if !queryResult.AttestationIdealReward.IsZero() { + attestationEfficiency.Float64 = queryResult.AttestationReward.Div(queryResult.AttestationIdealReward).InexactFloat64() + attestationEfficiency.Valid = true + } + if queryResult.BlocksScheduled > 0 { + proposerEfficiency.Float64 = float64(queryResult.BlocksProposed) / float64(queryResult.BlocksScheduled) + proposerEfficiency.Valid = true + } + if queryResult.SyncScheduled > 0 { + syncEfficiency.Float64 = float64(queryResult.SyncExecuted) / float64(queryResult.SyncScheduled) + syncEfficiency.Valid = true + } + *efficiency = utils.CalculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) + + return nil + }) + } + + retrieveRewards(24, &data.Last24hIncome) + retrieveRewards(7*24, &data.Last7dIncome) + retrieveApr(30*24, &data.Last30dApr) + retrieveEfficiency("validator_dashboard_data_rolling_30d", &data.Last30dEfficiency) + + err = eg.Wait() + + if err != nil { + return nil, fmt.Errorf("error retrieving validator dashboard overview data: %w", err) + } + + return &data, nil +} + +func (d *DataAccessService) getInternalRpNetworkStats(ctx context.Context) (*t.RPNetworkStats, error) { + var networkStats t.RPNetworkStats + err := d.alloyReader.GetContext(ctx, &networkStats, ` + SELECT + EXTRACT(EPOCH FROM claim_interval_time) / 3600 AS claim_interval_hours, + node_operator_rewards, + effective_rpl_staked, + rpl_price + FROM rocketpool_network_stats + ORDER BY ID + DESC + LIMIT 1 + `) + return &networkStats, err +} + +func (d *DataAccessService) GetValidatorDashboardMobileValidators(ctx context.Context, dashboardId t.VDBId, period enums.TimePeriod, cursor string, colSort t.Sort[enums.VDBMobileValidatorsColumn], search string, limit uint64) ([]t.MobileValidatorDashboardValidatorsTableRow, *t.Paging, error) { + return d.dummy.GetValidatorDashboardMobileValidators(ctx, dashboardId, period, cursor, colSort, search, limit) +} diff --git a/backend/pkg/api/data_access/networks.go b/backend/pkg/api/data_access/networks.go index df95fb008..30c0669ec 100644 --- a/backend/pkg/api/data_access/networks.go +++ b/backend/pkg/api/data_access/networks.go @@ -12,16 +12,19 @@ func (d *DataAccessService) GetAllNetworks() 
([]types.NetworkInfo, error) { return []types.NetworkInfo{ { - ChainId: 1, - Name: "ethereum", + ChainId: 1, + Name: "ethereum", + NotificationsName: "mainnet", }, { - ChainId: 100, - Name: "gnosis", + ChainId: 100, + Name: "gnosis", + NotificationsName: "gnosis", }, { - ChainId: 17000, - Name: "holesky", + ChainId: 17000, + Name: "holesky", + NotificationsName: "holesky", }, }, nil } diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index 4859b770c..4774fed0c 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -1,83 +1,2317 @@ package dataaccess import ( + "bytes" + "compress/gzip" "context" + "database/sql" + "encoding/gob" + "fmt" + "io" + "regexp" + "slices" + "sort" + "strconv" + "strings" + "sync" + "time" + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/params" + "github.com/go-redis/redis/v8" "github.com/gobitfly/beaconchain/pkg/api/enums" t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/gobitfly/beaconchain/pkg/notification" + n "github.com/gobitfly/beaconchain/pkg/notification" + "github.com/lib/pq" + "github.com/shopspring/decimal" + "golang.org/x/sync/errgroup" ) type NotificationsRepository interface { GetNotificationOverview(ctx context.Context, userId uint64) (*t.NotificationOverviewData, error) - GetDashboardNotifications(ctx context.Context, userId uint64, chainId uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) + GetDashboardNotifications(ctx context.Context, userId uint64, chainIds []uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) // depending on how notifications are implemented, we may need to use something other than `notificationId` for identifying the notification - GetValidatorDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationValidatorDashboardDetail, error) - GetAccountDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationAccountDashboardDetail, error) + GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64, search string) (*t.NotificationValidatorDashboardDetail, error) + GetAccountDashboardNotificationDetails(ctx context.Context, dashboardId uint64, groupId uint64, epoch uint64, search string) (*t.NotificationAccountDashboardDetail, error) GetMachineNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationMachinesColumn], search string, limit uint64) ([]t.NotificationMachinesTableRow, *t.Paging, error) GetClientNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationClientsColumn], search string, limit uint64) ([]t.NotificationClientsTableRow, *t.Paging, error) GetRocketPoolNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationRocketPoolColumn], search string, limit uint64) ([]t.NotificationRocketPoolTableRow, *t.Paging, error) - 
GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], search string, limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) + GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) GetNotificationSettings(ctx context.Context, userId uint64) (*t.NotificationSettings, error) + GetNotificationSettingsDefaultValues(ctx context.Context) (*t.NotificationSettingsDefaultValues, error) UpdateNotificationSettingsGeneral(ctx context.Context, userId uint64, settings t.NotificationSettingsGeneral) error UpdateNotificationSettingsNetworks(ctx context.Context, userId uint64, chainId uint64, settings t.NotificationSettingsNetwork) error - UpdateNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string, name string, IsNotificationsEnabled bool) error - DeleteNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string) error + GetPairedDeviceUserId(ctx context.Context, pairedDeviceId uint64) (uint64, error) + UpdateNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId uint64, name string, IsNotificationsEnabled bool) error + DeleteNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId uint64) error + UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) - UpdateNotificationSettingsValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error - UpdateNotificationSettingsAccountDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsAccountDashboard) error + UpdateNotificationSettingsValidatorDashboard(ctx context.Context, userId uint64, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error + UpdateNotificationSettingsAccountDashboard(ctx context.Context, userId uint64, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsAccountDashboard) error + + QueueTestEmailNotification(ctx context.Context, userId uint64) error + QueueTestPushNotification(ctx context.Context, userId uint64) error + QueueTestWebhookNotification(ctx context.Context, userId uint64, webhookUrl string, isDiscordWebhook bool) error +} + +func (*DataAccessService) registerNotificationInterfaceTypes() { + var once sync.Once + once.Do(func() { + gob.Register(&n.ValidatorProposalNotification{}) + gob.Register(&n.ValidatorUpcomingProposalNotification{}) + gob.Register(&n.ValidatorGroupEfficiencyNotification{}) + gob.Register(&n.ValidatorAttestationNotification{}) + gob.Register(&n.ValidatorIsOfflineNotification{}) + gob.Register(&n.ValidatorIsOnlineNotification{}) + gob.Register(&n.ValidatorGotSlashedNotification{}) + gob.Register(&n.ValidatorWithdrawalNotification{}) + gob.Register(&n.NetworkNotification{}) + gob.Register(&n.RocketpoolNotification{}) + gob.Register(&n.MonitorMachineNotification{}) + gob.Register(&n.TaxReportNotification{}) + gob.Register(&n.EthClientNotification{}) + gob.Register(&n.SyncCommitteeSoonNotification{}) + }) } +const ( + 
ValidatorDashboardEventPrefix string = "vdb" + AccountDashboardEventPrefix string = "adb" + + GroupEfficiencyBelowThresholdDefault float64 = 0.95 + MaxCollateralThresholdDefault float64 = 1.0 + MinCollateralThresholdDefault float64 = 0.2 + ERC20TokenTransfersValueThresholdDefault float64 = 0.1 + + MachineStorageUsageThresholdDefault float64 = 0.9 + MachineCpuUsageThresholdDefault float64 = 0.6 + MachineMemoryUsageThresholdDefault float64 = 0.8 + + GasAboveThresholdDefault float64 = 950 + GasBelowThresholdDefault float64 = 150 + ParticipationRateThresholdDefault float64 = 0.8 +) + func (d *DataAccessService) GetNotificationOverview(ctx context.Context, userId uint64) (*t.NotificationOverviewData, error) { - return d.dummy.GetNotificationOverview(ctx, userId) + response := t.NotificationOverviewData{} + eg := errgroup.Group{} + + // enabled channels + eg.Go(func() error { + var channels []struct { + Channel string `db:"channel"` + Active bool `db:"active"` + } + + err := d.userReader.SelectContext(ctx, &channels, `SELECT channel, active FROM users_notification_channels WHERE user_id = $1`, userId) + if err != nil { + return err + } + + for _, channel := range channels { + switch channel.Channel { + case "email": + response.IsEmailNotificationsEnabled = channel.Active + case "push": + response.IsPushNotificationsEnabled = channel.Active + } + } + return nil + }) + + // most notified groups + latestSlot, err := d.GetLatestSlot(ctx) + if err != nil { + return nil, err + } + epoch30dAgo := utils.TimeToEpoch(utils.EpochToTime(utils.EpochOfSlot(latestSlot)).Add(time.Duration(-30) * time.Hour * 24)) + getMostNotifiedGroups := func(historyTable, groupsTable string) ([3]string, error) { + query := goqu.Dialect("postgres"). + From(goqu.T(historyTable).As("history")). + Select( + goqu.I("history.dashboard_id"), + goqu.I("history.group_id"), + ). + Where( + goqu.Ex{"history.user_id": userId}, + goqu.I("history.epoch").Gt(epoch30dAgo), + ). + GroupBy( + goqu.I("history.dashboard_id"), + goqu.I("history.group_id"), + ). + Order( + goqu.L("COUNT(*)").Desc(), + ). + Limit(3) + + // join result with names + query = goqu.Dialect("postgres"). + Select(goqu.L("COALESCE(name, 'default') AS name")). + From(query.As("history")). + LeftJoin(goqu.I(groupsTable).As("groups"), goqu.On( + goqu.Ex{"groups.dashboard_id": goqu.I("history.dashboard_id")}, + goqu.Ex{"groups.id": goqu.I("history.group_id")}, + )) + + mostNotifiedGroups := [3]string{} + querySql, args, err := query.Prepared(true).ToSQL() + if err != nil { + return mostNotifiedGroups, fmt.Errorf("failed to prepare getMostNotifiedGroups query: %w", err) + } + res := []string{} + err = d.alloyReader.SelectContext(ctx, &res, querySql, args...) 
+ if err != nil { + return mostNotifiedGroups, fmt.Errorf("failed to execute getMostNotifiedGroups query: %w", err) + } + copy(mostNotifiedGroups[:], res) + return mostNotifiedGroups, err + } + + eg.Go(func() error { + var err error + response.VDBMostNotifiedGroups, err = getMostNotifiedGroups("users_val_dashboards_notifications_history", "users_val_dashboards_groups") + return err + }) + // TODO account dashboards + /*eg.Go(func() error { + var err error + response.VDBMostNotifiedGroups, err = getMostNotifiedGroups("users_acc_dashboards_notifications_history", "users_acc_dashboards_groups") + return err + })*/ + + // 24h counts + eg.Go(func() error { + var err error + day := time.Now().Truncate(utils.Day).Unix() + getMessageCount := func(prefix string) (uint64, error) { + key := fmt.Sprintf("%s:%d:%d", prefix, userId, day) + res := d.persistentRedisDbClient.Get(ctx, key) + if res.Err() == redis.Nil { + return 0, nil + } else if res.Err() != nil { + return 0, res.Err() + } + return res.Uint64() + } + response.Last24hEmailsCount, err = getMessageCount("n_mails") + if err != nil { + return err + } + response.Last24hPushCount, err = getMessageCount("n_push") + if err != nil { + return err + } + response.Last24hWebhookCount, err = getMessageCount("n_webhook") + return err + }) + + // subscription counts + eg.Go(func() error { + networks, err := d.GetAllNetworks() + if err != nil { + return err + } + + whereNetwork := "" + for _, network := range networks { + if len(whereNetwork) > 0 { + whereNetwork += " OR " + } + whereNetwork += "event_name like '" + network.NotificationsName + ":rocketpool_%' OR event_name like '" + network.NotificationsName + ":network_%'" + } + + query := goqu.Dialect("postgres"). + From("users_subscriptions"). + Select( + goqu.L("count(*) FILTER (WHERE event_filter like 'vdb:%')").As("vdb_subscriptions_count"), + goqu.L("count(*) FILTER (WHERE event_filter like 'adb:%')").As("adb_subscriptions_count"), + goqu.L("count(*) FILTER (WHERE event_name like 'monitoring_%')").As("machines_subscription_count"), + goqu.L("count(*) FILTER (WHERE event_name = 'eth_client_update')").As("clients_subscription_count"), + // not sure if there's a better way in goqu + goqu.L("count(*) FILTER (WHERE "+whereNetwork+")").As("networks_subscription_count"), + ). + Where(goqu.Ex{ + "user_id": userId, + }) + + querySql, args, err := query.Prepared(true).ToSQL() + if err != nil { + return err + } + + err = d.userReader.GetContext(ctx, &response, querySql, args...) 
+ return err + }) + + err = eg.Wait() + return &response, err } -func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userId uint64, chainId uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) { - return d.dummy.GetDashboardNotifications(ctx, userId, chainId, cursor, colSort, search, limit) + +func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userId uint64, chainIds []uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) { + response := []t.NotificationDashboardsTableRow{} + var err error + + var currentCursor t.NotificationsDashboardsCursor + if cursor != "" { + if currentCursor, err = utils.StringToCursor[t.NotificationsDashboardsCursor](cursor); err != nil { + return nil, nil, fmt.Errorf("failed to parse passed cursor as NotificationsDashboardsCursor: %w", err) + } + } + + // validator query + vdbQuery := goqu.Dialect("postgres"). + From(goqu.T("users_val_dashboards_notifications_history").As("uvdnh")). + Select( + goqu.L("false").As("is_account_dashboard"), + goqu.I("uvd.network").As("chain_id"), + goqu.I("uvdnh.epoch"), + goqu.I("uvd.id").As("dashboard_id"), + goqu.I("uvd.name").As("dashboard_name"), + goqu.I("uvdg.id").As("group_id"), + goqu.I("uvdg.name").As("group_name"), + goqu.SUM("uvdnh.event_count").As("entity_count"), + goqu.L("ARRAY_AGG(DISTINCT event_type)").As("event_types"), + ). + InnerJoin(goqu.T("users_val_dashboards").As("uvd"), goqu.On( + goqu.Ex{"uvd.id": goqu.I("uvdnh.dashboard_id")})). + InnerJoin(goqu.T("users_val_dashboards_groups").As("uvdg"), goqu.On( + goqu.Ex{"uvdg.id": goqu.I("uvdnh.group_id")}, + goqu.Ex{"uvdg.dashboard_id": goqu.I("uvd.id")}, + )). + Where( + goqu.Ex{"uvd.user_id": userId}, + ). + GroupBy( + goqu.I("uvdnh.epoch"), + goqu.I("uvd.network"), + goqu.I("uvd.id"), + goqu.I("uvdg.id"), + goqu.I("uvdg.name"), + ) + + if chainIds != nil { + vdbQuery = vdbQuery.Where( + goqu.L("uvd.network = ANY(?)", pq.Array(chainIds)), + ) + } + + // TODO account dashboards + /*adbQuery := goqu.Dialect("postgres"). + From(goqu.T("adb_notifications_history").As("anh")). + Select( + goqu.L("true").As("is_account_dashboard"), + goqu.I("anh.network").As("chain_id"), + goqu.I("anh.epoch"), + goqu.I("uad.id").As("dashboard_id"), + goqu.I("uad.name").As("dashboard_name"), + goqu.I("uadg.id").As("group_id"), + goqu.I("uadg.name").As("group_name"), + goqu.SUM("anh.event_count").As("entity_count"), + goqu.L("ARRAY_AGG(DISTINCT event_type)").As("event_types"), + ). + InnerJoin(goqu.T("users_acc_dashboards").As("uad"), goqu.On( + goqu.Ex{"uad.id": goqu.I("anh.dashboard_id"), + })). + InnerJoin(goqu.T("users_acc_dashboards_groups").As("uadg"), goqu.On( + goqu.Ex{"uadg.id": goqu.I("anh.group_id"), + goqu.Ex{"uadg.dashboard_id": goqu.I("uad.id")}, + })). + Where( + goqu.Ex{"uad.user_id": userId}, + goqu.L("anh.network = ANY(?)", pq.Array(chainIds)), + ). 
+ GroupBy( + goqu.I("anh.epoch"), + goqu.I("anh.network"), + goqu.I("uad.id"), + goqu.I("uadg.id"), + goqu.I("uadg.name"), + ) + + unionQuery := vdbQuery.Union(adbQuery)*/ + unionQuery := goqu.From(vdbQuery) + + // sorting + defaultColumns := []t.SortColumn{ + {Column: enums.NotificationsDashboardsColumns.Timestamp.ToExpr(), Desc: true, Offset: currentCursor.Epoch}, + {Column: enums.NotificationsDashboardsColumns.DashboardName.ToExpr(), Desc: false, Offset: currentCursor.DashboardName}, + {Column: enums.NotificationsDashboardsColumns.DashboardId.ToExpr(), Desc: false, Offset: currentCursor.DashboardId}, + {Column: enums.NotificationsDashboardsColumns.GroupName.ToExpr(), Desc: false, Offset: currentCursor.GroupName}, + {Column: enums.NotificationsDashboardsColumns.GroupId.ToExpr(), Desc: false, Offset: currentCursor.GroupId}, + {Column: enums.NotificationsDashboardsColumns.ChainId.ToExpr(), Desc: true, Offset: currentCursor.ChainId}, + } + order, directions := applySortAndPagination(defaultColumns, t.SortColumn{Column: colSort.Column.ToExpr(), Desc: colSort.Desc}, currentCursor.GenericCursor) + unionQuery = unionQuery.Order(order...) + if directions != nil { + unionQuery = unionQuery.Where(directions) + } + + // search + searchName := regexp.MustCompile(`^[a-zA-Z0-9_\-.\ ]+$`).MatchString(search) + if searchName { + searchLower := strings.ToLower(strings.Replace(search, "_", "\\_", -1)) + "%" + unionQuery = unionQuery.Where(exp.NewExpressionList( + exp.OrType, + goqu.L("LOWER(?)", goqu.I("dashboard_name")).Like(searchLower), + goqu.L("LOWER(?)", goqu.I("group_name")).Like(searchLower), + )) + } + unionQuery = unionQuery.Limit(uint(limit + 1)) + + query, args, err := unionQuery.ToSQL() + if err != nil { + return nil, nil, err + } + err = d.alloyReader.SelectContext(ctx, &response, query, args...) 
+ if err != nil { + return nil, nil, err + } + + moreDataFlag := len(response) > int(limit) + if moreDataFlag { + response = response[:len(response)-1] + } + if currentCursor.IsReverse() { + slices.Reverse(response) + } + if !moreDataFlag && !currentCursor.IsValid() { + // No paging required + return response, &t.Paging{}, nil + } + paging, err := utils.GetPagingFromData(response, currentCursor, moreDataFlag) + if err != nil { + return nil, nil, err + } + return response, paging, nil } -func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationValidatorDashboardDetail, error) { - return d.dummy.GetValidatorDashboardNotificationDetails(ctx, notificationId) +func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64, search string) (*t.NotificationValidatorDashboardDetail, error) { + notificationDetails := t.NotificationValidatorDashboardDetail{ + ValidatorOffline: []uint64{}, + ProposalMissed: []t.IndexSlots{}, + ProposalDone: []t.IndexBlocks{}, + UpcomingProposals: []t.IndexSlots{}, + Slashed: []uint64{}, + SyncCommittee: []uint64{}, + AttestationMissed: []t.IndexEpoch{}, + Withdrawal: []t.NotificationEventWithdrawal{}, + ValidatorOfflineReminder: []uint64{}, + ValidatorBackOnline: []t.NotificationEventValidatorBackOnline{}, + MinimumCollateralReached: []t.Address{}, + MaximumCollateralReached: []t.Address{}, + } + + var searchIndices []uint64 + // TODO move to api layer + searchIndicesStrings := strings.Split(search, ",") + for _, searchIndex := range searchIndicesStrings { + idx, err := strconv.Atoi(searchIndex) + if err == nil { + searchIndices = append(searchIndices, uint64(idx)) + } + } + + searchEnabled := len(searchIndices) > 0 + searchIndexSet := make(map[uint64]bool) + for _, searchIndex := range searchIndices { + searchIndexSet[searchIndex] = true + } + + // ------------------------------------- + // dashboard and group name + query := `SELECT + uvd.name AS dashboard_name, + uvdg.name AS group_name + FROM + users_val_dashboards uvd + INNER JOIN + users_val_dashboards_groups uvdg ON uvdg.dashboard_id = uvd.id + WHERE uvd.id = $1 AND uvdg.id = $2` + err := d.alloyReader.GetContext(ctx, ¬ificationDetails, query, dashboardId, groupId) + if err != nil { + if err == sql.ErrNoRows { + return ¬ificationDetails, nil + } + return nil, err + } + if notificationDetails.GroupName == "" { + notificationDetails.GroupName = t.DefaultGroupName + } + if notificationDetails.DashboardName == "" { + notificationDetails.DashboardName = t.DefaultDashboardName + } + + // ------------------------------------- + // retrieve notification events + eventTypesEncodedList := [][]byte{} + query = `SELECT details FROM users_val_dashboards_notifications_history WHERE dashboard_id = $1 AND group_id = $2 AND epoch = $3` + err = d.alloyReader.SelectContext(ctx, &eventTypesEncodedList, query, dashboardId, groupId, epoch) + if err != nil { + return nil, err + } + if len(eventTypesEncodedList) == 0 { + return ¬ificationDetails, nil + } + + latestBlocks, err := d.GetLatestBlockHeightsForEpoch(ctx, epoch) + if err != nil { + return nil, fmt.Errorf("error getting latest block height: %w", err) + } + + type ProposalInfo struct { + Proposed []uint64 + Scheduled []uint64 + Missed []uint64 + } + + proposalsInfo := make(map[t.VDBValidator]*ProposalInfo) + addressMapping := make(map[string]*t.Address) + contractStatusRequests := make([]db.ContractInteractionAtRequest, 0) 
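+ + // Each `details` value is assumed to hold a gzip-compressed, gob-encoded []types.Notification (the concrete types registered in registerNotificationInterfaceTypes), so the loop below unzips first and gob-decodes second. The write side presumably does the inverse, roughly: + // var buf bytes.Buffer + // gz := gzip.NewWriter(&buf) + // _ = gob.NewEncoder(gz).Encode(notifications) + // _ = gz.Close() // flush before persisting buf.Bytes()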
+ for _, eventTypesEncoded := range eventTypesEncodedList { + buf := bytes.NewBuffer(eventTypesEncoded) + gz, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + defer gz.Close() + + // might need to loop if we get memory issues + eventTypes, err := io.ReadAll(gz) + if err != nil { + return nil, err + } + + decoder := gob.NewDecoder(bytes.NewReader(eventTypes)) + + notifications := []types.Notification{} + err = decoder.Decode(¬ifications) + if err != nil { + return nil, err + } + + for _, notification := range notifications { + switch notification.GetEventName() { + case types.ValidatorMissedProposalEventName, types.ValidatorExecutedProposalEventName /*, types.ValidatorScheduledProposalEventName*/ : + // aggregate proposals + curNotification, ok := notification.(*n.ValidatorProposalNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorProposalNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + if _, ok := proposalsInfo[curNotification.ValidatorIndex]; !ok { + proposalsInfo[curNotification.ValidatorIndex] = &ProposalInfo{} + } + prop := proposalsInfo[curNotification.ValidatorIndex] + switch curNotification.Status { + case 0: + prop.Scheduled = append(prop.Scheduled, curNotification.Slot) + case 1: + prop.Proposed = append(prop.Proposed, curNotification.Block) + case 2: + prop.Missed = append(prop.Missed, curNotification.Slot) + } + case types.ValidatorMissedAttestationEventName: + curNotification, ok := notification.(*n.ValidatorAttestationNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorAttestationNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + if curNotification.Status != 0 { + continue + } + notificationDetails.AttestationMissed = append(notificationDetails.AttestationMissed, t.IndexEpoch{Index: curNotification.ValidatorIndex, Epoch: curNotification.Epoch}) + case types.ValidatorUpcomingProposalEventName: + curNotification, ok := notification.(*n.ValidatorUpcomingProposalNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorUpcomingProposalNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.UpcomingProposals = append(notificationDetails.UpcomingProposals, t.IndexSlots{Index: curNotification.ValidatorIndex, Slots: []uint64{curNotification.Slot}}) + case types.ValidatorGotSlashedEventName: + curNotification, ok := notification.(*n.ValidatorGotSlashedNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorGotSlashedNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.Slashed = append(notificationDetails.Slashed, curNotification.ValidatorIndex) + case types.ValidatorIsOfflineEventName: + curNotification, ok := notification.(*n.ValidatorIsOfflineNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorIsOfflineNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.ValidatorOffline = append(notificationDetails.ValidatorOffline, curNotification.ValidatorIndex) + // TODO not present in backend yet + //notificationDetails.ValidatorOfflineReminder = ... 
+ case types.ValidatorIsOnlineEventName: + curNotification, ok := notification.(*n.ValidatorIsOnlineNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorIsOnlineNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.ValidatorBackOnline = append(notificationDetails.ValidatorBackOnline, t.NotificationEventValidatorBackOnline{Index: curNotification.ValidatorIndex, EpochCount: curNotification.Epoch}) + case types.ValidatorReceivedWithdrawalEventName: + curNotification, ok := notification.(*n.ValidatorWithdrawalNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorWithdrawalNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + // incorrect formatting TODO rework the Address and ContractInteractionAtRequest types to use clear string formatting (or prob go-ethereum common.Address) + contractStatusRequests = append(contractStatusRequests, db.ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", curNotification.Address), + Block: int64(latestBlocks[curNotification.Slot%utils.Config.Chain.ClConfig.SlotsPerEpoch]), + TxIdx: -1, + TraceIdx: -1, + }) + addr := t.Address{Hash: t.Hash(hexutil.Encode(curNotification.Address))} + addressMapping[hexutil.Encode(curNotification.Address)] = &addr + notificationDetails.Withdrawal = append(notificationDetails.Withdrawal, t.NotificationEventWithdrawal{ + Index: curNotification.ValidatorIndex, + Amount: decimal.NewFromUint64(curNotification.Amount).Mul(decimal.NewFromFloat(params.GWei)), // Amounts have to be in WEI + Address: addr, + }) + case types.NetworkLivenessIncreasedEventName, + types.EthClientUpdateEventName, + types.MonitoringMachineOfflineEventName, + types.MonitoringMachineDiskAlmostFullEventName, + types.MonitoringMachineCpuLoadEventName, + types.MonitoringMachineMemoryUsageEventName, + types.TaxReportEventName: + // not vdb notifications, skip + case types.ValidatorDidSlashEventName: + case types.RocketpoolCommissionThresholdEventName, + types.RocketpoolNewClaimRoundStartedEventName: + // these could maybe returned later (?) 
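+ // unlike the validator events above, the collateral events below carry the Rocketpool node address in the event filter; it is resolved to a named display address once all events are collected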
+ case types.RocketpoolCollateralMinReachedEventName, types.RocketpoolCollateralMaxReachedEventName: + _, ok := notification.(*n.RocketpoolNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to RocketpoolNotification") + } + addr := t.Address{Hash: t.Hash(notification.GetEventFilter()), IsContract: true} + addressMapping[notification.GetEventFilter()] = &addr + if notification.GetEventName() == types.RocketpoolCollateralMinReachedEventName { + notificationDetails.MinimumCollateralReached = append(notificationDetails.MinimumCollateralReached, addr) + } else { + notificationDetails.MaximumCollateralReached = append(notificationDetails.MaximumCollateralReached, addr) + } + case types.SyncCommitteeSoonEventName: + curNotification, ok := notification.(*n.SyncCommitteeSoonNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to SyncCommitteeSoonNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.SyncCommittee = append(notificationDetails.SyncCommittee, curNotification.ValidatorIndex) + default: + log.Debugf("Unhandled notification type: %s", notification.GetEventName()) + } + } + } + + // fill proposals + for validatorIndex, proposalInfo := range proposalsInfo { + if len(proposalInfo.Proposed) > 0 { + notificationDetails.ProposalDone = append(notificationDetails.ProposalDone, t.IndexBlocks{Index: validatorIndex, Blocks: proposalInfo.Proposed}) + } + if len(proposalInfo.Scheduled) > 0 { + notificationDetails.UpcomingProposals = append(notificationDetails.UpcomingProposals, t.IndexSlots{Index: validatorIndex, Slots: proposalInfo.Scheduled}) + } + if len(proposalInfo.Missed) > 0 { + notificationDetails.ProposalMissed = append(notificationDetails.ProposalMissed, t.IndexSlots{Index: validatorIndex, Slots: proposalInfo.Missed}) + } + } + + // fill addresses + if err := d.GetNamesAndEnsForAddresses(ctx, addressMapping); err != nil { + return nil, err + } + contractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusRequests) + if err != nil { + return nil, err + } + contractStatusPerAddress := make(map[string]int) + for i, contractStatus := range contractStatusRequests { + contractStatusPerAddress["0x"+contractStatus.Address] = i + } + for i := range notificationDetails.MinimumCollateralReached { + if address, ok := addressMapping[string(notificationDetails.MinimumCollateralReached[i].Hash)]; ok { + notificationDetails.MinimumCollateralReached[i] = *address + } + } + for i := range notificationDetails.MaximumCollateralReached { + if address, ok := addressMapping[string(notificationDetails.MaximumCollateralReached[i].Hash)]; ok { + notificationDetails.MaximumCollateralReached[i] = *address + } + } + for i := range notificationDetails.Withdrawal { + if address, ok := addressMapping[string(notificationDetails.Withdrawal[i].Address.Hash)]; ok { + notificationDetails.Withdrawal[i].Address = *address + } + contractStatus := contractStatuses[contractStatusPerAddress[string(notificationDetails.Withdrawal[i].Address.Hash)]] + notificationDetails.Withdrawal[i].Address.IsContract = contractStatus == types.CONTRACT_CREATION || contractStatus == types.CONTRACT_PRESENT + } + + return ¬ificationDetails, nil } -func (d *DataAccessService) GetAccountDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationAccountDashboardDetail, error) { - return d.dummy.GetAccountDashboardNotificationDetails(ctx, notificationId) +func (d 
*DataAccessService) GetAccountDashboardNotificationDetails(ctx context.Context, dashboardId uint64, groupId uint64, epoch uint64, search string) (*t.NotificationAccountDashboardDetail, error) { + return d.dummy.GetAccountDashboardNotificationDetails(ctx, dashboardId, groupId, epoch, search) } func (d *DataAccessService) GetMachineNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationMachinesColumn], search string, limit uint64) ([]t.NotificationMachinesTableRow, *t.Paging, error) { - return d.dummy.GetMachineNotifications(ctx, userId, cursor, colSort, search, limit) + result := make([]t.NotificationMachinesTableRow, 0) + var paging t.Paging + + // Initialize the cursor + var currentCursor t.NotificationMachinesCursor + var err error + if cursor != "" { + currentCursor, err = utils.StringToCursor[t.NotificationMachinesCursor](cursor) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse passed cursor as NotificationMachinesCursor: %w", err) + } + } + + // ------------------------------------- + // Get the machine notification history + notificationHistory := []struct { + Epoch uint64 `db:"epoch"` + MachineId uint64 `db:"machine_id"` + MachineName string `db:"machine_name"` + EventType types.EventName `db:"event_type"` + EventThreshold float64 `db:"event_threshold"` + }{} + + ds := goqu.Dialect("postgres"). + Select( + goqu.L("epoch"), + goqu.L("machine_id"), + goqu.L("machine_name"), + goqu.L("event_type"), + goqu.L("event_threshold")). + From("machine_notifications_history"). + Where(goqu.L("user_id = ?", userId)). + Limit(uint(limit + 1)) + + // Search + if search != "" { + ds = ds.Where(goqu.L("machine_name ILIKE ?", search)) + } + + // Sorting and limiting if cursor is present + defaultColumns := []t.SortColumn{ + {Column: enums.NotificationsMachinesColumns.Timestamp.ToExpr(), Desc: true, Offset: currentCursor.Epoch}, + {Column: enums.NotificationsMachinesColumns.MachineId.ToExpr(), Desc: false, Offset: currentCursor.MachineId}, + {Column: enums.NotificationsMachinesColumns.EventType.ToExpr(), Desc: false, Offset: currentCursor.EventType}, + } + var offset interface{} + switch colSort.Column { + case enums.NotificationsMachinesColumns.MachineName: + offset = currentCursor.MachineName + case enums.NotificationsMachinesColumns.Threshold: + offset = currentCursor.EventThreshold + } + + order, directions := applySortAndPagination(defaultColumns, t.SortColumn{Column: colSort.Column.ToExpr(), Desc: colSort.Desc, Offset: offset}, currentCursor.GenericCursor) + ds = ds.Order(order...) + if directions != nil { + ds = ds.Where(directions) + } + + query, args, err := ds.Prepared(true).ToSQL() + if err != nil { + return nil, nil, fmt.Errorf("error preparing machine notifications query: %w", err) + } + + err = d.userReader.SelectContext(ctx, ¬ificationHistory, query, args...) 
+ if err != nil { + return nil, nil, fmt.Errorf(`error retrieving data for machine notifications: %w`, err) + } + + // ------------------------------------- + // Calculate the result + cursorData := notificationHistory + for _, notification := range notificationHistory { + resultEntry := t.NotificationMachinesTableRow{ + MachineName: notification.MachineName, + Threshold: notification.EventThreshold, + Timestamp: utils.EpochToTime(notification.Epoch).Unix(), + } + switch notification.EventType { + case types.MonitoringMachineOfflineEventName: + resultEntry.EventType = "offline" + case types.MonitoringMachineDiskAlmostFullEventName: + resultEntry.EventType = "storage" + case types.MonitoringMachineCpuLoadEventName: + resultEntry.EventType = "cpu" + case types.MonitoringMachineMemoryUsageEventName: + resultEntry.EventType = "memory" + default: + return nil, nil, fmt.Errorf("invalid event name for machine notification: %v", notification.EventType) + } + result = append(result, resultEntry) + } + + // ------------------------------------- + // Paging + + // Flag if above limit + moreDataFlag := len(result) > int(limit) + if !moreDataFlag && !currentCursor.IsValid() { + // No paging required + return result, &paging, nil + } + + // Remove the last entries from data + if moreDataFlag { + result = result[:limit] + cursorData = cursorData[:limit] + } + + if currentCursor.IsReverse() { + slices.Reverse(result) + slices.Reverse(cursorData) + } + + p, err := utils.GetPagingFromData(cursorData, currentCursor, moreDataFlag) + if err != nil { + return nil, nil, fmt.Errorf("failed to get paging: %w", err) + } + + return result, p, nil } func (d *DataAccessService) GetClientNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationClientsColumn], search string, limit uint64) ([]t.NotificationClientsTableRow, *t.Paging, error) { - return d.dummy.GetClientNotifications(ctx, userId, cursor, colSort, search, limit) + result := make([]t.NotificationClientsTableRow, 0) + var paging t.Paging + + // Initialize the cursor + var currentCursor t.NotificationClientsCursor + var err error + if cursor != "" { + currentCursor, err = utils.StringToCursor[t.NotificationClientsCursor](cursor) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse passed cursor as NotificationClientsCursor: %w", err) + } + } + + // ------------------------------------- + // Get the client notification history + notificationHistory := []struct { + Epoch uint64 `db:"epoch"` + Client string `db:"client"` + Version string `db:"client_version"` + Url string `db:"client_url"` + }{} + + ds := goqu.Dialect("postgres"). + Select( + goqu.L("epoch"), + goqu.L("client"), + goqu.L("client_version"), + goqu.L("client_url")). + From("client_notifications_history"). + Where(goqu.L("user_id = ?", userId)). + Limit(uint(limit + 1)) + + // Search + if search != "" { + ds = ds.Where(goqu.L("client ILIKE ?", search)) + } + + // Sorting and limiting if cursor is present + // Rows can be uniquely identified by (epoch, client) + defaultColumns := []t.SortColumn{ + {Column: enums.NotificationsClientsColumns.Timestamp.ToExpr(), Desc: true, Offset: currentCursor.Epoch}, + {Column: enums.NotificationsClientsColumns.ClientName.ToExpr(), Desc: false, Offset: currentCursor.Client}, + } + order, directions := applySortAndPagination(defaultColumns, t.SortColumn{Column: colSort.Column.ToExpr(), Desc: colSort.Desc}, currentCursor.GenericCursor) + ds = ds.Order(order...) 
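+ // `directions` holds the keyset-pagination predicate derived from the cursor offsets (nil when there is nothing to filter, e.g. on the first page); the same pattern is used by the other notification history queries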
+ if directions != nil { + ds = ds.Where(directions) + } + + query, args, err := ds.Prepared(true).ToSQL() + if err != nil { + return nil, nil, fmt.Errorf("error preparing client notifications query: %w", err) + } + + err = d.userReader.SelectContext(ctx, ¬ificationHistory, query, args...) + if err != nil { + return nil, nil, fmt.Errorf(`error retrieving data for client notifications: %w`, err) + } + + // ------------------------------------- + // Calculate the result + cursorData := notificationHistory + for _, notification := range notificationHistory { + resultEntry := t.NotificationClientsTableRow{ + ClientName: notification.Client, + Version: notification.Version, + Url: notification.Url, + Timestamp: utils.EpochToTime(notification.Epoch).Unix(), + } + result = append(result, resultEntry) + } + + // ------------------------------------- + // Paging + + // Flag if above limit + moreDataFlag := len(result) > int(limit) + if !moreDataFlag && !currentCursor.IsValid() { + // No paging required + return result, &paging, nil + } + + // Remove the last entries from data + if moreDataFlag { + result = result[:limit] + cursorData = cursorData[:limit] + } + + if currentCursor.IsReverse() { + slices.Reverse(result) + slices.Reverse(cursorData) + } + + p, err := utils.GetPagingFromData(cursorData, currentCursor, moreDataFlag) + if err != nil { + return nil, nil, fmt.Errorf("failed to get paging: %w", err) + } + + return result, p, nil } func (d *DataAccessService) GetRocketPoolNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationRocketPoolColumn], search string, limit uint64) ([]t.NotificationRocketPoolTableRow, *t.Paging, error) { return d.dummy.GetRocketPoolNotifications(ctx, userId, cursor, colSort, search, limit) + + // TODO: Adjust after db structure has been clarified + // result := make([]t.NotificationRocketPoolTableRow, 0) + // var paging t.Paging + + // // Initialize the cursor + // var currentCursor t.NotificationRocketPoolsCursor + // var err error + // if cursor != "" { + // currentCursor, err = utils.StringToCursor[t.NotificationRocketPoolsCursor](cursor) + // if err != nil { + // return nil, nil, fmt.Errorf("failed to parse passed cursor as NotificationRocketPoolsCursor: %w", err) + // } + // } + + // isReverseDirection := (colSort.Desc && !currentCursor.IsReverse()) || (!colSort.Desc && currentCursor.IsReverse()) + // sortSearchDirection := ">" + // if isReverseDirection { + // sortSearchDirection = "<" + // } + + // // ------------------------------------- + // // Get the machine notification history + // notificationHistory := []struct { + // Epoch uint64 `db:"epoch"` + // LastBlock int64 `db:"last_block"` + // EventType types.EventName `db:"event_type"` + // EventThreshold float64 `db:"event_threshold"` + // NodeAddress []byte `db:"node_address"` + // }{} + + // ds := goqu.Dialect("postgres"). + // Select( + // goqu.L("epoch"), + // goqu.L("last_block"), + // goqu.L("event_type"), + // goqu.L("event_threshold"), + // goqu.L("node_address")). + // From("rocketpool_notifications_history"). + // Where(goqu.L("user_id = ?", userId)). 
+ // Limit(uint(limit + 1)) + + // // Search + // if search != "" { + // if !utils.IsEth1Address(search) { + // // If search is not a valid address, return empty result + // return result, &paging, nil + // } + // nodeAddress, err := hexutil.Decode(search) + // if err != nil { + // return nil, nil, fmt.Errorf("failed to decode node address: %w", err) + // } + // ds = ds.Where(goqu.L("node_address = ?", nodeAddress)) + // } + + // // Sorting and limiting if cursor is present + // // Rows can be uniquely identified by (epoch, event_type, node_address) + // sortDirFunc := func(column string) exp.OrderedExpression { + // return goqu.I(column).Asc() + // } + // if isReverseDirection { + // sortDirFunc = func(column string) exp.OrderedExpression { + // return goqu.I(column).Desc() + // } + // } + // switch colSort.Column { + // case enums.NotificationRocketPoolColumns.Timestamp: + // if currentCursor.IsValid() { + // ds = ds.Where(goqu.Or( + // goqu.L(fmt.Sprintf("(epoch %s ?)", sortSearchDirection), currentCursor.Epoch), + // goqu.L(fmt.Sprintf("(epoch = ? AND event_type %s ?)", sortSearchDirection), currentCursor.Epoch, currentCursor.EventType), + // goqu.L(fmt.Sprintf("(epoch = ? AND event_type = ? AND node_address %s ?)", sortSearchDirection), currentCursor.Epoch, currentCursor.EventType, currentCursor.NodeAddress), + // )) + // } + // ds = ds.Order( + // sortDirFunc("epoch"), + // sortDirFunc("event_type"), + // sortDirFunc("node_address")) + // case enums.NotificationRocketPoolColumns.EventType: + // if currentCursor.IsValid() { + // ds = ds.Where(goqu.Or( + // goqu.L(fmt.Sprintf("(event_type %s ?)", sortSearchDirection), currentCursor.EventType), + // goqu.L(fmt.Sprintf("(event_type = ? AND epoch %s ?)", sortSearchDirection), currentCursor.EventType, currentCursor.Epoch), + // goqu.L(fmt.Sprintf("(event_type = ? AND epoch = ? AND node_address %s ?)", sortSearchDirection), currentCursor.EventType, currentCursor.Epoch, currentCursor.NodeAddress), + // )) + // } + // ds = ds.Order( + // sortDirFunc("event_type"), + // sortDirFunc("epoch"), + // sortDirFunc("node_address")) + // case enums.NotificationRocketPoolColumns.NodeAddress: + // if currentCursor.IsValid() { + // ds = ds.Where(goqu.Or( + // goqu.L(fmt.Sprintf("(node_address %s ?)", sortSearchDirection), currentCursor.NodeAddress), + // goqu.L(fmt.Sprintf("(node_address = ? AND epoch %s ?)", sortSearchDirection), currentCursor.NodeAddress, currentCursor.Epoch), + // goqu.L(fmt.Sprintf("(node_address = ? AND epoch = ? AND event_type %s ?)", sortSearchDirection), currentCursor.NodeAddress, currentCursor.Epoch, currentCursor.EventType), + // )) + // } + // ds = ds.Order( + // sortDirFunc("node_address"), + // sortDirFunc("epoch"), + // sortDirFunc("event_type")) + // default: + // return nil, nil, fmt.Errorf("invalid column for sorting of rocketpool notification history: %v", colSort.Column) + // } + + // query, args, err := ds.Prepared(true).ToSQL() + // if err != nil { + // return nil, nil, fmt.Errorf("error preparing rocketpool notifications query: %w", err) + // } + + // err = d.userReader.SelectContext(ctx, ¬ificationHistory, query, args...) 
+ // if err != nil { + // return nil, nil, fmt.Errorf(`error retrieving data for rocketpool notifications: %w`, err) + // } + + // // ------------------------------------- + // // Get the node address info + // addressMapping := make(map[string]*t.Address) + // contractStatusRequests := make([]db.ContractInteractionAtRequest, 0) + + // for _, notification := range notificationHistory { + // addressMapping[hexutil.Encode(notification.NodeAddress)] = nil + // contractStatusRequests = append(contractStatusRequests, db.ContractInteractionAtRequest{ + // Address: fmt.Sprintf("%x", notification.NodeAddress), + // Block: notification.LastBlock, + // TxIdx: -1, + // TraceIdx: -1, + // }) + // } + + // err = d.GetNamesAndEnsForAddresses(ctx, addressMapping) + // if err != nil { + // return nil, nil, err + // } + + // contractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusRequests) + // if err != nil { + // return nil, nil, err + // } + + // // ------------------------------------- + // // Calculate the result + // cursorData := notificationHistory + // for idx, notification := range notificationHistory { + // resultEntry := t.NotificationRocketPoolTableRow{ + // Timestamp: utils.EpochToTime(notification.Epoch).Unix(), + // Threshold: notification.EventThreshold, + // Node: *addressMapping[hexutil.Encode(notification.NodeAddress)], + // } + // resultEntry.Node.IsContract = contractStatuses[idx] == types.CONTRACT_CREATION || contractStatuses[idx] == types.CONTRACT_PRESENT + + // switch notification.EventType { + // case types.RocketpoolNewClaimRoundStartedEventName: + // resultEntry.EventType = "reward_round" + // case types.RocketpoolCollateralMinReachedEventName: + // resultEntry.EventType = "collateral_min" + // case types.RocketpoolCollateralMaxReachedEventName: + // resultEntry.EventType = "collateral_max" + // default: + // return nil, nil, fmt.Errorf("invalid event name for rocketpool notification: %v", notification.EventType) + // } + // result = append(result, resultEntry) + // } + + // // ------------------------------------- + // // Paging + + // // Flag if above limit + // moreDataFlag := len(result) > int(limit) + // if !moreDataFlag && !currentCursor.IsValid() { + // // No paging required + // return result, &paging, nil + // } + + // // Remove the last entries from data + // if moreDataFlag { + // result = result[:limit] + // cursorData = cursorData[:limit] + // } + + // if currentCursor.IsReverse() { + // slices.Reverse(result) + // slices.Reverse(cursorData) + // } + + // p, err := utils.GetPagingFromData(cursorData, currentCursor, moreDataFlag) + // if err != nil { + // return nil, nil, fmt.Errorf("failed to get paging: %w", err) + // } + + // return result, p, nil } -func (d *DataAccessService) GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], search string, limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) { - return d.dummy.GetNetworkNotifications(ctx, userId, cursor, colSort, search, limit) +func (d *DataAccessService) GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) { + result := make([]t.NotificationNetworksTableRow, 0) + var paging t.Paging + + // Initialize the cursor + var currentCursor t.NotificationNetworksCursor + var err error + if cursor != "" { + currentCursor, err = 
utils.StringToCursor[t.NotificationNetworksCursor](cursor) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse passed cursor as NotificationNetworksCursor: %w", err) + } + } + + // ------------------------------------- + // Get the network notification history + notificationHistory := []struct { + Epoch uint64 `db:"epoch"` + Network uint64 `db:"network"` + EventType types.EventName `db:"event_type"` + EventThreshold float64 `db:"event_threshold"` + }{} + + ds := goqu.Dialect("postgres"). + Select( + goqu.L("epoch"), + goqu.L("network"), + goqu.L("event_type"), + goqu.L("event_threshold")). + From("network_notifications_history"). + Where(goqu.L("user_id = ?", userId)). + Limit(uint(limit + 1)) + + // Sorting and limiting if cursor is present + // Rows can be uniquely identified by (epoch, network, event_type) + defaultColumns := []t.SortColumn{ + {Column: enums.NotificationNetworksColumns.Timestamp.ToExpr(), Desc: true, Offset: currentCursor.Epoch}, + {Column: enums.NotificationNetworksColumns.Network.ToExpr(), Desc: false, Offset: currentCursor.Network}, + {Column: enums.NotificationNetworksColumns.EventType.ToExpr(), Desc: false, Offset: currentCursor.EventType}, + } + order, directions := applySortAndPagination(defaultColumns, t.SortColumn{Column: colSort.Column.ToExpr(), Desc: colSort.Desc}, currentCursor.GenericCursor) + ds = ds.Order(order...) + if directions != nil { + ds = ds.Where(directions) + } + + query, args, err := ds.Prepared(true).ToSQL() + if err != nil { + return nil, nil, fmt.Errorf("error preparing network notifications query: %w", err) + } + + err = d.userReader.SelectContext(ctx, ¬ificationHistory, query, args...) + if err != nil { + return nil, nil, fmt.Errorf(`error retrieving data for network notifications: %w`, err) + } + + // ------------------------------------- + // Calculate the result + cursorData := notificationHistory + for _, notification := range notificationHistory { + resultEntry := t.NotificationNetworksTableRow{ + ChainId: notification.Network, + Timestamp: utils.EpochToTime(notification.Epoch).Unix(), + } + switch notification.EventType { + case types.NetworkGasAboveThresholdEventName: + resultEntry.EventType = "gas_above" + resultEntry.Threshold = decimal.NewFromFloat(notification.EventThreshold).Mul(decimal.NewFromInt(params.GWei)) + case types.NetworkGasBelowThresholdEventName: + resultEntry.EventType = "gas_below" + resultEntry.Threshold = decimal.NewFromFloat(notification.EventThreshold).Mul(decimal.NewFromInt(params.GWei)) + case types.NetworkParticipationRateThresholdEventName: + resultEntry.EventType = "participation_rate" + resultEntry.Threshold = decimal.NewFromFloat(notification.EventThreshold) + case types.RocketpoolNewClaimRoundStartedEventName: + resultEntry.EventType = "new_reward_round" + default: + return nil, nil, fmt.Errorf("invalid event name for network notification: %v", notification.EventType) + } + result = append(result, resultEntry) + } + + // ------------------------------------- + // Paging + + // Flag if above limit + moreDataFlag := len(result) > int(limit) + if !moreDataFlag && !currentCursor.IsValid() { + // No paging required + return result, &paging, nil + } + + // Remove the last entries from data + if moreDataFlag { + result = result[:limit] + cursorData = cursorData[:limit] + } + + if currentCursor.IsReverse() { + slices.Reverse(result) + slices.Reverse(cursorData) + } + + p, err := utils.GetPagingFromData(cursorData, currentCursor, moreDataFlag) + if err != nil { + return nil, nil, fmt.Errorf("failed 
to get paging: %w", err) + } + + return result, p, nil } func (d *DataAccessService) GetNotificationSettings(ctx context.Context, userId uint64) (*t.NotificationSettings, error) { - return d.dummy.GetNotificationSettings(ctx, userId) + wg := errgroup.Group{} + + // ------------------------------------- + // Create the default settings + result := &t.NotificationSettings{ + GeneralSettings: t.NotificationSettingsGeneral{ + MachineStorageUsageThreshold: MachineStorageUsageThresholdDefault, + MachineCpuUsageThreshold: MachineCpuUsageThresholdDefault, + MachineMemoryUsageThreshold: MachineMemoryUsageThresholdDefault, + }, + } + + // For networks + networks, err := d.GetAllNetworks() + if err != nil { + return nil, err + } + networksSettings := make(map[string]*t.NotificationNetwork, len(networks)) + for _, network := range networks { + networksSettings[network.NotificationsName] = &t.NotificationNetwork{ + ChainId: network.ChainId, + Settings: t.NotificationSettingsNetwork{ + GasAboveThreshold: decimal.NewFromFloat(GasAboveThresholdDefault).Mul(decimal.NewFromInt(params.GWei)), + GasBelowThreshold: decimal.NewFromFloat(GasBelowThresholdDefault).Mul(decimal.NewFromInt(params.GWei)), + ParticipationRateThreshold: ParticipationRateThresholdDefault, + }, + } + } + + // For clients + clients, err := d.GetAllClients() + if err != nil { + return nil, err + } + clientSettings := make(map[string]*t.NotificationSettingsClient, len(clients)) + for _, client := range clients { + clientSettings[client.DbName] = &t.NotificationSettingsClient{ + Id: client.Id, + Name: client.Name, + Category: client.Category, + } + } + + // ------------------------------------- + // Get the "do not disturb" setting + var doNotDisturbTimestamp sql.NullTime + wg.Go(func() error { + err := d.userReader.GetContext(ctx, &doNotDisturbTimestamp, ` + SELECT + notifications_do_not_disturb_ts + FROM users + WHERE id = $1`, userId) + if err != nil { + return fmt.Errorf(`error retrieving data for notifications "do not disturb" setting: %w`, err) + } + + return nil + }) + + // ------------------------------------- + // Get the notification channels + notificationChannels := []struct { + Channel types.NotificationChannel `db:"channel"` + Active bool `db:"active"` + }{} + wg.Go(func() error { + err := d.userReader.SelectContext(ctx, ¬ificationChannels, ` + SELECT + channel, + active + FROM users_notification_channels + WHERE user_id = $1`, userId) + if err != nil { + return fmt.Errorf(`error retrieving data for notifications channels: %w`, err) + } + + return nil + }) + + // ------------------------------------- + // Get the subscribed events + subscribedEvents := []struct { + Name types.EventName `db:"event_name"` + Filter string `db:"event_filter"` + Threshold float64 `db:"event_threshold"` + }{} + wg.Go(func() error { + err := d.userReader.SelectContext(ctx, &subscribedEvents, ` + SELECT + event_name, + event_filter, + event_threshold + FROM users_subscriptions + WHERE user_id = $1`, userId) + if err != nil { + return fmt.Errorf(`error retrieving data for notifications subscribed events: %w`, err) + } + + return nil + }) + + // ------------------------------------- + // Get the paired devices + pairedDevices := []struct { + DeviceId uint64 `db:"id"` + CreatedTs time.Time `db:"created_ts"` + DeviceName string `db:"device_name"` + NotifyEnabled bool `db:"notify_enabled"` + }{} + wg.Go(func() error { + err := d.userReader.SelectContext(ctx, &pairedDevices, ` + SELECT + id, + created_ts, + device_name, + COALESCE(notify_enabled, false) AS 
notify_enabled + FROM users_devices + WHERE user_id = $1`, userId) + if err != nil { + return fmt.Errorf(`error retrieving data for notifications paired devices: %w`, err) + } + + return nil + }) + + // ------------------------------------- + // Get the machines + hasMachines := false + wg.Go(func() error { + machineNames, err := db.BigtableClient.GetMachineMetricsMachineNames(types.UserId(userId)) + if err != nil { + return fmt.Errorf(`error retrieving data for notifications machine names: %w`, err) + } + if len(machineNames) > 0 { + hasMachines = true + } + return nil + }) + + err = wg.Wait() + if err != nil { + return nil, err + } + + // ------------------------------------- + // Fill the result + result.HasMachines = hasMachines + if doNotDisturbTimestamp.Valid { + result.GeneralSettings.DoNotDisturbTimestamp = doNotDisturbTimestamp.Time.Unix() + } + + for _, channel := range notificationChannels { + if channel.Channel == types.EmailNotificationChannel { + result.GeneralSettings.IsEmailNotificationsEnabled = channel.Active + } else if channel.Channel == types.PushNotificationChannel { + result.GeneralSettings.IsPushNotificationsEnabled = channel.Active + } + } + + for _, event := range subscribedEvents { + eventSplit := strings.Split(string(event.Name), ":") + + if len(eventSplit) == 2 { + networkName := eventSplit[0] + networkEvent := types.EventName(eventSplit[1]) + + if _, ok := networksSettings[networkName]; !ok { + return nil, fmt.Errorf("network is not defined: %s", networkName) + } + + switch networkEvent { + case types.RocketpoolNewClaimRoundStartedEventName: + networksSettings[networkName].Settings.IsNewRewardRoundSubscribed = true + case types.NetworkGasAboveThresholdEventName: + networksSettings[networkName].Settings.IsGasAboveSubscribed = true + networksSettings[networkName].Settings.GasAboveThreshold = decimal.NewFromFloat(event.Threshold).Mul(decimal.NewFromInt(params.GWei)) + case types.NetworkGasBelowThresholdEventName: + networksSettings[networkName].Settings.IsGasBelowSubscribed = true + networksSettings[networkName].Settings.GasBelowThreshold = decimal.NewFromFloat(event.Threshold).Mul(decimal.NewFromInt(params.GWei)) + case types.NetworkParticipationRateThresholdEventName: + networksSettings[networkName].Settings.IsParticipationRateSubscribed = true + networksSettings[networkName].Settings.ParticipationRateThreshold = event.Threshold + } + } else { + switch event.Name { + case types.MonitoringMachineOfflineEventName: + result.GeneralSettings.IsMachineOfflineSubscribed = true + case types.MonitoringMachineDiskAlmostFullEventName: + result.GeneralSettings.IsMachineStorageUsageSubscribed = true + result.GeneralSettings.MachineStorageUsageThreshold = event.Threshold + case types.MonitoringMachineCpuLoadEventName: + result.GeneralSettings.IsMachineCpuUsageSubscribed = true + result.GeneralSettings.MachineCpuUsageThreshold = event.Threshold + case types.MonitoringMachineMemoryUsageEventName: + result.GeneralSettings.IsMachineMemoryUsageSubscribed = true + result.GeneralSettings.MachineMemoryUsageThreshold = event.Threshold + case types.EthClientUpdateEventName: + if clientSettings[event.Filter] != nil { + clientSettings[event.Filter].IsSubscribed = true + } else { + log.Warnf("client %s is not found in the client settings", event.Filter) + } + } + } + } + + for _, settings := range networksSettings { + result.Networks = append(result.Networks, *settings) + } + + for _, device := range pairedDevices { + result.PairedDevices = append(result.PairedDevices, 
t.NotificationPairedDevice{ + Id: device.DeviceId, + PairedTimestamp: device.CreatedTs.Unix(), + Name: device.DeviceName, + IsNotificationsEnabled: device.NotifyEnabled, + }) + } + + for _, settings := range clientSettings { + result.Clients = append(result.Clients, *settings) + } + + // properly sort the responses + sort.Slice(result.Networks, func(i, j int) bool { // sort by chain id ascending + return result.Networks[i].ChainId < result.Networks[j].ChainId + }) + sort.Slice(result.Clients, func(i, j int) bool { // sort by client name ascending + return result.Clients[i].Name < result.Clients[j].Name + }) + sort.Slice(result.PairedDevices, func(i, j int) bool { // sort by paired timestamp descending + return result.PairedDevices[i].PairedTimestamp > result.PairedDevices[j].PairedTimestamp + }) + + return result, nil } + +func (d *DataAccessService) GetNotificationSettingsDefaultValues(ctx context.Context) (*t.NotificationSettingsDefaultValues, error) { + return &t.NotificationSettingsDefaultValues{ + GroupEfficiencyBelowThreshold: GroupEfficiencyBelowThresholdDefault, + MaxCollateralThreshold: MaxCollateralThresholdDefault, + MinCollateralThreshold: MinCollateralThresholdDefault, + ERC20TokenTransfersValueThreshold: ERC20TokenTransfersValueThresholdDefault, + + MachineStorageUsageThreshold: MachineStorageUsageThresholdDefault, + MachineCpuUsageThreshold: MachineCpuUsageThresholdDefault, + MachineMemoryUsageThreshold: MachineMemoryUsageThresholdDefault, + + GasAboveThreshold: decimal.NewFromFloat(GasAboveThresholdDefault).Mul(decimal.NewFromInt(params.GWei)), + GasBelowThreshold: decimal.NewFromFloat(GasBelowThresholdDefault).Mul(decimal.NewFromInt(params.GWei)), + + NetworkParticipationRateThreshold: ParticipationRateThresholdDefault, + }, nil +} + func (d *DataAccessService) UpdateNotificationSettingsGeneral(ctx context.Context, userId uint64, settings t.NotificationSettingsGeneral) error { - return d.dummy.UpdateNotificationSettingsGeneral(ctx, userId, settings) + epoch := utils.TimeToEpoch(time.Now()) + + var eventsToInsert []goqu.Record + var eventsToDelete []goqu.Expression + + tx, err := d.userWriter.BeginTxx(ctx, nil) + if err != nil { + return fmt.Errorf("error starting db transactions to update general notification settings: %w", err) + } + defer utils.Rollback(tx) + + // ------------------------------------- + // Set the "do not disturb" setting + _, err = tx.ExecContext(ctx, ` + UPDATE users + SET notifications_do_not_disturb_ts = + CASE + WHEN $1 = 0 THEN NULL + ELSE TO_TIMESTAMP($1) + END + WHERE id = $2`, settings.DoNotDisturbTimestamp, userId) + if err != nil { + return err + } + + // ------------------------------------- + // Set the notification channels + _, err = tx.ExecContext(ctx, ` + INSERT INTO users_notification_channels (user_id, channel, active) + VALUES ($1, $2, $3), ($1, $4, $5) + ON CONFLICT (user_id, channel) + DO UPDATE SET active = EXCLUDED.active`, + userId, types.EmailNotificationChannel, settings.IsEmailNotificationsEnabled, types.PushNotificationChannel, settings.IsPushNotificationsEnabled) + if err != nil { + return err + } + + // ------------------------------------- + // Collect the machine and rocketpool events to set and delete + + // Machine events + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineOfflineSubscribed, userId, types.MonitoringMachineOfflineEventName, "", "", epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineStorageUsageSubscribed, userId, 
types.MonitoringMachineDiskAlmostFullEventName, "", "", epoch, settings.MachineStorageUsageThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineCpuUsageSubscribed, userId, types.MonitoringMachineCpuLoadEventName, "", "", epoch, settings.MachineCpuUsageThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineMemoryUsageSubscribed, userId, types.MonitoringMachineMemoryUsageEventName, "", "", epoch, settings.MachineMemoryUsageThreshold) + + // Insert all the events or update the threshold if they already exist + if len(eventsToInsert) > 0 { + insertDs := goqu.Dialect("postgres"). + Insert("users_subscriptions"). + Cols("user_id", "event_name", "event_filter", "created_ts", "created_epoch", "event_threshold"). + Rows(eventsToInsert). + OnConflict(goqu.DoUpdate( + "user_id, event_name, event_filter", + goqu.Record{"event_threshold": goqu.L("EXCLUDED.event_threshold")}, + )) + + query, args, err := insertDs.Prepared(true).ToSQL() + if err != nil { + return fmt.Errorf("error preparing query: %v", err) + } + + _, err = tx.ExecContext(ctx, query, args...) + if err != nil { + return err + } + } + + // Delete all the events + if len(eventsToDelete) > 0 { + deleteDs := goqu.Dialect("postgres"). + Delete("users_subscriptions"). + Where(goqu.Or(eventsToDelete...)) + + query, args, err := deleteDs.Prepared(true).ToSQL() + if err != nil { + return fmt.Errorf("error preparing query: %v", err) + } + + _, err = tx.ExecContext(ctx, query, args...) + if err != nil { + return err + } + } + + err = tx.Commit() + if err != nil { + return fmt.Errorf("error committing tx to update general notification settings: %w", err) + } + return nil } func (d *DataAccessService) UpdateNotificationSettingsNetworks(ctx context.Context, userId uint64, chainId uint64, settings t.NotificationSettingsNetwork) error { - return d.dummy.UpdateNotificationSettingsNetworks(ctx, userId, chainId, settings) + epoch := utils.TimeToEpoch(time.Now()) + + networks, err := d.GetAllNetworks() + if err != nil { + return err + } + + networkName := "" + for _, network := range networks { + if network.ChainId == chainId { + networkName = network.NotificationsName + break + } + } + if networkName == "" { + return fmt.Errorf("network with chain id %d to update network notification settings not found", chainId) + } + + var eventsToInsert []goqu.Record + var eventsToDelete []goqu.Expression + + tx, err := d.userWriter.BeginTxx(ctx, nil) + if err != nil { + return fmt.Errorf("error starting db transactions to update network notification settings: %w", err) + } + defer utils.Rollback(tx) + + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGasAboveSubscribed, userId, types.NetworkGasAboveThresholdEventName, networkName, "", epoch, settings.GasAboveThreshold.Div(decimal.NewFromInt(params.GWei)).InexactFloat64()) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGasBelowSubscribed, userId, types.NetworkGasBelowThresholdEventName, networkName, "", epoch, settings.GasBelowThreshold.Div(decimal.NewFromInt(params.GWei)).InexactFloat64()) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsParticipationRateSubscribed, userId, types.NetworkParticipationRateThresholdEventName, networkName, "", epoch, settings.ParticipationRateThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsNewRewardRoundSubscribed, userId, types.RocketpoolNewClaimRoundStartedEventName, networkName, "", epoch, 0) + + // Insert all the events or update the threshold if 
they already exist + if len(eventsToInsert) > 0 { + insertDs := goqu.Dialect("postgres"). + Insert("users_subscriptions"). + Cols("user_id", "event_name", "event_filter", "created_ts", "created_epoch", "event_threshold"). + Rows(eventsToInsert). + OnConflict(goqu.DoUpdate( + "user_id, event_name, event_filter", + goqu.Record{"event_threshold": goqu.L("EXCLUDED.event_threshold")}, + )) + + query, args, err := insertDs.Prepared(true).ToSQL() + if err != nil { + return fmt.Errorf("error preparing query: %v", err) + } + + _, err = tx.ExecContext(ctx, query, args...) + if err != nil { + return err + } + } + + // Delete all the events + if len(eventsToDelete) > 0 { + deleteDs := goqu.Dialect("postgres"). + Delete("users_subscriptions"). + Where(goqu.Or(eventsToDelete...)) + + query, args, err := deleteDs.Prepared(true).ToSQL() + if err != nil { + return fmt.Errorf("error preparing query: %v", err) + } + + _, err = tx.ExecContext(ctx, query, args...) + if err != nil { + return err + } + } + + err = tx.Commit() + if err != nil { + return fmt.Errorf("error committing tx to update network notification settings: %w", err) + } + return nil } -func (d *DataAccessService) UpdateNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string, name string, IsNotificationsEnabled bool) error { - return d.dummy.UpdateNotificationSettingsPairedDevice(ctx, pairedDeviceId, name, IsNotificationsEnabled) + +func (d *DataAccessService) GetPairedDeviceUserId(ctx context.Context, pairedDeviceId uint64) (uint64, error) { + var userId uint64 + err := d.userReader.GetContext(ctx, &userId, ` + SELECT user_id + FROM users_devices + WHERE id = $1`, pairedDeviceId) + if err != nil { + if err == sql.ErrNoRows { + return 0, fmt.Errorf("%w, paired device with id %v not found", ErrNotFound, pairedDeviceId) + } + return 0, err + } + return userId, nil +} + +func (d *DataAccessService) UpdateNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId uint64, name string, IsNotificationsEnabled bool) error { + result, err := d.userWriter.ExecContext(ctx, ` + UPDATE users_devices + SET + device_name = $1, + notify_enabled = $2 + WHERE id = $3`, + name, IsNotificationsEnabled, pairedDeviceId) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected == 0 { + return fmt.Errorf("%w, paired device with id %v not found", ErrNotFound, pairedDeviceId) + } + return nil } -func (d *DataAccessService) DeleteNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string) error { - return d.dummy.DeleteNotificationSettingsPairedDevice(ctx, pairedDeviceId) + +func (d *DataAccessService) DeleteNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId uint64) error { + result, err := d.userWriter.ExecContext(ctx, ` + DELETE FROM users_devices + WHERE id = $1`, + pairedDeviceId) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected == 0 { + return fmt.Errorf("%w, paired device with id %v not found", ErrNotFound, pairedDeviceId) + } + return nil +} + +func (d *DataAccessService) UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) { + result := &t.NotificationSettingsClient{Id: clientId, IsSubscribed: IsSubscribed} + + var clientInfo *t.ClientInfo + + clients, err := d.GetAllClients() + if err != nil { + return nil, err + } + for _, client := 
+ +func (d *DataAccessService) UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) { + result := &t.NotificationSettingsClient{Id: clientId, IsSubscribed: IsSubscribed} + + var clientInfo *t.ClientInfo + + clients, err := d.GetAllClients() + if err != nil { + return nil, err + } + for _, client := range clients { + if client.Id == clientId { + clientInfo = &client + break + } + } + if clientInfo == nil { + return nil, fmt.Errorf("client with id %d to update client notification settings not found", clientId) + } + + if IsSubscribed { + _, err = d.userWriter.ExecContext(ctx, ` + INSERT INTO users_subscriptions (user_id, event_name, event_filter, created_ts, created_epoch) + VALUES ($1, $2, $3, NOW(), $4) + ON CONFLICT (user_id, event_name, event_filter) + DO NOTHING`, + userId, types.EthClientUpdateEventName, clientInfo.DbName, utils.TimeToEpoch(time.Now())) + } else { + _, err = d.userWriter.ExecContext(ctx, `DELETE FROM users_subscriptions WHERE user_id = $1 AND event_name = $2 AND event_filter = $3`, + userId, types.EthClientUpdateEventName, clientInfo.DbName) + } + if err != nil { + return nil, err + } + + result.Name = clientInfo.Name + result.Category = clientInfo.Category + + return result, nil } func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) { - return d.dummy.GetNotificationSettingsDashboards(ctx, userId, cursor, colSort, search, limit) + result := make([]t.NotificationSettingsDashboardsTableRow, 0) + var paging t.Paging + + wg := errgroup.Group{} + + // Initialize the cursor + var currentCursor t.NotificationSettingsCursor + var err error + if cursor != "" { + currentCursor, err = utils.StringToCursor[t.NotificationSettingsCursor](cursor) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse passed cursor as NotificationSettingsCursor: %w", err) + } + } + + isReverseDirection := (colSort.Desc && !currentCursor.IsReverse()) || (!colSort.Desc && currentCursor.IsReverse()) + + // ------------------------------------- + // Get the events + events := []struct { + Name types.EventName `db:"event_name"` + Filter string `db:"event_filter"` + Threshold float64 `db:"event_threshold"` + }{} + wg.Go(func() error { + err := d.userReader.SelectContext(ctx, &events, ` + SELECT + event_name, + event_filter, + event_threshold + FROM users_subscriptions + WHERE user_id = $1`, userId) + if err != nil { + return fmt.Errorf(`error retrieving event subscriptions for dashboard notifications: %w`, err) + } + + return nil + }) + + // ------------------------------------- + // Get the validator dashboards + valDashboards := []struct { + DashboardId uint64 `db:"dashboard_id"` + DashboardName string `db:"dashboard_name"` + GroupId uint64 `db:"group_id"` + GroupName string `db:"group_name"` + Network uint64 `db:"network"` + WebhookUrl sql.NullString `db:"webhook_target"` + WebhookFormat sql.NullString `db:"webhook_format"` + }{} + wg.Go(func() error { + err := d.alloyReader.SelectContext(ctx, &valDashboards, ` + SELECT + d.id AS dashboard_id, + d.name AS dashboard_name, + g.id AS group_id, + g.name AS group_name, + d.network, + g.webhook_target, + g.webhook_format + FROM users_val_dashboards d + INNER JOIN users_val_dashboards_groups g ON d.id = g.dashboard_id + WHERE d.user_id = $1`, userId) + if err != nil { + return fmt.Errorf(`error retrieving data for validator dashboard notifications: %w`, err) + } + + return nil + }) + + // ------------------------------------- + // Get the account dashboards + accDashboards := []struct { + DashboardId uint64 `db:"dashboard_id"` + DashboardName string `db:"dashboard_name"` + GroupId uint64 `db:"group_id"` + GroupName string `db:"group_name"` +
WebhookUrl sql.NullString `db:"webhook_target"` + WebhookFormat sql.NullString `db:"webhook_format"` + IsIgnoreSpamTransactionsEnabled bool `db:"ignore_spam_transactions"` + SubscribedChainIds []uint64 `db:"subscribed_chain_ids"` + }{} + // TODO: Account dashboard handling will be handled later + // wg.Go(func() error { + // err := d.alloyReader.SelectContext(ctx, &accDashboards, ` + // SELECT + // d.id AS dashboard_id, + // d.name AS dashboard_name, + // g.id AS group_id, + // g.name AS group_name, + // g.webhook_target, + // g.webhook_format, + // g.ignore_spam_transactions, + // g.subscribed_chain_ids + // FROM users_acc_dashboards d + // INNER JOIN users_acc_dashboards_groups g ON d.id = g.dashboard_id + // WHERE d.user_id = $1`, userId) + // if err != nil { + // return fmt.Errorf(`error retrieving data for validator dashboard notifications: %w`, err) + // } + + // return nil + // }) + + err = wg.Wait() + if err != nil { + return nil, nil, fmt.Errorf("error retrieving dashboard notification data: %w", err) + } + + // ------------------------------------- + // Evaluate the data + resultMap := make(map[string]*t.NotificationSettingsDashboardsTableRow) + + for _, event := range events { + eventFilterSplit := strings.Split(event.Filter, ":") + if len(eventFilterSplit) != 3 { + continue + } + dashboardType := eventFilterSplit[0] + + eventNameSplit := strings.Split(string(event.Name), ":") + if len(eventNameSplit) != 2 && dashboardType == ValidatorDashboardEventPrefix { + return nil, nil, fmt.Errorf("invalid event name formatting for val dashboard notification: expected {network:event_name}, got %v", event.Name) + } + + eventName := event.Name + if len(eventNameSplit) == 2 { + eventName = types.EventName(eventNameSplit[1]) + } + + if _, ok := resultMap[event.Filter]; !ok { + if dashboardType == ValidatorDashboardEventPrefix { + resultMap[event.Filter] = &t.NotificationSettingsDashboardsTableRow{ + Settings: t.NotificationSettingsValidatorDashboard{ + GroupEfficiencyBelowThreshold: GroupEfficiencyBelowThresholdDefault, + MaxCollateralThreshold: MaxCollateralThresholdDefault, + MinCollateralThreshold: MinCollateralThresholdDefault, + }, + } + } else if dashboardType == AccountDashboardEventPrefix { + resultMap[event.Filter] = &t.NotificationSettingsDashboardsTableRow{ + Settings: t.NotificationSettingsAccountDashboard{ + ERC20TokenTransfersValueThreshold: ERC20TokenTransfersValueThresholdDefault, + }, + } + } + } + + switch settings := resultMap[event.Filter].Settings.(type) { + case t.NotificationSettingsValidatorDashboard: + switch eventName { + case types.ValidatorIsOfflineEventName: + settings.IsValidatorOfflineSubscribed = true + case types.ValidatorGroupEfficiencyEventName: + settings.IsGroupEfficiencyBelowSubscribed = true + settings.GroupEfficiencyBelowThreshold = event.Threshold + case types.ValidatorMissedAttestationEventName: + settings.IsAttestationsMissedSubscribed = true + case types.ValidatorMissedProposalEventName, types.ValidatorExecutedProposalEventName: + settings.IsBlockProposalSubscribed = true + case types.ValidatorUpcomingProposalEventName: + settings.IsUpcomingBlockProposalSubscribed = true + case types.SyncCommitteeSoon: + settings.IsSyncSubscribed = true + case types.ValidatorReceivedWithdrawalEventName: + settings.IsWithdrawalProcessedSubscribed = true + case types.ValidatorGotSlashedEventName: + settings.IsSlashedSubscribed = true + case types.RocketpoolCollateralMinReachedEventName: + settings.IsMinCollateralSubscribed = true + settings.MinCollateralThreshold = 
event.Threshold + case types.RocketpoolCollateralMaxReachedEventName: + settings.IsMaxCollateralSubscribed = true + settings.MaxCollateralThreshold = event.Threshold + } + resultMap[event.Filter].Settings = settings + case t.NotificationSettingsAccountDashboard: + switch eventName { + case types.IncomingTransactionEventName: + settings.IsIncomingTransactionsSubscribed = true + case types.OutgoingTransactionEventName: + settings.IsOutgoingTransactionsSubscribed = true + case types.ERC20TokenTransferEventName: + settings.IsERC20TokenTransfersSubscribed = true + settings.ERC20TokenTransfersValueThreshold = event.Threshold + case types.ERC721TokenTransferEventName: + settings.IsERC721TokenTransfersSubscribed = true + case types.ERC1155TokenTransferEventName: + settings.IsERC1155TokenTransfersSubscribed = true + } + resultMap[event.Filter].Settings = settings + } + } + + // Validator dashboards + for _, valDashboard := range valDashboards { + key := fmt.Sprintf("%s:%d:%d", ValidatorDashboardEventPrefix, valDashboard.DashboardId, valDashboard.GroupId) + + if _, ok := resultMap[key]; !ok { + resultMap[key] = &t.NotificationSettingsDashboardsTableRow{ + Settings: t.NotificationSettingsValidatorDashboard{ + GroupEfficiencyBelowThreshold: GroupEfficiencyBelowThresholdDefault, + MaxCollateralThreshold: MaxCollateralThresholdDefault, + MinCollateralThreshold: MinCollateralThresholdDefault, + }, + } + } + + // Set general info + resultMap[key].IsAccountDashboard = false + resultMap[key].DashboardId = valDashboard.DashboardId + resultMap[key].DashboardName = valDashboard.DashboardName + resultMap[key].GroupId = valDashboard.GroupId + resultMap[key].GroupName = valDashboard.GroupName + resultMap[key].ChainIds = []uint64{valDashboard.Network} + + // Set the settings + if valSettings, ok := resultMap[key].Settings.(t.NotificationSettingsValidatorDashboard); ok { + valSettings.WebhookUrl = valDashboard.WebhookUrl.String + valSettings.IsWebhookDiscordEnabled = valDashboard.WebhookFormat.Valid && + types.NotificationChannel(valDashboard.WebhookFormat.String) == types.WebhookDiscordNotificationChannel + + resultMap[key].Settings = valSettings + } + } + + // Account dashboards + for _, accDashboard := range accDashboards { + key := fmt.Sprintf("%s:%d:%d", AccountDashboardEventPrefix, accDashboard.DashboardId, accDashboard.GroupId) + + if _, ok := resultMap[key]; !ok { + resultMap[key] = &t.NotificationSettingsDashboardsTableRow{ + Settings: t.NotificationSettingsAccountDashboard{ + ERC20TokenTransfersValueThreshold: ERC20TokenTransfersValueThresholdDefault, + }, + } + } + + // Set general info + resultMap[key].IsAccountDashboard = true + resultMap[key].DashboardId = accDashboard.DashboardId + resultMap[key].DashboardName = accDashboard.DashboardName + resultMap[key].GroupId = accDashboard.GroupId + resultMap[key].GroupName = accDashboard.GroupName + resultMap[key].ChainIds = accDashboard.SubscribedChainIds + + // Set the settings + if accSettings, ok := resultMap[key].Settings.(t.NotificationSettingsAccountDashboard); ok { + accSettings.WebhookUrl = accDashboard.WebhookUrl.String + accSettings.IsWebhookDiscordEnabled = accDashboard.WebhookFormat.Valid && + types.NotificationChannel(accDashboard.WebhookFormat.String) == types.WebhookDiscordNotificationChannel + accSettings.IsIgnoreSpamTransactionsEnabled = accDashboard.IsIgnoreSpamTransactionsEnabled + accSettings.SubscribedChainIds = accDashboard.SubscribedChainIds + + resultMap[key].Settings = accSettings + } + } + + // Apply filter + if search != "" { + 
lowerSearch := strings.ToLower(search) + for key, resultEntry := range resultMap { + if !strings.HasPrefix(strings.ToLower(resultEntry.DashboardName), lowerSearch) && + !strings.HasPrefix(strings.ToLower(resultEntry.GroupName), lowerSearch) { + delete(resultMap, key) + } + } + } + + // Convert to a slice for sorting and paging + for _, resultEntry := range resultMap { + result = append(result, *resultEntry) + } + + // ------------------------------------- + // Sort + // Each row is uniquely defined by the dashboardId, groupId, and isAccountDashboard so the sort order is DashboardName/GroupName => DashboardId => GroupId => IsAccountDashboard + var primarySortParam func(resultEntry t.NotificationSettingsDashboardsTableRow) string + switch colSort.Column { + case enums.NotificationSettingsDashboardColumns.DashboardName: + primarySortParam = func(resultEntry t.NotificationSettingsDashboardsTableRow) string { return resultEntry.DashboardName } + case enums.NotificationSettingsDashboardColumns.GroupName: + primarySortParam = func(resultEntry t.NotificationSettingsDashboardsTableRow) string { return resultEntry.GroupName } + default: + return nil, nil, fmt.Errorf("invalid sort column for notification subscriptions: %v", colSort.Column) + } + sort.Slice(result, func(i, j int) bool { + if isReverseDirection { + if primarySortParam(result[i]) == primarySortParam(result[j]) { + if result[i].DashboardId == result[j].DashboardId { + if result[i].GroupId == result[j].GroupId { + return result[i].IsAccountDashboard + } + return result[i].GroupId > result[j].GroupId + } + return result[i].DashboardId > result[j].DashboardId + } + return primarySortParam(result[i]) > primarySortParam(result[j]) + } else { + if primarySortParam(result[i]) == primarySortParam(result[j]) { + if result[i].DashboardId == result[j].DashboardId { + if result[i].GroupId == result[j].GroupId { + return result[j].IsAccountDashboard + } + return result[i].GroupId < result[j].GroupId + } + return result[i].DashboardId < result[j].DashboardId + } + return primarySortParam(result[i]) < primarySortParam(result[j]) + } + }) + + // ------------------------------------- + // Paging + + // Find the index for the cursor and limit the data + if currentCursor.IsValid() { + for idx, row := range result { + if row.DashboardId == currentCursor.DashboardId && + row.GroupId == currentCursor.GroupId && + row.IsAccountDashboard == currentCursor.IsAccountDashboard { + result = result[idx+1:] + break + } + } + } + + // Flag if above limit + moreDataFlag := len(result) > int(limit) + if !moreDataFlag && !currentCursor.IsValid() { + // No paging required + return result, &paging, nil + } + + // Remove the last entries from data + if moreDataFlag { + result = result[:limit] + } + + if currentCursor.IsReverse() { + slices.Reverse(result) + } + + p, err := utils.GetPagingFromData(result, currentCursor, moreDataFlag) + if err != nil { + return nil, nil, fmt.Errorf("failed to get paging: %w", err) + } + + return result, p, nil +} +func (d *DataAccessService) UpdateNotificationSettingsValidatorDashboard(ctx context.Context, userId uint64, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error { + // For the given dashboardId and groupId update users_subscriptions and users_val_dashboards_groups with the given settings + epoch := utils.TimeToEpoch(time.Now()) + + var eventsToInsert []goqu.Record + var eventsToDelete []goqu.Expression + + // Get the network for the validator dashboard + var chainId uint64 + err := 
d.alloyReader.GetContext(ctx, &chainId, `SELECT network FROM users_val_dashboards WHERE id = $1 AND user_id = $2`, dashboardId, userId) + if err != nil { + return fmt.Errorf("error getting network for validator dashboard: %w", err) + } + + networks, err := d.GetAllNetworks() + if err != nil { + return err + } + + networkName := "" + for _, network := range networks { + if network.ChainId == chainId { + networkName = network.NotificationsName + break + } + } + if networkName == "" { + return fmt.Errorf("network with chain id %d to update validator dashboard notification settings not found", chainId) + } + + // Add and remove the events in users_subscriptions + tx, err := d.userWriter.BeginTxx(ctx, nil) + if err != nil { + return fmt.Errorf("error starting db transaction to update validator dashboard notification settings: %w", err) + } + defer utils.Rollback(tx) + + eventFilter := fmt.Sprintf("%s:%d:%d", ValidatorDashboardEventPrefix, dashboardId, groupId) + + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsValidatorOfflineSubscribed, userId, types.ValidatorIsOfflineEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGroupEfficiencyBelowSubscribed, userId, types.ValidatorGroupEfficiencyEventName, networkName, eventFilter, epoch, settings.GroupEfficiencyBelowThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsAttestationsMissedSubscribed, userId, types.ValidatorMissedAttestationEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsUpcomingBlockProposalSubscribed, userId, types.ValidatorUpcomingProposalEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsSyncSubscribed, userId, types.SyncCommitteeSoon, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsWithdrawalProcessedSubscribed, userId, types.ValidatorReceivedWithdrawalEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsSlashedSubscribed, userId, types.ValidatorGotSlashedEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMaxCollateralSubscribed, userId, types.RocketpoolCollateralMaxReachedEventName, networkName, eventFilter, epoch, settings.MaxCollateralThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMinCollateralSubscribed, userId, types.RocketpoolCollateralMinReachedEventName, networkName, eventFilter, epoch, settings.MinCollateralThreshold) + // Set two events for IsBlockProposalSubscribed + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsBlockProposalSubscribed, userId, types.ValidatorMissedProposalEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsBlockProposalSubscribed, userId, types.ValidatorExecutedProposalEventName, networkName, eventFilter, epoch, 0) +
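AddOrRemoveEvent (defined near the end of this file) prefixes the event name with the network ("<network>:<event_name>") and routes every toggle into either eventsToInsert or eventsToDelete. On the unsubscribe side, each entry is a goqu.Ex that gets OR-ed into a single DELETE. A standalone sketch of the SQL this produces; the event names and the "vdb" filter prefix are illustrative stand-ins for the real constants:

    package main

    import (
        "fmt"

        "github.com/doug-martin/goqu/v9"
        _ "github.com/doug-martin/goqu/v9/dialect/postgres" // registers the "postgres" dialect
    )

    func main() {
        // two unsubscribed events for group 0 of dashboard 5,
        // filter built like fmt.Sprintf("%s:%d:%d", prefix, dashboardId, groupId)
        eventsToDelete := []goqu.Expression{
            goqu.Ex{"user_id": 7, "event_name": "mainnet:validator_is_offline", "event_filter": "vdb:5:0"},
            goqu.Ex{"user_id": 7, "event_name": "mainnet:validator_got_slashed", "event_filter": "vdb:5:0"},
        }
        deleteDs := goqu.Dialect("postgres").
            Delete("users_subscriptions").
            Where(goqu.Or(eventsToDelete...))
        query, args, err := deleteDs.Prepared(true).ToSQL()
        if err != nil {
            panic(err)
        }
        // prints roughly: DELETE FROM "users_subscriptions" WHERE
        // ((("event_filter" = $1) AND ("event_name" = $2) AND ("user_id" = $3)) OR (...))
        fmt.Println(query, args)
    }
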
+ // Insert all the events or update the threshold if they already exist + if len(eventsToInsert) > 0 { + insertDs := goqu.Dialect("postgres"). + Insert("users_subscriptions"). + Cols("user_id", "event_name", "event_filter", "created_ts", "created_epoch", "event_threshold"). + Rows(eventsToInsert). + OnConflict(goqu.DoUpdate( + "user_id, event_name, event_filter", + goqu.Record{"event_threshold": goqu.L("EXCLUDED.event_threshold")}, + )) + + query, args, err := insertDs.Prepared(true).ToSQL() + if err != nil { + return fmt.Errorf("error preparing query: %w", err) + } + + _, err = tx.ExecContext(ctx, query, args...) + if err != nil { + return err + } + } + + // Delete all the events + if len(eventsToDelete) > 0 { + deleteDs := goqu.Dialect("postgres"). + Delete("users_subscriptions"). + Where(goqu.Or(eventsToDelete...)) + + query, args, err := deleteDs.Prepared(true).ToSQL() + if err != nil { + return fmt.Errorf("error preparing query: %w", err) + } + + _, err = tx.ExecContext(ctx, query, args...) + if err != nil { + return err + } + } + + err = tx.Commit() + if err != nil { + return fmt.Errorf("error committing tx to update validator dashboard notification settings: %w", err) + } + + // Set non-event settings + var webhookFormat sql.NullString + if settings.WebhookUrl != "" { + webhookFormat.String = string(types.WebhookNotificationChannel) + webhookFormat.Valid = true + if settings.IsWebhookDiscordEnabled { + webhookFormat.String = string(types.WebhookDiscordNotificationChannel) + } + } + + _, err = d.alloyWriter.ExecContext(ctx, ` + UPDATE users_val_dashboards_groups + SET + webhook_target = NULLIF($1, ''), + webhook_format = $2 + WHERE dashboard_id = $3 AND id = $4`, settings.WebhookUrl, webhookFormat, dashboardId, groupId) + if err != nil { + return err + } + + return nil +}
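Two NULL paths make the webhook update above work: NULLIF($1, '') maps an empty URL to NULL for webhook_target, and a zero-valued sql.NullString binds as NULL for webhook_format. A sketch of the format selection under that convention; the literal strings stand in for types.WebhookNotificationChannel and types.WebhookDiscordNotificationChannel:

    package main

    import (
        "database/sql"
        "fmt"
    )

    // webhookFormatFor mirrors the selection above: empty URL -> NULL format,
    // otherwise the plain webhook channel or its discord variant.
    func webhookFormatFor(webhookUrl string, isDiscordEnabled bool) sql.NullString {
        var webhookFormat sql.NullString // zero value binds as SQL NULL
        if webhookUrl != "" {
            webhookFormat.String = "webhook" // stand-in for types.WebhookNotificationChannel
            webhookFormat.Valid = true
            if isDiscordEnabled {
                webhookFormat.String = "webhook_discord" // stand-in for the discord channel constant
            }
        }
        return webhookFormat
    }

    func main() {
        fmt.Println(webhookFormatFor("", false))                        // invalid -> stored as NULL
        fmt.Println(webhookFormatFor("https://example.com/hook", true)) // {webhook_discord true}
    }
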
+func (d *DataAccessService) UpdateNotificationSettingsAccountDashboard(ctx context.Context, userId uint64, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsAccountDashboard) error { + // TODO: Account dashboard handling will be handled later + // // For the given dashboardId and groupId update users_subscriptions and users_acc_dashboards_groups with the given settings + // epoch := utils.TimeToEpoch(time.Now()) + + // var eventsToInsert []goqu.Record + // var eventsToDelete []goqu.Expression + + // tx, err := d.userWriter.BeginTxx(ctx, nil) + // if err != nil { + // return fmt.Errorf("error starting db transaction to update account dashboard notification settings: %w", err) + // } + // defer utils.Rollback(tx) + + // eventFilter := fmt.Sprintf("%s:%d:%d", AccountDashboardEventPrefix, dashboardId, groupId) + + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsIncomingTransactionsSubscribed, userId, types.IncomingTransactionEventName, "", eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsOutgoingTransactionsSubscribed, userId, types.OutgoingTransactionEventName, "", eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC20TokenTransfersSubscribed, userId, types.ERC20TokenTransferEventName, "", eventFilter, epoch, settings.ERC20TokenTransfersValueThreshold) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC721TokenTransfersSubscribed, userId, types.ERC721TokenTransferEventName, "", eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC1155TokenTransfersSubscribed, userId, types.ERC1155TokenTransferEventName, "", eventFilter, epoch, 0) + + // // Insert all the events or update the threshold if they already exist + // if len(eventsToInsert) > 0 { + // insertDs := goqu.Dialect("postgres"). + // Insert("users_subscriptions"). + // Cols("user_id", "event_name", "event_filter", "created_ts", "created_epoch", "event_threshold"). + // Rows(eventsToInsert). + // OnConflict(goqu.DoUpdate( + // "user_id, event_name, event_filter", + // goqu.Record{"event_threshold": goqu.L("EXCLUDED.event_threshold")}, + // )) + + // query, args, err := insertDs.Prepared(true).ToSQL() + // if err != nil { + // return fmt.Errorf("error preparing query: %w", err) + // } + + // _, err = tx.ExecContext(ctx, query, args...) + // if err != nil { + // return err + // } + // } + + // // Delete all the events + // if len(eventsToDelete) > 0 { + // deleteDs := goqu.Dialect("postgres"). + // Delete("users_subscriptions"). + // Where(goqu.Or(eventsToDelete...)) + + // query, args, err := deleteDs.Prepared(true).ToSQL() + // if err != nil { + // return fmt.Errorf("error preparing query: %w", err) + // } + + // _, err = tx.ExecContext(ctx, query, args...) + // if err != nil { + // return err + // } + // } + + // err = tx.Commit() + // if err != nil { + // return fmt.Errorf("error committing tx to update account dashboard notification settings: %w", err) + // } + + // // Set non-event settings + // var webhookFormat sql.NullString + // if settings.WebhookUrl != "" { + // webhookFormat.String = string(types.WebhookNotificationChannel) + // webhookFormat.Valid = true + // if settings.IsWebhookDiscordEnabled { + // webhookFormat.String = string(types.WebhookDiscordNotificationChannel) + // } + // } + + // _, err = d.alloyWriter.ExecContext(ctx, ` + // UPDATE users_acc_dashboards_groups + // SET + // webhook_target = NULLIF($1, ''), + // webhook_format = $2, + // ignore_spam_transactions = $3, + // subscribed_chain_ids = $4 + // WHERE dashboard_id = $5 AND id = $6`, settings.WebhookUrl, webhookFormat, settings.IsIgnoreSpamTransactionsEnabled, settings.SubscribedChainIds, dashboardId, groupId) + // if err != nil { + // return err + // } + + return d.dummy.UpdateNotificationSettingsAccountDashboard(ctx, userId, dashboardId, groupId, settings) +} + +func (d *DataAccessService) AddOrRemoveEvent(eventsToInsert *[]goqu.Record, eventsToDelete *[]goqu.Expression, isSubscribed bool, userId uint64, eventName types.EventName, network, eventFilter string, epoch int64, threshold float64) { + fullEventName := string(eventName) + if network != "" { + fullEventName = fmt.Sprintf("%s:%s", network, eventName) + } + + if isSubscribed { + event := goqu.Record{"user_id": userId, "event_name": fullEventName, "event_filter": eventFilter, "created_ts": goqu.L("NOW()"), "created_epoch": epoch, "event_threshold": threshold} + *eventsToInsert = append(*eventsToInsert, event) + } else { + *eventsToDelete = append(*eventsToDelete, goqu.Ex{"user_id": userId, "event_name": fullEventName, "event_filter": eventFilter}) + } +} + +func (d *DataAccessService) QueueTestEmailNotification(ctx context.Context, userId uint64) error { + return notification.SendTestEmail(ctx, types.UserId(userId), d.userReader) } -func (d *DataAccessService) UpdateNotificationSettingsValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error { - return d.dummy.UpdateNotificationSettingsValidatorDashboard(ctx, dashboardId, groupId, settings) +func (d *DataAccessService) QueueTestPushNotification(ctx context.Context, userId uint64) error { + return notification.QueueTestPushNotification(ctx, types.UserId(userId), d.userReader, d.readerDb) } -func (d *DataAccessService) UpdateNotificationSettingsAccountDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsAccountDashboard) error { 
- return d.dummy.UpdateNotificationSettingsAccountDashboard(ctx, dashboardId, groupId, settings) +func (d *DataAccessService) QueueTestWebhookNotification(ctx context.Context, userId uint64, webhookUrl string, isDiscordWebhook bool) error { + return notification.SendTestWebhookNotification(ctx, types.UserId(userId), webhookUrl, isDiscordWebhook) } diff --git a/backend/pkg/api/data_access/ratelimit.go b/backend/pkg/api/data_access/ratelimit.go new file mode 100644 index 000000000..8c17c5d0f --- /dev/null +++ b/backend/pkg/api/data_access/ratelimit.go @@ -0,0 +1,22 @@ +package dataaccess + +import ( + "context" + + "github.com/gobitfly/beaconchain/pkg/api/types" +) + +type RatelimitRepository interface { + GetApiWeights(ctx context.Context) ([]types.ApiWeightItem, error) + // TODO @patrick: move queries from commons/ratelimit/ratelimit.go to here +} + +func (d *DataAccessService) GetApiWeights(ctx context.Context) ([]types.ApiWeightItem, error) { + var result []types.ApiWeightItem + err := d.userReader.SelectContext(ctx, &result, ` + SELECT bucket, endpoint, method, weight + FROM api_weights + WHERE valid_from <= NOW() + `) + return result, err +} diff --git a/backend/pkg/api/data_access/user.go b/backend/pkg/api/data_access/user.go index 598f83995..62e689ed4 100644 --- a/backend/pkg/api/data_access/user.go +++ b/backend/pkg/api/data_access/user.go @@ -4,10 +4,10 @@ import ( "context" "database/sql" "fmt" - "math" "time" t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -259,417 +259,15 @@ func (d *DataAccessService) GetUserIdByResetHash(ctx context.Context, hash strin } func (d *DataAccessService) GetUserInfo(ctx context.Context, userId uint64) (*t.UserInfo, error) { - // TODO @patrick post-beta improve and unmock - userInfo := &t.UserInfo{ - Id: userId, - ApiKeys: []string{}, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 10, - UnitsPerMonth: 10, - ApiKeys: 4, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - Subscriptions: []t.UserSubscription{}, - } - - productSummary, err := d.GetProductSummary(ctx) - if err != nil { - return nil, fmt.Errorf("error getting productSummary: %w", err) - } - - result := struct { - Email string `db:"email"` - UserGroup string `db:"user_group"` - }{} - err = d.userReader.GetContext(ctx, &result, `SELECT email, COALESCE(user_group, '') as user_group FROM users WHERE id = $1`, userId) - if err != nil { - return nil, fmt.Errorf("error getting userEmail for user %v: %w", userId, err) - } - userInfo.Email = result.Email - userInfo.UserGroup = result.UserGroup - - userInfo.Email = utils.CensorEmail(userInfo.Email) - - err = d.userReader.SelectContext(ctx, &userInfo.ApiKeys, `SELECT api_key FROM api_keys WHERE user_id = $1`, userId) - if err != nil && err != sql.ErrNoRows { - return nil, fmt.Errorf("error getting userApiKeys for user %v: %w", userId, err) - } - - premiumProduct := struct { - ProductId string `db:"product_id"` - Store string `db:"store"` - Start time.Time `db:"start"` - End time.Time `db:"end"` - }{} - err = d.userReader.GetContext(ctx, &premiumProduct, ` - SELECT - COALESCE(uas.product_id, '') AS product_id, - COALESCE(uas.store, '') AS store, - COALESCE(to_timestamp((uss.payload->>'current_period_start')::bigint),uas.created_at) AS start, - COALESCE(to_timestamp((uss.payload->>'current_period_end')::bigint),uas.expires_at) AS 
end - FROM users_app_subscriptions uas - LEFT JOIN users_stripe_subscriptions uss ON uss.subscription_id = uas.subscription_id - WHERE uas.user_id = $1 AND uas.active = true AND product_id IN ('orca.yearly', 'orca', 'dolphin.yearly', 'dolphin', 'guppy.yearly', 'guppy', 'whale', 'goldfish', 'plankton') - ORDER BY CASE uas.product_id - WHEN 'orca.yearly' THEN 1 - WHEN 'orca' THEN 2 - WHEN 'dolphin.yearly' THEN 3 - WHEN 'dolphin' THEN 4 - WHEN 'guppy.yearly' THEN 5 - WHEN 'guppy' THEN 6 - WHEN 'whale' THEN 7 - WHEN 'goldfish' THEN 8 - WHEN 'plankton' THEN 9 - ELSE 10 -- For any other product_id values - END, uas.id DESC - LIMIT 1`, userId) - if err != nil { - if err != sql.ErrNoRows { - return nil, fmt.Errorf("error getting premiumProduct for userId %v: %w", userId, err) - } - premiumProduct.ProductId = "premium_free" - premiumProduct.Store = "" - } - - foundProduct := false - for _, p := range productSummary.PremiumProducts { - effectiveProductId := premiumProduct.ProductId - productName := p.ProductName - switch premiumProduct.ProductId { - case "whale": - effectiveProductId = "dolphin" - productName = "Whale" - case "goldfish": - effectiveProductId = "guppy" - productName = "Goldfish" - case "plankton": - effectiveProductId = "guppy" - productName = "Plankton" - } - if p.ProductIdMonthly == effectiveProductId || p.ProductIdYearly == effectiveProductId { - userInfo.PremiumPerks = p.PremiumPerks - foundProduct = true - - store := t.ProductStoreStripe - switch premiumProduct.Store { - case "ios-appstore": - store = t.ProductStoreIosAppstore - case "android-playstore": - store = t.ProductStoreAndroidPlaystore - case "ethpool": - store = t.ProductStoreEthpool - case "manuall": - store = t.ProductStoreCustom - } - - if effectiveProductId != "premium_free" { - userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ - ProductId: premiumProduct.ProductId, - ProductName: productName, - ProductCategory: t.ProductCategoryPremium, - ProductStore: store, - Start: premiumProduct.Start.Unix(), - End: premiumProduct.End.Unix(), - }) - } - break - } - } - if !foundProduct { - return nil, fmt.Errorf("product %s not found", premiumProduct.ProductId) - } - - premiumAddons := []struct { - PriceId string `db:"price_id"` - Start time.Time `db:"start"` - End time.Time `db:"end"` - Quantity int `db:"quantity"` - }{} - err = d.userReader.SelectContext(ctx, &premiumAddons, ` - SELECT - price_id, - to_timestamp((uss.payload->>'current_period_start')::bigint) AS start, - to_timestamp((uss.payload->>'current_period_end')::bigint) AS end, - COALESCE((uss.payload->>'quantity')::int,1) AS quantity - FROM users_stripe_subscriptions uss - INNER JOIN users u ON u.stripe_customer_id = uss.customer_id - WHERE u.id = $1 AND uss.active = true AND uss.purchase_group = 'addon'`, userId) - if err != nil { - return nil, fmt.Errorf("error getting premiumAddons for userId %v: %w", userId, err) - } - for _, addon := range premiumAddons { - foundAddon := false - for _, p := range productSummary.ExtraDashboardValidatorsPremiumAddon { - if p.StripePriceIdMonthly == addon.PriceId || p.StripePriceIdYearly == addon.PriceId { - foundAddon = true - for i := 0; i < addon.Quantity; i++ { - userInfo.PremiumPerks.ValidatorsPerDashboard += p.ExtraDashboardValidators - userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ - ProductId: utils.PriceIdToProductId(addon.PriceId), - ProductName: p.ProductName, - ProductCategory: t.ProductCategoryPremiumAddon, - ProductStore: t.ProductStoreStripe, - Start: 
addon.Start.Unix(), - End: addon.End.Unix(), - }) - } - } - } - if !foundAddon { - return nil, fmt.Errorf("addon not found: %v", addon.PriceId) - } - } - - if productSummary.ValidatorsPerDashboardLimit < userInfo.PremiumPerks.ValidatorsPerDashboard { - userInfo.PremiumPerks.ValidatorsPerDashboard = productSummary.ValidatorsPerDashboardLimit - } - - return userInfo, nil -} - -const hour uint64 = 3600 -const day = 24 * hour -const week = 7 * day -const month = 30 * day -const fullHistory uint64 = 9007199254740991 // 2^53-1 (max int in JS) - -var freeTierProduct t.PremiumProduct = t.PremiumProduct{ - ProductName: "Free", - PremiumPerks: t.PremiumPerks{ - AdFree: false, - ValidatorDashboards: 1, - ValidatorsPerDashboard: 20, - ValidatorGroupsPerDashboard: 1, - ShareCustomDashboards: false, - ManageDashboardViaApi: false, - BulkAdding: false, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 0, - Hourly: 12 * hour, - Daily: 0, - Weekly: 0, - }, - EmailNotificationsPerDay: 5, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 1, - WebhookEndpoints: 1, - MobileAppCustomThemes: false, - MobileAppWidget: false, - MonitorMachines: 1, - MachineMonitoringHistorySeconds: 3600 * 3, - CustomMachineAlerts: false, - }, - PricePerMonthEur: 0, - PricePerYearEur: 0, - ProductIdMonthly: "premium_free", - ProductIdYearly: "premium_free.yearly", + return db.GetUserInfo(ctx, userId, d.userReader) } func (d *DataAccessService) GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { - // TODO @patrick post-beta put into db instead of hardcoding here and make it configurable - return &t.ProductSummary{ - ValidatorsPerDashboardLimit: 102_000, - StripePublicKey: utils.Config.Frontend.Stripe.PublicKey, - ApiProducts: []t.ApiProduct{ // TODO @patrick post-beta this data is not final yet - { - ProductId: "api_free", - ProductName: "Free", - PricePerMonthEur: 0, - PricePerYearEur: 0 * 12, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 10, - UnitsPerMonth: 10_000_000, - ApiKeys: 2, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "iron", - ProductName: "Iron", - PricePerMonthEur: 1.99, - PricePerYearEur: math.Floor(1.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 20, - UnitsPerMonth: 20_000_000, - ApiKeys: 10, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "silver", - ProductName: "Silver", - PricePerMonthEur: 2.99, - PricePerYearEur: math.Floor(2.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 30, - UnitsPerMonth: 100_000_000, - ApiKeys: 20, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "gold", - ProductName: "Gold", - PricePerMonthEur: 3.99, - PricePerYearEur: math.Floor(3.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 40, - UnitsPerMonth: 200_000_000, - ApiKeys: 40, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - }, - PremiumProducts: []t.PremiumProduct{ - freeTierProduct, - { - ProductName: "Guppy", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 1, - ValidatorsPerDashboard: 100, - ValidatorGroupsPerDashboard: 3, - ShareCustomDashboards: true, - ManageDashboardViaApi: false, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: day, - Hourly: 7 * 
day, - Daily: month, - Weekly: 0, - }, - EmailNotificationsPerDay: 15, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 3, - WebhookEndpoints: 3, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 2, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - CustomMachineAlerts: true, - }, - PricePerMonthEur: 9.99, - PricePerYearEur: 107.88, - ProductIdMonthly: "guppy", - ProductIdYearly: "guppy.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Guppy, - StripePriceIdYearly: utils.Config.Frontend.Stripe.GuppyYearly, - }, - { - ProductName: "Dolphin", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 2, - ValidatorsPerDashboard: 300, - ValidatorGroupsPerDashboard: 10, - ShareCustomDashboards: true, - ManageDashboardViaApi: false, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 5 * day, - Hourly: month, - Daily: 2 * month, - Weekly: 8 * week, - }, - EmailNotificationsPerDay: 20, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 10, - WebhookEndpoints: 10, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 10, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - CustomMachineAlerts: true, - }, - PricePerMonthEur: 29.99, - PricePerYearEur: 311.88, - ProductIdMonthly: "dolphin", - ProductIdYearly: "dolphin.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Dolphin, - StripePriceIdYearly: utils.Config.Frontend.Stripe.DolphinYearly, - }, - { - ProductName: "Orca", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 2, - ValidatorsPerDashboard: 1000, - ValidatorGroupsPerDashboard: 30, - ShareCustomDashboards: true, - ManageDashboardViaApi: true, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 3 * week, - Hourly: 6 * month, - Daily: 12 * month, - Weekly: fullHistory, - }, - EmailNotificationsPerDay: 50, - ConfigureNotificationsViaApi: true, - ValidatorGroupNotifications: 60, - WebhookEndpoints: 30, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 10, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - CustomMachineAlerts: true, - }, - PricePerMonthEur: 49.99, - PricePerYearEur: 479.88, - ProductIdMonthly: "orca", - ProductIdYearly: "orca.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Orca, - StripePriceIdYearly: utils.Config.Frontend.Stripe.OrcaYearly, - IsPopular: true, - }, - }, - ExtraDashboardValidatorsPremiumAddon: []t.ExtraDashboardValidatorsPremiumAddon{ - { - ProductName: "1k extra valis per dashboard", - ExtraDashboardValidators: 1000, - PricePerMonthEur: 74.99, - PricePerYearEur: 719.88, - ProductIdMonthly: "vdb_addon_1k", - ProductIdYearly: "vdb_addon_1k.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon1k, - StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon1kYearly, - }, - { - ProductName: "10k extra valis per dashboard", - ExtraDashboardValidators: 10000, - PricePerMonthEur: 449.99, - PricePerYearEur: 4319.88, - ProductIdMonthly: "vdb_addon_10k", - ProductIdYearly: "vdb_addon_10k.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon10k, - StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon10kYearly, - }, - }, - }, nil + return db.GetProductSummary(ctx) } func (d *DataAccessService) GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { - return &freeTierProduct.PremiumPerks, nil + return db.GetFreeTierPerks(ctx) } func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId 
uint64) (*t.UserDashboardsData, error) { @@ -764,7 +362,7 @@ func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64 err := wg.Wait() if err != nil { - return nil, fmt.Errorf("error retrieving user dashboards data: %v", err) + return nil, fmt.Errorf("error retrieving user dashboards data: %w", err) } // Fill the result diff --git a/backend/pkg/api/data_access/vdb.go b/backend/pkg/api/data_access/vdb.go index 1d38e5537..d15a3eb1a 100644 --- a/backend/pkg/api/data_access/vdb.go +++ b/backend/pkg/api/data_access/vdb.go @@ -28,7 +28,6 @@ type ValidatorDashboardRepository interface { GetValidatorDashboardGroupCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) GetValidatorDashboardGroupExists(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64) (bool, error) - GetValidatorDashboardExistingValidatorCount(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) (uint64, error) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) AddValidatorDashboardValidatorsByDepositAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) AddValidatorDashboardValidatorsByWithdrawalAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) @@ -77,6 +76,7 @@ type ValidatorDashboardRepository interface { GetValidatorDashboardRocketPool(ctx context.Context, dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBRocketPoolColumn], search string, limit uint64) ([]t.VDBRocketPoolTableRow, *t.Paging, error) GetValidatorDashboardTotalRocketPool(ctx context.Context, dashboardId t.VDBId, search string) (*t.VDBRocketPoolTableRow, error) - GetValidatorDashboardNodeRocketPool(ctx context.Context, dashboardId t.VDBId, node string) (*t.VDBNodeRocketPoolData, error) GetValidatorDashboardRocketPoolMinipools(ctx context.Context, dashboardId t.VDBId, node, cursor string, colSort t.Sort[enums.VDBRocketPoolMinipoolsColumn], search string, limit uint64) ([]t.VDBRocketPoolMinipoolsTableRow, *t.Paging, error) + + GetValidatorDashboardMobileWidget(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.MobileWidgetData, error) } diff --git a/backend/pkg/api/data_access/vdb_blocks.go b/backend/pkg/api/data_access/vdb_blocks.go index 9f699109a..e9df9496d 100644 --- a/backend/pkg/api/data_access/vdb_blocks.go +++ b/backend/pkg/api/data_access/vdb_blocks.go @@ -9,20 +9,31 @@ import ( "strings" "time" + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/gobitfly/beaconchain/pkg/api/enums" t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/cache" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/lib/pq" "github.com/shopspring/decimal" ) func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBBlocksColumn], search string, limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBBlocksTableRow, *t.Paging, error) { // @DATA-ACCESS incorporate protocolModes + + // ------------------------------------- + // Setup var err error var 
currentCursor t.BlocksCursor + validatorMapping, err := d.services.GetCurrentValidatorMapping() + if err != nil { + return nil, nil, err + } // TODO @LuccaBitfly move validation to handler? if cursor != "" { @@ -31,304 +42,279 @@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, das } } - // regexes taken from api handler common.go searchPubkey := regexp.MustCompile(`^0x[0-9a-fA-F]{96}$`).MatchString(search) searchGroup := regexp.MustCompile(`^[a-zA-Z0-9_\-.\ ]+$`).MatchString(search) searchIndex := regexp.MustCompile(`^[0-9]+$`).MatchString(search) - validatorMap := make(map[t.VDBValidator]bool) - params := []interface{}{} - filteredValidatorsQuery := "" - validatorMapping, err := d.services.GetCurrentValidatorMapping() - if err != nil { - return nil, nil, err + validators := goqu.T("validators") // could adapt data type to make handling as table/alias less confusing + blocks := goqu.T("blocks") + groups := goqu.T("groups") + + type validatorGroup struct { + Validator t.VDBValidator `db:"validator_index"` + Group uint64 `db:"group_id"` } - // determine validators of interest first - if dashboardId.Validators == nil { - // could also optimize this for the average and/or the whale case; will go with some middle-ground, needs testing - // (query validators twice: once without search applied (fast) to pre-filter scheduled proposals (which are sent to db, want to minimize), - // again for blocks query with search applied to not having to send potentially huge validator-list) - startTime := time.Now() - valis, err := d.getDashboardValidators(ctx, dashboardId, nil) - log.Debugf("=== getting validators took %s", time.Since(startTime)) - if err != nil { - return nil, nil, err - } - for _, v := range valis { - validatorMap[v] = true - } + // ------------------------------------- + // Goqu Query to determine validators filtered by search + var filteredValidatorsDs *goqu.SelectDataset + var filteredValidators []validatorGroup - // create a subquery to get the (potentially filtered) validators and their groups for later - params = append(params, dashboardId.Id) - selectStr := `SELECT validator_index, group_id ` - from := `FROM users_val_dashboards_validators validators ` - where := `WHERE validators.dashboard_id = $1` - extraConds := make([]string, 0, 3) + filteredValidatorsDs = goqu.Dialect("postgres"). + Select( + "validator_index", + ) + if dashboardId.Validators == nil { + filteredValidatorsDs = filteredValidatorsDs. + From(goqu.T("users_val_dashboards_validators").As(validators.GetTable())). + Where(validators.Col("dashboard_id").Eq(dashboardId.Id)) + // apply search filters + searches := []exp.Expression{} if searchIndex { - params = append(params, search) - extraConds = append(extraConds, fmt.Sprintf(`validator_index = $%d`, len(params))) + searches = append(searches, validators.Col("validator_index").Eq(search)) } if searchGroup { - from += `INNER JOIN users_val_dashboards_groups groups ON validators.dashboard_id = groups.dashboard_id AND validators.group_id = groups.id ` - // escape the psql single character wildcard "_"; apply prefix-search - params = append(params, strings.Replace(search, "_", "\\_", -1)+"%") - extraConds = append(extraConds, fmt.Sprintf(`LOWER(name) LIKE LOWER($%d)`, len(params))) + filteredValidatorsDs = filteredValidatorsDs. 
+ InnerJoin(goqu.T("users_val_dashboards_groups").As(groups), goqu.On( + validators.Col("group_id").Eq(groups.Col("id")), + validators.Col("dashboard_id").Eq(groups.Col("dashboard_id")), + )) + searches = append(searches, + goqu.L("LOWER(?)", groups.Col("name")).Like(strings.Replace(strings.ToLower(search), "_", "\\_", -1)+"%"), + ) + } + if searchPubkey { + index, ok := validatorMapping.ValidatorIndices[search] - if !ok && len(extraConds) == 0 { - // don't even need to query + if !ok && !searchGroup && !searchIndex { + // searched pubkey doesn't exist, don't even need to query anything return make([]t.VDBBlocksTableRow, 0), &t.Paging{}, nil } - params = append(params, index) - extraConds = append(extraConds, fmt.Sprintf(`validator_index = $%d`, len(params))) + searches = append(searches, + validators.Col("validator_index").Eq(index), + ) } - if len(extraConds) > 0 { - where += ` AND (` + strings.Join(extraConds, ` OR `) + `)` + if len(searches) > 0 { + filteredValidatorsDs = filteredValidatorsDs.Where(goqu.Or(searches...)) } - - filteredValidatorsQuery = selectStr + from + where } else { - validators := make([]t.VDBValidator, 0, len(dashboardId.Validators)) + validatorList := make([]t.VDBValidator, 0, len(dashboardId.Validators)) for _, validator := range dashboardId.Validators { if searchIndex && fmt.Sprint(validator) != search || searchPubkey && validator != validatorMapping.ValidatorIndices[search] { continue } - validatorMap[validator] = true - validators = append(validators, validator) + filteredValidators = append(filteredValidators, validatorGroup{ + Validator: validator, + Group: t.DefaultGroupId, + }) + validatorList = append(validatorList, validator) if searchIndex || searchPubkey { break } } - if len(validators) == 0 { - return make([]t.VDBBlocksTableRow, 0), &t.Paging{}, nil - } - params = append(params, validators) + filteredValidatorsDs = filteredValidatorsDs. + From( + goqu.Dialect("postgres"). + From( + goqu.L("unnest(?::int[])", pq.Array(validatorList)).As("validator_index"), + ). + As(validators.GetTable()), + ) } - var proposals []struct { - Proposer t.VDBValidator `db:"proposer"` - Group uint64 `db:"group_id"` - Epoch uint64 `db:"epoch"` - Slot uint64 `db:"slot"` - Status uint64 `db:"status"` - Block sql.NullInt64 `db:"block"` - FeeRecipient []byte `db:"fee_recipient"` - ElReward decimal.NullDecimal `db:"el_reward"` - ClReward decimal.NullDecimal `db:"cl_reward"` - GraffitiText string `db:"graffiti_text"` + // ------------------------------------- + // Construct final query + var blocksDs *goqu.SelectDataset - // for cursor only - Reward decimal.Decimal + // 1. Tables + blocksDs = filteredValidatorsDs. + InnerJoin(blocks, goqu.On( + blocks.Col("proposer").Eq(validators.Col("validator_index")), + )). + LeftJoin(goqu.T("consensus_payloads").As("cp"), goqu.On( + blocks.Col("slot").Eq(goqu.I("cp.slot")), + )). + LeftJoin(goqu.T("execution_payloads").As("ep"), goqu.On( + blocks.Col("exec_block_hash").Eq(goqu.I("ep.block_hash")), + )). + LeftJoin( + // relay bribe deduplication; select most likely (=max) relay bribe value for each block + goqu.Lateral(goqu.Dialect("postgres"). + From(goqu.T("relays_blocks")). + Select( + goqu.I("relays_blocks.exec_block_hash"), + goqu.I("relays_blocks.proposer_fee_recipient"), + goqu.MAX(goqu.I("relays_blocks.value")).As("value")). + GroupBy( + "exec_block_hash", + "proposer_fee_recipient", + )).As("rb"), + goqu.On( + goqu.I("rb.exec_block_hash").Eq(blocks.Col("exec_block_hash")), + ), + ) + + // 2. 
Selects + groupIdQ := goqu.C("group_id").(exp.Aliaseable) + if dashboardId.Validators != nil { + groupIdQ = exp.NewLiteralExpression("?::int", t.DefaultGroupId) } + groupId := groupIdQ.As("group_id") + + blocksDs = blocksDs. + SelectAppend( + blocks.Col("epoch"), + blocks.Col("slot"), + groupId, + blocks.Col("status"), + blocks.Col("exec_block_number"), + blocks.Col("graffiti_text"), + goqu.COALESCE(goqu.I("rb.proposer_fee_recipient"), blocks.Col("exec_fee_recipient")).As("fee_recipient"), + goqu.COALESCE(goqu.L("rb.value / 1e18"), goqu.I("ep.fee_recipient_reward")).As("el_reward"), + goqu.L("cp.cl_attestations_reward / 1e9 + cp.cl_sync_aggregate_reward / 1e9 + cp.cl_slashing_inclusion_reward / 1e9").As("cl_reward"), + ) - // handle sorting - where := `` - orderBy := `ORDER BY ` - sortOrder := ` ASC` - if colSort.Desc { - sortOrder = ` DESC` + // 3. Sorting and pagination + defaultColumns := []t.SortColumn{ + {Column: enums.VDBBlocksColumns.Slot.ToExpr(), Desc: true, Offset: currentCursor.Slot}, } - val := t.VDBValidator(0) - sortColName := `slot` + var offset any switch colSort.Column { - case enums.VDBBlockProposer: - sortColName = `proposer` - val = currentCursor.Proposer - case enums.VDBBlockStatus: - sortColName = `status` - val = currentCursor.Status - case enums.VDBBlockProposerReward: - sortColName = `reward` - val = currentCursor.Reward.BigInt().Uint64() - } - onlyPrimarySort := sortColName == `slot` - if currentCursor.IsValid() { - sign := ` > ` - if colSort.Desc && !currentCursor.IsReverse() || !colSort.Desc && currentCursor.IsReverse() { - sign = ` < ` - } - if currentCursor.IsReverse() { - if sortOrder == ` ASC` { - sortOrder = ` DESC` - } else { - sortOrder = ` ASC` - } + case enums.VDBBlocksColumns.Proposer: + offset = currentCursor.Proposer + case enums.VDBBlocksColumns.Block: + offset = currentCursor.Block + if !currentCursor.Block.Valid { + offset = nil } - params = append(params, currentCursor.Slot) - where += `WHERE (` - if onlyPrimarySort { - where += `slot` + sign + fmt.Sprintf(`$%d`, len(params)) - } else { - params = append(params, val) - secSign := ` < ` - if currentCursor.IsReverse() { - secSign = ` > ` - } - if sortColName == "status" { - // explicit cast to int because type of 'status' column is text for some reason - sortColName += "::int" - } - where += fmt.Sprintf(`(slot`+secSign+`$%d AND `+sortColName+` = $%d) OR `+sortColName+sign+`$%d`, len(params)-1, len(params), len(params)) - } - where += `) ` - } - if sortOrder == ` ASC` { - sortOrder += ` NULLS FIRST` - } else { - sortOrder += ` NULLS LAST` + case enums.VDBBlocksColumns.Status: + offset = fmt.Sprintf("%d", currentCursor.Status) // type of 'status' column is text for some reason + case enums.VDBBlocksColumns.ProposerReward: + offset = currentCursor.Reward } - orderBy += sortColName + sortOrder - secSort := `DESC` - if !onlyPrimarySort { - if currentCursor.IsReverse() { - secSort = `ASC` - } - orderBy += `, slot ` + secSort + + order, directions := applySortAndPagination(defaultColumns, t.SortColumn{Column: colSort.Column.ToExpr(), Desc: colSort.Desc, Offset: offset}, currentCursor.GenericCursor) + blocksDs = goqu.Dialect("postgres").From(goqu.T("past_blocks_cte")). + With("past_blocks_cte", blocksDs). // encapsulate so we can use selected fields + Order(order...) + if directions != nil { + blocksDs = blocksDs.Where(directions) } - // Get scheduled blocks. 
They aren't written to blocks table, get from duties - // Will just pass scheduled proposals to query and let db do the sorting etc - var scheduledProposers []t.VDBValidator - var scheduledEpochs []uint64 - var scheduledSlots []uint64 - // don't need to query if requested slots are in the past + // 4. Limit + blocksDs = blocksDs.Limit(uint(limit + 1)) + + // 5. Gather and supply scheduled blocks to let db do the sorting etc latestSlot := cache.LatestSlot.Get() - if !onlyPrimarySort || !currentCursor.IsValid() || - currentCursor.Slot > latestSlot+1 && currentCursor.Reverse != colSort.Desc || - currentCursor.Slot < latestSlot+1 && currentCursor.Reverse == colSort.Desc { + onlyPrimarySort := colSort.Column == enums.VDBBlockSlot + if !(onlyPrimarySort || colSort.Column == enums.VDBBlockBlock) || + !currentCursor.IsValid() || + currentCursor.Slot > latestSlot+1 || + colSort.Desc == currentCursor.Reverse { dutiesInfo, err := d.services.GetCurrentDutiesInfo() if err == nil { + if dashboardId.Validators == nil { + // fetch filtered validators if not done yet + filteredValidatorsDs = filteredValidatorsDs. + SelectAppend(groupIdQ) + validatorsQuery, validatorsArgs, err := filteredValidatorsDs.Prepared(true).ToSQL() + if err != nil { + return nil, nil, err + } + if err = d.alloyReader.SelectContext(ctx, &filteredValidators, validatorsQuery, validatorsArgs...); err != nil { + return nil, nil, err + } + } + if len(filteredValidators) == 0 { + return make([]t.VDBBlocksTableRow, 0), &t.Paging{}, nil + } + + validatorSet := make(map[t.VDBValidator]uint64) + for _, v := range filteredValidators { + validatorSet[v.Validator] = v.Group + } + var scheduledProposers []t.VDBValidator + var scheduledGroups []uint64 + var scheduledEpochs []uint64 + var scheduledSlots []uint64 + // don't need if requested slots are in the past for slot, vali := range dutiesInfo.PropAssignmentsForSlot { // only gather scheduled slots if _, ok := dutiesInfo.SlotStatus[slot]; ok { continue } // only gather slots scheduled for our validators - if _, ok := validatorMap[vali]; !ok { + if _, ok := validatorSet[vali]; !ok { continue } scheduledProposers = append(scheduledProposers, dutiesInfo.PropAssignmentsForSlot[slot]) + scheduledGroups = append(scheduledGroups, validatorSet[vali]) scheduledEpochs = append(scheduledEpochs, slot/utils.Config.Chain.ClConfig.SlotsPerEpoch) scheduledSlots = append(scheduledSlots, slot) } - } else { - log.Debugf("duties info not available, skipping scheduled slots: %s", err) - } - if len(scheduledProposers) > 0 { - // make sure the distinct clause filters out the correct duplicated row (e.g. block=nil) - orderBy += `, block` - } - } - groupIdCol := "group_id" - // this is actually just used for sorting for "reward".. 
will not consider EL rewards of unfinalized blocks atm - reward := "reward" - if dashboardId.Validators != nil { - groupIdCol = fmt.Sprintf("%d AS %s", t.DefaultGroupId, groupIdCol) - reward = "coalesce(rb.value / 1e18, ep.fee_recipient_reward) AS " + reward - } - selectFields := fmt.Sprintf(` - r.proposer, - %s, - r.epoch, - r.slot, - r.status, - block, - COALESCE(rb.proposer_fee_recipient, blocks.exec_fee_recipient) AS fee_recipient, - COALESCE(rb.value / 1e18, ep.fee_recipient_reward) AS el_reward, - cp.cl_attestations_reward / 1e9 + cp.cl_sync_aggregate_reward / 1e9 + cp.cl_slashing_inclusion_reward / 1e9 as cl_reward, - r.graffiti_text`, groupIdCol) - query := fmt.Sprintf(`SELECT distinct on (slot) - %s - FROM ( SELECT * FROM (`, selectFields) - // supply scheduled proposals, if any - if len(scheduledProposers) > 0 { - // distinct to filter out duplicates in an edge case (if dutiesInfo didn't update yet after a block was proposed, but the blocks table was) - // might be possible to remove this once the TODO in service_slot_viz.go:startSlotVizDataService is resolved - distinct := "slot" - if !onlyPrimarySort { - distinct = sortColName + ", " + distinct - } - params = append(params, scheduledProposers) - params = append(params, scheduledEpochs) - params = append(params, scheduledSlots) - query = fmt.Sprintf(`SELECT distinct on (%s) - %s - FROM ( SELECT * FROM (WITH scheduled_proposals ( - proposer, - epoch, - slot, - status, - block, - reward, - graffiti_text - ) AS (SELECT - *, - '0', - null::int, - null::int, - '' - FROM unnest($%d::int[], $%d::int[], $%d::int[])) - SELECT * FROM scheduled_proposals - UNION - (`, distinct, selectFields, len(params)-2, len(params)-1, len(params)) - } + scheduledDs := goqu.Dialect("postgres"). + From( + goqu.L("unnest(?::int[], ?::int[], ?::int[], ?::int[]) AS prov(validator_index, group_id, epoch, slot)", pq.Array(scheduledProposers), pq.Array(scheduledGroups), pq.Array(scheduledEpochs), pq.Array(scheduledSlots)), + ). + Select( + goqu.C("validator_index"), + goqu.C("epoch"), + goqu.C("slot"), + groupId, + goqu.V("0").As("status"), + goqu.V(nil).As("exec_block_number"), + goqu.V(nil).As("graffiti_text"), + goqu.V(nil).As("fee_recipient"), + goqu.V(nil).As("el_reward"), + goqu.V(nil).As("cl_reward"), + ). + As("scheduled_blocks") - if dashboardId.Validators == nil { - query += ` - LEFT JOIN cached_proposal_rewards ON cached_proposal_rewards.dashboard_id = $1 AND blocks.slot = cached_proposal_rewards.slot - ` - } else { - query += ` - LEFT JOIN execution_payloads ep ON ep.block_hash = blocks.exec_block_hash - LEFT JOIN relays_blocks rb ON rb.exec_block_hash = blocks.exec_block_hash - ` + // Supply to result query + // distinct + block number ordering to filter out duplicates in an edge case (if dutiesInfo didn't update yet after a block was proposed, but the blocks table was) + // might be possible to remove this once the TODO in service_slot_viz.go:startSlotVizDataService is resolved + blocksDs = goqu.Dialect("postgres"). + From(blocksDs.Union(scheduledDs)). // wrap union to apply order + Order(order...). + OrderAppend(goqu.C("exec_block_number").Desc().NullsLast()). + Limit(uint(limit + 1)). + Distinct(enums.VDBBlocksColumns.Slot.ToExpr()) + if directions != nil { + blocksDs = blocksDs.Where(directions) + } + if !onlyPrimarySort { + blocksDs = blocksDs. 
+ Distinct(colSort.Column.ToExpr(), enums.VDBBlocksColumns.Slot.ToExpr()) + } + } else { + log.Warnf("Error getting scheduled proposals, DutiesInfo not available in Redis: %s", err) + } } - // shrink selection to our filtered validators - if len(scheduledProposers) > 0 { - query += `)` - } - query += `) as u ` - if dashboardId.Validators == nil { - query += fmt.Sprintf(` - INNER JOIN (%s) validators ON validators.validator_index = proposer - `, filteredValidatorsQuery) - } else { - query += `WHERE proposer = ANY($1) ` - } + // ------------------------------------- + // Execute query + var proposals []struct { + Proposer t.VDBValidator `db:"validator_index"` + Group uint64 `db:"group_id"` + Epoch uint64 `db:"epoch"` + Slot uint64 `db:"slot"` + Status uint64 `db:"status"` + Block sql.NullInt64 `db:"exec_block_number"` + FeeRecipient []byte `db:"fee_recipient"` + ElReward decimal.NullDecimal `db:"el_reward"` + ClReward decimal.NullDecimal `db:"cl_reward"` + GraffitiText sql.NullString `db:"graffiti_text"` - params = append(params, limit+1) - limitStr := fmt.Sprintf(` - LIMIT $%d - `, len(params)) - rewardsStr := `) r - LEFT JOIN consensus_payloads cp on r.slot = cp.slot - LEFT JOIN blocks on r.slot = blocks.slot - LEFT JOIN execution_payloads ep ON ep.block_hash = blocks.exec_block_hash - LEFT JOIN relays_blocks rb ON rb.exec_block_hash = blocks.exec_block_hash - ` - // relay bribe deduplication; select most likely (=max) relay bribe value for each block - relayOrder := `` - if colSort.Column != enums.VDBBlockProposerReward { - relayOrder += `, rb.value ` + secSort + // for cursor only + Reward decimal.Decimal } startTime := time.Now() - err = d.alloyReader.SelectContext(ctx, &proposals, query+where+orderBy+limitStr+rewardsStr+orderBy+relayOrder, params...) + query, args, err := blocksDs.Prepared(true).ToSQL() + if err != nil { + return nil, nil, err + } + err = d.alloyReader.SelectContext(ctx, &proposals, query, args...) 
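Note on the query built above: the rewrite replaces hand-assembled SQL strings with goqu datasets, and the scheduled (not yet proposed) slots, which exist only in memory, are handed to Postgres as a derived table via unnest() and UNIONed with the persisted-blocks query, so the database does the sorting, deduplication and pagination in one pass. A minimal, self-contained sketch of that unnest + UNION + DISTINCT ON pattern, with illustrative table and column names rather than the PR's exact query:

package main

import (
	"fmt"

	"github.com/doug-martin/goqu/v9"
	// registers the "postgres" dialect used below
	_ "github.com/doug-martin/goqu/v9/dialect/postgres"
	"github.com/lib/pq"
)

func main() {
	// hypothetical scheduled proposals known only in memory
	scheduledSlots := []int64{9001, 9002}
	scheduledProposers := []int64{42, 1337}

	// expose the in-memory slices to the database as a derived table
	scheduled := goqu.Dialect("postgres").
		From(goqu.L(
			"unnest(?::int[], ?::int[]) AS prov(slot, validator_index)",
			pq.Array(scheduledSlots), pq.Array(scheduledProposers),
		)).
		Select(goqu.C("slot"), goqu.C("validator_index"), goqu.V(nil).As("exec_block_number"))

	// persisted blocks from the real table
	persisted := goqu.Dialect("postgres").
		From("blocks").
		Select(goqu.C("slot"), goqu.C("proposer").As("validator_index"), goqu.C("exec_block_number"))

	// DISTINCT ON (slot) with exec_block_number ordered NULLS LAST keeps the
	// proposed row when a slot appears both as scheduled and as a stored block
	ds := goqu.Dialect("postgres").
		From(persisted.Union(scheduled)).
		Order(goqu.C("slot").Desc(), goqu.C("exec_block_number").Desc().NullsLast()).
		Distinct(goqu.C("slot")).
		Limit(11) // limit+1: the extra row signals that another page exists

	sql, args, err := ds.Prepared(true).ToSQL()
	if err != nil {
		panic(err)
	}
	fmt.Println(sql, args)
}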
log.Debugf("=== getting past blocks took %s", time.Since(startTime)) if err != nil { return nil, nil, err @@ -336,6 +322,9 @@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, das if len(proposals) == 0 { return make([]t.VDBBlocksTableRow, 0), &t.Paging{}, nil } + + // ------------------------------------- + // Prepare result moreDataFlag := len(proposals) > int(limit) if moreDataFlag { proposals = proposals[:len(proposals)-1] @@ -359,7 +348,8 @@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, das } data := make([]t.VDBBlocksTableRow, len(proposals)) - ensMapping := make(map[string]string) + addressMapping := make(map[string]*t.Address) + contractStatusRequests := make([]db.ContractInteractionAtRequest, 0, len(proposals)) for i, proposal := range proposals { data[i].GroupId = proposal.Group if dashboardId.AggregateGroups { @@ -383,20 +373,30 @@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, das if proposal.Status == 0 || proposal.Status == 2 { continue } - graffiti := proposal.GraffitiText - data[i].Graffiti = &graffiti + if proposal.GraffitiText.Valid { + graffiti := proposal.GraffitiText.String + data[i].Graffiti = &graffiti + } + if proposal.Block.Valid { + block := uint64(proposal.Block.Int64) + data[i].Block = &block + } if proposal.Status == 3 { continue } - block := uint64(proposal.Block.Int64) - data[i].Block = &block var reward t.ClElValue[decimal.Decimal] if proposal.ElReward.Valid { rewardRecp := t.Address{ Hash: t.Hash(hexutil.Encode(proposal.FeeRecipient)), } data[i].RewardRecipient = &rewardRecp - ensMapping[hexutil.Encode(proposal.FeeRecipient)] = "" + addressMapping[hexutil.Encode(proposal.FeeRecipient)] = nil + contractStatusRequests = append(contractStatusRequests, db.ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", proposal.FeeRecipient), + Block: proposal.Block.Int64, + TxIdx: -1, + TraceIdx: -1, + }) reward.El = proposal.ElReward.Decimal.Mul(decimal.NewFromInt(1e18)) if rpValidator, ok := rpValidators[proposal.Proposer]; ok && protocolModes.RocketPool { reward.El = reward.El.Mul(d.getRocketPoolOperatorFactor(rpValidator)) @@ -413,13 +413,22 @@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, das } // determine reward recipient ENS names startTime = time.Now() - if err := db.GetEnsNamesForAddresses(ensMapping); err != nil { + // determine ens/names + if err := d.GetNamesAndEnsForAddresses(ctx, addressMapping); err != nil { + return nil, nil, err + } + log.Debugf("=== getting ens + labels names took %s", time.Since(startTime)) + // determine contract statuses + contractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusRequests) + if err != nil { return nil, nil, err } - log.Debugf("=== getting ens names took %s", time.Since(startTime)) + var contractIdx int for i := range data { if data[i].RewardRecipient != nil { - data[i].RewardRecipient.Ens = ensMapping[string(data[i].RewardRecipient.Hash)] + data[i].RewardRecipient = addressMapping[string(data[i].RewardRecipient.Hash)] + data[i].RewardRecipient.IsContract = contractStatuses[contractIdx] == types.CONTRACT_CREATION || contractStatuses[contractIdx] == types.CONTRACT_PRESENT + contractIdx += 1 } } if !moreDataFlag && !currentCursor.IsValid() { diff --git a/backend/pkg/api/data_access/vdb_deposits.go b/backend/pkg/api/data_access/vdb_deposits.go index 7d32cc002..0f81e1d93 100644 --- a/backend/pkg/api/data_access/vdb_deposits.go +++ 
b/backend/pkg/api/data_access/vdb_deposits.go @@ -14,6 +14,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/api/enums" t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" "github.com/shopspring/decimal" @@ -121,16 +122,27 @@ func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, } responseData := make([]t.VDBExecutionDepositsTableRow, len(data)) + addressMapping := make(map[string]*t.Address) + fromContractStatusRequests := make([]db.ContractInteractionAtRequest, len(data)) + depositorContractStatusRequests := make([]db.ContractInteractionAtRequest, 0, len(data)) for i, row := range data { responseData[i] = t.VDBExecutionDepositsTableRow{ PublicKey: t.PubKey(pubkeys[i]), Block: uint64(row.BlockNumber), Timestamp: row.Timestamp.Unix(), - From: t.Address{Hash: t.Hash(hexutil.Encode(row.From))}, TxHash: t.Hash(hexutil.Encode(row.TxHash)), WithdrawalCredential: t.Hash(hexutil.Encode(row.WithdrawalCredentials)), Amount: utils.GWeiToWei(big.NewInt(row.Amount)), Valid: row.Valid, + From: t.Address{Hash: t.Hash(hexutil.Encode(row.From))}, + } + addressMapping[hexutil.Encode(row.From)] = nil + fromContractStatusRequests[i] = db.ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", row.From), + Block: row.BlockNumber, + // TODO not entirely correct, would need to determine tx index and itx index of tx. But good enough for now + TxIdx: -1, + TraceIdx: -1, } if row.GroupId.Valid { if dashboardId.AggregateGroups { @@ -143,6 +155,10 @@ func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, } if len(row.Depositor) > 0 { responseData[i].Depositor = t.Address{Hash: t.Hash(hexutil.Encode(row.Depositor))} + addressMapping[hexutil.Encode(row.Depositor)] = nil + depositorReq := fromContractStatusRequests[i] + depositorReq.Address = fmt.Sprintf("%x", row.Depositor) + depositorContractStatusRequests = append(depositorContractStatusRequests, depositorReq) } else { responseData[i].Depositor = responseData[i].From } @@ -150,6 +166,31 @@ func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, responseData[i].Index = &v } } + + // populate address data + if err := d.GetNamesAndEnsForAddresses(ctx, addressMapping); err != nil { + return nil, nil, err + } + fromContractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(fromContractStatusRequests) + if err != nil { + return nil, nil, err + } + depositorContractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(depositorContractStatusRequests) + if err != nil { + return nil, nil, err + } + var depositorIdx int + for i := range data { + responseData[i].From = *addressMapping[string(responseData[i].From.Hash)] + responseData[i].From.IsContract = fromContractStatuses[i] == types.CONTRACT_CREATION || fromContractStatuses[i] == types.CONTRACT_PRESENT + responseData[i].Depositor = *addressMapping[string(responseData[i].Depositor.Hash)] + responseData[i].Depositor.IsContract = responseData[i].From.IsContract + if responseData[i].Depositor.Hash != responseData[i].From.Hash { + responseData[i].Depositor.IsContract = depositorContractStatuses[depositorIdx] == types.CONTRACT_CREATION || depositorContractStatuses[depositorIdx] == types.CONTRACT_PRESENT + depositorIdx += 1 + } + } + var paging t.Paging moreDataFlag := len(responseData) > int(limit) diff --git a/backend/pkg/api/data_access/vdb_helpers.go 
b/backend/pkg/api/data_access/vdb_helpers.go index f4b47ff13..41e698139 100644 --- a/backend/pkg/api/data_access/vdb_helpers.go +++ b/backend/pkg/api/data_access/vdb_helpers.go @@ -43,28 +43,6 @@ func (d DataAccessService) getDashboardValidators(ctx context.Context, dashboard return dashboardId.Validators, nil } -func (d DataAccessService) calculateTotalEfficiency(attestationEff, proposalEff, syncEff sql.NullFloat64) float64 { - efficiency := float64(0) - - if !attestationEff.Valid && !proposalEff.Valid && !syncEff.Valid { - efficiency = 0 - } else if attestationEff.Valid && !proposalEff.Valid && !syncEff.Valid { - efficiency = attestationEff.Float64 * 100.0 - } else if attestationEff.Valid && proposalEff.Valid && !syncEff.Valid { - efficiency = ((56.0 / 64.0 * attestationEff.Float64) + (8.0 / 64.0 * proposalEff.Float64)) * 100.0 - } else if attestationEff.Valid && !proposalEff.Valid && syncEff.Valid { - efficiency = ((62.0 / 64.0 * attestationEff.Float64) + (2.0 / 64.0 * syncEff.Float64)) * 100.0 - } else { - efficiency = (((54.0 / 64.0) * attestationEff.Float64) + ((8.0 / 64.0) * proposalEff.Float64) + ((2.0 / 64.0) * syncEff.Float64)) * 100.0 - } - - if efficiency < 0 { - efficiency = 0 - } - - return efficiency -} - func (d DataAccessService) calculateChartEfficiency(efficiencyType enums.VDBSummaryChartEfficiencyType, row *t.VDBValidatorSummaryChartRow) (float64, error) { efficiency := float64(0) switch efficiencyType { @@ -83,7 +61,7 @@ func (d DataAccessService) calculateChartEfficiency(efficiencyType enums.VDBSumm syncEfficiency.Valid = true } - efficiency = d.calculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) + efficiency = utils.CalculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) case enums.VDBSummaryChartAttestation: if row.AttestationIdealReward > 0 { efficiency = (row.AttestationReward / row.AttestationIdealReward) * 100 diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index 1413c61e8..fff753ad2 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "fmt" "math/big" + "slices" "sort" "strconv" "strings" @@ -15,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/gobitfly/beaconchain/pkg/api/enums" t "github.com/gobitfly/beaconchain/pkg/api/types" - "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/utils" constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" "github.com/lib/pq" @@ -148,7 +148,7 @@ func (d *DataAccessService) GetValidatorDashboardInfo(ctx context.Context, dashb err := wg.Wait() if err != nil { - return nil, fmt.Errorf("error retrieving user dashboards data: %v", err) + return nil, fmt.Errorf("error retrieving user dashboards data: %w", err) } return result, nil @@ -168,7 +168,7 @@ func (d *DataAccessService) GetValidatorDashboardName(ctx context.Context, dashb } // param validators: slice of validator public keys or indices -func (d *DataAccessService) GetValidatorsFromSlices(indices []t.VDBValidator, publicKeys []string) ([]t.VDBValidator, error) { +func (d *DataAccessService) GetValidatorsFromSlices(ctx context.Context, indices []t.VDBValidator, publicKeys []string) ([]t.VDBValidator, error) { if len(indices) == 0 && len(publicKeys) == 0 { return []t.VDBValidator{}, nil } @@ -235,6 +235,16 @@ func (d *DataAccessService) RemoveValidatorDashboard(ctx context.Context, 
dashbo _, err := d.alloyWriter.ExecContext(ctx, ` DELETE FROM users_val_dashboards WHERE id = $1 `, dashboardId) + if err != nil { + return err + } + + prefix := fmt.Sprintf("%s:%d:", ValidatorDashboardEventPrefix, dashboardId) + + // Remove all events related to the dashboard + _, err = d.userWriter.ExecContext(ctx, ` + DELETE FROM users_subscriptions WHERE event_filter LIKE ($1 || '%') + `, prefix) return err } @@ -287,8 +297,9 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d id = $1` return d.alloyReader.GetContext(ctx, &data.Network, query, dashboardId.Id) }) + } else { // load the chain id from the config in case of public dashboards + data.Network = utils.Config.Chain.ClConfig.DepositChainID } - // TODO handle network of validator set dashboards // Groups if dashboardId.Validators == nil && !dashboardId.AggregateGroups { @@ -322,7 +333,7 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d validators, err := d.getDashboardValidators(ctx, dashboardId, nil) if err != nil { - return nil, fmt.Errorf("error retrieving validators from dashboard id: %v", err) + return nil, fmt.Errorf("error retrieving validators from dashboard id: %w", err) } if dashboardId.Validators != nil || dashboardId.AggregateGroups { @@ -446,7 +457,7 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d query, args, err := ds.Prepared(true).ToSQL() if err != nil { - return fmt.Errorf("error preparing query: %v", err) + return fmt.Errorf("error preparing query: %w", err) } err = d.clickhouseReader.GetContext(ctx, &queryResult, query, args...) @@ -468,7 +479,7 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d syncEfficiency.Float64 = float64(queryResult.SyncExecuted) / float64(queryResult.SyncScheduled) syncEfficiency.Valid = true } - *efficiency = d.calculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) + *efficiency = utils.CalculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) return nil }) @@ -482,7 +493,7 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d err = eg.Wait() if err != nil { - return nil, fmt.Errorf("error retrieving validator dashboard overview data: %v", err) + return nil, fmt.Errorf("error retrieving validator dashboard overview data: %w", err) } return &data, nil @@ -541,6 +552,16 @@ func (d *DataAccessService) RemoveValidatorDashboardGroup(ctx context.Context, d _, err := d.alloyWriter.ExecContext(ctx, ` DELETE FROM users_val_dashboards_groups WHERE dashboard_id = $1 AND id = $2 `, dashboardId, groupId) + if err != nil { + return err + } + + prefix := fmt.Sprintf("%s:%d:%d", ValidatorDashboardEventPrefix, dashboardId, groupId) + + // Remove all events related to the group + _, err = d.userWriter.ExecContext(ctx, ` + DELETE FROM users_subscriptions WHERE event_filter = $1 + `, prefix) return err } @@ -760,21 +781,6 @@ func (d *DataAccessService) GetValidatorDashboardGroupExists(ctx context.Context return groupExists, err } -// return how many of the passed validators are already in the dashboard -func (d *DataAccessService) GetValidatorDashboardExistingValidatorCount(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) (uint64, error) { - if len(validators) == 0 { - return 0, nil - } - - var count uint64 - err := d.alloyReader.GetContext(ctx, &count, ` - SELECT COUNT(*) - FROM users_val_dashboards_validators - WHERE dashboard_id = $1 AND validator_index = 
ANY($2) - `, dashboardId, pq.Array(validators)) - return count, err -} - func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) { if len(validators) == 0 { // No validators to add @@ -859,191 +865,145 @@ func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context, return result, nil } +// Updates the group for validators already in the dashboard linked to the deposit address. +// Adds up to limit new validators associated with the deposit address, if not already in the dashboard. func (d *DataAccessService) AddValidatorDashboardValidatorsByDepositAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) { - // for all validators already in the dashboard that are associated with the deposit address, update the group - // then add no more than `limit` validators associated with the deposit address to the dashboard addressParsed, err := hex.DecodeString(strings.TrimPrefix(address, "0x")) if err != nil { return nil, err } - if len(addressParsed) != 20 { - return nil, fmt.Errorf("invalid deposit address: %s", address) - } - var validatorIndicesToAdd []uint64 - err = d.readerDb.SelectContext(ctx, &validatorIndicesToAdd, "SELECT validatorindex FROM validators WHERE pubkey IN (SELECT publickey FROM eth1_deposits WHERE from_address = $1) ORDER BY validatorindex LIMIT $2;", addressParsed, limit) - if err != nil { - return nil, err - } + g, gCtx := errgroup.WithContext(ctx) - // retrieve the existing validators - var existingValidators []uint64 - err = d.alloyWriter.SelectContext(ctx, &existingValidators, "SELECT validator_index FROM users_val_dashboards_validators WHERE dashboard_id = $1", dashboardId) - if err != nil { - return nil, err - } - existingValidatorsMap := make(map[uint64]bool, len(existingValidators)) - for _, validatorIndex := range existingValidators { - existingValidatorsMap[validatorIndex] = true - } - - // filter out the validators that are already in the dashboard + // fetch validators that are already in the dashboard and associated with the deposit address var validatorIndicesToUpdate []uint64 + + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToUpdate, ` + SELECT DISTINCT uvdv.validator_index + FROM validators v + JOIN eth1_deposits d ON v.pubkey = d.publickey + JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index + WHERE uvdv.dashboard_id = $1 AND d.from_address = $2; + `, dashboardId, addressParsed) + }) + + // fetch validators that are not yet in the dashboard and associated with the deposit address, up to the limit var validatorIndicesToInsert []uint64 - for _, validatorIndex := range validatorIndicesToAdd { - if _, ok := existingValidatorsMap[validatorIndex]; ok { - validatorIndicesToUpdate = append(validatorIndicesToUpdate, validatorIndex) - } else { - validatorIndicesToInsert = append(validatorIndicesToInsert, validatorIndex) - } - } + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToInsert, ` + SELECT DISTINCT v.validatorindex + FROM validators v + JOIN eth1_deposits d ON v.pubkey = d.publickey + LEFT JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index AND uvdv.dashboard_id = $1 + WHERE d.from_address = $2 AND uvdv.validator_index IS NULL + ORDER BY v.validatorindex + LIMIT $3; + `, dashboardId, addressParsed, limit) 
+ }) - // update the group for all existing validators - validatorIndices := make([]uint64, 0, int(limit)) - validatorIndices = append(validatorIndices, validatorIndicesToUpdate...) - - // insert the new validators up to the allowed user max limit taking into account how many validators are already in the dashboard - if len(validatorIndicesToInsert) > 0 { - freeSpace := int(limit) - len(existingValidators) - if freeSpace > 0 { - if len(validatorIndicesToInsert) > freeSpace { // cap inserts to the amount of free space available - log.Infof("limiting the number of validators to insert to %d", freeSpace) - validatorIndicesToInsert = validatorIndicesToInsert[:freeSpace] - } - validatorIndices = append(validatorIndices, validatorIndicesToInsert...) - } + err = g.Wait() + if err != nil { + return nil, err } - if len(validatorIndices) == 0 { - // no validators to add - return []t.VDBPostValidatorsData{}, nil - } - log.Infof("inserting %d new validators and updating %d validators of dashboard %d, limit is %d", len(validatorIndicesToInsert), len(validatorIndicesToUpdate), dashboardId, limit) + validatorIndices := slices.Concat(validatorIndicesToUpdate, validatorIndicesToInsert) + return d.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validatorIndices) } +// Updates the group for validators already in the dashboard linked to the withdrawal address. +// Adds up to limit new validators associated with the withdrawal address, if not already in the dashboard. func (d *DataAccessService) AddValidatorDashboardValidatorsByWithdrawalAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) { - // for all validators already in the dashboard that are associated with the withdrawal address, update the group - // then add no more than `limit` validators associated with the deposit address to the dashboard addressParsed, err := hex.DecodeString(strings.TrimPrefix(address, "0x")) if err != nil { return nil, err } - var validatorIndicesToAdd []uint64 - err = d.readerDb.SelectContext(ctx, &validatorIndicesToAdd, "SELECT validatorindex FROM validators WHERE withdrawalcredentials = $1 ORDER BY validatorindex LIMIT $2;", addressParsed, limit) - if err != nil { - return nil, err - } - // retrieve the existing validators - var existingValidators []uint64 - err = d.alloyWriter.SelectContext(ctx, &existingValidators, "SELECT validator_index FROM users_val_dashboards_validators WHERE dashboard_id = $1", dashboardId) - if err != nil { - return nil, err - } - existingValidatorsMap := make(map[uint64]bool, len(existingValidators)) - for _, validatorIndex := range existingValidators { - existingValidatorsMap[validatorIndex] = true - } + g, gCtx := errgroup.WithContext(ctx) - // filter out the validators that are already in the dashboard + // fetch validators that are already in the dashboard and associated with the withdrawal address var validatorIndicesToUpdate []uint64 + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToUpdate, ` + SELECT DISTINCT uvdv.validator_index + FROM validators v + JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index + WHERE uvdv.dashboard_id = $1 AND v.withdrawalcredentials = $2; + `, dashboardId, addressParsed) + }) + + // fetch validators that are not yet in the dashboard and associated with the withdrawal address, up to the limit var validatorIndicesToInsert []uint64 - for _, validatorIndex := range
validatorIndicesToAdd { - if _, ok := existingValidatorsMap[validatorIndex]; ok { - validatorIndicesToUpdate = append(validatorIndicesToUpdate, validatorIndex) - } else { - validatorIndicesToInsert = append(validatorIndicesToInsert, validatorIndex) - } - } + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToInsert, ` + SELECT DISTINCT v.validatorindex + FROM validators v + LEFT JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index AND uvdv.dashboard_id = $1 + WHERE v.withdrawalcredentials = $2 AND uvdv.validator_index IS NULL + ORDER BY v.validatorindex + LIMIT $3; + `, dashboardId, addressParsed, limit) + }) - // update the group for all existing validators - validatorIndices := make([]uint64, 0, int(limit)) - validatorIndices = append(validatorIndices, validatorIndicesToUpdate...) - - // insert the new validators up to the allowed user max limit taking into account how many validators are already in the dashboard - if len(validatorIndicesToInsert) > 0 { - freeSpace := int(limit) - len(existingValidators) - if freeSpace > 0 { - if len(validatorIndicesToInsert) > freeSpace { // cap inserts to the amount of free space available - log.Infof("limiting the number of validators to insert to %d", freeSpace) - validatorIndicesToInsert = validatorIndicesToInsert[:freeSpace] - } - validatorIndices = append(validatorIndices, validatorIndicesToInsert...) - } + err = g.Wait() + if err != nil { + return nil, err } - if len(validatorIndices) == 0 { - // no validators to add - return []t.VDBPostValidatorsData{}, nil - } - log.Infof("inserting %d new validators and updating %d validators of dashboard %d, limit is %d", len(validatorIndicesToInsert), len(validatorIndicesToUpdate), dashboardId, limit) + validatorIndices := slices.Concat(validatorIndicesToUpdate, validatorIndicesToInsert) + return d.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validatorIndices) } +// Update the group for validators already in the dashboard linked to the graffiti (via produced block). +// Add up to limit new validators associated with the graffiti, if not already in the dashboard. 
func (d *DataAccessService) AddValidatorDashboardValidatorsByGraffiti(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, graffiti string, limit uint64) ([]t.VDBPostValidatorsData, error) { - // for all validators already in the dashboard that are associated with the graffiti (by produced block), update the group - // then add no more than `limit` validators associated with the deposit address to the dashboard - var validatorIndicesToAdd []uint64 - err := d.readerDb.SelectContext(ctx, &validatorIndicesToAdd, "SELECT DISTINCT proposer FROM blocks WHERE graffiti_text = $1 ORDER BY proposer LIMIT $2;", graffiti, limit) - if err != nil { - return nil, err - } + g, gCtx := errgroup.WithContext(ctx) - // retrieve the existing validators - var existingValidators []uint64 - err = d.alloyWriter.SelectContext(ctx, &existingValidators, "SELECT validator_index FROM users_val_dashboards_validators WHERE dashboard_id = $1", dashboardId) - if err != nil { - return nil, err - } - existingValidatorsMap := make(map[uint64]bool, len(existingValidators)) - for _, validatorIndex := range existingValidators { - existingValidatorsMap[validatorIndex] = true - } - - // filter out the validators that are already in the dashboard + // fetch validators that are already in the dashboard and associated with the graffiti var validatorIndicesToUpdate []uint64 + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToUpdate, ` + SELECT DISTINCT uvdv.validator_index + FROM blocks b + JOIN users_val_dashboards_validators uvdv ON b.proposer = uvdv.validator_index + WHERE uvdv.dashboard_id = $1 AND b.graffiti_text = $2; + `, dashboardId, graffiti) + }) + + // fetch validators that are not yet in the dashboard and associated with the graffiti, up to the limit var validatorIndicesToInsert []uint64 - for _, validatorIndex := range validatorIndicesToAdd { - if _, ok := existingValidatorsMap[validatorIndex]; ok { - validatorIndicesToUpdate = append(validatorIndicesToUpdate, validatorIndex) - } else { - validatorIndicesToInsert = append(validatorIndicesToInsert, validatorIndex) - } - } + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToInsert, ` + SELECT DISTINCT b.proposer + FROM blocks b + LEFT JOIN users_val_dashboards_validators uvdv ON b.proposer = uvdv.validator_index AND uvdv.dashboard_id = $1 + WHERE b.graffiti_text = $2 AND uvdv.validator_index IS NULL + ORDER BY b.proposer + LIMIT $3; + `, dashboardId, graffiti, limit) + }) - // update the group for all existing validators - validatorIndices := make([]uint64, 0, int(limit)) - validatorIndices = append(validatorIndices, validatorIndicesToUpdate...) - - // insert the new validators up to the allowed user max limit taking into account how many validators are already in the dashboard - if len(validatorIndicesToInsert) > 0 { - freeSpace := int(limit) - len(existingValidators) - if freeSpace > 0 { - if len(validatorIndicesToInsert) > freeSpace { // cap inserts to the amount of free space available - log.Infof("limiting the number of validators to insert to %d", freeSpace) - validatorIndicesToInsert = validatorIndicesToInsert[:freeSpace] - } - validatorIndices = append(validatorIndices, validatorIndicesToInsert...) 
- } + err := g.Wait() + if err != nil { + return nil, err } - if len(validatorIndices) == 0 { - // no validators to add - return []t.VDBPostValidatorsData{}, nil - } - log.Infof("inserting %d new validators and updating %d validators of dashboard %d, limit is %d", len(validatorIndicesToInsert), len(validatorIndicesToUpdate), dashboardId, limit) + validatorIndices := slices.Concat(validatorIndicesToUpdate, validatorIndicesToInsert) + return d.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validatorIndices) } func (d *DataAccessService) RemoveValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) error { if len(validators) == 0 { - // // Remove all validators for the dashboard - // _, err := d.alloyWriter.ExecContext(ctx, ` - // DELETE FROM users_val_dashboards_validators - // WHERE dashboard_id = $1 - // `, dashboardId) - return fmt.Errorf("calling RemoveValidatorDashboardValidators with empty validators list is not allowed") + // Remove all validators for the dashboard + // This is usually forbidden by API validation + _, err := d.alloyWriter.ExecContext(ctx, ` + DELETE FROM users_val_dashboards_validators + WHERE dashboard_id = $1 + `, dashboardId) + return err } //Create the query to delete validators diff --git a/backend/pkg/api/data_access/vdb_rewards.go b/backend/pkg/api/data_access/vdb_rewards.go index 5ed3f3c25..e040d84f7 100644 --- a/backend/pkg/api/data_access/vdb_rewards.go +++ b/backend/pkg/api/data_access/vdb_rewards.go @@ -33,7 +33,7 @@ func (d *DataAccessService) GetValidatorDashboardRewards(ctx context.Context, da if cursor != "" { currentCursor, err = utils.StringToCursor[t.RewardsCursor](cursor) if err != nil { - return nil, nil, fmt.Errorf("failed to parse passed cursor as WithdrawalsCursor: %w", err) + return nil, nil, fmt.Errorf("failed to parse passed cursor as RewardsCursor: %w", err) } } @@ -106,12 +106,13 @@ func (d *DataAccessService) GetValidatorDashboardRewards(ctx context.Context, da LeftJoin(goqu.L("blocks b"), goqu.On(goqu.L("v.validator_index = b.proposer AND b.status = '1'"))). LeftJoin(goqu.L("execution_payloads ep"), goqu.On(goqu.L("ep.block_hash = b.exec_block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). GroupBy(goqu.L("b.proposer")) @@ -628,12 +629,13 @@ func (d *DataAccessService) GetValidatorDashboardGroupRewards(ctx context.Contex LeftJoin(goqu.L("blocks b"), goqu.On(goqu.L("v.validator_index = b.proposer AND b.status = '1'"))). LeftJoin(goqu.L("execution_payloads ep"), goqu.On(goqu.L("ep.block_hash = b.exec_block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). Where(goqu.L("b.epoch = ?", epoch)). @@ -832,12 +834,13 @@ func (d *DataAccessService) GetValidatorDashboardRewardsChart(ctx context.Contex LeftJoin(goqu.L("blocks b"), goqu.On(goqu.L("v.validator_index = b.proposer AND b.status = '1'"))). 
LeftJoin(goqu.L("execution_payloads ep"), goqu.On(goqu.L("ep.block_hash = b.exec_block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). Where(goqu.L("b.epoch >= ?", startEpoch)). @@ -1127,12 +1130,13 @@ func (d *DataAccessService) GetValidatorDashboardDuties(ctx context.Context, das From(goqu.L("blocks b")). LeftJoin(goqu.L("execution_payloads ep"), goqu.On(goqu.L("ep.block_hash = b.exec_block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). Where(goqu.L("b.epoch = ?", epoch)). diff --git a/backend/pkg/api/data_access/vdb_rocket_pool.go b/backend/pkg/api/data_access/vdb_rocket_pool.go index fc2cb963a..90a94d3af 100644 --- a/backend/pkg/api/data_access/vdb_rocket_pool.go +++ b/backend/pkg/api/data_access/vdb_rocket_pool.go @@ -17,11 +17,6 @@ func (d *DataAccessService) GetValidatorDashboardTotalRocketPool(ctx context.Con return d.dummy.GetValidatorDashboardTotalRocketPool(ctx, dashboardId, search) } -func (d *DataAccessService) GetValidatorDashboardNodeRocketPool(ctx context.Context, dashboardId t.VDBId, node string) (*t.VDBNodeRocketPoolData, error) { - // TODO @DATA-ACCESS - return d.dummy.GetValidatorDashboardNodeRocketPool(ctx, dashboardId, node) -} - func (d *DataAccessService) GetValidatorDashboardRocketPoolMinipools(ctx context.Context, dashboardId t.VDBId, node, cursor string, colSort t.Sort[enums.VDBRocketPoolMinipoolsColumn], search string, limit uint64) ([]t.VDBRocketPoolMinipoolsTableRow, *t.Paging, error) { // TODO @DATA-ACCESS return d.dummy.GetValidatorDashboardRocketPoolMinipools(ctx, dashboardId, node, cursor, colSort, search, limit) diff --git a/backend/pkg/api/data_access/vdb_summary.go b/backend/pkg/api/data_access/vdb_summary.go index 6c357d3b6..beebfbaea 100644 --- a/backend/pkg/api/data_access/vdb_summary.go +++ b/backend/pkg/api/data_access/vdb_summary.go @@ -92,7 +92,7 @@ func (d *DataAccessService) GetValidatorDashboardSummary(ctx context.Context, da if err != nil { return nil, nil, err } - averageNetworkEfficiency := d.calculateTotalEfficiency( + averageNetworkEfficiency := utils.CalculateTotalEfficiency( efficiency.AttestationEfficiency[period], efficiency.ProposalEfficiency[period], efficiency.SyncEfficiency[period]) // ------------------------------------------------------------------------------------------------------------------ @@ -248,12 +248,13 @@ func (d *DataAccessService) GetValidatorDashboardSummary(ctx context.Context, da From(goqu.L("blocks b")). LeftJoin(goqu.L("execution_payloads ep"), goqu.On(goqu.L("ep.block_hash = b.exec_block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). 
+ GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). Where(goqu.L("b.epoch >= ? AND b.epoch <= ? AND b.status = '1'", epochStart, epochEnd)). @@ -427,7 +428,7 @@ func (d *DataAccessService) GetValidatorDashboardSummary(ctx context.Context, da syncEfficiency.Float64 = float64(queryEntry.SyncExecuted) / float64(queryEntry.SyncScheduled) syncEfficiency.Valid = true } - resultEntry.Efficiency = d.calculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) + resultEntry.Efficiency = utils.CalculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) // Add the duties info to the total total.AttestationReward = total.AttestationReward.Add(queryEntry.AttestationReward) @@ -547,7 +548,7 @@ func (d *DataAccessService) GetValidatorDashboardSummary(ctx context.Context, da totalSyncEfficiency.Float64 = float64(total.SyncExecuted) / float64(total.SyncScheduled) totalSyncEfficiency.Valid = true } - totalEntry.Efficiency = d.calculateTotalEfficiency(totalAttestationEfficiency, totalProposerEfficiency, totalSyncEfficiency) + totalEntry.Efficiency = utils.CalculateTotalEfficiency(totalAttestationEfficiency, totalProposerEfficiency, totalSyncEfficiency) result = append([]t.VDBSummaryTableRow{totalEntry}, result...) } @@ -947,12 +948,13 @@ func (d *DataAccessService) internal_getElClAPR(ctx context.Context, dashboardId From(goqu.L("blocks AS b")). LeftJoin(goqu.L("execution_payloads AS ep"), goqu.On(goqu.L("b.exec_block_hash = ep.block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). Where(goqu.L("b.status = '1'")). 
@@ -1166,7 +1168,7 @@ func (d *DataAccessService) GetValidatorDashboardSummaryChart(ctx context.Contex if err != nil { return nil, err } - averageNetworkEfficiency := d.calculateTotalEfficiency( + averageNetworkEfficiency := utils.CalculateTotalEfficiency( efficiency.AttestationEfficiency[enums.Last24h], efficiency.ProposalEfficiency[enums.Last24h], efficiency.SyncEfficiency[enums.Last24h]) for ts := range tsMap { @@ -1238,17 +1240,17 @@ func (d *DataAccessService) GetLatestExportedChartTs(ctx context.Context, aggreg var dateColumn string switch aggregation { case enums.IntervalEpoch: - table = "validator_dashboard_data_epoch" - dateColumn = "epoch_timestamp" + table = "view_validator_dashboard_data_epoch_max_ts" + dateColumn = "t" case enums.IntervalHourly: - table = "validator_dashboard_data_hourly" - dateColumn = "hour" + table = "view_validator_dashboard_data_hourly_max_ts" + dateColumn = "t" case enums.IntervalDaily: - table = "validator_dashboard_data_daily" - dateColumn = "day" + table = "view_validator_dashboard_data_daily_max_ts" + dateColumn = "t" case enums.IntervalWeekly: - table = "validator_dashboard_data_weekly" - dateColumn = "week" + table = "view_validator_dashboard_data_weekly_max_ts" + dateColumn = "t" default: return 0, fmt.Errorf("unexpected aggregation type: %v", aggregation) } diff --git a/backend/pkg/api/data_access/vdb_withdrawals.go b/backend/pkg/api/data_access/vdb_withdrawals.go index c3837f6f0..93124cd73 100644 --- a/backend/pkg/api/data_access/vdb_withdrawals.go +++ b/backend/pkg/api/data_access/vdb_withdrawals.go @@ -17,6 +17,7 @@ import ( t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/cache" "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" "github.com/pkg/errors" @@ -111,6 +112,7 @@ func (d *DataAccessService) GetValidatorDashboardWithdrawals(ctx context.Context // Get the withdrawals for the validators queryResult := []struct { BlockSlot uint64 `db:"block_slot"` + BlockNumber uint64 `db:"exec_block_number"` WithdrawalIndex uint64 `db:"withdrawalindex"` ValidatorIndex uint64 `db:"validatorindex"` Address []byte `db:"address"` @@ -121,6 +123,7 @@ func (d *DataAccessService) GetValidatorDashboardWithdrawals(ctx context.Context withdrawalsQuery := ` SELECT w.block_slot, + b.exec_block_number, w.withdrawalindex, w.validatorindex, w.address, @@ -187,7 +190,7 @@ func (d *DataAccessService) GetValidatorDashboardWithdrawals(ctx context.Context err = d.readerDb.SelectContext(ctx, &queryResult, withdrawalsQuery, queryParams...) 
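In the withdrawals code that follows, recipient enrichment happens in two batched lookups rather than a single ENS map: every address is collected once into addressMapping and resolved to its ENS/label name in one call, while one contract-interaction request per result row is built index-aligned so the returned statuses can be folded back by position. A rough, self-contained sketch of that shape; the types and the two resolver functions are simplified stand-ins for the repo's GetNamesAndEnsForAddresses and GetAddressContractInteractionsAt:

package main

import "fmt"

type Address struct {
	Hash       string
	Ens        string
	IsContract bool
}

type ContractStatus int

const (
	ContractNone     ContractStatus = iota
	ContractCreation                // a contract was created at this block
	ContractPresent                 // contract code already present at this block
)

type InteractionReq struct {
	Address string
	Block   int64
}

// stand-in: resolve names/ENS for all collected addresses in one round trip
func resolveNames(m map[string]*Address) {
	for hash := range m {
		m[hash] = &Address{Hash: hash, Ens: hash + ".example.eth"}
	}
}

// stand-in: one status per request, returned in the same order as the requests
func resolveContracts(reqs []InteractionReq) []ContractStatus {
	return make([]ContractStatus, len(reqs))
}

func main() {
	rows := []struct {
		Recipient string
		Block     int64
	}{{"0xaa", 100}, {"0xbb", 101}}

	addressMapping := make(map[string]*Address) // deduplicates addresses
	reqs := make([]InteractionReq, len(rows))   // index-aligned with rows
	for i, r := range rows {
		addressMapping[r.Recipient] = nil
		reqs[i] = InteractionReq{Address: r.Recipient, Block: r.Block}
	}

	resolveNames(addressMapping)
	statuses := resolveContracts(reqs)

	// fold both lookups back into the result rows
	for i, r := range rows {
		addr := *addressMapping[r.Recipient]
		addr.IsContract = statuses[i] == ContractCreation || statuses[i] == ContractPresent
		fmt.Printf("row %d: %+v\n", i, addr)
	}
}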
if err != nil { - return nil, nil, fmt.Errorf("error getting withdrawals for validators: %+v: %w", validators, err) + return nil, nil, fmt.Errorf("error getting withdrawals for dashboardId: %d (%d validators): %w", dashboardId.Id, len(validators), err) } if len(queryResult) == 0 { @@ -196,32 +199,43 @@ func (d *DataAccessService) GetValidatorDashboardWithdrawals(ctx context.Context } // Prepare the ENS map - addressEns := make(map[string]string) - for _, withdrawal := range queryResult { + addressMapping := make(map[string]*t.Address) + contractStatusRequests := make([]db.ContractInteractionAtRequest, len(queryResult)) + for i, withdrawal := range queryResult { address := hexutil.Encode(withdrawal.Address) - addressEns[address] = "" + addressMapping[address] = nil + contractStatusRequests[i] = db.ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", withdrawal.Address), + Block: int64(withdrawal.BlockNumber), + TxIdx: -1, + TraceIdx: -1, + } + } + + // Get the ENS names and (label) names for the addresses + if err := d.GetNamesAndEnsForAddresses(ctx, addressMapping); err != nil { + return nil, nil, err } - // Get the ENS names for the addresses - if err := db.GetEnsNamesForAddresses(addressEns); err != nil { + // Get the contract status for the addresses + contractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusRequests) + if err != nil { return nil, nil, err } // Create the result cursorData := make([]t.WithdrawalsCursor, 0) - for _, withdrawal := range queryResult { + for i, withdrawal := range queryResult { address := hexutil.Encode(withdrawal.Address) result = append(result, t.VDBWithdrawalsTableRow{ - Epoch: withdrawal.BlockSlot / utils.Config.Chain.ClConfig.SlotsPerEpoch, - Slot: withdrawal.BlockSlot, - Index: withdrawal.ValidatorIndex, - GroupId: validatorGroupMap[withdrawal.ValidatorIndex], - Recipient: t.Address{ - Hash: t.Hash(address), - Ens: addressEns[address], - }, - Amount: utils.GWeiToWei(big.NewInt(int64(withdrawal.Amount))), + Epoch: withdrawal.BlockSlot / utils.Config.Chain.ClConfig.SlotsPerEpoch, + Slot: withdrawal.BlockSlot, + Index: withdrawal.ValidatorIndex, + Recipient: *addressMapping[address], + GroupId: validatorGroupMap[withdrawal.ValidatorIndex], + Amount: utils.GWeiToWei(big.NewInt(int64(withdrawal.Amount))), }) + result[i].Recipient.IsContract = contractStatuses[i] == types.CONTRACT_CREATION || contractStatuses[i] == types.CONTRACT_PRESENT cursorData = append(cursorData, t.WithdrawalsCursor{ Slot: withdrawal.BlockSlot, WithdrawalIndex: withdrawal.WithdrawalIndex, @@ -256,7 +270,8 @@ func (d *DataAccessService) GetValidatorDashboardWithdrawals(ctx context.Context if nextData != nil { // Complete the next data nextData.GroupId = validatorGroupMap[nextData.Index] - nextData.Recipient.Ens = addressEns[string(nextData.Recipient.Hash)] + // TODO integrate label/ens data for "next" row + // nextData.Recipient.Ens = addressEns[string(nextData.Recipient.Hash)] } else { // If there is no next data, add a missing estimate row nextData = &t.VDBWithdrawalsTableRow{ @@ -393,12 +408,28 @@ func (d *DataAccessService) getNextWithdrawalRow(queryValidators []t.VDBValidato withdrawalAmount = 0 } + ens_name, err := db.GetEnsNameForAddress(*address, utils.SlotToTime(nextWithdrawalSlot)) + if err != nil && err != sql.ErrNoRows { + return nil, err + } + + contractStatusReq := []db.ContractInteractionAtRequest{{ + Address: fmt.Sprintf("%x", address), + Block: -1, + }} + contractStatus, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusReq) + if err !=
nil { + return nil, err + } + nextData := &t.VDBWithdrawalsTableRow{ Epoch: nextWithdrawalSlot / utils.Config.Chain.ClConfig.SlotsPerEpoch, Slot: nextWithdrawalSlot, Index: *nextValidator, Recipient: t.Address{ - Hash: t.Hash(address.String()), + Hash: t.Hash(address.String()), + Ens: ens_name, + IsContract: contractStatus[0] == types.CONTRACT_CREATION || contractStatus[0] == types.CONTRACT_PRESENT, }, Amount: utils.GWeiToWei(big.NewInt(int64(withdrawalAmount))), } diff --git a/backend/pkg/api/docs/static.go b/backend/pkg/api/docs/static.go new file mode 100644 index 000000000..93087d82f --- /dev/null +++ b/backend/pkg/api/docs/static.go @@ -0,0 +1,6 @@ +package docs + +import "embed" + +//go:embed * +var Files embed.FS diff --git a/backend/pkg/api/enums/enums.go b/backend/pkg/api/enums/enums.go index a0bd6997d..95ccd0edb 100644 --- a/backend/pkg/api/enums/enums.go +++ b/backend/pkg/api/enums/enums.go @@ -104,7 +104,6 @@ const ( Last24h Last7d Last30d - Last365d ) func (t TimePeriod) Int() int { @@ -123,27 +122,23 @@ func (TimePeriod) NewFromString(s string) TimePeriod { return Last7d case "last_30d": return Last30d - case "last_365d": - return Last365d default: return TimePeriod(-1) } } var TimePeriods = struct { - AllTime TimePeriod - Last1h TimePeriod - Last24h TimePeriod - Last7d TimePeriod - Last30d TimePeriod - Last365d TimePeriod + AllTime TimePeriod + Last1h TimePeriod + Last24h TimePeriod + Last7d TimePeriod + Last30d TimePeriod }{ AllTime, Last1h, Last24h, Last7d, Last30d, - Last365d, } func (t TimePeriod) Duration() time.Duration { @@ -157,8 +152,6 @@ func (t TimePeriod) Duration() time.Duration { return 7 * day case Last30d: return 30 * day - case Last365d: - return 365 * day default: return 0 } diff --git a/backend/pkg/api/enums/notifications_enums.go b/backend/pkg/api/enums/notifications_enums.go index 4367c44ee..9470b8caa 100644 --- a/backend/pkg/api/enums/notifications_enums.go +++ b/backend/pkg/api/enums/notifications_enums.go @@ -1,5 +1,7 @@ package enums +import "github.com/doug-martin/goqu/v9" + // ------------------------------------------------------------ // Notifications Dashboard Table Columns @@ -9,8 +11,11 @@ var _ EnumFactory[NotificationDashboardsColumn] = NotificationDashboardsColumn(0 const ( NotificationDashboardChainId NotificationDashboardsColumn = iota - NotificationDashboardTimestamp - NotificationDashboardDashboardId // sort by name + NotificationDashboardEpoch + NotificationDashboardDashboardName // sort by dashboard name + NotificationDashboardDashboardId // internal use + NotificationDashboardGroupName // internal use + NotificationDashboardGroupId // internal use ) func (c NotificationDashboardsColumn) Int() int { @@ -21,23 +26,49 @@ func (NotificationDashboardsColumn) NewFromString(s string) NotificationDashboar switch s { case "chain_id": return NotificationDashboardChainId - case "timestamp": - return NotificationDashboardTimestamp - case "dashboard_id": - return NotificationDashboardDashboardId + case "epoch": + return NotificationDashboardEpoch + case "dashboard_name", "dashboard_id": // accepting id for frontend + return NotificationDashboardDashboardName default: return NotificationDashboardsColumn(-1) } } +// internal use, used to map to query column names +func (c NotificationDashboardsColumn) ToExpr() OrderableSortable { + switch c { + case NotificationDashboardChainId: + return goqu.C("chain_id") + case NotificationDashboardEpoch: + return goqu.C("epoch") + case NotificationDashboardDashboardName: + return goqu.C("dashboard_name") + case 
NotificationDashboardDashboardId: + return goqu.C("dashboard_id") + case NotificationDashboardGroupName: + return goqu.C("group_name") + case NotificationDashboardGroupId: + return goqu.C("group_id") + default: + return nil + } +} + var NotificationsDashboardsColumns = struct { - ChainId NotificationDashboardsColumn - Timestamp NotificationDashboardsColumn - DashboardId NotificationDashboardsColumn + ChainId NotificationDashboardsColumn + Timestamp NotificationDashboardsColumn + DashboardName NotificationDashboardsColumn + DashboardId NotificationDashboardsColumn + GroupName NotificationDashboardsColumn + GroupId NotificationDashboardsColumn }{ NotificationDashboardChainId, - NotificationDashboardTimestamp, + NotificationDashboardEpoch, + NotificationDashboardDashboardName, NotificationDashboardDashboardId, + NotificationDashboardGroupName, + NotificationDashboardGroupId, } // ------------------------------------------------------------ @@ -48,7 +79,8 @@ type NotificationMachinesColumn int var _ EnumFactory[NotificationMachinesColumn] = NotificationMachinesColumn(0) const ( - NotificationMachineName NotificationMachinesColumn = iota + NotificationMachineId NotificationMachinesColumn = iota // internal use + NotificationMachineName NotificationMachineThreshold NotificationMachineEventType NotificationMachineTimestamp @@ -73,12 +105,32 @@ func (NotificationMachinesColumn) NewFromString(s string) NotificationMachinesCo } } +// internal use, used to map to query column names +func (c NotificationMachinesColumn) ToExpr() OrderableSortable { + switch c { + case NotificationMachineId: + return goqu.C("machine_id") + case NotificationMachineName: + return goqu.C("machine_name") + case NotificationMachineThreshold: + return goqu.C("threshold") + case NotificationMachineEventType: + return goqu.C("event_type") + case NotificationMachineTimestamp: + return goqu.C("epoch") + default: + return nil + } +} + var NotificationsMachinesColumns = struct { + MachineId NotificationMachinesColumn MachineName NotificationMachinesColumn Threshold NotificationMachinesColumn EventType NotificationMachinesColumn Timestamp NotificationMachinesColumn }{ + NotificationMachineId, NotificationMachineName, NotificationMachineThreshold, NotificationMachineEventType, @@ -112,6 +164,18 @@ func (NotificationClientsColumn) NewFromString(s string) NotificationClientsColu } } +// internal use, used to map to query column names +func (c NotificationClientsColumn) ToExpr() OrderableSortable { + switch c { + case NotificationClientName: + return goqu.C("client") + case NotificationClientTimestamp: + return goqu.C("epoch") + default: + return nil + } +} + var NotificationsClientsColumns = struct { ClientName NotificationClientsColumn Timestamp NotificationClientsColumn @@ -169,6 +233,7 @@ var _ EnumFactory[NotificationNetworksColumn] = NotificationNetworksColumn(0) const ( NotificationNetworkTimestamp NotificationNetworksColumn = iota + NotificationNetworkNetwork // internal use NotificationNetworkEventType ) @@ -187,11 +252,27 @@ func (NotificationNetworksColumn) NewFromString(s string) NotificationNetworksCo } } +// internal use, used to map to query column names +func (c NotificationNetworksColumn) ToExpr() OrderableSortable { + switch c { + case NotificationNetworkTimestamp: + return goqu.C("epoch") + case NotificationNetworkNetwork: + return goqu.C("network") + case NotificationNetworkEventType: + return goqu.C("event_type") + default: + return nil + } +} + var NotificationNetworksColumns = struct { Timestamp 
NotificationNetworksColumn + Network NotificationNetworksColumn EventType NotificationNetworksColumn }{ NotificationNetworkTimestamp, + NotificationNetworkNetwork, NotificationNetworkEventType, } @@ -203,7 +284,7 @@ type NotificationSettingsDashboardColumn int var _ EnumFactory[NotificationSettingsDashboardColumn] = NotificationSettingsDashboardColumn(0) const ( - NotificationSettingsDashboardDashboardId NotificationSettingsDashboardColumn = iota + NotificationSettingsDashboardDashboardName NotificationSettingsDashboardColumn = iota NotificationSettingsDashboardGroupName ) @@ -213,8 +294,8 @@ func (c NotificationSettingsDashboardColumn) Int() int { return int(c) } func (NotificationSettingsDashboardColumn) NewFromString(s string) NotificationSettingsDashboardColumn { switch s { - case "dashboard_id": - return NotificationSettingsDashboardDashboardId + case "dashboard_name", "dashboard_id": + return NotificationSettingsDashboardDashboardName case "group_name": return NotificationSettingsDashboardGroupName default: @@ -223,9 +304,9 @@ func (NotificationSettingsDashboardColumn) NewFromString(s string) NotificationS } var NotificationSettingsDashboardColumns = struct { - DashboardId NotificationSettingsDashboardColumn - GroupName NotificationSettingsDashboardColumn + DashboardName NotificationSettingsDashboardColumn + GroupName NotificationSettingsDashboardColumn }{ - NotificationSettingsDashboardDashboardId, + NotificationSettingsDashboardDashboardName, NotificationSettingsDashboardGroupName, } diff --git a/backend/pkg/api/enums/validator_dashboard_enums.go b/backend/pkg/api/enums/validator_dashboard_enums.go index 928d5742c..c241ef98e 100644 --- a/backend/pkg/api/enums/validator_dashboard_enums.go +++ b/backend/pkg/api/enums/validator_dashboard_enums.go @@ -1,5 +1,10 @@ package enums +import ( + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" +) + // ---------------- // Validator Dashboard Summary Table @@ -156,6 +161,29 @@ func (VDBBlocksColumn) NewFromString(s string) VDBBlocksColumn { } } +type OrderableSortable interface { + exp.Orderable + exp.Comparable + exp.Isable +} + +func (c VDBBlocksColumn) ToExpr() OrderableSortable { + switch c { + case VDBBlockProposer: + return goqu.C("validator_index") + case VDBBlockSlot: + return goqu.C("slot") + case VDBBlockBlock: + return goqu.C("exec_block_number") + case VDBBlockStatus: + return goqu.C("status") + case VDBBlockProposerReward: + return goqu.L("el_reward + cl_reward") + default: + return nil + } +} + var VDBBlocksColumns = struct { Proposer VDBBlocksColumn Slot VDBBlocksColumn @@ -270,6 +298,61 @@ var VDBManageValidatorsColumns = struct { VDBManageValidatorsWithdrawalCredential, } +// ---------------- +// Validator Dashboard Mobile Validators Table + +type VDBMobileValidatorsColumn int + +var _ EnumFactory[VDBMobileValidatorsColumn] = VDBMobileValidatorsColumn(0) + +const ( + VDBMobileValidatorsIndex VDBMobileValidatorsColumn = iota + VDBMobileValidatorsPublicKey + VDBMobileValidatorsBalance + VDBMobileValidatorsStatus + VDBMobileValidatorsWithdrawalCredential + VDBMobileValidatorsEfficiency +) + +func (c VDBMobileValidatorsColumn) Int() int { + return int(c) +} + +func (VDBMobileValidatorsColumn) NewFromString(s string) VDBMobileValidatorsColumn { + switch s { + case "index": + return VDBMobileValidatorsIndex + case "public_key": + return VDBMobileValidatorsPublicKey + case "balance": + return VDBMobileValidatorsBalance + case "status": + return VDBMobileValidatorsStatus + case "withdrawal_credential": + return
VDBMobileValidatorsWithdrawalCredential + case "efficiency": + return VDBMobileValidatorsEfficiency + default: + return VDBMobileValidatorsColumn(-1) + } +} + +var VDBMobileValidatorsColumns = struct { + Index VDBMobileValidatorsColumn + PublicKey VDBMobileValidatorsColumn + Balance VDBMobileValidatorsColumn + Status VDBMobileValidatorsColumn + WithdrawalCredential VDBMobileValidatorsColumn + Efficiency VDBMobileValidatorsColumn +}{ + VDBMobileValidatorsIndex, + VDBMobileValidatorsPublicKey, + VDBMobileValidatorsBalance, + VDBMobileValidatorsStatus, + VDBMobileValidatorsWithdrawalCredential, + VDBMobileValidatorsEfficiency, +} + // ---------------- // Validator Dashboard Archived Reasons @@ -437,7 +520,7 @@ func (c VDBRocketPoolMinipoolsColumn) Int() int { func (VDBRocketPoolMinipoolsColumn) NewFromString(s string) VDBRocketPoolMinipoolsColumn { switch s { - case "group": + case "group_id": return VDBRocketPoolMinipoolsGroup default: return VDBRocketPoolMinipoolsColumn(-1) diff --git a/backend/pkg/api/handlers/auth.go b/backend/pkg/api/handlers/auth.go index 62e55e23c..357a47cb5 100644 --- a/backend/pkg/api/handlers/auth.go +++ b/backend/pkg/api/handlers/auth.go @@ -1,12 +1,12 @@ package handlers import ( + "cmp" "context" "errors" "fmt" "html" "net/http" - "strconv" "strings" "time" @@ -14,6 +14,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/mail" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" commonTypes "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/gobitfly/beaconchain/pkg/userservice" @@ -33,10 +34,6 @@ const authConfirmEmailRateLimit = time.Minute * 2 const authResetEmailRateLimit = time.Minute * 2 const authEmailExpireTime = time.Minute * 30 -type ctxKet string - -const ctxUserIdKey ctxKet = "user_id" - var errBadCredentials = newUnauthorizedErr("invalid email or password") func (h *HandlerService) getUserBySession(r *http.Request) (types.UserCredentialInfo, error) { @@ -80,7 +77,7 @@ func (h *HandlerService) purgeAllSessionsForUser(ctx context.Context, userId uin // TODO move to service? func (h *HandlerService) sendConfirmationEmail(ctx context.Context, userId uint64, email string) error { // 1. check last confirmation time to enforce ratelimit - lastTs, err := h.dai.GetEmailConfirmationTime(ctx, userId) + lastTs, err := h.daService.GetEmailConfirmationTime(ctx, userId) if err != nil { return errors.New("error getting confirmation-ts") } @@ -90,7 +87,7 @@ func (h *HandlerService) sendConfirmationEmail(ctx context.Context, userId uint6 // 2. update confirmation hash (before sending so there's no hash mismatch on failure) confirmationHash := utils.RandomString(40) - err = h.dai.UpdateEmailConfirmationHash(ctx, userId, email, confirmationHash) + err = h.daService.UpdateEmailConfirmationHash(ctx, userId, email, confirmationHash) if err != nil { return errors.New("error updating confirmation hash") } @@ -111,7 +108,7 @@ Best regards, } // 4.
update confirmation time (only after mail was sent) - err = h.dai.UpdateEmailConfirmationTime(ctx, userId) + err = h.daService.UpdateEmailConfirmationTime(ctx, userId) if err != nil { // shouldn't present this as error to user, confirmation works fine log.Error(err, "error updating email confirmation time, rate limiting won't be enforced", 0, nil) @@ -123,7 +120,7 @@ Best regards, func (h *HandlerService) sendPasswordResetEmail(ctx context.Context, userId uint64, email string) error { // 0. check if password resets are allowed // (can be forbidden by admin (not yet in v2)) - passwordResetAllowed, err := h.dai.IsPasswordResetAllowed(ctx, userId) + passwordResetAllowed, err := h.daService.IsPasswordResetAllowed(ctx, userId) if err != nil { return err } @@ -132,7 +129,7 @@ func (h *HandlerService) sendPasswordResetEmail(ctx context.Context, userId uint } // 1. check last confirmation time to enforce ratelimit - lastTs, err := h.dai.GetPasswordResetTime(ctx, userId) + lastTs, err := h.daService.GetPasswordResetTime(ctx, userId) if err != nil { return errors.New("error getting confirmation-ts") } @@ -142,7 +139,7 @@ func (h *HandlerService) sendPasswordResetEmail(ctx context.Context, userId uint // 2. update reset hash (before sending so there's no hash mismatch on failure) resetHash := utils.RandomString(40) - err = h.dai.UpdatePasswordResetHash(ctx, userId, resetHash) + err = h.daService.UpdatePasswordResetHash(ctx, userId, resetHash) if err != nil { return errors.New("error updating confirmation hash") } @@ -163,7 +160,7 @@ Best regards, } // 4. update reset time (only after mail was sent) - err = h.dai.UpdatePasswordResetTime(ctx, userId) + err = h.daService.UpdatePasswordResetTime(ctx, userId) if err != nil { // shouldn't present this as error to user, reset works fine log.Error(err, "error updating password reset time, rate limiting won't be enforced", 0, nil) @@ -180,17 +177,19 @@ const authHeaderPrefix = "Bearer " func (h *HandlerService) GetUserIdByApiKey(r *http.Request) (uint64, error) { // TODO: store user id in context during rate limiting and use it here - var apiKey string - authHeader := r.Header.Get("Authorization") - if strings.HasPrefix(authHeader, authHeaderPrefix) { - apiKey = strings.TrimPrefix(authHeader, authHeaderPrefix) - } else { - apiKey = r.URL.Query().Get("api_key") - } + query := r.URL.Query() + header := r.Header + apiKey := cmp.Or( + strings.TrimPrefix(header.Get("Authorization"), authHeaderPrefix), + header.Get("X-Api-Key"), + query.Get("api_key"), + query.Get("apiKey"), + query.Get("apikey"), + ) if apiKey == "" { return 0, newUnauthorizedErr("missing api key") } - userId, err := h.dai.GetUserIdByApiKey(r.Context(), apiKey) + userId, err := h.daService.GetUserIdByApiKey(r.Context(), apiKey) if errors.Is(err, dataaccess.ErrNotFound) { err = newUnauthorizedErr("api key not found") } @@ -199,7 +198,7 @@ func (h *HandlerService) GetUserIdByApiKey(r *http.Request) (uint64, error) { // if this is used, user ID should've been stored in context (by GetUserIdStoreMiddleware) func GetUserIdByContext(r *http.Request) (uint64, error) { - userId, ok := r.Context().Value(ctxUserIdKey).(uint64) + userId, ok := r.Context().Value(types.CtxUserIdKey).(uint64) if !ok { return 0, newUnauthorizedErr("user not authenticated") } @@ -239,7 +238,7 @@ func (h *HandlerService) InternalPostUsers(w http.ResponseWriter, r *http.Reques return } - _, err := h.dai.GetUserByEmail(r.Context(), email) + _, err := h.daService.GetUserByEmail(r.Context(), email) if !errors.Is(err,
dataaccess.ErrNotFound) { if err == nil { returnConflict(w, r, errors.New("email already registered")) @@ -262,7 +261,7 @@ func (h *HandlerService) InternalPostUsers(w http.ResponseWriter, r *http.Reques } // add user - userId, err := h.dai.CreateUser(r.Context(), email, string(passwordHash)) + userId, err := h.daService.CreateUser(r.Context(), email, string(passwordHash)) if err != nil { handleErr(w, r, err) return @@ -287,12 +286,12 @@ func (h *HandlerService) InternalPostUserConfirm(w http.ResponseWriter, r *http. return } - userId, err := h.dai.GetUserIdByConfirmationHash(r.Context(), confirmationHash) + userId, err := h.daService.GetUserIdByConfirmationHash(r.Context(), confirmationHash) if err != nil { handleErr(w, r, err) return } - confirmationTime, err := h.dai.GetEmailConfirmationTime(r.Context(), userId) + confirmationTime, err := h.daService.GetEmailConfirmationTime(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -302,7 +301,7 @@ func (h *HandlerService) InternalPostUserConfirm(w http.ResponseWriter, r *http. return } - err = h.dai.UpdateUserEmail(r.Context(), userId) + err = h.daService.UpdateUserEmail(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -334,7 +333,7 @@ func (h *HandlerService) InternalPostUserPasswordReset(w http.ResponseWriter, r return } - userId, err := h.dai.GetUserByEmail(r.Context(), email) + userId, err := h.daService.GetUserByEmail(r.Context(), email) if err != nil { if err == dataaccess.ErrNotFound { // don't leak if email is registered @@ -372,12 +371,12 @@ func (h *HandlerService) InternalPostUserPasswordResetHash(w http.ResponseWriter } // check token validity - userId, err := h.dai.GetUserIdByResetHash(r.Context(), resetToken) + userId, err := h.daService.GetUserIdByResetHash(r.Context(), resetToken) if err != nil { handleErr(w, r, err) return } - resetTime, err := h.dai.GetPasswordResetTime(r.Context(), userId) + resetTime, err := h.daService.GetPasswordResetTime(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -393,20 +392,20 @@ func (h *HandlerService) InternalPostUserPasswordResetHash(w http.ResponseWriter handleErr(w, r, errors.New("error hashing password")) return } - err = h.dai.UpdateUserPassword(r.Context(), userId, string(passwordHash)) + err = h.daService.UpdateUserPassword(r.Context(), userId, string(passwordHash)) if err != nil { handleErr(w, r, err) return } // if email is not confirmed, confirm since they clicked a link emailed to them - userInfo, err := h.dai.GetUserCredentialInfo(r.Context(), userId) + userInfo, err := h.daService.GetUserCredentialInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } if !userInfo.EmailConfirmed { - err = h.dai.UpdateUserEmail(r.Context(), userId) + err = h.daService.UpdateUserEmail(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -441,7 +440,7 @@ func (h *HandlerService) InternalPostLogin(w http.ResponseWriter, r *http.Reques } // fetch user - userId, err := h.dai.GetUserByEmail(r.Context(), email) + userId, err := h.daService.GetUserByEmail(r.Context(), email) if err != nil { if errors.Is(err, dataaccess.ErrNotFound) { err = errBadCredentials @@ -449,7 +448,7 @@ func (h *HandlerService) InternalPostLogin(w http.ResponseWriter, r *http.Reques handleErr(w, r, err) return } - user, err := h.dai.GetUserCredentialInfo(r.Context(), userId) + user, err := h.daService.GetUserCredentialInfo(r.Context(), userId) if err != nil { if errors.Is(err, dataaccess.ErrNotFound) { err = errBadCredentials @@ -524,7 +523,7 @@ 
func (h *HandlerService) InternalPostMobileAuthorize(w http.ResponseWriter, r *h } // check if oauth app exists to validate whether redirect uri is valid - appInfo, err := h.dai.GetAppDataFromRedirectUri(req.RedirectURI) + appInfo, err := h.daService.GetAppDataFromRedirectUri(r.Context(), req.RedirectURI) if err != nil { callback := req.RedirectURI + "?error=invalid_request&error_description=missing_redirect_uri" + state http.Redirect(w, r, callback, http.StatusSeeOther) @@ -541,7 +540,7 @@ func (h *HandlerService) InternalPostMobileAuthorize(w http.ResponseWriter, r *h session := h.scs.Token(r.Context()) sanitizedDeviceName := html.EscapeString(clientName) - err = h.dai.AddUserDevice(userInfo.Id, utils.HashAndEncode(session+session), clientID, sanitizedDeviceName, appInfo.ID) + err = h.daService.AddUserDevice(r.Context(), userInfo.Id, utils.HashAndEncode(session+session), clientID, sanitizedDeviceName, appInfo.ID) if err != nil { log.Warnf("Error adding user device: %v", err) callback := req.RedirectURI + "?error=invalid_request&error_description=server_error" + state @@ -581,7 +580,7 @@ func (h *HandlerService) InternalPostMobileEquivalentExchange(w http.ResponseWri } // Get user info - user, err := h.dai.GetUserCredentialInfo(r.Context(), userID) + user, err := h.daService.GetUserCredentialInfo(r.Context(), userID) if err != nil { if errors.Is(err, dataaccess.ErrNotFound) { err = errBadCredentials @@ -604,7 +603,7 @@ func (h *HandlerService) InternalPostMobileEquivalentExchange(w http.ResponseWri // invalidate old refresh token and replace with hashed session id sanitizedDeviceName := html.EscapeString(req.DeviceName) - err = h.dai.MigrateMobileSession(refreshTokenHashed, utils.HashAndEncode(session+session), req.DeviceID, sanitizedDeviceName) // salted with session + err = h.daService.MigrateMobileSession(r.Context(), refreshTokenHashed, utils.HashAndEncode(session+session), req.DeviceID, sanitizedDeviceName) // salted with session if err != nil { handleErr(w, r, err) return @@ -645,7 +644,7 @@ func (h *HandlerService) InternalPostUsersMeNotificationSettingsPairedDevicesTok return } - err = h.dai.AddMobileNotificationToken(user.Id, deviceID, req.Token) + err = h.daService.AddMobileNotificationToken(r.Context(), user.Id, deviceID, req.Token) if err != nil { handleErr(w, r, err) return @@ -685,7 +684,7 @@ func (h *HandlerService) InternalHandleMobilePurchase(w http.ResponseWriter, r * return } - subscriptionCount, err := h.dai.GetAppSubscriptionCount(user.Id) + subscriptionCount, err := h.daService.GetAppSubscriptionCount(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -708,6 +707,7 @@ func (h *HandlerService) InternalHandleMobilePurchase(w http.ResponseWriter, r * validationResult, err := userservice.VerifyReceipt(nil, nil, verifyPackage) if err != nil { log.Warn(err, "could not verify receipt %v", 0, map[string]interface{}{"receipt": verifyPackage.Receipt}) + metrics.Errors.WithLabelValues(fmt.Sprintf("appsub_verify_%s_failed", req.Transaction.Type)).Inc() if errors.Is(err, userservice.ErrClientInit) { log.Error(err, "Apple or Google client is NOT initialized. 
Did you provide their configuration?", 0, nil) handleErr(w, r, err) @@ -715,7 +715,7 @@ func (h *HandlerService) InternalHandleMobilePurchase(w http.ResponseWriter, r * } } - err = h.dai.AddMobilePurchase(nil, user.Id, req, validationResult, "") + err = h.daService.AddMobilePurchase(r.Context(), nil, user.Id, req, validationResult, "") if err != nil { handleErr(w, r, err) return @@ -746,7 +746,7 @@ func (h *HandlerService) InternalDeleteUser(w http.ResponseWriter, r *http.Reque } // TODO allow if user has any subscriptions etc? - err = h.dai.RemoveUser(r.Context(), user.Id) + err = h.daService.RemoveUser(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -768,7 +768,7 @@ func (h *HandlerService) InternalPostUserEmail(w http.ResponseWriter, r *http.Re handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserCredentialInfo(r.Context(), user.Id) + userInfo, err := h.daService.GetUserCredentialInfo(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -800,7 +800,7 @@ func (h *HandlerService) InternalPostUserEmail(w http.ResponseWriter, r *http.Re return } - _, err = h.dai.GetUserByEmail(r.Context(), newEmail) + _, err = h.daService.GetUserByEmail(r.Context(), newEmail) if !errors.Is(err, dataaccess.ErrNotFound) { if err == nil { handleErr(w, r, newConflictErr("email already registered")) @@ -847,7 +847,7 @@ func (h *HandlerService) InternalPutUserPassword(w http.ResponseWriter, r *http. return } // user doesn't contain password, fetch from db - userData, err := h.dai.GetUserCredentialInfo(r.Context(), user.Id) + userData, err := h.daService.GetUserCredentialInfo(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -883,7 +883,7 @@ func (h *HandlerService) InternalPutUserPassword(w http.ResponseWriter, r *http. } // change password - err = h.dai.UpdateUserPassword(r.Context(), user.Id, string(passwordHash)) + err = h.daService.UpdateUserPassword(r.Context(), user.Id, string(passwordHash)) if err != nil { handleErr(w, r, err) return @@ -897,114 +897,3 @@ func (h *HandlerService) InternalPutUserPassword(w http.ResponseWriter, r *http.
returnNoContent(w, r) } - -// Middlewares - -// returns a middleware that stores user id in context, using the provided function -func GetUserIdStoreMiddleware(userIdFunc func(r *http.Request) (uint64, error)) func(http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - userId, err := userIdFunc(r) - if err != nil { - if errors.Is(err, errUnauthorized) { - // if next handler requires authentication, it should return 'unauthorized' itself - next.ServeHTTP(w, r) - } else { - handleErr(w, r, err) - } - return - } - ctx := r.Context() - ctx = context.WithValue(ctx, ctxUserIdKey, userId) - r = r.WithContext(ctx) - next.ServeHTTP(w, r) - }) - } -} - -// returns a middleware that checks if user has access to dashboard when a primary id is used -func (h *HandlerService) VDBAuthMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var err error - dashboardId, err := strconv.ParseUint(mux.Vars(r)["dashboard_id"], 10, 64) - if err != nil { - // if primary id is not used, no need to check access - next.ServeHTTP(w, r) - return - } - // primary id is used -> user needs to have access to dashboard - - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - // store user id in context - ctx := r.Context() - ctx = context.WithValue(ctx, ctxUserIdKey, userId) - r = r.WithContext(ctx) - - dashboardUser, err := h.dai.GetValidatorDashboardUser(r.Context(), types.VDBIdPrimary(dashboardId)) - if err != nil { - handleErr(w, r, err) - return - } - - if dashboardUser.UserId != userId { - // user does not have access to dashboard - // the proper error would be 403 Forbidden, but we don't want to leak information so we return 404 Not Found - handleErr(w, r, newNotFoundErr("dashboard with id %v not found", dashboardId)) - return - } - - next.ServeHTTP(w, r) - }) -} - -// returns a middleware that checks if user has premium perk to use public validator dashboard api -// in the middleware chain, this should be used after GetVDBAuthMiddleware -func (h *HandlerService) ManageViaApiCheckMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // get user id from context - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) - if err != nil { - handleErr(w, r, err) - return - } - if !userInfo.PremiumPerks.ManageDashboardViaApi { - handleErr(w, r, newForbiddenErr("user does not have access to public validator dashboard endpoints")) - return - } - next.ServeHTTP(w, r) - }) -} - -// middleware check to return if specified dashboard is not archived (and accessible) -func (h *HandlerService) VDBArchivedCheckMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - if len(dashboardId.Validators) > 0 { - next.ServeHTTP(w, r) - return - } - dashboard, err := h.dai.GetValidatorDashboardInfo(r.Context(), dashboardId.Id) - if err != nil { - handleErr(w, r, err) - return - } - if dashboard.IsArchived { - handleErr(w, r, newForbiddenErr("dashboard with id %v is archived", dashboardId)) - return - } - next.ServeHTTP(w, r) - }) -} diff --git a/backend/pkg/api/handlers/backward_compat.go 
b/backend/pkg/api/handlers/backward_compat.go index f02161a00..8b8e69442 100644 --- a/backend/pkg/api/handlers/backward_compat.go +++ b/backend/pkg/api/handlers/backward_compat.go @@ -43,7 +43,7 @@ func (h *HandlerService) getTokenByRefresh(r *http.Request, refreshToken string) log.Infof("refresh token: %v, claims: %v, hashed refresh: %v", refreshToken, unsafeClaims, refreshTokenHashed) // confirm all claims via db lookup and refreshtoken check - userID, err := h.dai.GetUserIdByRefreshToken(unsafeClaims.UserID, unsafeClaims.AppID, unsafeClaims.DeviceID, refreshTokenHashed) + userID, err := h.daService.GetUserIdByRefreshToken(r.Context(), unsafeClaims.UserID, unsafeClaims.AppID, unsafeClaims.DeviceID, refreshTokenHashed) if err != nil { if err == sql.ErrNoRows { return 0, "", dataaccess.ErrNotFound diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/common.go deleted file mode 100644 index ecc9794db..000000000 --- a/backend/pkg/api/handlers/common.go +++ /dev/null @@ -1,1031 +0,0 @@ -package handlers - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" - - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/gobitfly/beaconchain/pkg/commons/log" - "github.com/gorilla/mux" - "github.com/invopop/jsonschema" - "github.com/xeipuuv/gojsonschema" - - "github.com/alexedwards/scs/v2" - dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" - "github.com/gobitfly/beaconchain/pkg/api/enums" - "github.com/gobitfly/beaconchain/pkg/api/services" - types "github.com/gobitfly/beaconchain/pkg/api/types" -) - -type HandlerService struct { - dai dataaccess.DataAccessor - scs *scs.SessionManager -} - -func NewHandlerService(dataAccessor dataaccess.DataAccessor, sessionManager *scs.SessionManager) *HandlerService { - if allNetworks == nil { - networks, err := dataAccessor.GetAllNetworks() - if err != nil { - log.Fatal(err, "error getting networks for handler", 0, nil) - } - allNetworks = networks - } - - return &HandlerService{ - dai: dataAccessor, - scs: sessionManager, - } -} - -// all networks available in the system, filled on startup in NewHandlerService -var allNetworks []types.NetworkInfo - -// -------------------------------------- - -var ( - // Subject to change, just examples - reName = regexp.MustCompile(`^[a-zA-Z0-9_\-.\ ]*$`) - reInteger = regexp.MustCompile(`^[0-9]+$`) - reValidatorDashboardPublicId = regexp.MustCompile(`^v-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`) - reValidatorPublicKeyWithPrefix = regexp.MustCompile(`^0x[0-9a-fA-F]{96}$`) - reValidatorPublicKey = regexp.MustCompile(`^(0x)?[0-9a-fA-F]{96}$`) - reEthereumAddress = regexp.MustCompile(`^(0x)?[0-9a-fA-F]{40}$`) - reWithdrawalCredential = regexp.MustCompile(`^(0x0[01])?[0-9a-fA-F]{62}$`) - reEnsName = regexp.MustCompile(`^.+\.eth$`) - reNonEmpty = regexp.MustCompile(`^\s*\S.*$`) - reCursor = regexp.MustCompile(`^[A-Za-z0-9-_]+$`) // has to be base64 - reEmail = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$") - rePassword = regexp.MustCompile(`^.{5,}$`) - reEmailUserToken = regexp.MustCompile(`^[a-z0-9]{40}$`) - reJsonContentType = regexp.MustCompile(`^application\/json(;.*)?$`) -) - -const ( - maxNameLength = 50 - maxValidatorsInList = 20 - maxQueryLimit uint64 = 100 - defaultReturnLimit uint64 = 10 - sortOrderAscending = "asc" - sortOrderDescending = "desc" - 
defaultSortOrder = sortOrderAscending - ethereum = "ethereum" - gnosis = "gnosis" - allowEmpty = true - forbidEmpty = false - MaxArchivedDashboardsCount = 10 -) - -var ( - errMsgParsingId = errors.New("error parsing parameter 'dashboard_id'") - errBadRequest = errors.New("bad request") - errInternalServer = errors.New("internal server error") - errUnauthorized = errors.New("unauthorized") - errForbidden = errors.New("forbidden") - errConflict = errors.New("conflict") - errTooManyRequests = errors.New("too many requests") - errGone = errors.New("gone") -) - -type Paging struct { - cursor string - limit uint64 - search string -} - -// All changes to common functions MUST NOT break any public handler behavior (not in effect yet) - -// -------------------------------------- -// Input Validation - -// validationError is a map of parameter names to error messages. -// It is used to collect multiple validation errors before returning them to the user. -type validationError map[string]string - -func (v validationError) Error() string { - //iterate over map and create a string - var sb strings.Builder - for k, v := range v { - sb.WriteString(k) - sb.WriteString(": ") - sb.WriteString(v) - sb.WriteString("\n") - } - return sb.String()[:sb.Len()-1] -} - -func (v *validationError) add(paramName, problem string) { - if *v == nil { - *v = make(validationError) - } - validationMap := *v - if _, ok := validationMap[paramName]; ok { - problem = validationMap[paramName] + "; " + problem - } - validationMap[paramName] = problem -} - -func (v *validationError) hasErrors() bool { - return v != nil && len(*v) > 0 -} - -func (v *validationError) checkRegex(regex *regexp.Regexp, param, paramName string) string { - if !regex.MatchString(param) { - v.add(paramName, fmt.Sprintf(`given value '%s' has incorrect format`, param)) - } - return param -} - -func (v *validationError) checkLength(name, paramName string, minLength int) string { - if len(name) < minLength { - v.add(paramName, fmt.Sprintf(`given value '%s' is too short, minimum length is %d`, name, minLength)) - } - if len(name) > maxNameLength { - v.add(paramName, fmt.Sprintf(`given value '%s' is too long, maximum length is %d`, name, maxNameLength)) - } - return name -} - -func (v *validationError) checkName(name string, minLength int) string { - name = v.checkLength(name, "name", minLength) - return v.checkRegex(reName, name, "name") -} - -func (v *validationError) checkNameNotEmpty(name string) string { - return v.checkName(name, 1) -} - -func (v *validationError) checkKeyNotEmpty(key string) string { - key = v.checkLength(key, "key", 1) - return v.checkRegex(reName, key, "key") -} - -func (v *validationError) checkEmail(email string) string { - return v.checkRegex(reEmail, strings.ToLower(email), "email") -} - -func (v *validationError) checkPassword(password string) string { - return v.checkRegex(rePassword, password, "password") -} - -func (v *validationError) checkUserEmailToken(token string) string { - return v.checkRegex(reEmailUserToken, token, "token") -} - -// check request structure (body contains valid json and all required parameters are present) -// return error only if internal error occurs, otherwise add error to validationError and/or return nil -func (v *validationError) checkBody(data interface{}, r *http.Request) error { - // check if content type is application/json - if contentType := r.Header.Get("Content-Type"); !reJsonContentType.MatchString(contentType) { - v.add("request body", "'Content-Type' header must be 'application/json'") - 
} - - bodyBytes, err := io.ReadAll(r.Body) - r.Body = io.NopCloser(bytes.NewReader(bodyBytes)) // unconsume body for error logging - if err != nil { - return newInternalServerErr("error reading request body") - } - - // First check: Unmarshal into an empty interface to check JSON format - var i interface{} - if err := json.Unmarshal(bodyBytes, &i); err != nil { - v.add("request body", "not in JSON format") - return nil - } - - // Second check: Validate against the expected schema - sc := jsonschema.Reflect(data) - b, err := json.Marshal(sc) - if err != nil { - return newInternalServerErr("error creating expected schema") - } - loader := gojsonschema.NewBytesLoader(b) - documentLoader := gojsonschema.NewBytesLoader(bodyBytes) - schema, err := gojsonschema.NewSchema(loader) - if err != nil { - return newInternalServerErr("error creating schema") - } - result, err := schema.Validate(documentLoader) - if err != nil { - return newInternalServerErr("error validating schema") - } - isSchemaValid := result.Valid() - if !isSchemaValid { - v.add("request body", "invalid schema, check the API documentation for the expected format") - } - - // Unmarshal into the target struct, only log error if it's a valid JSON - if err := json.Unmarshal(bodyBytes, data); err != nil && isSchemaValid { - return newInternalServerErr("error unmarshalling request body") - } - - // Proceed with additional validation or processing as necessary - return nil -} - -func (v *validationError) checkInt(param, paramName string) int64 { - num, err := strconv.ParseInt(param, 10, 64) - if err != nil { - v.add(paramName, fmt.Sprintf("given value '%s' is not an integer", param)) - } - return num -} - -func (v *validationError) checkUint(param, paramName string) uint64 { - num, err := strconv.ParseUint(param, 10, 64) - if err != nil { - v.add(paramName, fmt.Sprintf("given value %s is not a positive integer", param)) - } - return num -} - -func (v *validationError) checkBool(param, paramName string) bool { - if param == "" { - return false - } - boolVar, err := strconv.ParseBool(param) - if err != nil { - v.add(paramName, fmt.Sprintf("given value '%s' is not a boolean", param)) - } - return boolVar -} - -func (v *validationError) checkAdConfigurationKeys(keysString string) []string { - if keysString == "" { - return []string{} - } - var keys []string - for _, key := range splitParameters(keysString, ',') { - keys = append(keys, v.checkRegex(reName, key, "keys")) - } - return keys -} - -type validatorSet struct { - Indexes []types.VDBValidator - PublicKeys []string -} - -// parseDashboardId is a helper function to validate the string dashboard id param. 
-func parseDashboardId(id string) (interface{}, error) { - var v validationError - if reInteger.MatchString(id) { - // given id is a normal id - id := v.checkUint(id, "dashboard_id") - if v.hasErrors() { - return nil, v - } - return types.VDBIdPrimary(id), nil - } - if reValidatorDashboardPublicId.MatchString(id) { - // given id is a public id - return types.VDBIdPublic(id), nil - } - // given id must be an encoded set of validators - decodedId, err := base64.RawURLEncoding.DecodeString(id) - if err != nil { - return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) - } - indexes, publicKeys := v.checkValidatorList(string(decodedId), forbidEmpty) - if v.hasErrors() { - return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) - } - return validatorSet{Indexes: indexes, PublicKeys: publicKeys}, nil -} - -// getDashboardId is a helper function to convert the dashboard id param to a VDBId. -// precondition: dashboardIdParam must be a valid dashboard id and either a primary id, public id, or list of validators. -func (h *HandlerService) getDashboardId(ctx context.Context, dashboardIdParam interface{}) (*types.VDBId, error) { - switch dashboardId := dashboardIdParam.(type) { - case types.VDBIdPrimary: - return &types.VDBId{Id: dashboardId, Validators: nil}, nil - case types.VDBIdPublic: - dashboardInfo, err := h.dai.GetValidatorDashboardPublicId(ctx, dashboardId) - if err != nil { - return nil, err - } - return &types.VDBId{Id: types.VDBIdPrimary(dashboardInfo.DashboardId), Validators: nil, AggregateGroups: !dashboardInfo.ShareSettings.ShareGroups}, nil - case validatorSet: - validators, err := h.dai.GetValidatorsFromSlices(dashboardId.Indexes, dashboardId.PublicKeys) - if err != nil { - return nil, err - } - if len(validators) == 0 { - return nil, newNotFoundErr("no validators found for given id") - } - if len(validators) > maxValidatorsInList { - return nil, newBadRequestErr("too many validators in list, maximum is %d", maxValidatorsInList) - } - return &types.VDBId{Validators: validators}, nil - } - return nil, errMsgParsingId -} - -// handleDashboardId is a helper function to both validate the dashboard id param and convert it to a VDBId. -// it should be used as the last validation step for all internal dashboard GET-handlers. -// Modifying handlers (POST, PUT, DELETE) should only accept primary dashboard ids and just use checkPrimaryDashboardId. 
-func (h *HandlerService) handleDashboardId(ctx context.Context, param string) (*types.VDBId, error) { - // validate dashboard id param - dashboardIdParam, err := parseDashboardId(param) - if err != nil { - return nil, err - } - // convert to VDBId - dashboardId, err := h.getDashboardId(ctx, dashboardIdParam) - if err != nil { - return nil, err - } - - return dashboardId, nil -} - -const chartDatapointLimit uint64 = 200 - -type ChartTimeDashboardLimits struct { - MinAllowedTs uint64 - LatestExportedTs uint64 - MaxAllowedInterval uint64 -} - -// helper function to retrieve allowed chart timestamp boundaries according to the users premium perks at the current point in time -func (h *HandlerService) getCurrentChartTimeLimitsForDashboard(ctx context.Context, dashboardId *types.VDBId, aggregation enums.ChartAggregation) (ChartTimeDashboardLimits, error) { - limits := ChartTimeDashboardLimits{} - var err error - premiumPerks, err := h.getDashboardPremiumPerks(ctx, *dashboardId) - if err != nil { - return limits, err - } - - maxAge := getMaxChartAge(aggregation, premiumPerks.ChartHistorySeconds) // can be max int for unlimited, always check for underflows - if maxAge == 0 { - return limits, newConflictErr("requested aggregation is not available for dashboard owner's premium subscription") - } - limits.LatestExportedTs, err = h.dai.GetLatestExportedChartTs(ctx, aggregation) - if err != nil { - return limits, err - } - limits.MinAllowedTs = limits.LatestExportedTs - min(maxAge, limits.LatestExportedTs) // min to prevent underflow - secondsPerEpoch := uint64(12 * 32) // TODO: fetch dashboards chain id and use correct value for network once available - limits.MaxAllowedInterval = chartDatapointLimit*uint64(aggregation.Duration(secondsPerEpoch).Seconds()) - 1 // -1 to make sure we don't go over the limit - - return limits, nil -} - -func (v *validationError) checkPrimaryDashboardId(param string) types.VDBIdPrimary { - return types.VDBIdPrimary(v.checkUint(param, "dashboard_id")) -} - -// getDashboardPremiumPerks gets the premium perks of the dashboard OWNER or if it's a guest dashboard, it returns free tier premium perks -func (h *HandlerService) getDashboardPremiumPerks(ctx context.Context, id types.VDBId) (*types.PremiumPerks, error) { - // for guest dashboards, return free tier perks - if id.Validators != nil { - perk, err := h.dai.GetFreeTierPerks(ctx) - if err != nil { - return nil, err - } - return perk, nil - } - // could be made into a single query if needed - dashboardUser, err := h.dai.GetValidatorDashboardUser(ctx, id.Id) - if err != nil { - return nil, err - } - userInfo, err := h.dai.GetUserInfo(ctx, dashboardUser.UserId) - if err != nil { - return nil, err - } - - return &userInfo.PremiumPerks, nil -} - -// helper function to unify handling of block detail request validation -func (h *HandlerService) validateBlockRequest(r *http.Request, paramName string) (uint64, uint64, error) { - var v validationError - var err error - chainId := v.checkNetworkParameter(mux.Vars(r)["network"]) - var value uint64 - switch paramValue := mux.Vars(r)[paramName]; paramValue { - // possibly add other values like "genesis", "finalized", hardforks etc. 
later - case "latest": - if paramName == "block" { - value, err = h.dai.GetLatestBlock() - } else if paramName == "slot" { - value, err = h.dai.GetLatestSlot() - } - if err != nil { - return 0, 0, err - } - default: - value = v.checkUint(paramValue, paramName) - } - if v.hasErrors() { - return 0, 0, v - } - return chainId, value, nil -} - -// checkGroupId validates the given group id and returns it as an int64. -// If the given group id is empty and allowEmpty is true, it returns -1 (all groups). -func (v *validationError) checkGroupId(param string, allowEmpty bool) int64 { - if param == "" && allowEmpty { - return types.AllGroups - } - return v.checkInt(param, "group_id") -} - -// checkExistingGroupId validates if the given group id is not empty and a positive integer. -func (v *validationError) checkExistingGroupId(param string) uint64 { - return v.checkUint(param, "group_id") -} - -//nolint:unparam -func splitParameters(params string, delim rune) []string { - // This splits the string by delim and removes empty strings - f := func(c rune) bool { - return c == delim - } - return strings.FieldsFunc(params, f) -} - -func parseGroupIdList[T any](groupIds string, convert func(string, string) T) []T { - var ids []T - for _, id := range splitParameters(groupIds, ',') { - ids = append(ids, convert(id, "group_ids")) - } - return ids -} - -func (v *validationError) checkExistingGroupIdList(groupIds string) []uint64 { - return parseGroupIdList(groupIds, v.checkUint) -} - -func (v *validationError) checkGroupIdList(groupIds string) []int64 { - return parseGroupIdList(groupIds, v.checkInt) -} - -func (v *validationError) checkValidatorDashboardPublicId(publicId string) types.VDBIdPublic { - return types.VDBIdPublic(v.checkRegex(reValidatorDashboardPublicId, publicId, "public_dashboard_id")) -} - -type number interface { - uint64 | int64 | float64 -} - -func checkMinMax[T number](v *validationError, param T, min T, max T, paramName string) T { - if param < min { - v.add(paramName, fmt.Sprintf("given value '%v' is too small, minimum value is %v", param, min)) - } - if param > max { - v.add(paramName, fmt.Sprintf("given value '%v' is too large, maximum value is %v", param, max)) - } - return param -} - -func (v *validationError) checkAddress(publicId string) string { - return v.checkRegex(reEthereumAddress, publicId, "address") -} - -func (v *validationError) checkUintMinMax(param string, min uint64, max uint64, paramName string) uint64 { - return checkMinMax(v, v.checkUint(param, paramName), min, max, paramName) -} - -func (v *validationError) checkPagingParams(q url.Values) Paging { - paging := Paging{ - cursor: q.Get("cursor"), - limit: defaultReturnLimit, - search: q.Get("search"), - } - - if limitStr := q.Get("limit"); limitStr != "" { - paging.limit = v.checkUintMinMax(limitStr, 1, maxQueryLimit, "limit") - } - - if paging.cursor != "" { - paging.cursor = v.checkRegex(reCursor, paging.cursor, "cursor") - } - - return paging -} - -// checkEnum validates the given enum string and returns the corresponding enum value. -func checkEnum[T enums.EnumFactory[T]](v *validationError, enumString string, name string) T { - var e T - enum := e.NewFromString(enumString) - if enums.IsInvalidEnum(enum) { - v.add(name, fmt.Sprintf("given value '%s' is not valid", enumString)) - return enum - } - return enum -} - -// checkEnumIsAllowed checks if the given enum is in the list of allowed enums. -// precondition: the enum is the same type as the allowed enums. 
-func (v *validationError) checkEnumIsAllowed(enum enums.Enum, allowed []enums.Enum, name string) { - if enums.IsInvalidEnum(enum) { - v.add(name, "parameter is missing or invalid, please check the API documentation") - return - } - for _, a := range allowed { - if enum.Int() == a.Int() { - return - } - } - v.add(name, "parameter is missing or invalid, please check the API documentation") -} - -func (v *validationError) parseSortOrder(order string) bool { - switch order { - case "": - return defaultSortOrder == sortOrderDescending - case sortOrderAscending: - return false - case sortOrderDescending: - return true - default: - v.add("sort", fmt.Sprintf("given value '%s' for parameter 'sort' is not valid, allowed order values are: %s, %s", order, sortOrderAscending, sortOrderDescending)) - return false - } -} - -func checkSort[T enums.EnumFactory[T]](v *validationError, sortString string) *types.Sort[T] { - var c T - if sortString == "" { - return &types.Sort[T]{Column: c, Desc: false} - } - sortSplit := strings.Split(sortString, ":") - if len(sortSplit) > 2 { - v.add("sort", fmt.Sprintf("given value '%s' for parameter 'sort' is not valid, expected format is '<column_name>[:(asc|desc)]'", sortString)) - return nil - } - if len(sortSplit) == 1 { - sortSplit = append(sortSplit, "") - } - sortCol := checkEnum[T](v, sortSplit[0], "sort") - order := v.parseSortOrder(sortSplit[1]) - return &types.Sort[T]{Column: sortCol, Desc: order} -} - -func (v *validationError) checkProtocolModes(protocolModes string) types.VDBProtocolModes { - var modes types.VDBProtocolModes - if protocolModes == "" { - return modes - } - protocolsSlice := splitParameters(protocolModes, ',') - for _, protocolMode := range protocolsSlice { - switch protocolMode { - case "rocket_pool": - modes.RocketPool = true - default: - v.add("modes", fmt.Sprintf("given value '%s' is not a valid protocol mode", protocolMode)) - } - } - return modes -} - -func (v *validationError) checkValidatorList(validators string, allowEmpty bool) ([]types.VDBValidator, []string) { - if validators == "" && !allowEmpty { - v.add("validators", "list of validators must not be empty") - return nil, nil - } - validatorsSlice := splitParameters(validators, ',') - var indexes []types.VDBValidator - var publicKeys []string - for _, validator := range validatorsSlice { - if reInteger.MatchString(validator) { - indexes = append(indexes, v.checkUint(validator, "validators")) - } else if reValidatorPublicKeyWithPrefix.MatchString(validator) { - _, err := hexutil.Decode(validator) - if err != nil { - v.add("validators", fmt.Sprintf("invalid value '%s' in list of validators", v)) - } - publicKeys = append(publicKeys, validator) - } else { - v.add("validators", fmt.Sprintf("invalid value '%s' in list of validators", v)) - } - } - return indexes, publicKeys -} - -func (v *validationError) checkValidators(validators []intOrString, allowEmpty bool) ([]types.VDBValidator, []string) { - if len(validators) == 0 && !allowEmpty { - v.add("validators", "list of validators is empty") - return nil, nil - } - var indexes []types.VDBValidator - var publicKeys []string - for _, validator := range validators { - switch { - case validator.intValue != nil: - indexes = append(indexes, *validator.intValue) - case validator.strValue != nil: - if !reValidatorPublicKey.MatchString(*validator.strValue) { - v.add("validators", fmt.Sprintf("given value '%s' is not a valid validator", *validator.strValue)) - continue - } - publicKeys = append(publicKeys, *validator.strValue) - default: -
v.add("validators", "list contains invalid validator") - } - } - return indexes, publicKeys -} - -func (v *validationError) checkNetwork(network intOrString) uint64 { - chainId, ok := isValidNetwork(network) - if !ok { - v.add("network", fmt.Sprintf("given value '%s' is not a valid network", network)) - } - return chainId -} - -func (v *validationError) checkNetworkParameter(param string) uint64 { - if reInteger.MatchString(param) { - chainId, err := strconv.ParseUint(param, 10, 64) - if err != nil { - v.add("network", fmt.Sprintf("given value '%s' is not a valid network", param)) - return 0 - } - return v.checkNetwork(intOrString{intValue: &chainId}) - } - return v.checkNetwork(intOrString{strValue: ¶m}) -} - -// isValidNetwork checks if the given network is a valid network. -// It returns the chain id of the network and true if it is valid, otherwise 0 and false. -func isValidNetwork(network intOrString) (uint64, bool) { - for _, realNetwork := range allNetworks { - if (network.intValue != nil && realNetwork.ChainId == *network.intValue) || (network.strValue != nil && realNetwork.Name == *network.strValue) { - return realNetwork.ChainId, true - } - } - return 0, false -} - -func (v *validationError) checkTimestamps(r *http.Request, chartLimits ChartTimeDashboardLimits) (after uint64, before uint64) { - afterParam := r.URL.Query().Get("after_ts") - beforeParam := r.URL.Query().Get("before_ts") - switch { - // If both parameters are empty, return the latest data - case afterParam == "" && beforeParam == "": - return max(chartLimits.LatestExportedTs-chartLimits.MaxAllowedInterval, chartLimits.MinAllowedTs), chartLimits.LatestExportedTs - - // If only the afterParam is provided - case afterParam != "" && beforeParam == "": - afterTs := v.checkUint(afterParam, "after_ts") - beforeTs := afterTs + chartLimits.MaxAllowedInterval - return afterTs, beforeTs - - // If only the beforeParam is provided - case beforeParam != "" && afterParam == "": - beforeTs := v.checkUint(beforeParam, "before_ts") - afterTs := max(beforeTs-chartLimits.MaxAllowedInterval, chartLimits.MinAllowedTs) - return afterTs, beforeTs - - // If both parameters are provided, validate them - default: - afterTs := v.checkUint(afterParam, "after_ts") - beforeTs := v.checkUint(beforeParam, "before_ts") - - if afterTs > beforeTs { - v.add("after_ts", "parameter `after_ts` must not be greater than `before_ts`") - } - - if beforeTs-afterTs > chartLimits.MaxAllowedInterval { - v.add("before_ts", fmt.Sprintf("parameters `after_ts` and `before_ts` must not lie apart more than %d seconds for this aggregation", chartLimits.MaxAllowedInterval)) - } - - return afterTs, beforeTs - } -} - -// getMaxChartAge returns the maximum age of a chart in seconds based on the given aggregation type and premium perks -func getMaxChartAge(aggregation enums.ChartAggregation, perkSeconds types.ChartHistorySeconds) uint64 { - aggregations := enums.ChartAggregations - switch aggregation { - case aggregations.Epoch: - return perkSeconds.Epoch - case aggregations.Hourly: - return perkSeconds.Hourly - case aggregations.Daily: - return perkSeconds.Daily - case aggregations.Weekly: - return perkSeconds.Weekly - default: - return 0 - } -} - -func isUserAdmin(user *types.UserInfo) bool { - if user == nil { - return false - } - return user.UserGroup == types.UserGroupAdmin -} - -// -------------------------------------- -// Response handling - -func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { - 
w.Header().Set("Content-Type", "application/json") - if response == nil { - w.WriteHeader(statusCode) - return - } - jsonData, err := json.Marshal(response) - if err != nil { - logApiError(r, fmt.Errorf("error encoding json data: %w", err), 0, - log.Fields{ - "data": fmt.Sprintf("%+v", response), - }) - w.WriteHeader(http.StatusInternalServerError) - response = types.ApiErrorResponse{ - Error: "error encoding json data", - } - if err = json.NewEncoder(w).Encode(response); err != nil { - // there seems to be an error with the lib - logApiError(r, fmt.Errorf("error encoding error response after failed encoding: %w", err), 0) - } - return - } - w.WriteHeader(statusCode) - if _, err = w.Write(jsonData); err != nil { - // already returned wrong status code to user, can't prevent that - logApiError(r, fmt.Errorf("error writing response data: %w", err), 0) - } -} - -func returnError(w http.ResponseWriter, r *http.Request, code int, err error) { - response := types.ApiErrorResponse{ - Error: err.Error(), - } - writeResponse(w, r, code, response) -} - -func returnOk(w http.ResponseWriter, r *http.Request, data interface{}) { - writeResponse(w, r, http.StatusOK, data) -} - -func returnCreated(w http.ResponseWriter, r *http.Request, data interface{}) { - writeResponse(w, r, http.StatusCreated, data) -} - -func returnNoContent(w http.ResponseWriter, r *http.Request) { - writeResponse(w, r, http.StatusNoContent, nil) -} - -// Errors - -func returnBadRequest(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusBadRequest, err) -} - -func returnUnauthorized(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusUnauthorized, err) -} - -func returnNotFound(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusNotFound, err) -} - -func returnConflict(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusConflict, err) -} - -func returnForbidden(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusForbidden, err) -} - -func returnTooManyRequests(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusTooManyRequests, err) -} - -func returnGone(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusGone, err) -} - -const maxBodySize = 10 * 1024 - -func logApiError(r *http.Request, err error, callerSkip int, additionalInfos ...log.Fields) { - body, _ := io.ReadAll(io.LimitReader(r.Body, maxBodySize)) - requestFields := log.Fields{ - "endpoint": r.Method + " " + r.URL.Path, - "query": r.URL.RawQuery, - "body": string(body), - } - log.Error(err, "error handling request", callerSkip+1, append(additionalInfos, requestFields)...) 
-} - -func handleErr(w http.ResponseWriter, r *http.Request, err error) { - _, isValidationError := err.(validationError) - switch { - case isValidationError, errors.Is(err, errBadRequest): - returnBadRequest(w, r, err) - case errors.Is(err, dataaccess.ErrNotFound): - returnNotFound(w, r, err) - case errors.Is(err, errUnauthorized): - returnUnauthorized(w, r, err) - case errors.Is(err, errForbidden): - returnForbidden(w, r, err) - case errors.Is(err, errConflict): - returnConflict(w, r, err) - case errors.Is(err, services.ErrWaiting): - returnError(w, r, http.StatusServiceUnavailable, err) - case errors.Is(err, errTooManyRequests): - returnTooManyRequests(w, r, err) - case errors.Is(err, errGone): - returnGone(w, r, err) - default: - logApiError(r, err, 1) - // TODO: don't return the error message to the user in production - returnError(w, r, http.StatusInternalServerError, err) - } -} - -// -------------------------------------- -// Error Helpers - -func errWithMsg(err error, format string, args ...interface{}) error { - return fmt.Errorf("%w: %s", err, fmt.Sprintf(format, args...)) -} - -//nolint:nolintlint -//nolint:unparam -func newBadRequestErr(format string, args ...interface{}) error { - return errWithMsg(errBadRequest, format, args...) -} - -//nolint:unparam -func newInternalServerErr(format string, args ...interface{}) error { - return errWithMsg(errInternalServer, format, args...) -} - -//nolint:unparam -func newUnauthorizedErr(format string, args ...interface{}) error { - return errWithMsg(errUnauthorized, format, args...) -} - -func newForbiddenErr(format string, args ...interface{}) error { - return errWithMsg(errForbidden, format, args...) -} - -//nolint:unparam -func newConflictErr(format string, args ...interface{}) error { - return errWithMsg(errConflict, format, args...) -} - -//nolint:nolintlint -//nolint:unparam -func newNotFoundErr(format string, args ...interface{}) error { - return errWithMsg(dataaccess.ErrNotFound, format, args...) -} - -func newTooManyRequestsErr(format string, args ...interface{}) error { - return errWithMsg(errTooManyRequests, format, args...) -} - -func newGoneErr(format string, args ...interface{}) error { - return errWithMsg(errGone, format, args...) -} - -// -------------------------------------- -// misc. 
helper functions - -// maps different types of validator dashboard summary validators to a common format -func mapVDBIndices(indices interface{}) ([]types.VDBSummaryValidatorsData, error) { - if indices == nil { - return nil, errors.New("no data found when mapping") - } - - switch v := indices.(type) { - case *types.VDBGeneralSummaryValidators: - // deposited, online, offline, slashing, slashed, exited, withdrawn, pending, exiting, withdrawing - return []types.VDBSummaryValidatorsData{ - mapUintSlice("deposited", v.Deposited), - mapUintSlice("online", v.Online), - mapUintSlice("offline", v.Offline), - mapUintSlice("slashing", v.Slashing), - mapUintSlice("slashed", v.Slashed), - mapUintSlice("exited", v.Exited), - mapUintSlice("withdrawn", v.Withdrawn), - mapIndexTimestampSlice("pending", v.Pending), - mapIndexTimestampSlice("exiting", v.Exiting), - mapIndexTimestampSlice("withdrawing", v.Withdrawing), - }, nil - - case *types.VDBSyncSummaryValidators: - return []types.VDBSummaryValidatorsData{ - mapUintSlice("sync_current", v.Current), - mapUintSlice("sync_upcoming", v.Upcoming), - mapSlice("sync_past", v.Past, - func(v types.VDBValidatorSyncPast) (uint64, []uint64) { return v.Index, []uint64{v.Count} }, - ), - }, nil - - case *types.VDBSlashingsSummaryValidators: - return []types.VDBSummaryValidatorsData{ - mapSlice("got_slashed", v.GotSlashed, - func(v types.VDBValidatorGotSlashed) (uint64, []uint64) { return v.Index, []uint64{v.SlashedBy} }, - ), - mapSlice("has_slashed", v.HasSlashed, - func(v types.VDBValidatorHasSlashed) (uint64, []uint64) { return v.Index, v.SlashedIndices }, - ), - }, nil - - case *types.VDBProposalSummaryValidators: - return []types.VDBSummaryValidatorsData{ - mapIndexBlocksSlice("proposal_proposed", v.Proposed), - mapIndexBlocksSlice("proposal_missed", v.Missed), - }, nil - - default: - return nil, fmt.Errorf("unsupported indices type") - } -} - -// maps different types of validator dashboard summary validators to a common format -func mapSlice[T any](category string, validators []T, getIndexAndDutyObjects func(validator T) (index uint64, dutyObjects []uint64)) types.VDBSummaryValidatorsData { - validatorsData := make([]types.VDBSummaryValidator, len(validators)) - for i, validator := range validators { - index, dutyObjects := getIndexAndDutyObjects(validator) - validatorsData[i] = types.VDBSummaryValidator{Index: index, DutyObjects: dutyObjects} - } - return types.VDBSummaryValidatorsData{ - Category: category, - Validators: validatorsData, - } -} -func mapUintSlice(category string, validators []uint64) types.VDBSummaryValidatorsData { - return mapSlice(category, validators, - func(v uint64) (uint64, []uint64) { return v, nil }, - ) -} - -func mapIndexTimestampSlice(category string, validators []types.IndexTimestamp) types.VDBSummaryValidatorsData { - return mapSlice(category, validators, - func(v types.IndexTimestamp) (uint64, []uint64) { return v.Index, []uint64{v.Timestamp} }, - ) -} - -func mapIndexBlocksSlice(category string, validators []types.IndexBlocks) types.VDBSummaryValidatorsData { - return mapSlice(category, validators, - func(v types.IndexBlocks) (uint64, []uint64) { return v.Index, v.Blocks }, - ) -} - -// -------------------------------------- -// intOrString is a custom type that can be unmarshalled from either an int or a string (strings will also be parsed to int if possible). -// if unmarshaling throws no errors one of the two fields will be set, the other will be nil. 
-type intOrString struct { - intValue *uint64 - strValue *string -} - -func (v *intOrString) UnmarshalJSON(data []byte) error { - // Attempt to unmarshal as uint64 first - var intValue uint64 - if err := json.Unmarshal(data, &intValue); err == nil { - v.intValue = &intValue - return nil - } - - // If unmarshalling as uint64 fails, try to unmarshal as string - var strValue string - if err := json.Unmarshal(data, &strValue); err == nil { - if parsedInt, err := strconv.ParseUint(strValue, 10, 64); err == nil { - v.intValue = &parsedInt - } else { - v.strValue = &strValue - } - return nil - } - - // If both unmarshalling attempts fail, return an error - return fmt.Errorf("failed to unmarshal intOrString from json: %s", string(data)) -} - -func (v intOrString) String() string { - if v.intValue != nil { - return strconv.FormatUint(*v.intValue, 10) - } - if v.strValue != nil { - return *v.strValue - } - return "" -} - -func (intOrString) JSONSchema() *jsonschema.Schema { - return &jsonschema.Schema{ - OneOf: []*jsonschema.Schema{ - {Type: "string"}, {Type: "integer"}, - }, - } -} diff --git a/backend/pkg/api/handlers/handler_service.go b/backend/pkg/api/handlers/handler_service.go new file mode 100644 index 000000000..167662579 --- /dev/null +++ b/backend/pkg/api/handlers/handler_service.go @@ -0,0 +1,551 @@ +package handlers + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/invopop/jsonschema" + + "github.com/alexedwards/scs/v2" + dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" + "github.com/gobitfly/beaconchain/pkg/api/enums" + "github.com/gobitfly/beaconchain/pkg/api/services" + types "github.com/gobitfly/beaconchain/pkg/api/types" +) + +type HandlerService struct { + daService dataaccess.DataAccessor + daDummy dataaccess.DataAccessor + scs *scs.SessionManager + isPostMachineMetricsEnabled bool // if more config options are needed, consider having the whole config in here +} + +func NewHandlerService(dataAccessor dataaccess.DataAccessor, dummy dataaccess.DataAccessor, sessionManager *scs.SessionManager, enablePostMachineMetrics bool) *HandlerService { + if allNetworks == nil { + networks, err := dataAccessor.GetAllNetworks() + if err != nil { + log.Fatal(err, "error getting networks for handler", 0, nil) + } + allNetworks = networks + } + + return &HandlerService{ + daService: dataAccessor, + daDummy: dummy, + scs: sessionManager, + isPostMachineMetricsEnabled: enablePostMachineMetrics, + } +} + +// getDataAccessor returns the correct data accessor based on the request context. +// if the request is mocked, the data access dummy is returned; otherwise the data access service. 
+// should only be used if getting mocked data for the endpoint is appropriate +func (h *HandlerService) getDataAccessor(r *http.Request) dataaccess.DataAccessor { + if isMocked(r) { + return h.daDummy + } + return h.daService +} + +// all networks available in the system, filled on startup in NewHandlerService +var allNetworks []types.NetworkInfo + +// -------------------------------------- +// errors + +var ( + errMsgParsingId = errors.New("error parsing parameter 'dashboard_id'") + errBadRequest = errors.New("bad request") + errInternalServer = errors.New("internal server error") + errUnauthorized = errors.New("unauthorized") + errForbidden = errors.New("forbidden") + errConflict = errors.New("conflict") + errTooManyRequests = errors.New("too many requests") + errGone = errors.New("gone") +) + +// -------------------------------------- +// utility functions + +type validatorSet struct { + Indexes []types.VDBValidator + PublicKeys []string +} + +// parseDashboardId is a helper function to validate the string dashboard id param. +func parseDashboardId(id string) (interface{}, error) { + var v validationError + if reInteger.MatchString(id) { + // given id is a normal id + id := v.checkUint(id, "dashboard_id") + if v.hasErrors() { + return nil, v + } + return types.VDBIdPrimary(id), nil + } + if reValidatorDashboardPublicId.MatchString(id) { + // given id is a public id + return types.VDBIdPublic(id), nil + } + // given id must be an encoded set of validators + decodedId, err := base64.RawURLEncoding.DecodeString(id) + if err != nil { + return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) + } + indexes, publicKeys := v.checkValidatorList(string(decodedId), forbidEmpty) + if v.hasErrors() { + return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) + } + return validatorSet{Indexes: indexes, PublicKeys: publicKeys}, nil +} + +// getDashboardId is a helper function to convert the dashboard id param to a VDBId. +// precondition: dashboardIdParam must be a valid dashboard id and either a primary id, public id, or list of validators. +func (h *HandlerService) getDashboardId(ctx context.Context, dashboardIdParam interface{}) (*types.VDBId, error) { + switch dashboardId := dashboardIdParam.(type) { + case types.VDBIdPrimary: + return &types.VDBId{Id: dashboardId, Validators: nil}, nil + case types.VDBIdPublic: + dashboardInfo, err := h.daService.GetValidatorDashboardPublicId(ctx, dashboardId) + if err != nil { + return nil, err + } + return &types.VDBId{Id: types.VDBIdPrimary(dashboardInfo.DashboardId), Validators: nil, AggregateGroups: !dashboardInfo.ShareSettings.ShareGroups}, nil + case validatorSet: + validators, err := h.daService.GetValidatorsFromSlices(ctx, dashboardId.Indexes, dashboardId.PublicKeys) + if err != nil { + return nil, err + } + if len(validators) == 0 { + return nil, newNotFoundErr("no validators found for given id") + } + if len(validators) > maxValidatorsInList { + return nil, newBadRequestErr("too many validators in list, maximum is %d", maxValidatorsInList) + } + return &types.VDBId{Validators: validators}, nil + } + return nil, errMsgParsingId +} + +// handleDashboardId is a helper function to both validate the dashboard id param and convert it to a VDBId. +// it should be used as the last validation step for all internal dashboard GET-handlers. +// Modifying handlers (POST, PUT, DELETE) should only accept primary dashboard ids and just use checkPrimaryDashboardId. 
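For orientation, a minimal usage sketch of the function below; the id values are hypothetical and the snippet assumes the surrounding handlers package (plus the context and encoding/base64 imports), so treat it as illustration rather than part of the patch:

	func exampleHandleDashboardId(ctx context.Context, h *HandlerService) {
		// primary id: a plain integer string
		byPrimary, _ := h.handleDashboardId(ctx, "42")
		// public share id: "v-" followed by a lowercase UUID
		byPublic, _ := h.handleDashboardId(ctx, "v-aabbccdd-0011-2233-4455-66778899aabb")
		// guest dashboard: base64url-encoded, comma-separated validator indexes/pubkeys
		byValidators, _ := h.handleDashboardId(ctx, base64.RawURLEncoding.EncodeToString([]byte("1,2,3")))
		_, _, _ = byPrimary, byPublic, byValidators
	}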
+func (h *HandlerService) handleDashboardId(ctx context.Context, param string) (*types.VDBId, error) { + //check if dashboard id is stored in context + if dashboardId, ok := ctx.Value(types.CtxDashboardIdKey).(*types.VDBId); ok { + return dashboardId, nil + } + // validate dashboard id param + dashboardIdParam, err := parseDashboardId(param) + if err != nil { + return nil, err + } + // convert to VDBId + dashboardId, err := h.getDashboardId(ctx, dashboardIdParam) + if err != nil { + return nil, err + } + + return dashboardId, nil +} + +const chartDatapointLimit uint64 = 200 + +type ChartTimeDashboardLimits struct { + MinAllowedTs uint64 + LatestExportedTs uint64 + MaxAllowedInterval uint64 +} + +// helper function to retrieve allowed chart timestamp boundaries according to the users premium perks at the current point in time +func (h *HandlerService) getCurrentChartTimeLimitsForDashboard(ctx context.Context, dashboardId *types.VDBId, aggregation enums.ChartAggregation) (ChartTimeDashboardLimits, error) { + limits := ChartTimeDashboardLimits{} + var err error + premiumPerks, err := h.getDashboardPremiumPerks(ctx, *dashboardId) + if err != nil { + return limits, err + } + + maxAge := getMaxChartAge(aggregation, premiumPerks.ChartHistorySeconds) // can be max int for unlimited, always check for underflows + if maxAge == 0 { + return limits, newConflictErr("requested aggregation is not available for dashboard owner's premium subscription") + } + limits.LatestExportedTs, err = h.daService.GetLatestExportedChartTs(ctx, aggregation) + if err != nil { + return limits, err + } + limits.MinAllowedTs = limits.LatestExportedTs - min(maxAge, limits.LatestExportedTs) // min to prevent underflow + secondsPerEpoch := uint64(12 * 32) // TODO: fetch dashboards chain id and use correct value for network once available + limits.MaxAllowedInterval = chartDatapointLimit*uint64(aggregation.Duration(secondsPerEpoch).Seconds()) - 1 // -1 to make sure we don't go over the limit + + return limits, nil +} + +// getDashboardPremiumPerks gets the premium perks of the dashboard OWNER or if it's a guest dashboard, it returns free tier premium perks +func (h *HandlerService) getDashboardPremiumPerks(ctx context.Context, id types.VDBId) (*types.PremiumPerks, error) { + // for guest dashboards, return free tier perks + if id.Validators != nil { + perk, err := h.daService.GetFreeTierPerks(ctx) + if err != nil { + return nil, err + } + return perk, nil + } + // could be made into a single query if needed + dashboardUser, err := h.daService.GetValidatorDashboardUser(ctx, id.Id) + if err != nil { + return nil, err + } + userInfo, err := h.daService.GetUserInfo(ctx, dashboardUser.UserId) + if err != nil { + return nil, err + } + + return &userInfo.PremiumPerks, nil +} + +// getMaxChartAge returns the maximum age of a chart in seconds based on the given aggregation type and premium perks +func getMaxChartAge(aggregation enums.ChartAggregation, perkSeconds types.ChartHistorySeconds) uint64 { + aggregations := enums.ChartAggregations + switch aggregation { + case aggregations.Epoch: + return perkSeconds.Epoch + case aggregations.Hourly: + return perkSeconds.Hourly + case aggregations.Daily: + return perkSeconds.Daily + case aggregations.Weekly: + return perkSeconds.Weekly + default: + return 0 + } +} + +func isUserAdmin(user *types.UserInfo) bool { + if user == nil { // can happen for guest or shared dashboards + return false + } + return user.UserGroup == types.UserGroupAdmin +} + +// -------------------------------------- +// 
+// Response handling
+
+func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) {
+	w.Header().Set("Content-Type", "application/json")
+	if response == nil {
+		w.WriteHeader(statusCode)
+		return
+	}
+	jsonData, err := json.Marshal(response)
+	if err != nil {
+		logApiError(r, fmt.Errorf("error encoding json data: %w", err), 0,
+			log.Fields{
+				"data": fmt.Sprintf("%+v", response),
+			})
+		w.WriteHeader(http.StatusInternalServerError)
+		response = types.ApiErrorResponse{
+			Error: "error encoding json data",
+		}
+		if err = json.NewEncoder(w).Encode(response); err != nil {
+			// encoding a static error struct should never fail; if it does, all we can do is log it
+			logApiError(r, fmt.Errorf("error encoding error response after failed encoding: %w", err), 0)
+		}
+		return
+	}
+	w.WriteHeader(statusCode)
+	if _, err = w.Write(jsonData); err != nil {
+		// the status code has already been sent to the user at this point, so we can only log the failed write
+		logApiError(r, fmt.Errorf("error writing response data: %w", err), 0)
+	}
+}
+
+func returnError(w http.ResponseWriter, r *http.Request, code int, err error) {
+	response := types.ApiErrorResponse{
+		Error: err.Error(),
+	}
+	writeResponse(w, r, code, response)
+}
+
+func returnOk(w http.ResponseWriter, r *http.Request, data interface{}) {
+	writeResponse(w, r, http.StatusOK, data)
+}
+
+func returnCreated(w http.ResponseWriter, r *http.Request, data interface{}) {
+	writeResponse(w, r, http.StatusCreated, data)
+}
+
+func returnNoContent(w http.ResponseWriter, r *http.Request) {
+	writeResponse(w, r, http.StatusNoContent, nil)
+}
+
+// Errors
+
+func returnBadRequest(w http.ResponseWriter, r *http.Request, err error) {
+	returnError(w, r, http.StatusBadRequest, err)
+}
+
+func returnUnauthorized(w http.ResponseWriter, r *http.Request, err error) {
+	returnError(w, r, http.StatusUnauthorized, err)
+}
+
+func returnNotFound(w http.ResponseWriter, r *http.Request, err error) {
+	returnError(w, r, http.StatusNotFound, err)
+}
+
+func returnConflict(w http.ResponseWriter, r *http.Request, err error) {
+	returnError(w, r, http.StatusConflict, err)
+}
+
+func returnForbidden(w http.ResponseWriter, r *http.Request, err error) {
+	returnError(w, r, http.StatusForbidden, err)
+}
+
+func returnTooManyRequests(w http.ResponseWriter, r *http.Request, err error) {
+	returnError(w, r, http.StatusTooManyRequests, err)
+}
+
+func returnGone(w http.ResponseWriter, r *http.Request, err error) {
+	returnError(w, r, http.StatusGone, err)
+}
+
+const maxBodySize = 10 * 1024
+
+func logApiError(r *http.Request, err error, callerSkip int, additionalInfos ...log.Fields) {
+	requestFields := log.Fields{
+		"request_endpoint": r.Method + " " + r.URL.Path,
+	}
+	if len(r.URL.RawQuery) > 0 {
+		requestFields["request_query"] = r.URL.RawQuery
+	}
+	if body, _ := io.ReadAll(io.LimitReader(r.Body, maxBodySize)); len(body) > 0 {
+		requestFields["request_body"] = string(body)
+	}
+	if userId, _ := GetUserIdByContext(r); userId != 0 {
+		requestFields["request_user_id"] = userId
+	}
+	log.Error(err, "error handling request", callerSkip+1, append(additionalInfos, requestFields)...)
+}
+
+func handleErr(w http.ResponseWriter, r *http.Request, err error) {
+	_, isValidationError := err.(validationError)
+	switch {
+	case isValidationError, errors.Is(err, errBadRequest):
+		returnBadRequest(w, r, err)
+	case errors.Is(err, dataaccess.ErrNotFound):
+		returnNotFound(w, r, err)
+	case errors.Is(err, errUnauthorized):
+		returnUnauthorized(w, r, err)
+	case errors.Is(err, errForbidden):
+		returnForbidden(w, r, err)
+	case errors.Is(err, errConflict):
+		returnConflict(w, r, err)
+	case errors.Is(err, services.ErrWaiting):
+		returnError(w, r, http.StatusServiceUnavailable, err)
+	case errors.Is(err, errTooManyRequests):
+		returnTooManyRequests(w, r, err)
+	case errors.Is(err, errGone):
+		returnGone(w, r, err)
+	case errors.Is(err, context.Canceled):
+		if r.Context().Err() != context.Canceled { // only log and respond if the cancellation did not come from the client aborting the request
+			logApiError(r, err, 1)
+			returnError(w, r, http.StatusInternalServerError, err)
+		}
+	default:
+		logApiError(r, err, 1)
+		// TODO: don't return the error message to the user in production
+		returnError(w, r, http.StatusInternalServerError, err)
+	}
+}
+
+// --------------------------------------
+// Error Helpers
+
+func errWithMsg(err error, format string, args ...interface{}) error {
+	return fmt.Errorf("%w: %s", err, fmt.Sprintf(format, args...))
+}
+
+//nolint:nolintlint
+//nolint:unparam
+func newBadRequestErr(format string, args ...interface{}) error {
+	return errWithMsg(errBadRequest, format, args...)
+}
+
+//nolint:unparam
+func newInternalServerErr(format string, args ...interface{}) error {
+	return errWithMsg(errInternalServer, format, args...)
+}
+
+//nolint:unparam
+func newUnauthorizedErr(format string, args ...interface{}) error {
+	return errWithMsg(errUnauthorized, format, args...)
+}
+
+func newForbiddenErr(format string, args ...interface{}) error {
+	return errWithMsg(errForbidden, format, args...)
+}
+
+//nolint:unparam
+func newConflictErr(format string, args ...interface{}) error {
+	return errWithMsg(errConflict, format, args...)
+}
+
+//nolint:nolintlint
+//nolint:unparam
+func newNotFoundErr(format string, args ...interface{}) error {
+	return errWithMsg(dataaccess.ErrNotFound, format, args...)
+}
+
+func newTooManyRequestsErr(format string, args ...interface{}) error {
+	return errWithMsg(errTooManyRequests, format, args...)
+}
+
+func newGoneErr(format string, args ...interface{}) error {
+	return errWithMsg(errGone, format, args...)
+}
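To make the sentinel wrapping concrete: the helpers above attach a message via `%w`, so `errors.Is` in handleErr still recognizes the sentinel. A tiny illustrative sketch (hypothetical values, not part of this diff):

```go
// err carries a human-readable message but still matches its sentinel.
err := newBadRequestErr("given value '%s' is not a valid dashboard id", "xyz")
fmt.Println(errors.Is(err, errBadRequest)) // true -> handleErr responds with 400
fmt.Println(err) // bad request: given value 'xyz' is not a valid dashboard id
```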
+
+// --------------------------------------
+// misc. helper functions
+
+// mapVDBIndices maps the different types of validator dashboard summary validators to a common format
+func mapVDBIndices(indices interface{}) ([]types.VDBSummaryValidatorsData, error) {
+	if indices == nil {
+		return nil, errors.New("no data found when mapping")
+	}
+
+	switch v := indices.(type) {
+	case *types.VDBGeneralSummaryValidators:
+		// deposited, online, offline, slashing, slashed, exited, withdrawn, pending, exiting, withdrawing
+		return []types.VDBSummaryValidatorsData{
+			mapUintSlice("deposited", v.Deposited),
+			mapUintSlice("online", v.Online),
+			mapUintSlice("offline", v.Offline),
+			mapUintSlice("slashing", v.Slashing),
+			mapUintSlice("slashed", v.Slashed),
+			mapUintSlice("exited", v.Exited),
+			mapUintSlice("withdrawn", v.Withdrawn),
+			mapIndexTimestampSlice("pending", v.Pending),
+			mapIndexTimestampSlice("exiting", v.Exiting),
+			mapIndexTimestampSlice("withdrawing", v.Withdrawing),
+		}, nil
+
+	case *types.VDBSyncSummaryValidators:
+		return []types.VDBSummaryValidatorsData{
+			mapUintSlice("sync_current", v.Current),
+			mapUintSlice("sync_upcoming", v.Upcoming),
+			mapSlice("sync_past", v.Past,
+				func(v types.VDBValidatorSyncPast) (uint64, []uint64) { return v.Index, []uint64{v.Count} },
+			),
+		}, nil
+
+	case *types.VDBSlashingsSummaryValidators:
+		return []types.VDBSummaryValidatorsData{
+			mapSlice("got_slashed", v.GotSlashed,
+				func(v types.VDBValidatorGotSlashed) (uint64, []uint64) { return v.Index, []uint64{v.SlashedBy} },
+			),
+			mapSlice("has_slashed", v.HasSlashed,
+				func(v types.VDBValidatorHasSlashed) (uint64, []uint64) { return v.Index, v.SlashedIndices },
+			),
+		}, nil
+
+	case *types.VDBProposalSummaryValidators:
+		return []types.VDBSummaryValidatorsData{
+			mapIndexBlocksSlice("proposal_proposed", v.Proposed),
+			mapIndexBlocksSlice("proposal_missed", v.Missed),
+		}, nil
+
+	default:
+		return nil, errors.New("unsupported indices type")
+	}
+}
+
+// mapSlice maps a slice of validators into the common summary format, using the given function to extract each validator's index and duty objects
+func mapSlice[T any](category string, validators []T, getIndexAndDutyObjects func(validator T) (index uint64, dutyObjects []uint64)) types.VDBSummaryValidatorsData {
+	validatorsData := make([]types.VDBSummaryValidator, len(validators))
+	for i, validator := range validators {
+		index, dutyObjects := getIndexAndDutyObjects(validator)
+		validatorsData[i] = types.VDBSummaryValidator{Index: index, DutyObjects: dutyObjects}
+	}
+	return types.VDBSummaryValidatorsData{
+		Category:   category,
+		Validators: validatorsData,
+	}
+}
+
+func mapUintSlice(category string, validators []uint64) types.VDBSummaryValidatorsData {
+	return mapSlice(category, validators,
+		func(v uint64) (uint64, []uint64) { return v, nil },
+	)
+}
+
+func mapIndexTimestampSlice(category string, validators []types.IndexTimestamp) types.VDBSummaryValidatorsData {
+	return mapSlice(category, validators,
+		func(v types.IndexTimestamp) (uint64, []uint64) { return v.Index, []uint64{v.Timestamp} },
+	)
+}
+
+func mapIndexBlocksSlice(category string, validators []types.IndexBlocks) types.VDBSummaryValidatorsData {
+	return mapSlice(category, validators,
+		func(v types.IndexBlocks) (uint64, []uint64) { return v.Index, v.Blocks },
+	)
+}
+
+// --------------------------------------
+// intOrString is a custom type that can be unmarshaled from either an int or a string (numeric strings are also parsed to int).
+// If unmarshaling succeeds, exactly one of the two fields is set; the other is nil.
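+// For illustration (hypothetical values, not from this diff): the JSON inputs
+// 5 and "5" both populate intValue, while a 96-hex-char public key string
+// populates strValue; see UnmarshalJSON below.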
+type intOrString struct { + intValue *uint64 + strValue *string +} + +func (v *intOrString) UnmarshalJSON(data []byte) error { + // Attempt to unmarshal as uint64 first + var intValue uint64 + if err := json.Unmarshal(data, &intValue); err == nil { + v.intValue = &intValue + return nil + } + + // If unmarshalling as uint64 fails, try to unmarshal as string + var strValue string + if err := json.Unmarshal(data, &strValue); err == nil { + if parsedInt, err := strconv.ParseUint(strValue, 10, 64); err == nil { + v.intValue = &parsedInt + } else { + v.strValue = &strValue + } + return nil + } + + // If both unmarshalling attempts fail, return an error + return fmt.Errorf("failed to unmarshal intOrString from json: %s", string(data)) +} + +func (v intOrString) String() string { + if v.intValue != nil { + return strconv.FormatUint(*v.intValue, 10) + } + if v.strValue != nil { + return *v.strValue + } + return "" +} + +func (intOrString) JSONSchema() *jsonschema.Schema { + return &jsonschema.Schema{ + OneOf: []*jsonschema.Schema{ + {Type: "string"}, {Type: "integer"}, + }, + } +} + +func isMocked(r *http.Request) bool { + isMocked, ok := r.Context().Value(types.CtxIsMockedKey).(bool) + return ok && isMocked +} diff --git a/backend/pkg/api/handlers/input_validation.go b/backend/pkg/api/handlers/input_validation.go new file mode 100644 index 000000000..3545b0c63 --- /dev/null +++ b/backend/pkg/api/handlers/input_validation.go @@ -0,0 +1,558 @@ +package handlers + +import ( + "bytes" + "cmp" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/gobitfly/beaconchain/pkg/api/enums" + "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gorilla/mux" + "github.com/invopop/jsonschema" + "github.com/shopspring/decimal" + "github.com/xeipuuv/gojsonschema" +) + +// -------------------------------------- + +var ( + // Subject to change, just examples + reName = regexp.MustCompile(`^[a-zA-Z0-9_\-.\ ]*$`) + reInteger = regexp.MustCompile(`^[0-9]+$`) + reValidatorDashboardPublicId = regexp.MustCompile(`^v-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`) + reValidatorPublicKeyWithPrefix = regexp.MustCompile(`^0x[0-9a-fA-F]{96}$`) + reValidatorPublicKey = regexp.MustCompile(`^(0x)?[0-9a-fA-F]{96}$`) + reEthereumAddress = regexp.MustCompile(`^(0x)?[0-9a-fA-F]{40}$`) + reWithdrawalCredential = regexp.MustCompile(`^(0x0[01])?[0-9a-fA-F]{62}$`) + reEnsName = regexp.MustCompile(`^.+\.eth$`) + reGraffiti = regexp.MustCompile(`^.{2,}$`) // at least 2 characters, so that queries won't time out + reCursor = regexp.MustCompile(`^[A-Za-z0-9-_]+$`) // has to be base64 + reEmail = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$") + rePassword = regexp.MustCompile(`^.{5,}$`) + reEmailUserToken = regexp.MustCompile(`^[a-z0-9]{40}$`) + reJsonContentType = regexp.MustCompile(`^application\/json(;.*)?$`) +) + +const ( + maxNameLength = 50 + maxValidatorsInList = 20 + maxQueryLimit uint64 = 100 + defaultReturnLimit uint64 = 10 + sortOrderAscending = "asc" + sortOrderDescending = "desc" + defaultSortOrder = sortOrderAscending + defaultDesc = defaultSortOrder == sortOrderDescending + ethereum = "ethereum" + gnosis = "gnosis" + allowEmpty = true + forbidEmpty = false + MaxArchivedDashboardsCount = 10 +) + +// All changes to common functions MUST NOT break any public handler behavior (not in effect yet) 
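As a quick sanity check on the patterns above, a standalone illustrative sketch (runnable on its own; the public key is a dummy value of the right length):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	reInteger := regexp.MustCompile(`^[0-9]+$`)
	rePubkeyWithPrefix := regexp.MustCompile(`^0x[0-9a-fA-F]{96}$`)
	rePubkey := regexp.MustCompile(`^(0x)?[0-9a-fA-F]{96}$`)

	pubkey := strings.Repeat("ab", 48) // 96 hex chars, dummy value

	fmt.Println(reInteger.MatchString("123456"))             // true: a validator index
	fmt.Println(rePubkeyWithPrefix.MatchString(pubkey))      // false: the 0x prefix is required
	fmt.Println(rePubkeyWithPrefix.MatchString("0x" + pubkey)) // true
	fmt.Println(rePubkey.MatchString(pubkey))                // true: the prefix is optional here
}
```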
+
+// --------------------------------------
+// Input Validation
+
+// validationError is a map of parameter names to error messages.
+// It is used to collect multiple validation errors before returning them to the user.
+type validationError map[string]string
+
+func (v validationError) Error() string {
+	// iterate over the map and join all errors into a single string
+	var sb strings.Builder
+	for k, v := range v {
+		sb.WriteString(k)
+		sb.WriteString(": ")
+		sb.WriteString(v)
+		sb.WriteString("\n")
+	}
+	return sb.String()[:sb.Len()-1] // strip the trailing newline
+}
+
+func (v *validationError) add(paramName, problem string) {
+	if *v == nil {
+		*v = make(validationError)
+	}
+	validationMap := *v
+	if _, ok := validationMap[paramName]; ok {
+		problem = validationMap[paramName] + "; " + problem
+	}
+	validationMap[paramName] = problem
+}
+
+func (v *validationError) hasErrors() bool {
+	return v != nil && len(*v) > 0
+}
+
+// --------------------------------------
+
+func (v *validationError) checkRegex(regex *regexp.Regexp, param, paramName string) string {
+	if !regex.MatchString(param) {
+		v.add(paramName, fmt.Sprintf(`given value '%s' has incorrect format`, param))
+	}
+	return param
+}
+
+func (v *validationError) checkLength(name, paramName string, minLength int) string {
+	if len(name) < minLength {
+		v.add(paramName, fmt.Sprintf(`given value '%s' is too short, minimum length is %d`, name, minLength))
+	}
+	if len(name) > maxNameLength {
+		v.add(paramName, fmt.Sprintf(`given value '%s' is too long, maximum length is %d`, name, maxNameLength))
+	}
+	return name
+}
+
+func (v *validationError) checkName(name string, minLength int) string {
+	name = v.checkLength(name, "name", minLength)
+	return v.checkRegex(reName, name, "name")
+}
+
+func (v *validationError) checkNameNotEmpty(name string) string {
+	return v.checkName(name, 1)
+}
+
+func (v *validationError) checkKeyNotEmpty(key string) string {
+	key = v.checkLength(key, "key", 1)
+	return v.checkRegex(reName, key, "key")
+}
+
+func (v *validationError) checkEmail(email string) string {
+	return v.checkRegex(reEmail, strings.ToLower(email), "email")
+}
+
+func (v *validationError) checkPassword(password string) string {
+	return v.checkRegex(rePassword, password, "password")
+}
+
+func (v *validationError) checkUserEmailToken(token string) string {
+	return v.checkRegex(reEmailUserToken, token, "token")
+}
+
+// checkBody checks the request structure (the body contains valid JSON and all required parameters are present).
+// It returns an error only if an internal error occurs; validation problems are added to the validationError instead.
+func (v *validationError) checkBody(data interface{}, r *http.Request) error {
+	// check if content type is application/json
+	if contentType := r.Header.Get("Content-Type"); !reJsonContentType.MatchString(contentType) {
+		v.add("request body", "'Content-Type' header must be 'application/json'")
+	}
+
+	bodyBytes, err := io.ReadAll(r.Body)
+	r.Body = io.NopCloser(bytes.NewReader(bodyBytes)) // restore the body so it can still be read for error logging
+	if err != nil {
+		return newInternalServerErr("error reading request body")
+	}
+
+	// First check: unmarshal into an empty interface to check the JSON format
+	var i interface{}
+	if err := json.Unmarshal(bodyBytes, &i); err != nil {
+		v.add("request body", "not in JSON format")
+		return nil
+	}
+
+	// Second check: validate against the expected schema
+	sc := jsonschema.Reflect(data)
+	b, err := json.Marshal(sc)
+	if err != nil {
+		return newInternalServerErr("error creating expected schema")
+	}
+	loader := gojsonschema.NewBytesLoader(b)
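+	// note: both loaders hold JSON documents; 'loader' wraps the schema
+	// reflected from the Go struct, while 'documentLoader' (next line) wraps
+	// the raw request body that is validated against it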
+	documentLoader := gojsonschema.NewBytesLoader(bodyBytes)
+	schema, err := gojsonschema.NewSchema(loader)
+	if err != nil {
+		return newInternalServerErr("error creating schema")
+	}
+	result, err := schema.Validate(documentLoader)
+	if err != nil {
+		return newInternalServerErr("error validating schema")
+	}
+	isSchemaValid := result.Valid()
+	if !isSchemaValid {
+		v.add("request body", "invalid schema, check the API documentation for the expected format")
+	}
+
+	// Unmarshal into the target struct; if this fails even though the JSON matched the schema, it is an internal error
+	if err := json.Unmarshal(bodyBytes, data); err != nil && isSchemaValid {
+		return newInternalServerErr("error unmarshalling request body")
+	}
+
+	// Proceed with additional validation or processing as necessary
+	return nil
+}
+
+func (v *validationError) checkInt(param, paramName string) int64 {
+	num, err := strconv.ParseInt(param, 10, 64)
+	if err != nil {
+		v.add(paramName, fmt.Sprintf("given value '%s' is not an integer", param))
+	}
+	return num
+}
+
+func (v *validationError) checkUint(param, paramName string) uint64 {
+	num, err := strconv.ParseUint(param, 10, 64)
+	if err != nil {
+		v.add(paramName, fmt.Sprintf("given value '%s' is not a positive integer", param))
+	}
+	return num
+}
+
+func (v *validationError) checkWeiDecimal(param, paramName string) decimal.Decimal {
+	dec := decimal.Zero
+	// check with a regex that the string contains only digits
+	if !reInteger.MatchString(param) {
+		v.add(paramName, fmt.Sprintf("given value '%s' is not a wei string (must be positive integer)", param))
+		return dec
+	}
+	dec, err := decimal.NewFromString(param)
+	if err != nil {
+		v.add(paramName, fmt.Sprintf("given value '%s' is not a wei string (must be positive integer)", param))
+		return dec
+	}
+	return dec
+}
+
+func (v *validationError) checkWeiMinMax(param, paramName string, min, max decimal.Decimal) decimal.Decimal {
+	dec := v.checkWeiDecimal(param, paramName)
+	if v.hasErrors() {
+		return dec
+	}
+	if dec.LessThan(min) {
+		v.add(paramName, fmt.Sprintf("given value '%s' is too small, minimum value is %s", dec, min))
+	}
+	if dec.GreaterThan(max) {
+		v.add(paramName, fmt.Sprintf("given value '%s' is too large, maximum value is %s", dec, max))
+	}
+	return dec
+}
+
+func (v *validationError) checkBool(param, paramName string) bool {
+	if param == "" {
+		return false
+	}
+	boolVar, err := strconv.ParseBool(param)
+	if err != nil {
+		v.add(paramName, fmt.Sprintf("given value '%s' is not a boolean", param))
+	}
+	return boolVar
+}
+
+func (v *validationError) checkAdConfigurationKeys(keysString string) []string {
+	if keysString == "" {
+		return []string{}
+	}
+	var keys []string
+	for _, key := range splitParameters(keysString, ',') {
+		keys = append(keys, v.checkRegex(reName, key, "keys"))
+	}
+	return keys
+}
+
+func (v *validationError) checkPrimaryDashboardId(param string) types.VDBIdPrimary {
+	return types.VDBIdPrimary(v.checkUint(param, "dashboard_id"))
+}
+
+// validateBlockRequest is a helper function to unify handling of block detail request validation
+func (h *HandlerService) validateBlockRequest(r *http.Request, paramName string) (uint64, uint64, error) {
+	var v validationError
+	var err error
+	chainId := v.checkNetworkParameter(mux.Vars(r)["network"])
+	var value uint64
+	switch paramValue := mux.Vars(r)[paramName]; paramValue {
+	// possibly add other values like "genesis", "finalized", hardforks etc. later
+	case "latest":
+		ctx := r.Context()
+		if paramName == "block" {
+			value, err = h.daService.GetLatestBlock(ctx)
+		} else if paramName == "slot" {
+			value, err = h.daService.GetLatestSlot(ctx)
+		}
+		if err != nil {
+			return 0, 0, err
+		}
+	default:
+		value = v.checkUint(paramValue, paramName)
+	}
+	if v.hasErrors() {
+		return 0, 0, v
+	}
+	return chainId, value, nil
+}
+
+// checkGroupId validates the given group id and returns it as an int64.
+// If the given group id is empty and allowEmpty is true, it returns -1 (all groups).
+func (v *validationError) checkGroupId(param string, allowEmpty bool) int64 {
+	if param == "" && allowEmpty {
+		return types.AllGroups
+	}
+	return v.checkInt(param, "group_id")
+}
+
+// checkExistingGroupId validates that the given group id is not empty and is a positive integer.
+func (v *validationError) checkExistingGroupId(param string) uint64 {
+	return v.checkUint(param, "group_id")
+}
+
+func splitParameters(params string, delim rune) []string {
+	// split the string by delim and drop empty strings
+	f := func(c rune) bool {
+		return c == delim
+	}
+	return strings.FieldsFunc(params, f)
+}
+
+func parseGroupIdList[T any](groupIds string, convert func(string, string) T) []T {
+	var ids []T
+	for _, id := range splitParameters(groupIds, ',') {
+		ids = append(ids, convert(id, "group_ids"))
+	}
+	return ids
+}
+
+func (v *validationError) checkExistingGroupIdList(groupIds string) []uint64 {
+	return parseGroupIdList(groupIds, v.checkUint)
+}
+
+func (v *validationError) checkGroupIdList(groupIds string) []int64 {
+	return parseGroupIdList(groupIds, v.checkInt)
+}
+
+func (v *validationError) checkValidatorDashboardPublicId(publicId string) types.VDBIdPublic {
+	return types.VDBIdPublic(v.checkRegex(reValidatorDashboardPublicId, publicId, "public_dashboard_id"))
+}
+
+func checkMinMax[T cmp.Ordered](v *validationError, param T, min T, max T, paramName string) T {
+	if param < min {
+		v.add(paramName, fmt.Sprintf("given value '%v' is too small, minimum value is %v", param, min))
+	}
+	if param > max {
+		v.add(paramName, fmt.Sprintf("given value '%v' is too large, maximum value is %v", param, max))
+	}
+	return param
+}
+
+func (v *validationError) checkAddress(address string) string {
+	return v.checkRegex(reEthereumAddress, address, "address")
+}
+
+func (v *validationError) checkUintMinMax(param string, min uint64, max uint64, paramName string) uint64 {
+	return checkMinMax(v, v.checkUint(param, paramName), min, max, paramName)
+}
+
+type Paging struct {
+	cursor string
+	limit  uint64
+	search string
+}
+
+func (v *validationError) checkPagingParams(q url.Values) Paging {
+	paging := Paging{
+		cursor: q.Get("cursor"),
+		limit:  defaultReturnLimit,
+		search: q.Get("search"),
+	}
+
+	if limitStr := q.Get("limit"); limitStr != "" {
+		paging.limit = v.checkUintMinMax(limitStr, 1, maxQueryLimit, "limit")
+	}
+
+	if paging.cursor != "" {
+		paging.cursor = v.checkRegex(reCursor, paging.cursor, "cursor")
+	}
+
+	return paging
+}
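To make the paging defaults concrete, a small illustrative sketch (hypothetical query values; assumes package-internal access plus `fmt` and `net/url` imports):

```go
// A limit above maxQueryLimit (100) is recorded as a validation error,
// which a handler would turn into a 400 response via handleErr.
var v validationError
q, _ := url.ParseQuery("limit=250&cursor=abc123&search=lido")
paging := v.checkPagingParams(q)
fmt.Println(paging.search, v.hasErrors()) // lido true
```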
+// checkEnum validates the given enum string and returns the corresponding enum value.
+func checkEnum[T enums.EnumFactory[T]](v *validationError, enumString string, name string) T {
+	var e T
+	enum := e.NewFromString(enumString)
+	if enums.IsInvalidEnum(enum) {
+		v.add(name, fmt.Sprintf("given value '%s' is not valid", enumString))
+		return enum
+	}
+	return enum
+}
+
+func (v *validationError) parseSortOrder(order string) bool {
+	switch order {
+	case "":
+		return defaultDesc
+	case sortOrderAscending:
+		return false
+	case sortOrderDescending:
+		return true
+	default:
+		v.add("sort", fmt.Sprintf("given value '%s' for parameter 'sort' is not valid, allowed order values are: %s, %s", order, sortOrderAscending, sortOrderDescending))
+		return false
+	}
+}
+
+func checkSort[T enums.EnumFactory[T]](v *validationError, sortString string) *types.Sort[T] {
+	var c T
+	if sortString == "" {
+		return &types.Sort[T]{Column: c, Desc: defaultDesc}
+	}
+	sortSplit := splitParameters(sortString, ':')
+	if len(sortSplit) > 2 {
+		v.add("sort", fmt.Sprintf("given value '%s' for parameter 'sort' is not valid, expected format is '<column_name>[:(asc|desc)]'", sortString))
+		return nil
+	}
+	var desc bool
+	if len(sortSplit) == 1 {
+		desc = defaultDesc
+	} else {
+		desc = v.parseSortOrder(sortSplit[1])
+	}
+	sortCol := checkEnum[T](v, sortSplit[0], "sort")
+	return &types.Sort[T]{Column: sortCol, Desc: desc}
+}
+
+func (v *validationError) checkProtocolModes(protocolModes string) types.VDBProtocolModes {
+	var modes types.VDBProtocolModes
+	if protocolModes == "" {
+		return modes
+	}
+	protocolsSlice := splitParameters(protocolModes, ',')
+	for _, protocolMode := range protocolsSlice {
+		switch protocolMode {
+		case "rocket_pool":
+			modes.RocketPool = true
+		default:
+			v.add("modes", fmt.Sprintf("given value '%s' is not a valid protocol mode", protocolMode))
+		}
+	}
+	return modes
+}
+
+func (v *validationError) checkValidatorList(validators string, allowEmpty bool) ([]types.VDBValidator, []string) {
+	if validators == "" && !allowEmpty {
+		v.add("validators", "list of validators must not be empty")
+		return nil, nil
+	}
+	validatorsSlice := splitParameters(validators, ',')
+	var indexes []types.VDBValidator
+	var publicKeys []string
+	for _, validator := range validatorsSlice {
+		if reInteger.MatchString(validator) {
+			indexes = append(indexes, v.checkUint(validator, "validators"))
+		} else if reValidatorPublicKeyWithPrefix.MatchString(validator) {
+			_, err := hexutil.Decode(validator)
+			if err != nil {
+				v.add("validators", fmt.Sprintf("invalid value '%s' in list of validators", validator))
+			}
+			publicKeys = append(publicKeys, validator)
+		} else {
+			v.add("validators", fmt.Sprintf("invalid value '%s' in list of validators", validator))
+		}
+	}
+	return indexes, publicKeys
+}
+
+func (v *validationError) checkValidators(validators []intOrString, allowEmpty bool) ([]types.VDBValidator, []string) {
+	if len(validators) == 0 && !allowEmpty {
+		v.add("validators", "list of validators must not be empty")
+		return nil, nil
+	}
+	var indexes []types.VDBValidator
+	var publicKeys []string
+	for _, validator := range validators {
+		switch {
+		case validator.intValue != nil:
+			indexes = append(indexes, *validator.intValue)
+		case validator.strValue != nil:
+			if !reValidatorPublicKey.MatchString(*validator.strValue) {
+				v.add("validators", fmt.Sprintf("given value '%s' is not a valid validator", *validator.strValue))
+				continue
+			}
+			publicKeys = append(publicKeys, *validator.strValue)
+		default:
+			v.add("validators", "list contains invalid validator")
+		}
+	}
+	return indexes, publicKeys
+}
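A short illustrative sketch of the mixed-list parsing above (dummy pubkey; assumes package-internal access and a `strings` import):

```go
// "5" is collected as an index, the 0x-prefixed pubkey as a public key,
// and "oops" is recorded as a validation error.
var v validationError
indexes, pubkeys := v.checkValidatorList("5,0x"+strings.Repeat("ab", 48)+",oops", forbidEmpty)
fmt.Println(indexes, len(pubkeys), v.hasErrors()) // [5] 1 true
```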
+
+func (v *validationError) checkNetwork(network intOrString) uint64 {
+	chainId, ok := isValidNetwork(network)
+	if !ok {
+		v.add("network", fmt.Sprintf("given value '%s' is not a valid network", network))
+	}
+	return chainId
+}
+
+func (v *validationError) checkNetworkParameter(param string) uint64 {
+	if reInteger.MatchString(param) {
+		chainId, err := strconv.ParseUint(param, 10, 64)
+		if err != nil {
+			v.add("network", fmt.Sprintf("given value '%s' is not a valid network", param))
+			return 0
+		}
+		return v.checkNetwork(intOrString{intValue: &chainId})
+	}
+	return v.checkNetwork(intOrString{strValue: &param})
+}
+
+func (v *validationError) checkNetworksParameter(param string) []uint64 {
+	var chainIds []uint64
+	for _, network := range splitParameters(param, ',') {
+		chainIds = append(chainIds, v.checkNetworkParameter(network))
+	}
+	return chainIds
+}
+
+// isValidNetwork checks if the given network is known.
+// It returns the chain id of the network and true if it is valid, otherwise 0 and false.
+func isValidNetwork(network intOrString) (uint64, bool) {
+	for _, realNetwork := range allNetworks {
+		if (network.intValue != nil && realNetwork.ChainId == *network.intValue) || (network.strValue != nil && realNetwork.Name == *network.strValue) {
+			return realNetwork.ChainId, true
+		}
+	}
+	return 0, false
+}
+
+func (v *validationError) checkTimestamps(r *http.Request, chartLimits ChartTimeDashboardLimits) (after uint64, before uint64) {
+	afterParam := r.URL.Query().Get("after_ts")
+	beforeParam := r.URL.Query().Get("before_ts")
+	switch {
+	// if both parameters are empty, return the latest data
+	case afterParam == "" && beforeParam == "":
+		return max(chartLimits.LatestExportedTs-chartLimits.MaxAllowedInterval, chartLimits.MinAllowedTs), chartLimits.LatestExportedTs
+
+	// if only the afterParam is provided
+	case afterParam != "" && beforeParam == "":
+		afterTs := v.checkUint(afterParam, "after_ts")
+		beforeTs := afterTs + chartLimits.MaxAllowedInterval
+		return afterTs, beforeTs
+
+	// if only the beforeParam is provided
+	case beforeParam != "" && afterParam == "":
+		beforeTs := v.checkUint(beforeParam, "before_ts")
+		afterTs := max(beforeTs-chartLimits.MaxAllowedInterval, chartLimits.MinAllowedTs)
+		return afterTs, beforeTs
+
+	// if both parameters are provided, validate them
+	default:
+		afterTs := v.checkUint(afterParam, "after_ts")
+		beforeTs := v.checkUint(beforeParam, "before_ts")
+
+		if afterTs > beforeTs {
+			v.add("after_ts", "parameter `after_ts` must not be greater than `before_ts`")
+		}
+
+		if beforeTs-afterTs > chartLimits.MaxAllowedInterval {
+			v.add("before_ts", fmt.Sprintf("parameters `after_ts` and `before_ts` must not lie apart more than %d seconds for this aggregation", chartLimits.MaxAllowedInterval))
+		}
+
+		return afterTs, beforeTs
+	}
+}
diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go
index d111ab7ca..83bb4faf2 100644
--- a/backend/pkg/api/handlers/internal.go
+++ b/backend/pkg/api/handlers/internal.go
@@ -2,7 +2,6 @@ package handlers
 
 import (
 	"errors"
-	"math"
 	"net/http"
 
 	"github.com/gobitfly/beaconchain/pkg/api/enums"
@@ -15,7 +14,7 @@ import (
 // Premium Plans
 
 func (h *HandlerService) InternalGetProductSummary(w http.ResponseWriter, r *http.Request) {
-	data, err := h.dai.GetProductSummary(r.Context())
+	data, err := h.daService.GetProductSummary(r.Context())
 	if err != nil {
 		handleErr(w, r, err)
 		return
@@ -26,23 +25,39 @@
 	returnOk(w, r, response)
 }
 
+// 
-------------------------------------- +// API Ratelimit Weights + +func (h *HandlerService) InternalGetRatelimitWeights(w http.ResponseWriter, r *http.Request) { + data, err := h.daService.GetApiWeights(r.Context()) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetRatelimitWeightsResponse{ + Data: data, + } + returnOk(w, r, response) +} + // -------------------------------------- // Latest State func (h *HandlerService) InternalGetLatestState(w http.ResponseWriter, r *http.Request) { - latestSlot, err := h.dai.GetLatestSlot() + ctx := r.Context() + latestSlot, err := h.daService.GetLatestSlot(ctx) if err != nil { handleErr(w, r, err) return } - finalizedEpoch, err := h.dai.GetLatestFinalizedEpoch() + finalizedEpoch, err := h.daService.GetLatestFinalizedEpoch(ctx) if err != nil { handleErr(w, r, err) return } - exchangeRates, err := h.dai.GetLatestExchangeRates() + exchangeRates, err := h.daService.GetLatestExchangeRates(ctx) if err != nil { handleErr(w, r, err) return @@ -60,7 +75,7 @@ func (h *HandlerService) InternalGetLatestState(w http.ResponseWriter, r *http.R } func (h *HandlerService) InternalGetRocketPool(w http.ResponseWriter, r *http.Request) { - data, err := h.dai.GetRocketPoolOverview(r.Context()) + data, err := h.daService.GetRocketPoolOverview(r.Context()) if err != nil { handleErr(w, r, err) return @@ -85,7 +100,7 @@ func (h *HandlerService) InternalPostAdConfigurations(w http.ResponseWriter, r * handleErr(w, r, err) return } - if user.UserGroup != "ADMIN" { + if user.UserGroup != types.UserGroupAdmin { returnForbidden(w, r, errors.New("user is not an admin")) return } @@ -112,7 +127,7 @@ func (h *HandlerService) InternalPostAdConfigurations(w http.ResponseWriter, r * return } - err = h.dai.CreateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) + err = h.daService.CreateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) if err != nil { handleErr(w, r, err) return @@ -131,7 +146,7 @@ func (h *HandlerService) InternalGetAdConfigurations(w http.ResponseWriter, r *h handleErr(w, r, err) return } - if user.UserGroup != "ADMIN" { + if user.UserGroup != types.UserGroupAdmin { returnForbidden(w, r, errors.New("user is not an admin")) return } @@ -142,7 +157,7 @@ func (h *HandlerService) InternalGetAdConfigurations(w http.ResponseWriter, r *h return } - data, err := h.dai.GetAdConfigurations(r.Context(), keys) + data, err := h.daService.GetAdConfigurations(r.Context(), keys) if err != nil { handleErr(w, r, err) return @@ -161,7 +176,7 @@ func (h *HandlerService) InternalPutAdConfiguration(w http.ResponseWriter, r *ht handleErr(w, r, err) return } - if user.UserGroup != "ADMIN" { + if user.UserGroup != types.UserGroupAdmin { returnForbidden(w, r, errors.New("user is not an admin")) return } @@ -188,7 +203,7 @@ func (h *HandlerService) InternalPutAdConfiguration(w http.ResponseWriter, r *ht return } - err = h.dai.UpdateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) + err = h.daService.UpdateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) if err != nil { handleErr(w, r, err) return @@ -207,7 +222,7 @@ func (h *HandlerService) InternalDeleteAdConfiguration(w 
http.ResponseWriter, r handleErr(w, r, err) return } - if user.UserGroup != "ADMIN" { + if user.UserGroup != types.UserGroupAdmin { returnForbidden(w, r, errors.New("user is not an admin")) return } @@ -218,7 +233,7 @@ func (h *HandlerService) InternalDeleteAdConfiguration(w http.ResponseWriter, r return } - err = h.dai.RemoveAdConfiguration(r.Context(), key) + err = h.daService.RemoveAdConfiguration(r.Context(), key) if err != nil { handleErr(w, r, err) return @@ -237,7 +252,7 @@ func (h *HandlerService) InternalGetUserInfo(w http.ResponseWriter, r *http.Requ handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(r.Context(), user.Id) + userInfo, err := h.daService.GetUserInfo(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -353,6 +368,34 @@ func (h *HandlerService) InternalGetValidatorDashboardValidators(w http.Response h.PublicGetValidatorDashboardValidators(w, r) } +func (h *HandlerService) InternalGetValidatorDashboardMobileValidators(w http.ResponseWriter, r *http.Request) { + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + + period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") + sort := checkSort[enums.VDBMobileValidatorsColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, paging, err := h.daService.GetValidatorDashboardMobileValidators(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetValidatorDashboardMobileValidatorsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + func (h *HandlerService) InternalDeleteValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { h.PublicDeleteValidatorDashboardValidators(w, r) } @@ -453,14 +496,43 @@ func (h *HandlerService) InternalGetValidatorDashboardTotalRocketPool(w http.Res h.PublicGetValidatorDashboardTotalRocketPool(w, r) } -func (h *HandlerService) InternalGetValidatorDashboardNodeRocketPool(w http.ResponseWriter, r *http.Request) { - h.PublicGetValidatorDashboardNodeRocketPool(w, r) -} - func (h *HandlerService) InternalGetValidatorDashboardRocketPoolMinipools(w http.ResponseWriter, r *http.Request) { h.PublicGetValidatorDashboardRocketPoolMinipools(w, r) } +// even though this endpoint is internal only, it should still not be broken since it is used by the mobile app +func (h *HandlerService) InternalGetValidatorDashboardMobileWidget(w http.ResponseWriter, r *http.Request) { + var v validationError + dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) + if v.hasErrors() { + handleErr(w, r, v) + return + } + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + userInfo, err := h.daService.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + if userInfo.UserGroup != types.UserGroupAdmin && !userInfo.PremiumPerks.MobileAppWidget { + returnForbidden(w, r, errors.New("user does not have access to mobile app widget")) + return + } + data, err := h.daService.GetValidatorDashboardMobileWidget(r.Context(), dashboardId) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetValidatorDashboardMobileWidgetResponse{ + Data: *data, + } + returnOk(w, r, response) +} + // 
-------------------------------------- // Mobile @@ -474,7 +546,7 @@ func (h *HandlerService) InternalGetMobileLatestBundle(w http.ResponseWriter, r handleErr(w, r, v) return } - stats, err := h.dai.GetLatestBundleForNativeVersion(r.Context(), nativeVersion) + stats, err := h.daService.GetLatestBundleForNativeVersion(r.Context(), nativeVersion) if err != nil { handleErr(w, r, err) return @@ -482,7 +554,7 @@ func (h *HandlerService) InternalGetMobileLatestBundle(w http.ResponseWriter, r var data types.MobileBundleData data.HasNativeUpdateAvailable = stats.MaxNativeVersion > nativeVersion // if given bundle version is smaller than the latest and delivery count is less than target count, return the latest bundle - if force || (bundleVersion < stats.LatestBundleVersion && (stats.TargetCount == 0 || stats.DeliveryCount < stats.TargetCount)) { + if force || (bundleVersion < stats.LatestBundleVersion && (stats.TargetCount == -1 || stats.DeliveryCount < stats.TargetCount)) { data.BundleUrl = stats.BundleUrl } response := types.GetMobileLatestBundleResponse{ @@ -499,7 +571,7 @@ func (h *HandlerService) InternalPostMobileBundleDeliveries(w http.ResponseWrite handleErr(w, r, v) return } - err := h.dai.IncrementBundleDeliveryCount(r.Context(), bundleVersion) + err := h.daService.IncrementBundleDeliveryCount(r.Context(), bundleVersion) if err != nil { handleErr(w, r, err) return @@ -511,459 +583,83 @@ func (h *HandlerService) InternalPostMobileBundleDeliveries(w http.ResponseWrite // Notifications func (h *HandlerService) InternalGetUserNotifications(w http.ResponseWriter, r *http.Request) { - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - data, err := h.dai.GetNotificationOverview(r.Context(), userId) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationsResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetUserNotifications(w, r) } func (h *HandlerService) InternalGetUserNotificationDashboards(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationDashboardsColumn](&v, q.Get("sort")) - chainId := v.checkNetworkParameter(q.Get("network")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetDashboardNotifications(r.Context(), userId, chainId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationDashboardsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationDashboards(w, r) } func (h *HandlerService) InternalGetUserNotificationsValidatorDashboard(w http.ResponseWriter, r *http.Request) { - var v validationError - notificationId := v.checkRegex(reNonEmpty, mux.Vars(r)["notification_id"], "notification_id") - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, err := h.dai.GetValidatorDashboardNotificationDetails(r.Context(), notificationId) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationsValidatorDashboardResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetUserNotificationsValidatorDashboard(w, r) } func (h *HandlerService) InternalGetUserNotificationsAccountDashboard(w http.ResponseWriter, r *http.Request) { - var v 
validationError - notificationId := v.checkRegex(reNonEmpty, mux.Vars(r)["notification_id"], "notification_id") - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, err := h.dai.GetAccountDashboardNotificationDetails(r.Context(), notificationId) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationsAccountDashboardResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetUserNotificationsAccountDashboard(w, r) } func (h *HandlerService) InternalGetUserNotificationMachines(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationMachinesColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetMachineNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationMachinesResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationMachines(w, r) } func (h *HandlerService) InternalGetUserNotificationClients(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationClientsColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetClientNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationClientsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationClients(w, r) } func (h *HandlerService) InternalGetUserNotificationRocketPool(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationRocketPoolColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetRocketPoolNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationRocketPoolResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationRocketPool(w, r) } func (h *HandlerService) InternalGetUserNotificationNetworks(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationNetworksColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetNetworkNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationNetworksResponse{ - Data: data, - Paging: *paging, - } - 
returnOk(w, r, response) + h.PublicGetUserNotificationNetworks(w, r) } func (h *HandlerService) InternalGetUserNotificationSettings(w http.ResponseWriter, r *http.Request) { - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - data, err := h.dai.GetNotificationSettings(r.Context(), userId) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationSettingsResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetUserNotificationSettings(w, r) } func (h *HandlerService) InternalPutUserNotificationSettingsGeneral(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - var req types.NotificationSettingsGeneral - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - checkMinMax(&v, req.MachineStorageUsageThreshold, 0, 1, "machine_storage_usage_threshold") - checkMinMax(&v, req.MachineCpuUsageThreshold, 0, 1, "machine_cpu_usage_threshold") - checkMinMax(&v, req.MachineMemoryUsageThreshold, 0, 1, "machine_memory_usage_threshold") - checkMinMax(&v, req.RocketPoolMaxCollateralThreshold, 0, 1, "rocket_pool_max_collateral_threshold") - checkMinMax(&v, req.RocketPoolMinCollateralThreshold, 0, 1, "rocket_pool_min_collateral_threshold") - // TODO: check validity of clients - if v.hasErrors() { - handleErr(w, r, v) - return - } - err = h.dai.UpdateNotificationSettingsGeneral(r.Context(), userId, req) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalPutUserNotificationSettingsGeneralResponse{ - Data: req, - } - returnOk(w, r, response) + h.PublicPutUserNotificationSettingsGeneral(w, r) } func (h *HandlerService) InternalPutUserNotificationSettingsNetworks(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - var req types.NotificationSettingsNetwork - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - checkMinMax(&v, req.ParticipationRateThreshold, 0, 1, "participation_rate_threshold") - - chainId := v.checkNetworkParameter(mux.Vars(r)["network"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - err = h.dai.UpdateNotificationSettingsNetworks(r.Context(), userId, chainId, req) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalPutUserNotificationSettingsNetworksResponse{ - Data: types.NotificationNetwork{ - ChainId: chainId, - Settings: req, - }, - } - returnOk(w, r, response) + h.PublicPutUserNotificationSettingsNetworks(w, r) } func (h *HandlerService) InternalPutUserNotificationSettingsPairedDevices(w http.ResponseWriter, r *http.Request) { - var v validationError - req := struct { - Name string `json:"name,omitempty"` - IsNotificationsEnabled bool `json:"is_notifications_enabled"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - // TODO use a better way to validate the paired device id - pairedDeviceId := v.checkRegex(reNonEmpty, mux.Vars(r)["paired_device_id"], "paired_device_id") - name := v.checkNameNotEmpty(req.Name) - if v.hasErrors() { - handleErr(w, r, v) - return - } - err := h.dai.UpdateNotificationSettingsPairedDevice(r.Context(), pairedDeviceId, name, req.IsNotificationsEnabled) - if err != nil { - handleErr(w, r, err) - return - } - // TODO timestamp - response := 
types.InternalPutUserNotificationSettingsPairedDevicesResponse{ - Data: types.NotificationPairedDevice{ - Id: pairedDeviceId, - Name: req.Name, - IsNotificationsEnabled: req.IsNotificationsEnabled, - }, - } - - returnOk(w, r, response) + h.PublicPutUserNotificationSettingsPairedDevices(w, r) } func (h *HandlerService) InternalDeleteUserNotificationSettingsPairedDevices(w http.ResponseWriter, r *http.Request) { - var v validationError - // TODO use a better way to validate the paired device id - pairedDeviceId := v.checkRegex(reNonEmpty, mux.Vars(r)["paired_device_id"], "paired_device_id") - if v.hasErrors() { - handleErr(w, r, v) - return - } - err := h.dai.DeleteNotificationSettingsPairedDevice(r.Context(), pairedDeviceId) - if err != nil { - handleErr(w, r, err) - return - } - returnNoContent(w, r) + h.PublicDeleteUserNotificationSettingsPairedDevices(w, r) +} + +func (h *HandlerService) InternalPutUserNotificationSettingsClient(w http.ResponseWriter, r *http.Request) { + h.PublicPutUserNotificationSettingsClient(w, r) } func (h *HandlerService) InternalGetUserNotificationSettingsDashboards(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationSettingsDashboardColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetNotificationSettingsDashboards(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationSettingsDashboardsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationSettingsDashboards(w, r) } func (h *HandlerService) InternalPutUserNotificationSettingsValidatorDashboard(w http.ResponseWriter, r *http.Request) { - var v validationError - var req types.NotificationSettingsValidatorDashboard - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - checkMinMax(&v, req.GroupOfflineThreshold, 0, 1, "group_offline_threshold") - vars := mux.Vars(r) - dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) - groupId := v.checkExistingGroupId(vars["group_id"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - err := h.dai.UpdateNotificationSettingsValidatorDashboard(r.Context(), dashboardId, groupId, req) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalPutUserNotificationSettingsValidatorDashboardResponse{ - Data: req, - } - returnOk(w, r, response) + h.PublicPutUserNotificationSettingsValidatorDashboard(w, r) } func (h *HandlerService) InternalPutUserNotificationSettingsAccountDashboard(w http.ResponseWriter, r *http.Request) { - var v validationError - req := struct { - WebhookUrl string `json:"webhook_url"` - IsWebhookDiscordEnabled bool `json:"is_webhook_discord_enabled"` - IsIgnoreSpamTransactionsEnabled bool `json:"is_ignore_spam_transactions_enabled"` - SubscribedChainIds []intOrString `json:"subscribed_chain_ids"` - - IsIncomingTransactionsSubscribed bool `json:"is_incoming_transactions_subscribed"` - IsOutgoingTransactionsSubscribed bool `json:"is_outgoing_transactions_subscribed"` - IsERC20TokenTransfersSubscribed bool `json:"is_erc20_token_transfers_subscribed"` - ERC20TokenTransfersValueThreshold float64 `json:"erc20_token_transfers_value_threshold"` // 0 does not 
disable, is_erc20_token_transfers_subscribed determines if it's enabled - IsERC721TokenTransfersSubscribed bool `json:"is_erc721_token_transfers_subscribed"` - IsERC1155TokenTransfersSubscribed bool `json:"is_erc1155_token_transfers_subscribed"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - chainIdMap := v.checkNetworkSlice(req.SubscribedChainIds) - // convert to uint64[] slice - chainIds := make([]uint64, len(chainIdMap)) - i := 0 - for k := range chainIdMap { - chainIds[i] = k - i++ - } - checkMinMax(&v, req.ERC20TokenTransfersValueThreshold, 0, math.MaxFloat64, "group_offline_threshold") - vars := mux.Vars(r) - dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) - groupId := v.checkExistingGroupId(vars["group_id"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - settings := types.NotificationSettingsAccountDashboard{ - WebhookUrl: req.WebhookUrl, - IsWebhookDiscordEnabled: req.IsWebhookDiscordEnabled, - IsIgnoreSpamTransactionsEnabled: req.IsIgnoreSpamTransactionsEnabled, - SubscribedChainIds: chainIds, - - IsIncomingTransactionsSubscribed: req.IsIncomingTransactionsSubscribed, - IsOutgoingTransactionsSubscribed: req.IsOutgoingTransactionsSubscribed, - IsERC20TokenTransfersSubscribed: req.IsERC20TokenTransfersSubscribed, - ERC20TokenTransfersValueThreshold: req.ERC20TokenTransfersValueThreshold, - IsERC721TokenTransfersSubscribed: req.IsERC721TokenTransfersSubscribed, - IsERC1155TokenTransfersSubscribed: req.IsERC1155TokenTransfersSubscribed, - } - err := h.dai.UpdateNotificationSettingsAccountDashboard(r.Context(), dashboardId, groupId, settings) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalPutUserNotificationSettingsAccountDashboardResponse{ - Data: settings, - } - returnOk(w, r, response) + h.PublicPutUserNotificationSettingsAccountDashboard(w, r) } func (h *HandlerService) InternalPostUserNotificationsTestEmail(w http.ResponseWriter, r *http.Request) { - // TODO - returnOk(w, r, nil) + h.PublicPostUserNotificationsTestEmail(w, r) } func (h *HandlerService) InternalPostUserNotificationsTestPush(w http.ResponseWriter, r *http.Request) { - // TODO - returnOk(w, r, nil) + h.PublicPostUserNotificationsTestPush(w, r) } func (h *HandlerService) InternalPostUserNotificationsTestWebhook(w http.ResponseWriter, r *http.Request) { - var v validationError - req := struct { - WebhookUrl string `json:"webhook_url"` - IsDiscordWebhookEnabled bool `json:"is_discord_webhook_enabled,omitempty"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - if v.hasErrors() { - handleErr(w, r, v) - return - } - // TODO - returnOk(w, r, nil) + h.PublicPostUserNotificationsTestWebhook(w, r) } // -------------------------------------- @@ -976,7 +672,7 @@ func (h *HandlerService) InternalGetBlock(w http.ResponseWriter, r *http.Request return } - data, err := h.dai.GetBlock(r.Context(), chainId, block) + data, err := h.daService.GetBlock(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -995,7 +691,7 @@ func (h *HandlerService) InternalGetBlockOverview(w http.ResponseWriter, r *http return } - data, err := h.dai.GetBlockOverview(r.Context(), chainId, block) + data, err := h.daService.GetBlockOverview(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1013,7 +709,7 @@ func (h *HandlerService) InternalGetBlockTransactions(w http.ResponseWriter, r * return } - data, err := h.dai.GetBlockTransactions(r.Context(), chainId, block) + 
data, err := h.daService.GetBlockTransactions(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1032,7 +728,7 @@ func (h *HandlerService) InternalGetBlockVotes(w http.ResponseWriter, r *http.Re return } - data, err := h.dai.GetBlockVotes(r.Context(), chainId, block) + data, err := h.daService.GetBlockVotes(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1051,7 +747,7 @@ func (h *HandlerService) InternalGetBlockAttestations(w http.ResponseWriter, r * return } - data, err := h.dai.GetBlockAttestations(r.Context(), chainId, block) + data, err := h.daService.GetBlockAttestations(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1070,7 +766,7 @@ func (h *HandlerService) InternalGetBlockWithdrawals(w http.ResponseWriter, r *h return } - data, err := h.dai.GetBlockWithdrawals(r.Context(), chainId, block) + data, err := h.daService.GetBlockWithdrawals(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1089,7 +785,7 @@ func (h *HandlerService) InternalGetBlockBlsChanges(w http.ResponseWriter, r *ht return } - data, err := h.dai.GetBlockBlsChanges(r.Context(), chainId, block) + data, err := h.daService.GetBlockBlsChanges(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1108,7 +804,7 @@ func (h *HandlerService) InternalGetBlockVoluntaryExits(w http.ResponseWriter, r return } - data, err := h.dai.GetBlockVoluntaryExits(r.Context(), chainId, block) + data, err := h.daService.GetBlockVoluntaryExits(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1127,7 +823,7 @@ func (h *HandlerService) InternalGetBlockBlobs(w http.ResponseWriter, r *http.Re return } - data, err := h.dai.GetBlockBlobs(r.Context(), chainId, block) + data, err := h.daService.GetBlockBlobs(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1149,7 +845,7 @@ func (h *HandlerService) InternalGetSlot(w http.ResponseWriter, r *http.Request) return } - data, err := h.dai.GetSlot(r.Context(), chainId, block) + data, err := h.daService.GetSlot(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1168,7 +864,7 @@ func (h *HandlerService) InternalGetSlotOverview(w http.ResponseWriter, r *http. 
return } - data, err := h.dai.GetSlotOverview(r.Context(), chainId, block) + data, err := h.daService.GetSlotOverview(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1186,7 +882,7 @@ func (h *HandlerService) InternalGetSlotTransactions(w http.ResponseWriter, r *h return } - data, err := h.dai.GetSlotTransactions(r.Context(), chainId, block) + data, err := h.daService.GetSlotTransactions(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1205,7 +901,7 @@ func (h *HandlerService) InternalGetSlotVotes(w http.ResponseWriter, r *http.Req return } - data, err := h.dai.GetSlotVotes(r.Context(), chainId, block) + data, err := h.daService.GetSlotVotes(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1224,7 +920,7 @@ func (h *HandlerService) InternalGetSlotAttestations(w http.ResponseWriter, r *h return } - data, err := h.dai.GetSlotAttestations(r.Context(), chainId, block) + data, err := h.daService.GetSlotAttestations(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1243,7 +939,7 @@ func (h *HandlerService) InternalGetSlotWithdrawals(w http.ResponseWriter, r *ht return } - data, err := h.dai.GetSlotWithdrawals(r.Context(), chainId, block) + data, err := h.daService.GetSlotWithdrawals(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1262,7 +958,7 @@ func (h *HandlerService) InternalGetSlotBlsChanges(w http.ResponseWriter, r *htt return } - data, err := h.dai.GetSlotBlsChanges(r.Context(), chainId, block) + data, err := h.daService.GetSlotBlsChanges(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1281,7 +977,7 @@ func (h *HandlerService) InternalGetSlotVoluntaryExits(w http.ResponseWriter, r return } - data, err := h.dai.GetSlotVoluntaryExits(r.Context(), chainId, block) + data, err := h.daService.GetSlotVoluntaryExits(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1300,7 +996,7 @@ func (h *HandlerService) InternalGetSlotBlobs(w http.ResponseWriter, r *http.Req return } - data, err := h.dai.GetSlotBlobs(r.Context(), chainId, block) + data, err := h.daService.GetSlotBlobs(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -1311,3 +1007,7 @@ func (h *HandlerService) InternalGetSlotBlobs(w http.ResponseWriter, r *http.Req } returnOk(w, r, response) } + +func (h *HandlerService) ReturnOk(w http.ResponseWriter, r *http.Request) { + returnOk(w, r, nil) +} diff --git a/backend/pkg/api/handlers/machine_metrics.go b/backend/pkg/api/handlers/machine_metrics.go new file mode 100644 index 000000000..9bef843d5 --- /dev/null +++ b/backend/pkg/api/handlers/machine_metrics.go @@ -0,0 +1,219 @@ +package handlers + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "github.com/gobitfly/beaconchain/pkg/api/types" + + "github.com/gobitfly/beaconchain/pkg/commons/db" + commontypes "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + + "google.golang.org/protobuf/proto" +) + +func (h *HandlerService) InternalGetUserMachineMetrics(w http.ResponseWriter, r *http.Request) { + h.PublicGetUserMachineMetrics(w, r) +} + +func (h *HandlerService) PublicGetUserMachineMetrics(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + + userInfo, err := h.daService.GetUserInfo(r.Context(), userId) + if err != nil { + 
handleErr(w, r, err) + return + } + + q := r.URL.Query() + offset := v.checkUint(q.Get("offset"), "offset") + limit := uint64(180) + if limitParam := q.Get("limit"); limitParam != "" { + limit = v.checkUint(limitParam, "limit") + } + + // validate limit and offset according to user's premium perks + maxDataPoints := userInfo.PremiumPerks.MachineMonitoringHistorySeconds / 60 // one entry per minute + timeframe := offset + limit + if timeframe > maxDataPoints { + limit = maxDataPoints + offset = 0 + } + + data, err := h.daService.GetUserMachineMetrics(r.Context(), userId, int(limit), int(offset)) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetUserMachineMetricsRespone{ + Data: *data, + } + + returnOk(w, r, response) +} + +func (h *HandlerService) LegacyPostUserMachineMetrics(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query() + apiKey := q.Get("apikey") + machine := q.Get("machine") + + if apiKey == "" { + apiKey = r.Header.Get("apikey") + } + + if !h.isPostMachineMetricsEnabled { + returnError(w, r, http.StatusServiceUnavailable, fmt.Errorf("machine metrics pushing is temporarily disabled")) + return + } + + userID, err := h.daService.GetUserIdByApiKey(r.Context(), apiKey) + if err != nil { + returnBadRequest(w, r, fmt.Errorf("no user found with api key")) + return + } + + userInfo, err := h.daService.GetUserInfo(r.Context(), userID) + if err != nil { + handleErr(w, r, err) + return + } + + if contentType := r.Header.Get("Content-Type"); !reJsonContentType.MatchString(contentType) { + returnBadRequest(w, r, fmt.Errorf("invalid content type, expected application/json")) + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + returnBadRequest(w, r, fmt.Errorf("could not read request body")) + return + } + + var jsonObjects []map[string]interface{} + err = json.Unmarshal(body, &jsonObjects) + if err != nil { + var jsonObject map[string]interface{} + err = json.Unmarshal(body, &jsonObject) + if err != nil { + returnBadRequest(w, r, errors.Wrap(err, "invalid JSON format in request body")) + return + } + jsonObjects = []map[string]interface{}{jsonObject} + } + + if len(jsonObjects) > 10 { + returnBadRequest(w, r, fmt.Errorf("max number of stat entries is 10")) + return + } + + var rateLimitErrs = 0 + for i := 0; i < len(jsonObjects); i++ { + err := h.internal_processMachine(r.Context(), machine, &jsonObjects[i], userInfo) + if err != nil { + if strings.HasPrefix(err.Error(), "rate limit") { + rateLimitErrs++ + continue + } + handleErr(w, r, err) + return + } + } + + if rateLimitErrs >= len(jsonObjects) { + returnTooManyRequests(w, r, fmt.Errorf("too many metric requests, max allowed is 1 per user per machine per process")) + return + } + + returnOk(w, r, nil) +} + +func (h *HandlerService) internal_processMachine(context context.Context, machine string, obj *map[string]interface{}, userInfo *types.UserInfo) error { + var parsedMeta *commontypes.StatsMeta + err := mapstructure.Decode(obj, &parsedMeta) + if err != nil { + return fmt.Errorf("%w: %w", errBadRequest, err) + } + + parsedMeta.Machine = machine + + if parsedMeta.Version > 2 || parsedMeta.Version <= 0 { + return newBadRequestErr("unsupported data format version") + } + + if parsedMeta.Process != "validator" && parsedMeta.Process != "beaconnode" && parsedMeta.Process != "slasher" && parsedMeta.Process != "system" { + return newBadRequestErr("unknown process") + } + + maxNodes := userInfo.PremiumPerks.MonitorMachines + + count, err :=
db.BigtableClient.GetMachineMetricsMachineCount(commontypes.UserId(userInfo.Id)) + if err != nil { + return errors.Wrap(err, "could not get machine count") + } + + if count > maxNodes { + return newForbiddenErr("user has reached max machine count") + } + + // protobuf encode + var data []byte + if parsedMeta.Process == "system" { + var parsedResponse *commontypes.MachineMetricSystem + err = DecodeMapStructure(obj, &parsedResponse) + if err != nil { + return fmt.Errorf("%w: %w could not parse stats (system stats)", errBadRequest, err) + } + data, err = proto.Marshal(parsedResponse) + if err != nil { + return errors.Wrap(err, "could not parse stats (system stats)") + } + } else if parsedMeta.Process == "validator" { + var parsedResponse *commontypes.MachineMetricValidator + err = DecodeMapStructure(obj, &parsedResponse) + if err != nil { + return fmt.Errorf("%w: %w could not parse stats (validator stats)", errBadRequest, err) + } + data, err = proto.Marshal(parsedResponse) + if err != nil { + return errors.Wrap(err, "could not parse stats (validator stats)") + } + } else if parsedMeta.Process == "beaconnode" { + var parsedResponse *commontypes.MachineMetricNode + err = DecodeMapStructure(obj, &parsedResponse) + if err != nil { + return fmt.Errorf("%w: %w could not parse stats (beaconnode stats)", errBadRequest, err) + } + data, err = proto.Marshal(parsedResponse) + if err != nil { + return errors.Wrap(err, "could not parse stats (beaconnode stats)") + } + } + + return h.daService.PostUserMachineMetrics(context, userInfo.Id, machine, parsedMeta.Process, data) +} + +func DecodeMapStructure(input interface{}, output interface{}) error { + config := &mapstructure.DecoderConfig{ + Metadata: nil, + Result: output, + TagName: "json", + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} diff --git a/backend/pkg/api/handlers/middlewares.go b/backend/pkg/api/handlers/middlewares.go new file mode 100644 index 000000000..f24ae39a7 --- /dev/null +++ b/backend/pkg/api/handlers/middlewares.go @@ -0,0 +1,212 @@ +package handlers + +import ( + "context" + "errors" + "net/http" + "slices" + "strconv" + + "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gorilla/mux" +) + +// Middlewares + +// middleware that stores user id in context, using the provided function +func StoreUserIdMiddleware(next http.Handler, userIdFunc func(r *http.Request) (uint64, error)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + userId, err := userIdFunc(r) + if err != nil { + if errors.Is(err, errUnauthorized) { + // if next handler requires authentication, it should return 'unauthorized' itself + next.ServeHTTP(w, r) + } else { + handleErr(w, r, err) + } + return + } + + // store user id in context + ctx := r.Context() + ctx = context.WithValue(ctx, types.CtxUserIdKey, userId) + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + }) +} + +// middleware that stores user id in context, using the session to get the user id +func (h *HandlerService) StoreUserIdBySessionMiddleware(next http.Handler) http.Handler { + return StoreUserIdMiddleware(next, func(r *http.Request) (uint64, error) { + return h.GetUserIdBySession(r) + }) +} + +// middleware that stores user id in context, using the api key to get the user id +func (h *HandlerService) StoreUserIdByApiKeyMiddleware(next http.Handler) http.Handler { + return StoreUserIdMiddleware(next, func(r *http.Request) (uint64, error) { + return h.GetUserIdByApiKey(r) + }) 
+} + +// middleware that checks if user has access to dashboard when a primary id is used +func (h *HandlerService) VDBAuthMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // if mock data is used, no need to check access + if isMocked, ok := r.Context().Value(types.CtxIsMockedKey).(bool); ok && isMocked { + next.ServeHTTP(w, r) + return + } + var err error + dashboardId, err := strconv.ParseUint(mux.Vars(r)["dashboard_id"], 10, 64) + if err != nil { + // if primary id is not used, no need to check access + next.ServeHTTP(w, r) + return + } + // primary id is used -> user needs to have access to dashboard + + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + dashboardUser, err := h.daService.GetValidatorDashboardUser(r.Context(), types.VDBIdPrimary(dashboardId)) + if err != nil { + handleErr(w, r, err) + return + } + + if dashboardUser.UserId != userId { + // user does not have access to dashboard + // the proper error would be 403 Forbidden, but we don't want to leak information so we return 404 Not Found + handleErr(w, r, newNotFoundErr("dashboard with id %v not found", dashboardId)) + return + } + + next.ServeHTTP(w, r) + }) +} + +// Common middleware logic for checking user premium perks +func (h *HandlerService) PremiumPerkCheckMiddleware(next http.Handler, hasRequiredPerk func(premiumPerks types.PremiumPerks) bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // get user id from context + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + + // get user info + userInfo, err := h.daService.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + + // check if user has the required premium perk + if !hasRequiredPerk(userInfo.PremiumPerks) { + handleErr(w, r, newForbiddenErr("users premium perks do not allow usage of this endpoint")) + return + } + + next.ServeHTTP(w, r) + }) +} + +// Middleware for managing dashboards via API +func (h *HandlerService) ManageDashboardsViaApiCheckMiddleware(next http.Handler) http.Handler { + return h.PremiumPerkCheckMiddleware(next, func(premiumPerks types.PremiumPerks) bool { + return premiumPerks.ManageDashboardViaApi + }) +} + +// Middleware for managing notifications via API +func (h *HandlerService) ManageNotificationsViaApiCheckMiddleware(next http.Handler) http.Handler { + return h.PremiumPerkCheckMiddleware(next, func(premiumPerks types.PremiumPerks) bool { + return premiumPerks.ConfigureNotificationsViaApi + }) +} + +// middleware check to return if specified dashboard is not archived (and accessible) +func (h *HandlerService) VDBArchivedCheckMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if isMocked, ok := r.Context().Value(types.CtxIsMockedKey).(bool); ok && isMocked { + next.ServeHTTP(w, r) + return + } + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + // store dashboard id in context + ctx := r.Context() + ctx = context.WithValue(ctx, types.CtxDashboardIdKey, dashboardId) + r = r.WithContext(ctx) + + if len(dashboardId.Validators) > 0 { // don't check guest dashboards + next.ServeHTTP(w, r) + return + } + dashboard, err := h.daService.GetValidatorDashboardInfo(r.Context(), dashboardId.Id) + if err != nil { + handleErr(w, r, err) + return + } + if 
dashboard.IsArchived { + handleErr(w, r, newForbiddenErr("dashboard with id %v is archived", dashboardId)) + return + } + next.ServeHTTP(w, r) + }) +} + +// middleware that checks for `is_mocked` query param and stores it in the request context. +// should bypass auth checks if the flag is set and cause handlers to return mocked data. +// only allowed for users in the admin or dev group. +// note that mocked data is only returned by handlers that check for it. +func (h *HandlerService) StoreIsMockedFlagMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var v validationError + q := r.URL.Query() + isMocked := v.checkBool(q.Get("is_mocked"), "is_mocked") + var mockSeed int64 + if mockSeedStr := q.Get("mock_seed"); mockSeedStr != "" { + mockSeed = v.checkInt(mockSeedStr, "mock_seed") + } + if v.hasErrors() { + handleErr(w, r, v) + return + } + if !isMocked { + next.ServeHTTP(w, r) + return + } + // fetch user group + userId, err := h.GetUserIdBySession(r) + if err != nil { + handleErr(w, r, err) + return + } + userCredentials, err := h.daService.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + allowedGroups := []string{types.UserGroupAdmin, types.UserGroupDev} + if !slices.Contains(allowedGroups, userCredentials.UserGroup) { + handleErr(w, r, newForbiddenErr("user is not allowed to use mock data")) + return + } + // store isMocked flag in context + ctx := r.Context() + ctx = context.WithValue(ctx, types.CtxIsMockedKey, true) + if mockSeed != 0 { + ctx = context.WithValue(ctx, types.CtxMockSeedKey, mockSeed) + } + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + }) +} diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index 3e80c2769..72813b142 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -4,19 +4,40 @@ import ( "context" "errors" "fmt" + "math" "net/http" - "reflect" "time" "github.com/gobitfly/beaconchain/pkg/api/enums" "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gorilla/mux" + "github.com/shopspring/decimal" ) // All handler function names must include the HTTP method and the path they handle // Public handlers may only be authenticated by an API key // Public handlers must never call internal handlers +// @title beaconcha.in API +// @version 2.0 +// @description To authenticate your API request beaconcha.in uses API Keys. Set your API Key either by: +// @description - Setting the `Authorization` header in the following format: `Authorization: Bearer `. (recommended) +// @description - Setting the URL query parameter in the following format: `api_key={your_api_key}`.\ +// @description Example: `https://beaconcha.in/api/v2/example?field=value&api_key={your_api_key}` + +// @BasePath /api/v2 + +// @securitydefinitions.apikey ApiKeyInHeader +// @in header +// @name Authorization +// @description Use your API key as a Bearer token, e.g. 
`Bearer ` + +// @securitydefinitions.apikey ApiKeyInQuery +// @in query +// @name api_key + func (h *HandlerService) PublicGetHealthz(w http.ResponseWriter, r *http.Request) { var v validationError showAll := v.checkBool(r.URL.Query().Get("show_all"), "show_all") @@ -26,7 +47,7 @@ func (h *HandlerService) PublicGetHealthz(w http.ResponseWriter, r *http.Request } ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) defer cancel() - data := h.dai.GetHealthz(ctx, showAll) + data := h.getDataAccessor(r).GetHealthz(ctx, showAll) responseCode := http.StatusOK if data.TotalOkPercentage != 1 { @@ -39,13 +60,21 @@ func (h *HandlerService) PublicGetHealthzLoadbalancer(w http.ResponseWriter, r * returnOk(w, r, nil) } +// PublicGetUserDashboards godoc +// +// @Description Get all dashboards of the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Dashboards +// @Produce json +// @Success 200 {object} types.ApiDataResponse[types.UserDashboardsData] +// @Router /users/me/dashboards [get] func (h *HandlerService) PublicGetUserDashboards(w http.ResponseWriter, r *http.Request) { userId, err := GetUserIdByContext(r) if err != nil { handleErr(w, r, err) return } - data, err := h.dai.GetUserDashboards(r.Context(), userId) + data, err := h.getDataAccessor(r).GetUserDashboards(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -112,6 +141,18 @@ func (h *HandlerService) PublicPutAccountDashboardTransactionsSettings(w http.Re returnOk(w, r, nil) } +// PublicPostValidatorDashboards godoc +// +// @Description Create a new validator dashboard. **Note**: New dashboards will automatically have a default group created. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param request body handlers.PublicPostValidatorDashboards.request true "`name`: Specify the name of the dashboard.
`network`: Specify the network for the dashboard. Possible options are:
  • `ethereum`
  • `gnosis`
" +// @Success 201 {object} types.ApiDataResponse[types.VDBPostReturnData] +// @Failure 400 {object} types.ApiErrorResponse +// @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their dashboard limit." +// @Router /validator-dashboards [post] func (h *HandlerService) PublicPostValidatorDashboards(w http.ResponseWriter, r *http.Request) { var v validationError userId, err := GetUserIdByContext(r) @@ -136,22 +177,22 @@ func (h *HandlerService) PublicPostValidatorDashboards(w http.ResponseWriter, r return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - dashboardCount, err := h.dai.GetUserValidatorDashboardCount(r.Context(), userId, true) + dashboardCount, err := h.getDataAccessor(r).GetUserValidatorDashboardCount(r.Context(), userId, true) if err != nil { handleErr(w, r, err) return } - if dashboardCount >= userInfo.PremiumPerks.ValidatorDashboards && !isUserAdmin(userInfo) { + if dashboardCount >= userInfo.PremiumPerks.ValidatorDashboards { returnConflict(w, r, errors.New("maximum number of validator dashboards reached")) return } - data, err := h.dai.CreateValidatorDashboard(r.Context(), userId, name, chainId) + data, err := h.getDataAccessor(r).CreateValidatorDashboard(r.Context(), userId, name, chainId) if err != nil { handleErr(w, r, err) return @@ -162,6 +203,16 @@ func (h *HandlerService) PublicPostValidatorDashboards(w http.ResponseWriter, r returnCreated(w, r, response) } +// PublicGetValidatorDashboards godoc +// +// @Description Get overview information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." 
+// PublicGetValidatorDashboard godoc +// +// @Description Get overview information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool`." +// @Success 200 {object} types.GetValidatorDashboardResponse +// @Failure 400 {object} types.ApiErrorResponse "Bad Request" +// @Router /validator-dashboards/{dashboard_id} [get] func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *http.Request) { var v validationError dashboardIdParam := mux.Vars(r)["dashboard_id"] @@ -181,10 +232,10 @@ func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *h // set name depending on dashboard id var name string if reInteger.MatchString(dashboardIdParam) { - name, err = h.dai.GetValidatorDashboardName(r.Context(), dashboardId.Id) + name, err = h.getDataAccessor(r).GetValidatorDashboardName(r.Context(), dashboardId.Id) } else if reValidatorDashboardPublicId.MatchString(dashboardIdParam) { var publicIdInfo *types.VDBPublicId - publicIdInfo, err = h.dai.GetValidatorDashboardPublicId(r.Context(), types.VDBIdPublic(dashboardIdParam)) + publicIdInfo, err = h.getDataAccessor(r).GetValidatorDashboardPublicId(r.Context(), types.VDBIdPublic(dashboardIdParam)) name = publicIdInfo.Name } if err != nil { @@ -198,7 +249,7 @@ func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *h handleErr(w, r, err) return } - data, err := h.dai.GetValidatorDashboardOverview(r.Context(), *dashboardId, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardOverview(r.Context(), *dashboardId, protocolModes) if err != nil { handleErr(w, r, err) return @@ -213,6 +264,16 @@ func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *h returnOk(w, r, response) } +// PublicDeleteValidatorDashboard godoc +// +// @Description Delete a specified validator dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Success 204 "Dashboard deleted successfully." +// @Failure 400 {object} types.ApiErrorResponse "Bad Request" +// @Router /validator-dashboards/{dashboard_id} [delete] func (h *HandlerService) PublicDeleteValidatorDashboard(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) @@ -220,7 +281,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboard(w http.ResponseWriter, r handleErr(w, r, v) return } - err := h.dai.RemoveValidatorDashboard(r.Context(), dashboardId) + err := h.getDataAccessor(r).RemoveValidatorDashboard(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -228,6 +289,18 @@ func (h *HandlerService) PublicDeleteValidatorDashboard(w http.ResponseWriter, r returnNoContent(w, r) }
+// PublicPutValidatorDashboardName godoc +// +// @Description Update the name of a specified validator dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param request body handlers.PublicPutValidatorDashboardName.request true "request" +// @Success 200 {object} types.ApiDataResponse[types.VDBPostReturnData] +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/name [put] func (h *HandlerService) PublicPutValidatorDashboardName(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) @@ -244,7 +317,7 @@ func (h *HandlerService) PublicPutValidatorDashboardName(w http.ResponseWriter, handleErr(w, r, v) return } - data, err := h.dai.UpdateValidatorDashboardName(r.Context(), dashboardId, name) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardName(r.Context(), dashboardId, name) if err != nil { handleErr(w, r, err) return @@ -255,6 +328,19 @@ func (h *HandlerService) PublicPutValidatorDashboardName(w http.ResponseWriter, returnOk(w, r, response) } +// PublicPostValidatorDashboardGroups godoc +// +// @Description Create a new group in a specified validator dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param request body handlers.PublicPostValidatorDashboardGroups.request true "request" +// @Success 201 {object} types.ApiDataResponse[types.VDBPostCreateGroupData] +// @Failure 400 {object} types.ApiErrorResponse +// @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their group limit." +// @Router /validator-dashboards/{dashboard_id}/groups [post] func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) @@ -278,22 +364,22 @@ func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWrite handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(ctx, userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(ctx, userId) if err != nil { handleErr(w, r, err) return } - groupCount, err := h.dai.GetValidatorDashboardGroupCount(ctx, dashboardId) + groupCount, err := h.getDataAccessor(r).GetValidatorDashboardGroupCount(ctx, dashboardId) if err != nil { handleErr(w, r, err) return } - if groupCount >= userInfo.PremiumPerks.ValidatorGroupsPerDashboard && !isUserAdmin(userInfo) { + if groupCount >= userInfo.PremiumPerks.ValidatorGroupsPerDashboard { returnConflict(w, r, errors.New("maximum number of validator dashboard groups reached")) return } - data, err := h.dai.CreateValidatorDashboardGroup(ctx, dashboardId, name) + data, err := h.getDataAccessor(r).CreateValidatorDashboardGroup(ctx, dashboardId, name) if err != nil { handleErr(w, r, err) return @@ -306,6 +392,19 @@ func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWrite returnCreated(w, r, response) }
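As a usage illustration for the group-creation endpoint above: the `name` body field is inferred from the handler's name validation, and the host, dashboard ID, and key are placeholders rather than values pinned down by this patch.

// Illustrative only: create a group on dashboard 123.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	body := bytes.NewBufferString(`{"name": "mainnet validators"}`) // field name assumed
	req, err := http.NewRequest(http.MethodPost,
		"https://beaconcha.in/api/v2/validator-dashboards/123/groups", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <your_api_key>")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Expect 201 Created on success, 409 Conflict once the plan's group limit is reached.
	fmt.Println(resp.Status)
}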
+// PublicPutValidatorDashboardGroups godoc +// +// @Description Update a group's name in a specified validator dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." +// @Param request body handlers.PublicPutValidatorDashboardGroups.request true "request" +// @Success 200 {object} types.ApiDataResponse[types.VDBPostCreateGroupData] +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups/{group_id} [put] func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -324,7 +423,7 @@ func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter handleErr(w, r, v) return } - groupExists, err := h.dai.GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) + groupExists, err := h.getDataAccessor(r).GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -333,7 +432,7 @@ func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter returnNotFound(w, r, errors.New("group not found")) return } - data, err := h.dai.UpdateValidatorDashboardGroup(r.Context(), dashboardId, groupId, name) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardGroup(r.Context(), dashboardId, groupId, name) if err != nil { handleErr(w, r, err) return @@ -346,6 +445,18 @@ func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter returnOk(w, r, response) } +// PublicDeleteValidatorDashboardGroup godoc +// +// @Description Delete a group in a specified validator dashboard. +// @Tags Validator Dashboard Management +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Accept json +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." +// @Success 204 "Group deleted successfully." +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups/{group_id} [delete] func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -359,7 +470,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWrit returnBadRequest(w, r, errors.New("cannot delete default group")) return } - groupExists, err := h.dai.GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) + groupExists, err := h.getDataAccessor(r).GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -368,7 +479,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWrit returnNotFound(w, r, errors.New("group not found")) return } - err = h.dai.RemoveValidatorDashboardGroup(r.Context(), dashboardId, groupId) + err = h.getDataAccessor(r).RemoveValidatorDashboardGroup(r.Context(), dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -377,6 +488,18 @@ func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWrit returnNoContent(w, r) }
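The management handlers above are meant to sit behind the middlewares introduced in middlewares.go. A hedged wiring sketch follows (assumed to live in package handlers; the real route registration is elsewhere in pkg/api and may differ):

// Hypothetical chain: API-key auth first, premium-perk gate next,
// dashboard-ownership check innermost, then the handler itself.
func registerGroupDeleteRoute(h *HandlerService, r *mux.Router) {
	endpoint := http.HandlerFunc(h.PublicDeleteValidatorDashboardGroup)
	r.Handle("/validator-dashboards/{dashboard_id}/groups/{group_id}",
		h.StoreUserIdByApiKeyMiddleware( // resolves the user id from the API key
			h.ManageDashboardsViaApiCheckMiddleware( // requires the ManageDashboardViaApi perk
				h.VDBAuthMiddleware(endpoint)))). // returns 404 for foreign dashboards
		Methods(http.MethodDelete)
}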
+// PublicPostValidatorDashboardValidators godoc +// +// @Description Add new validators to a specified dashboard or update the group of already-added validators. This endpoint will always add as many validators as possible, even if more validators are provided than allowed by the subscription plan. The response will contain a list of added validators. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param request body handlers.PublicPostValidatorDashboardValidators.request true "`group_id`: (optional) Provide a single group ID to which all validators will be added. If omitted, the default group will be used.

To add validators or update their group, only one of the following fields can be set:
  • `validators`: Provide a list of validator indices or public keys.
  • `deposit_address`: (limited to subscription tiers with 'Bulk adding') Provide a deposit address from which as many validators as possible will be added to the dashboard.
  • `withdrawal_address`: (limited to subscription tiers with 'Bulk adding') Provide a withdrawal address from which as many validators as possible will be added to the dashboard.
  • `graffiti`: (limited to subscription tiers with 'Bulk adding') Provide a graffiti string from which as many validators as possible will be added to the dashboard.
" +// @Success 201 {object} types.ApiDataResponse[[]types.VDBPostValidatorsData] "Returns a list of added validators." +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/validators [post] func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) @@ -387,7 +510,9 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW WithdrawalAddress string `json:"withdrawal_address,omitempty"` Graffiti string `json:"graffiti,omitempty"` } - var req request + req := request{ + GroupId: types.DefaultGroupId, // default value + } if err := v.checkBody(&req, r); err != nil { handleErr(w, r, err) return @@ -396,11 +521,17 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } + groupId := req.GroupId // check if exactly one of validators, deposit_address, withdrawal_address, graffiti is set - fields := []interface{}{req.Validators, req.DepositAddress, req.WithdrawalAddress, req.Graffiti} + nilFields := []bool{ + req.Validators == nil, + req.DepositAddress == "", + req.WithdrawalAddress == "", + req.Graffiti == "", + } var count int - for _, set := range fields { - if !reflect.ValueOf(set).IsZero() { + for _, isNil := range nilFields { + if !isNil { count++ } } @@ -412,9 +543,8 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW return } - groupId := req.GroupId ctx := r.Context() - groupExists, err := h.dai.GetValidatorDashboardGroupExists(ctx, dashboardId, groupId) + groupExists, err := h.getDataAccessor(r).GetValidatorDashboardGroupExists(ctx, dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -428,16 +558,28 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(ctx, userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(ctx, userId) if err != nil { handleErr(w, r, err) return } - limit := userInfo.PremiumPerks.ValidatorsPerDashboard - if req.Validators == nil && !userInfo.PremiumPerks.BulkAdding && !isUserAdmin(userInfo) { - returnConflict(w, r, errors.New("bulk adding not allowed with current subscription plan")) + if req.Validators == nil && !userInfo.PremiumPerks.BulkAdding { + returnForbidden(w, r, errors.New("bulk adding not allowed with current subscription plan")) + return + } + dashboardLimit := userInfo.PremiumPerks.ValidatorsPerDashboard + existingValidatorCount, err := h.getDataAccessor(r).GetValidatorDashboardValidatorsCount(ctx, dashboardId) + if err != nil { + handleErr(w, r, err) return } + var limit uint64 + if isUserAdmin(userInfo) { + limit = math.MaxUint32 // no limit for admins + } else if dashboardLimit >= existingValidatorCount { + limit = dashboardLimit - existingValidatorCount + } + var data []types.VDBPostValidatorsData var dataErr error switch { @@ -447,22 +589,15 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - validators, err := h.dai.GetValidatorsFromSlices(indices, pubkeys) + validators, err := h.getDataAccessor(r).GetValidatorsFromSlices(ctx, indices, pubkeys) if err != nil { handleErr(w, r, err) return } - // check if adding more validators than allowed - existingValidatorCount, err := h.dai.GetValidatorDashboardExistingValidatorCount(ctx, dashboardId, validators) - if err != nil { - 
handleErr(w, r, err) - return - } - if uint64(len(validators)) > existingValidatorCount+limit { - returnConflict(w, r, fmt.Errorf("adding more validators than allowed, limit is %v new validators", limit)) - return + if len(validators) > int(limit) { + validators = validators[:limit] } - data, dataErr = h.dai.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validators) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidators(ctx, dashboardId, groupId, validators) case req.DepositAddress != "": depositAddress := v.checkRegex(reEthereumAddress, req.DepositAddress, "deposit_address") @@ -470,7 +605,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByDepositAddress(ctx, dashboardId, groupId, depositAddress, limit) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidatorsByDepositAddress(ctx, dashboardId, groupId, depositAddress, limit) case req.WithdrawalAddress != "": withdrawalAddress := v.checkRegex(reWithdrawalCredential, req.WithdrawalAddress, "withdrawal_address") @@ -478,15 +613,15 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByWithdrawalAddress(ctx, dashboardId, groupId, withdrawalAddress, limit) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidatorsByWithdrawalAddress(ctx, dashboardId, groupId, withdrawalAddress, limit) case req.Graffiti != "": - graffiti := v.checkRegex(reNonEmpty, req.Graffiti, "graffiti") + graffiti := v.checkRegex(reGraffiti, req.Graffiti, "graffiti") if v.hasErrors() { handleErr(w, r, v) return } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByGraffiti(ctx, dashboardId, groupId, graffiti, limit) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidatorsByGraffiti(ctx, dashboardId, groupId, graffiti, limit) } if dataErr != nil { @@ -500,6 +635,19 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW returnCreated(w, r, response) } +// PublicGetValidatorDashboardValidators godoc +// +// @Description Get a list of validators in a specified validator dashboard. +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id query integer false "The ID of the group." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(index, public_key, balance, status, withdrawal_credentials) +// @Param search query string false "Search for Address, ENS." 
+// @Success 200 {object} types.GetValidatorDashboardValidatorsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/validators [get] func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -515,7 +663,7 @@ func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWr handleErr(w, r, v) return } - data, paging, err := h.dai.GetValidatorDashboardValidators(r.Context(), *dashboardId, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardValidators(r.Context(), *dashboardId, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -527,29 +675,41 @@ func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWr returnOk(w, r, response) } +// PublicDeleteValidatorDashboardValidators godoc +// +// @Description Remove validators from a specified dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param request body handlers.PublicDeleteValidatorDashboardValidators.request true "`validators`: Provide an array of validator indices or public keys that should be removed from the dashboard." +// @Success 204 "Validators removed successfully." +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/validators/bulk-deletions [post] func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - var indices []uint64 - var publicKeys []string - req := struct { + type request struct { Validators []intOrString `json:"validators"` - }{} + } + var req request if err := v.checkBody(&req, r); err != nil { handleErr(w, r, err) return } - indices, publicKeys = v.checkValidators(req.Validators, false) + indices, publicKeys := v.checkValidators(req.Validators, forbidEmpty) if v.hasErrors() { handleErr(w, r, v) return } - validators, err := h.dai.GetValidatorsFromSlices(indices, publicKeys) + ctx := r.Context() + validators, err := h.getDataAccessor(r).GetValidatorsFromSlices(ctx, indices, publicKeys) if err != nil { handleErr(w, r, err) return } - err = h.dai.RemoveValidatorDashboardValidators(r.Context(), dashboardId, validators) + err = h.getDataAccessor(r).RemoveValidatorDashboardValidators(ctx, dashboardId, validators) if err != nil { handleErr(w, r, err) return @@ -558,6 +718,19 @@ func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.Respons returnNoContent(w, r) } +// PublicPostValidatorDashboardPublicIds godoc +// +// @Description Create a new public ID for a specified dashboard. This can be used as an ID by other users for non-modifying (i.e. GET) endpoints only. Currently limited to one per dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param request body handlers.PublicPostValidatorDashboardPublicIds.request true "`name`: Provide a public name for the dashboard
`share_settings`:
  • `share_groups`: If set to `true`, accessing the dashboard through the public ID will not reveal any group information.
" +// @Success 201 {object} types.ApiDataResponse[types.VDBPublicId] +// @Failure 400 {object} types.ApiErrorResponse +// @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their public ID limit." +// @Router /validator-dashboards/{dashboard_id}/public-ids [post] func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) @@ -577,7 +750,7 @@ func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWr handleErr(w, r, v) return } - publicIdCount, err := h.dai.GetValidatorDashboardPublicIdCount(r.Context(), dashboardId) + publicIdCount, err := h.getDataAccessor(r).GetValidatorDashboardPublicIdCount(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -587,18 +760,31 @@ func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWr return } - data, err := h.dai.CreateValidatorDashboardPublicId(r.Context(), dashboardId, name, req.ShareSettings.ShareGroups) + data, err := h.getDataAccessor(r).CreateValidatorDashboardPublicId(r.Context(), dashboardId, name, req.ShareSettings.ShareGroups) if err != nil { handleErr(w, r, err) return } - response := types.ApiResponse{ - Data: data, + response := types.ApiDataResponse[types.VDBPublicId]{ + Data: *data, } returnCreated(w, r, response) } +// PublicPutValidatorDashboardPublicId godoc +// +// @Description Update a specified public ID for a specified dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param public_id path string true "The ID of the public ID." +// @Param request body handlers.PublicPutValidatorDashboardPublicId.request true "`name`: Provide a public name for the dashboard
`share_settings`:
  • `share_groups`: If set to `true`, accessing the dashboard through the public ID will not reveal any group information.
" +// @Success 200 {object} types.ApiDataResponse[types.VDBPublicId] +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/public-ids/{public_id} [put] func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -620,7 +806,7 @@ func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWrit handleErr(w, r, v) return } - fetchedId, err := h.dai.GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) + fetchedId, err := h.getDataAccessor(r).GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) if err != nil { handleErr(w, r, err) return @@ -630,18 +816,29 @@ func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWrit return } - data, err := h.dai.UpdateValidatorDashboardPublicId(r.Context(), publicDashboardId, name, req.ShareSettings.ShareGroups) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardPublicId(r.Context(), publicDashboardId, name, req.ShareSettings.ShareGroups) if err != nil { handleErr(w, r, err) return } - response := types.ApiResponse{ - Data: data, + response := types.ApiDataResponse[types.VDBPublicId]{ + Data: *data, } returnOk(w, r, response) } +// PublicDeleteValidatorDashboardPublicId godoc +// +// @Description Delete a specified public ID for a specified dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param public_id path string true "The ID of the public ID." +// @Success 204 "Public ID deleted successfully." +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/public-ids/{public_id} [delete] func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -651,7 +848,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW handleErr(w, r, v) return } - fetchedId, err := h.dai.GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) + fetchedId, err := h.getDataAccessor(r).GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) if err != nil { handleErr(w, r, err) return @@ -661,7 +858,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW return } - err = h.dai.RemoveValidatorDashboardPublicId(r.Context(), publicDashboardId) + err = h.getDataAccessor(r).RemoveValidatorDashboardPublicId(r.Context(), publicDashboardId) if err != nil { handleErr(w, r, err) return @@ -670,12 +867,26 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW returnNoContent(w, r) } +// PublicPutValidatorDashboardArchiving godoc +// +// @Description Archive or unarchive a specified validator dashboard. Archived dashboards cannot be accessed by other endpoints. Archiving happens automatically if the number of dashboards, validators, or groups exceeds the limit allowed by your subscription plan. For example, this might occur if you downgrade your subscription to a lower tier. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path integer true "The ID of the dashboard." 
+// @Param request body handlers.PublicPutValidatorDashboardArchiving.request true "`is_archived`: Set to `true` to archive the dashboard, or `false` to unarchive it." +// @Success 200 {object} types.ApiDataResponse[types.VDBPostArchivingReturnData] +// @Failure 400 {object} types.ApiErrorResponse +// @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their subscription limit." +// @Router /validator-dashboards/{dashboard_id}/archiving [put] func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - req := struct { + type request struct { IsArchived bool `json:"is_archived"` - }{} + } + var req request if err := v.checkBody(&req, r); err != nil { handleErr(w, r, err) return @@ -686,7 +897,7 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri } // check conditions for changing archival status - dashboardInfo, err := h.dai.GetValidatorDashboardInfo(r.Context(), dashboardId) + dashboardInfo, err := h.getDataAccessor(r).GetValidatorDashboardInfo(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -704,36 +915,34 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri handleErr(w, r, err) return } - dashboardCount, err := h.dai.GetUserValidatorDashboardCount(r.Context(), userId, !req.IsArchived) + dashboardCount, err := h.getDataAccessor(r).GetUserValidatorDashboardCount(r.Context(), userId, !req.IsArchived) if err != nil { handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - if !isUserAdmin(userInfo) { - if req.IsArchived { - if dashboardCount >= MaxArchivedDashboardsCount { - returnConflict(w, r, errors.New("maximum number of archived validator dashboards reached")) - return - } - } else { - if dashboardCount >= userInfo.PremiumPerks.ValidatorDashboards { - returnConflict(w, r, errors.New("maximum number of active validator dashboards reached")) - return - } - if dashboardInfo.GroupCount >= userInfo.PremiumPerks.ValidatorGroupsPerDashboard { - returnConflict(w, r, errors.New("maximum number of groups in dashboards reached")) - return - } - if dashboardInfo.ValidatorCount >= userInfo.PremiumPerks.ValidatorsPerDashboard { - returnConflict(w, r, errors.New("maximum number of validators in dashboards reached")) - return - } + if req.IsArchived { + if dashboardCount >= MaxArchivedDashboardsCount && !isUserAdmin(userInfo) { + returnConflict(w, r, errors.New("maximum number of archived validator dashboards reached")) + return + } + } else { + if dashboardCount >= userInfo.PremiumPerks.ValidatorDashboards { + returnConflict(w, r, errors.New("maximum number of active validator dashboards reached")) + return + } + if dashboardInfo.GroupCount >= userInfo.PremiumPerks.ValidatorGroupsPerDashboard { + returnConflict(w, r, errors.New("maximum number of groups in dashboards reached")) + return + } + if dashboardInfo.ValidatorCount >= userInfo.PremiumPerks.ValidatorsPerDashboard { + returnConflict(w, r, errors.New("maximum number of validators in dashboards reached")) + return } } @@ -742,7 +951,7 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri archivedReason = &enums.VDBArchivedReasons.User } - data, 
err := h.dai.UpdateValidatorDashboardArchiving(r.Context(), dashboardId, archivedReason) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardArchiving(r.Context(), dashboardId, archivedReason) if err != nil { handleErr(w, r, err) return @@ -753,6 +962,16 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri returnOk(w, r, response) } +// PublicGetValidatorDashboardSlotViz godoc +// +// @Description Get slot viz information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_ids query string false "Provide a comma separated list of group IDs to filter the results by. If omitted, all groups will be included." +// @Success 200 {object} types.GetValidatorDashboardSlotVizResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/slot-viz [get] func (h *HandlerService) PublicGetValidatorDashboardSlotViz(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -766,7 +985,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSlotViz(w http.ResponseWrite handleErr(w, r, v) return } - data, err := h.dai.GetValidatorDashboardSlotViz(r.Context(), *dashboardId, groupIds) + data, err := h.getDataAccessor(r).GetValidatorDashboardSlotViz(r.Context(), *dashboardId, groupIds) if err != nil { handleErr(w, r, err) return @@ -778,6 +997,21 @@ func (h *HandlerService) PublicGetValidatorDashboardSlotViz(w http.ResponseWrite returnOk(w, r, response) }
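As a usage illustration for read endpoints such as the summary endpoint documented below: both documented authentication options apply, with the header form recommended. The host, dashboard ID, and key are placeholders, not values taken from this patch.

// Illustrative only: fetch the last-24h summary for dashboard 123.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	url := "https://beaconcha.in/api/v2/validator-dashboards/123/summary?period=last_24h&limit=10"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	// Recommended header auth; appending &api_key={your_api_key} to the URL
	// is the documented alternative.
	req.Header.Set("Authorization", "Bearer <your_api_key>")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	payload, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(payload))
}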
+// PublicGetValidatorDashboardSummary godoc +// +// @Description Get summary information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param period query string true "Time period to get data for." Enums(all_time, last_30d, last_7d, last_24h, last_1h) +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor` value of the previous response to navigate forward, or pass the `paging.prev_cursor` value of the previous response to navigate backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(group_id, validators, efficiency, attestations, proposals, reward) +// @Param search query string false "Search for Index, Public Key, Group." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool`." +// @Success 200 {object} types.GetValidatorDashboardSummaryResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/summary [get] func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -791,15 +1025,12 @@ func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWrite protocolModes := v.checkProtocolModes(q.Get("modes")) period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") - // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - v.checkEnumIsAllowed(period, allowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return } - data, paging, err := h.dai.GetValidatorDashboardSummary(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardSummary(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -811,6 +1042,18 @@ func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWrite returnOk(w, r, response) } +// PublicGetValidatorDashboardGroupSummary godoc +// +// @Description Get summary information for a specified group in a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." +// @Param period query string true "Time period to get data for." Enums(all_time, last_30d, last_7d, last_24h, last_1h) +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool`."
+// @Success 200 {object} types.GetValidatorDashboardGroupSummaryResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups/{group_id}/summary [get] func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -827,15 +1070,12 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.Response } groupId := v.checkGroupId(vars["group_id"], forbidEmpty) period := checkEnum[enums.TimePeriod](&v, r.URL.Query().Get("period"), "period") - // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - v.checkEnumIsAllowed(period, allowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return } - data, err := h.dai.GetValidatorDashboardGroupSummary(r.Context(), *dashboardId, groupId, period, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardGroupSummary(r.Context(), *dashboardId, groupId, period, protocolModes) if err != nil { handleErr(w, r, err) return @@ -846,6 +1086,20 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.Response returnOk(w, r, response) } +// PublicGetValidatorDashboardSummaryChart godoc +// +// @Description Get summary chart data for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_ids query string false "Provide a comma separated list of group IDs to filter the results by." +// @Param efficiency_type query string false "Efficiency type to get data for." Enums(all, attestation, sync, proposal) +// @Param aggregation query string false "Aggregation type to get data for." Enums(epoch, hourly, daily, weekly) Default(hourly) +// @Param after_ts query string false "Return data after this timestamp." +// @Param before_ts query string false "Return data before this timestamp." +// @Success 200 {object} types.GetValidatorDashboardSummaryChartResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/summary-chart [get] func (h *HandlerService) PublicGetValidatorDashboardSummaryChart(w http.ResponseWriter, r *http.Request) { var v validationError ctx := r.Context() @@ -874,7 +1128,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryChart(w http.Response return } - data, err := h.dai.GetValidatorDashboardSummaryChart(ctx, *dashboardId, groupIds, efficiencyType, aggregation, afterTs, beforeTs) + data, err := h.getDataAccessor(r).GetValidatorDashboardSummaryChart(ctx, *dashboardId, groupIds, efficiencyType, aggregation, afterTs, beforeTs) if err != nil { handleErr(w, r, err) return @@ -885,6 +1139,18 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryChart(w http.Response returnOk(w, r, response) } +// PublicGetValidatorDashboardSummaryValidators godoc +// +// @Description Get summary information for validators in a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id query integer false "The ID of the group." +// @Param duty query string false "Validator duty to get data for." Enums(none, sync, slashed, proposal) Default(none) +// @Param period query string true "Time period to get data for." 
Enums(all_time, last_30d, last_7d, last_24h, last_1h) +// @Success 200 {object} types.GetValidatorDashboardSummaryValidatorsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/summary/validators [get] func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -896,9 +1162,6 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.Res q := r.URL.Query() duty := checkEnum[enums.ValidatorDuty](&v, q.Get("duty"), "duty") period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") - // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - v.checkEnumIsAllowed(period, allowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return @@ -909,13 +1172,13 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.Res duties := enums.ValidatorDuties switch duty { case duties.None: - indices, err = h.dai.GetValidatorDashboardSummaryValidators(r.Context(), *dashboardId, groupId) + indices, err = h.getDataAccessor(r).GetValidatorDashboardSummaryValidators(r.Context(), *dashboardId, groupId) case duties.Sync: - indices, err = h.dai.GetValidatorDashboardSyncSummaryValidators(r.Context(), *dashboardId, groupId, period) + indices, err = h.getDataAccessor(r).GetValidatorDashboardSyncSummaryValidators(r.Context(), *dashboardId, groupId, period) case duties.Slashed: - indices, err = h.dai.GetValidatorDashboardSlashingsSummaryValidators(r.Context(), *dashboardId, groupId, period) + indices, err = h.getDataAccessor(r).GetValidatorDashboardSlashingsSummaryValidators(r.Context(), *dashboardId, groupId, period) case duties.Proposal: - indices, err = h.dai.GetValidatorDashboardProposalSummaryValidators(r.Context(), *dashboardId, groupId, period) + indices, err = h.getDataAccessor(r).GetValidatorDashboardProposalSummaryValidators(r.Context(), *dashboardId, groupId, period) } if err != nil { handleErr(w, r, err) @@ -935,6 +1198,20 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.Res returnOk(w, r, response) }
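The cursor parameters on the rewards endpoint documented below follow a cursor-paging pattern: send no cursor for the first page, then feed `paging.next_cursor` back until it comes back empty. A sketch of a client-side loop follows; the response envelope (`data` plus `paging.next_cursor`) is inferred from the parameter descriptions and is an assumption.

// Assumes imports: encoding/json, net/http, net/url.
type rewardsPage struct {
	Data   json.RawMessage `json:"data"`
	Paging struct {
		NextCursor string `json:"next_cursor"` // field name assumed from the docs
	} `json:"paging"`
}

func fetchAllRewards(apiKey string) error {
	base := "https://beaconcha.in/api/v2/validator-dashboards/123/rewards?limit=100"
	cursor := ""
	for {
		u := base
		if cursor != "" {
			u += "&cursor=" + url.QueryEscape(cursor)
		}
		req, err := http.NewRequest(http.MethodGet, u, nil)
		if err != nil {
			return err
		}
		req.Header.Set("Authorization", "Bearer "+apiKey)
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		var page rewardsPage
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return err
		}
		// ... process page.Data ...
		if page.Paging.NextCursor == "" {
			return nil // last page reached
		}
		cursor = page.Paging.NextCursor
	}
}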
+// @Success 200 {object} types.GetValidatorDashboardRewardsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/rewards [get] func (h *HandlerService) PublicGetValidatorDashboardRewards(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -951,7 +1228,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRewards(w http.ResponseWrite return } - data, paging, err := h.dai.GetValidatorDashboardRewards(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardRewards(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -963,6 +1240,18 @@ func (h *HandlerService) PublicGetValidatorDashboardRewards(w http.ResponseWrite returnOk(w, r, response) } +// PublicGetValidatorDashboardGroupRewards godoc +// +// @Description Get rewards information for a specified group in a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." +// @Param epoch path integer true "The epoch to get data for." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardGroupRewardsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups/{group_id}/rewards/{epoch} [get] func (h *HandlerService) PublicGetValidatorDashboardGroupRewards(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -980,7 +1269,7 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupRewards(w http.Response return } - data, err := h.dai.GetValidatorDashboardGroupRewards(r.Context(), *dashboardId, groupId, epoch, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardGroupRewards(r.Context(), *dashboardId, groupId, epoch, protocolModes) if err != nil { handleErr(w, r, err) return @@ -991,6 +1280,16 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupRewards(w http.Response returnOk(w, r, response) } +// PublicGetValidatorDashboardRewardsChart godoc +// +// @Description Get rewards chart data for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." 
+// @Success 200 {object} types.GetValidatorDashboardRewardsChartResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/rewards-chart [get] func (h *HandlerService) PublicGetValidatorDashboardRewardsChart(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -1006,7 +1305,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRewardsChart(w http.Response return } - data, err := h.dai.GetValidatorDashboardRewardsChart(r.Context(), *dashboardId, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardRewardsChart(r.Context(), *dashboardId, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1017,6 +1316,22 @@ func (h *HandlerService) PublicGetValidatorDashboardRewardsChart(w http.Response returnOk(w, r, response) } +// PublicGetValidatorDashboardDuties godoc +// +// @Description Get duties information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param epoch path integer true "The epoch to get data for." +// @Param group_id query integer false "The ID of the group." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(validator, reward) +// @Param search query string false "Search for Index, Public Key." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardDutiesResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/duties/{epoch} [get] func (h *HandlerService) PublicGetValidatorDashboardDuties(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -1036,7 +1351,7 @@ func (h *HandlerService) PublicGetValidatorDashboardDuties(w http.ResponseWriter return } - data, paging, err := h.dai.GetValidatorDashboardDuties(r.Context(), *dashboardId, epoch, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardDuties(r.Context(), *dashboardId, epoch, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1048,6 +1363,20 @@ func (h *HandlerService) PublicGetValidatorDashboardDuties(w http.ResponseWriter returnOk(w, r, response) } +// PublicGetValidatorDashboardBlocks godoc +// +// @Description Get blocks information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." 
+// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(proposer, slot, block, status, reward) +// @Param search query string false "Search for Index, Public Key, Group." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardBlocksResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/blocks [get] func (h *HandlerService) PublicGetValidatorDashboardBlocks(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1064,7 +1393,7 @@ func (h *HandlerService) PublicGetValidatorDashboardBlocks(w http.ResponseWriter return } - data, paging, err := h.dai.GetValidatorDashboardBlocks(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardBlocks(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1076,6 +1405,19 @@ func (h *HandlerService) PublicGetValidatorDashboardBlocks(w http.ResponseWriter returnOk(w, r, response) } +// PublicGetValidatorDashboardHeatmap godoc +// +// @Description Get heatmap information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param aggregation query string false "Aggregation type to get data for." Enums(epoch, hourly, daily, weekly) Default(hourly) +// @Param after_ts query string false "Return data after this timestamp." +// @Param before_ts query string false "Return data before this timestamp." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardHeatmapResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/heatmap [get] func (h *HandlerService) PublicGetValidatorDashboardHeatmap(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1101,7 +1443,7 @@ func (h *HandlerService) PublicGetValidatorDashboardHeatmap(w http.ResponseWrite return } - data, err := h.dai.GetValidatorDashboardHeatmap(r.Context(), *dashboardId, protocolModes, aggregation, afterTs, beforeTs) + data, err := h.getDataAccessor(r).GetValidatorDashboardHeatmap(r.Context(), *dashboardId, protocolModes, aggregation, afterTs, beforeTs) if err != nil { handleErr(w, r, err) return @@ -1112,6 +1454,19 @@ func (h *HandlerService) PublicGetValidatorDashboardHeatmap(w http.ResponseWrite returnOk(w, r, response) } +// PublicGetValidatorDashboardGroupHeatmap godoc +// +// @Description Get heatmap information for a specified group in a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." +// @Param timestamp path integer true "The timestamp to get data for." 
+// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Param aggregation query string false "Aggregation type to get data for." Enums(epoch, hourly, daily, weekly) Default(hourly) +// @Success 200 {object} types.GetValidatorDashboardGroupHeatmapResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups/{group_id}/heatmap/{timestamp} [get] func (h *HandlerService) PublicGetValidatorDashboardGroupHeatmap(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -1138,7 +1493,7 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupHeatmap(w http.Response return } - data, err := h.dai.GetValidatorDashboardGroupHeatmap(r.Context(), *dashboardId, groupId, protocolModes, aggregation, requestedTimestamp) + data, err := h.getDataAccessor(r).GetValidatorDashboardGroupHeatmap(r.Context(), *dashboardId, groupId, protocolModes, aggregation, requestedTimestamp) if err != nil { handleErr(w, r, err) return @@ -1149,6 +1504,17 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupHeatmap(w http.Response returnOk(w, r, response) } +// PublicGetValidatorDashboardExecutionLayerDeposits godoc +// +// @Description Get execution layer deposits information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Success 200 {object} types.GetValidatorDashboardExecutionLayerDepositsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/execution-layer-deposits [get] func (h *HandlerService) PublicGetValidatorDashboardExecutionLayerDeposits(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1162,7 +1528,7 @@ func (h *HandlerService) PublicGetValidatorDashboardExecutionLayerDeposits(w htt return } - data, paging, err := h.dai.GetValidatorDashboardElDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardElDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1174,6 +1540,17 @@ func (h *HandlerService) PublicGetValidatorDashboardExecutionLayerDeposits(w htt returnOk(w, r, response) } +// PublicGetValidatorDashboardConsensusLayerDeposits godoc +// +// @Description Get consensus layer deposits information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." 
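+// The cursor/limit parameters above follow the same contract on every paginated
+// endpoint in this file. As a minimal client-side sketch (illustrative only;
+// `fetch` is a hypothetical stand-in for any paginated call), keep passing the
+// returned `paging.next_cursor` until it comes back empty:
+//
+//	func drainPages[T any](fetch func(cursor string) ([]T, string, error)) ([]T, error) {
+//		var all []T
+//		cursor := ""
+//		for {
+//			items, next, err := fetch(cursor) // next is the response's paging.next_cursor
+//			if err != nil {
+//				return nil, err
+//			}
+//			all = append(all, items...)
+//			if next == "" {
+//				return all, nil // an empty cursor marks the last page
+//			}
+//			cursor = next
+//		}
+//	}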
+// @Success 200 {object} types.GetValidatorDashboardConsensusLayerDepositsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/consensus-layer-deposits [get] func (h *HandlerService) PublicGetValidatorDashboardConsensusLayerDeposits(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1187,7 +1564,7 @@ func (h *HandlerService) PublicGetValidatorDashboardConsensusLayerDeposits(w htt return } - data, paging, err := h.dai.GetValidatorDashboardClDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardClDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1200,6 +1577,15 @@ func (h *HandlerService) PublicGetValidatorDashboardConsensusLayerDeposits(w htt returnOk(w, r, response) } +// PublicGetValidatorDashboardTotalConsensusLayerDeposits godoc +// +// @Description Get total consensus layer deposits information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Success 200 {object} types.GetValidatorDashboardTotalConsensusDepositsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/total-consensus-layer-deposits [get] func (h *HandlerService) PublicGetValidatorDashboardTotalConsensusLayerDeposits(w http.ResponseWriter, r *http.Request) { var err error dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1207,7 +1593,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalConsensusLayerDeposits( handleErr(w, r, err) return } - data, err := h.dai.GetValidatorDashboardTotalClDeposits(r.Context(), *dashboardId) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalClDeposits(r.Context(), *dashboardId) if err != nil { handleErr(w, r, err) return @@ -1219,6 +1605,15 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalConsensusLayerDeposits( returnOk(w, r, response) } +// PublicGetValidatorDashboardTotalExecutionLayerDeposits godoc +// +// @Description Get total execution layer deposits information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." 
+// @Success 200 {object} types.GetValidatorDashboardTotalExecutionDepositsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/total-execution-layer-deposits [get] func (h *HandlerService) PublicGetValidatorDashboardTotalExecutionLayerDeposits(w http.ResponseWriter, r *http.Request) { var err error dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1226,7 +1621,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalExecutionLayerDeposits( handleErr(w, r, err) return } - data, err := h.dai.GetValidatorDashboardTotalElDeposits(r.Context(), *dashboardId) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalElDeposits(r.Context(), *dashboardId) if err != nil { handleErr(w, r, err) return @@ -1238,6 +1633,20 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalExecutionLayerDeposits( returnOk(w, r, response) } +// PublicGetValidatorDashboardWithdrawals godoc +// +// @Description Get withdrawals information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(epoch, slot, index, recipient, amount) +// @Param search query string false "Search for Index, Public Key, Address." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardWithdrawalsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/withdrawals [get] func (h *HandlerService) PublicGetValidatorDashboardWithdrawals(w http.ResponseWriter, r *http.Request) { var v validationError q := r.URL.Query() @@ -1254,7 +1663,7 @@ func (h *HandlerService) PublicGetValidatorDashboardWithdrawals(w http.ResponseW return } - data, paging, err := h.dai.GetValidatorDashboardWithdrawals(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardWithdrawals(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1266,6 +1675,16 @@ func (h *HandlerService) PublicGetValidatorDashboardWithdrawals(w http.ResponseW returnOk(w, r, response) } +// PublicGetValidatorDashboardTotalWithdrawals godoc +// +// @Description Get total withdrawals information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." 
+// @Success 200 {object} types.GetValidatorDashboardTotalWithdrawalsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/total-withdrawals [get] func (h *HandlerService) PublicGetValidatorDashboardTotalWithdrawals(w http.ResponseWriter, r *http.Request) { var v validationError q := r.URL.Query() @@ -1281,7 +1700,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalWithdrawals(w http.Resp return } - data, err := h.dai.GetValidatorDashboardTotalWithdrawals(r.Context(), *dashboardId, pagingParams.search, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalWithdrawals(r.Context(), *dashboardId, pagingParams.search, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1293,6 +1712,19 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalWithdrawals(w http.Resp returnOk(w, r, response) } +// PublicGetValidatorDashboardRocketPool godoc +// +// @Description Get an aggregated list of the Rocket Pool nodes details associated with a specified dashboard. +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(node, minipools, collateral, rpl, effective_rpl, rpl_apr, smoothing_pool) +// @Param search query string false "Search for Node address." +// @Success 200 {object} types.GetValidatorDashboardRocketPoolResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/rocket-pool [get] func (h *HandlerService) PublicGetValidatorDashboardRocketPool(w http.ResponseWriter, r *http.Request) { var v validationError q := r.URL.Query() @@ -1308,7 +1740,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRocketPool(w http.ResponseWr return } - data, paging, err := h.dai.GetValidatorDashboardRocketPool(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardRocketPool(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1320,6 +1752,15 @@ func (h *HandlerService) PublicGetValidatorDashboardRocketPool(w http.ResponseWr returnOk(w, r, response) } +// PublicGetValidatorDashboardTotalRocketPool godoc +// +// @Description Get a summary of all Rocket Pool nodes details associated with a specified dashboard. +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." 
+// @Success 200 {object} types.GetValidatorDashboardTotalRocketPoolResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/total-rocket-pool [get] func (h *HandlerService) PublicGetValidatorDashboardTotalRocketPool(w http.ResponseWriter, r *http.Request) { var v validationError q := r.URL.Query() @@ -1334,7 +1775,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalRocketPool(w http.Respo return } - data, err := h.dai.GetValidatorDashboardTotalRocketPool(r.Context(), *dashboardId, pagingParams.search) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalRocketPool(r.Context(), *dashboardId, pagingParams.search) if err != nil { handleErr(w, r, err) return @@ -1345,9 +1786,24 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalRocketPool(w http.Respo returnOk(w, r, response) } -func (h *HandlerService) PublicGetValidatorDashboardNodeRocketPool(w http.ResponseWriter, r *http.Request) { +// PublicGetValidatorDashboardRocketPoolMinipools godoc +// +// @Description Get minipools information for a specified Rocket Pool node associated with a specified dashboard. +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param node_address path string true "The address of the node." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(group_id) +// @Param search query string false "Search for Index, Node." +// @Success 200 {object} types.GetValidatorDashboardRocketPoolMinipoolsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/rocket-pool/{node_address}/minipools [get] +func (h *HandlerService) PublicGetValidatorDashboardRocketPoolMinipools(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) + q := r.URL.Query() dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) if err != nil { handleErr(w, r, err) @@ -1355,52 +1811,936 @@ func (h *HandlerService) PublicGetValidatorDashboardNodeRocketPool(w http.Respon } // support ENS names ? nodeAddress := v.checkAddress(vars["node_address"]) + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.VDBRocketPoolMinipoolsColumn](&v, q.Get("sort")) if v.hasErrors() { handleErr(w, r, v) return } - data, err := h.dai.GetValidatorDashboardNodeRocketPool(r.Context(), *dashboardId, nodeAddress) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardRocketPoolMinipools(r.Context(), *dashboardId, nodeAddress, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardRocketPoolMinipoolsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +// ---------------------------------------------- +// Notifications +// ---------------------------------------------- + +// PublicGetUserNotifications godoc +// +// @Description Get an overview of your recent notifications. 
+// @Security ApiKeyInHeader || ApiKeyInQuery
+// @Tags Notifications
+// @Produce json
+// @Success 200 {object} types.InternalGetUserNotificationsResponse
+// @Router /users/me/notifications [get]
+func (h *HandlerService) PublicGetUserNotifications(w http.ResponseWriter, r *http.Request) {
+	userId, err := GetUserIdByContext(r)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	data, err := h.getDataAccessor(r).GetNotificationOverview(r.Context(), userId)
 	if err != nil {
 		handleErr(w, r, err)
 		return
 	}
-	response := types.GetValidatorDashboardNodeRocketPoolResponse{
+	response := types.InternalGetUserNotificationsResponse{
 		Data: *data,
 	}
 	returnOk(w, r, response)
 }
-func (h *HandlerService) PublicGetValidatorDashboardRocketPoolMinipools(w http.ResponseWriter, r *http.Request) {
+// PublicGetUserNotificationDashboards godoc
+//
+// @Description Get a list of triggered notifications related to your dashboards.
+// @Security ApiKeyInHeader || ApiKeyInQuery
+// @Tags Notifications
+// @Produce json
+// @Param networks query string false "If set, results will be filtered to only include the given networks. Provide a comma separated list."
+// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor` value of the previous response to navigate forward, or pass the `paging.prev_cursor` value of the previous response to navigate backward."
+// @Param limit query integer false "The maximum number of results that may be returned."
+// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(chain_id, timestamp, dashboard_id)
+// @Param search query string false "Search for Dashboard, Group"
+// @Success 200 {object} types.InternalGetUserNotificationDashboardsResponse
+// @Failure 400 {object} types.ApiErrorResponse
+// @Router /users/me/notifications/dashboards [get]
+func (h *HandlerService) PublicGetUserNotificationDashboards(w http.ResponseWriter, r *http.Request) {
+	var v validationError
+	userId, err := GetUserIdByContext(r)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	q := r.URL.Query()
+	pagingParams := v.checkPagingParams(q)
+	sort := checkSort[enums.NotificationDashboardsColumn](&v, q.Get("sort"))
+	chainIds := v.checkNetworksParameter(q.Get("networks"))
+	if v.hasErrors() {
+		handleErr(w, r, v)
+		return
+	}
+
+	data, paging, err := h.getDataAccessor(r).GetDashboardNotifications(r.Context(), userId, chainIds, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	response := types.InternalGetUserNotificationDashboardsResponse{
+		Data:   data,
+		Paging: *paging,
+	}
+	returnOk(w, r, response)
+}
+
+// PublicGetUserNotificationsValidatorDashboard godoc
+//
+// @Description Get a detailed view of a triggered notification related to a validator dashboard group at a specific epoch.
+// @Security ApiKeyInHeader || ApiKeyInQuery
+// @Tags Notifications
+// @Produce json
+// @Param dashboard_id path string true "The ID of the dashboard."
+// @Param group_id path integer true "The ID of the group."
+// @Param epoch path integer true "The epoch of the notification."
+// @Param search query string false "Search for Index" +// @Success 200 {object} types.InternalGetUserNotificationsValidatorDashboardResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/validator-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch} [get] +func (h *HandlerService) PublicGetUserNotificationsValidatorDashboard(w http.ResponseWriter, r *http.Request) { + var v validationError + vars := mux.Vars(r) + dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) + groupId := v.checkExistingGroupId(vars["group_id"]) + epoch := v.checkUint(vars["epoch"], "epoch") + search := r.URL.Query().Get("search") + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, err := h.getDataAccessor(r).GetValidatorDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch, search) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationsValidatorDashboardResponse{ + Data: *data, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationsAccountDashboard godoc +// +// @Description Get a detailed view of a triggered notification related to an account dashboard group at a specific epoch. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notifications +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." +// @Param epoch path integer true "The epoch of the notification." +// @Param search query string false "Search for Address, ENS" +// @Success 200 {object} types.InternalGetUserNotificationsAccountDashboardResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/account-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch} [get] +func (h *HandlerService) PublicGetUserNotificationsAccountDashboard(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) + dashboardId := v.checkUint(vars["dashboard_id"], "dashboard_id") + groupId := v.checkExistingGroupId(vars["group_id"]) + epoch := v.checkUint(vars["epoch"], "epoch") + search := r.URL.Query().Get("search") + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, err := h.getDataAccessor(r).GetAccountDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch, search) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationsAccountDashboardResponse{ + Data: *data, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationMachines godoc +// +// @Description Get a list of triggered notifications related to your machines. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notifications +// @Produce json +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query integer false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(machine_name, threshold, event_type, timestamp)
+// @Param search query string false "Search for Machine"
+// @Success 200 {object} types.InternalGetUserNotificationMachinesResponse
+// @Failure 400 {object} types.ApiErrorResponse
+// @Router /users/me/notifications/machines [get]
+func (h *HandlerService) PublicGetUserNotificationMachines(w http.ResponseWriter, r *http.Request) {
+	var v validationError
+	userId, err := GetUserIdByContext(r)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
 	q := r.URL.Query()
-	dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"])
+	pagingParams := v.checkPagingParams(q)
+	sort := checkSort[enums.NotificationMachinesColumn](&v, q.Get("sort"))
+	if v.hasErrors() {
+		handleErr(w, r, v)
+		return
+	}
+	data, paging, err := h.getDataAccessor(r).GetMachineNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit)
 	if err != nil {
 		handleErr(w, r, err)
 		return
 	}
-	// support ENS names ?
-	nodeAddress := v.checkAddress(vars["node_address"])
+	response := types.InternalGetUserNotificationMachinesResponse{
+		Data:   data,
+		Paging: *paging,
+	}
+	returnOk(w, r, response)
+}
+
+// PublicGetUserNotificationClients godoc
+//
+// @Description Get a list of triggered notifications related to your clients.
+// @Security ApiKeyInHeader || ApiKeyInQuery
+// @Tags Notifications
+// @Produce json
+// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor` value of the previous response to navigate forward, or pass the `paging.prev_cursor` value of the previous response to navigate backward."
+// @Param limit query integer false "The maximum number of results that may be returned."
+// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(client_name, timestamp)
+// @Param search query string false "Search for Client"
+// @Success 200 {object} types.InternalGetUserNotificationClientsResponse
+// @Failure 400 {object} types.ApiErrorResponse
+// @Router /users/me/notifications/clients [get]
+func (h *HandlerService) PublicGetUserNotificationClients(w http.ResponseWriter, r *http.Request) {
+	var v validationError
+	userId, err := GetUserIdByContext(r)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	q := r.URL.Query()
 	pagingParams := v.checkPagingParams(q)
-	sort := checkSort[enums.VDBRocketPoolMinipoolsColumn](&v, q.Get("sort"))
+	sort := checkSort[enums.NotificationClientsColumn](&v, q.Get("sort"))
 	if v.hasErrors() {
 		handleErr(w, r, v)
 		return
 	}
+	data, paging, err := h.getDataAccessor(r).GetClientNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	response := types.InternalGetUserNotificationClientsResponse{
+		Data:   data,
+		Paging: *paging,
+	}
+	returnOk(w, r, response)
+}
-	data, paging, err := h.dai.GetValidatorDashboardRocketPoolMinipools(r.Context(), *dashboardId, nodeAddress, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit)
+// PublicGetUserNotificationRocketPool godoc
+//
+// @Description Get a list of triggered notifications related to Rocket Pool.
+// @Security ApiKeyInHeader || ApiKeyInQuery
+// @Tags Notifications
+// @Produce json
+// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor` value of the previous response to navigate forward, or pass the `paging.prev_cursor` value of the previous response to navigate backward."
+// @Param limit query integer false "The maximum number of results that may be returned."
+// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(timestamp, event_type, node_address)
+// @Param search query string false "Search for Node Address"
+// @Success 200 {object} types.InternalGetUserNotificationRocketPoolResponse
+// @Failure 400 {object} types.ApiErrorResponse
+// @Router /users/me/notifications/rocket-pool [get]
+func (h *HandlerService) PublicGetUserNotificationRocketPool(w http.ResponseWriter, r *http.Request) {
+	var v validationError
+	userId, err := GetUserIdByContext(r)
 	if err != nil {
 		handleErr(w, r, err)
 		return
 	}
+	q := r.URL.Query()
+	pagingParams := v.checkPagingParams(q)
+	sort := checkSort[enums.NotificationRocketPoolColumn](&v, q.Get("sort"))
+	if v.hasErrors() {
+		handleErr(w, r, v)
+		return
+	}
+	data, paging, err := h.getDataAccessor(r).GetRocketPoolNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	response := types.InternalGetUserNotificationRocketPoolResponse{
+		Data:   data,
+		Paging: *paging,
+	}
+	returnOk(w, r, response)
+}
+
+// PublicGetUserNotificationNetworks godoc
+//
+// @Description Get a list of triggered notifications related to networks.
+// @Security ApiKeyInHeader || ApiKeyInQuery
+// @Tags Notifications
+// @Produce json
+// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor` value of the previous response to navigate forward, or pass the `paging.prev_cursor` value of the previous response to navigate backward."
+// @Param limit query integer false "The maximum number of results that may be returned."
+// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(timestamp, event_type)
+// @Success 200 {object} types.InternalGetUserNotificationNetworksResponse
+// @Failure 400 {object} types.ApiErrorResponse
+// @Router /users/me/notifications/networks [get]
+func (h *HandlerService) PublicGetUserNotificationNetworks(w http.ResponseWriter, r *http.Request) {
+	var v validationError
+	userId, err := GetUserIdByContext(r)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	q := r.URL.Query()
+	pagingParams := v.checkPagingParams(q)
+	sort := checkSort[enums.NotificationNetworksColumn](&v, q.Get("sort"))
+	if v.hasErrors() {
+		handleErr(w, r, v)
+		return
+	}
+	data, paging, err := h.getDataAccessor(r).GetNetworkNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.limit)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	response := types.InternalGetUserNotificationNetworksResponse{
 		Data:   data,
 		Paging: *paging,
 	}
 	returnOk(w, r, response)
 }
+const diffTolerance = 0.0001
+
+// PublicGetUserNotificationSettings godoc
+//
+// @Description Get notification settings for the authenticated user. Excludes dashboard notification settings.
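+// The `diffTolerance` constant above exists because the thresholds round-trip
+// through JSON and the database as floats, so comparing them to their defaults
+// with `==` is unreliable. A minimal godoc-style sketch (Go float64 is IEEE-754,
+// hence the classic example):
+//
+//	fmt.Println(0.1+0.2 == 0.3)                 // false: floating-point rounding
+//	fmt.Println(math.Abs((0.1+0.2)-0.3) < 1e-4) // true: tolerance-based comparison
+//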
+// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Produce json +// @Success 200 {object} types.InternalGetUserNotificationSettingsResponse +// @Router /users/me/notifications/settings [get] +func (h *HandlerService) PublicGetUserNotificationSettings(w http.ResponseWriter, r *http.Request) { + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + data, err := h.getDataAccessor(r).GetNotificationSettings(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + + // check premium perks + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + defaultSettings, err := h.getDataAccessor(r).GetNotificationSettingsDefaultValues(r.Context()) + if err != nil { + handleErr(w, r, err) + return + } + userGeneralSettings := data.GeneralSettings + + // if users premium perks do not allow custom thresholds, set them to default in the response + // TODO: once stripe payments run in v2, this should be removed and the notification settings should be updated upon a tier change instead + if !userInfo.PremiumPerks.NotificationsMachineCustomThreshold { + if math.Abs(userGeneralSettings.MachineStorageUsageThreshold-defaultSettings.MachineStorageUsageThreshold) > diffTolerance { + userGeneralSettings.MachineStorageUsageThreshold = defaultSettings.MachineStorageUsageThreshold + userGeneralSettings.IsMachineStorageUsageSubscribed = false + } + if math.Abs(userGeneralSettings.MachineCpuUsageThreshold-defaultSettings.MachineCpuUsageThreshold) > diffTolerance { + userGeneralSettings.MachineCpuUsageThreshold = defaultSettings.MachineCpuUsageThreshold + userGeneralSettings.IsMachineCpuUsageSubscribed = false + } + if math.Abs(userGeneralSettings.MachineMemoryUsageThreshold-defaultSettings.MachineMemoryUsageThreshold) > diffTolerance { + userGeneralSettings.MachineMemoryUsageThreshold = defaultSettings.MachineMemoryUsageThreshold + userGeneralSettings.IsMachineMemoryUsageSubscribed = false + } + data.GeneralSettings = userGeneralSettings + } + + response := types.InternalGetUserNotificationSettingsResponse{ + Data: *data, + } + returnOk(w, r, response) +} + +// PublicPutUserNotificationSettingsGeneral godoc +// +// @Description Update general notification settings for the authenticated user. 
+// @Security ApiKeyInHeader || ApiKeyInQuery
+// @Tags Notification Settings
+// @Accept json
+// @Produce json
+// @Param request body types.NotificationSettingsGeneral true "Description TODO"
+// @Success 200 {object} types.InternalPutUserNotificationSettingsGeneralResponse
+// @Failure 400 {object} types.ApiErrorResponse
+// @Router /users/me/notifications/settings/general [put]
+func (h *HandlerService) PublicPutUserNotificationSettingsGeneral(w http.ResponseWriter, r *http.Request) {
+	var v validationError
+	userId, err := GetUserIdByContext(r)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	var req types.NotificationSettingsGeneral
+	if err := v.checkBody(&req, r); err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	checkMinMax(&v, req.MachineStorageUsageThreshold, 0, 1, "machine_storage_usage_threshold")
+	checkMinMax(&v, req.MachineCpuUsageThreshold, 0, 1, "machine_cpu_usage_threshold")
+	checkMinMax(&v, req.MachineMemoryUsageThreshold, 0, 1, "machine_memory_usage_threshold")
+	if v.hasErrors() {
+		handleErr(w, r, v)
+		return
+	}
+	req.DoNotDisturbTimestamp = min(req.DoNotDisturbTimestamp, math.MaxInt32)
+
+	// check premium perks
+	userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	defaultSettings, err := h.getDataAccessor(r).GetNotificationSettingsDefaultValues(r.Context())
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+
+	// use tolerance for float comparison
+	isCustomThresholdUsed := math.Abs(req.MachineStorageUsageThreshold-defaultSettings.MachineStorageUsageThreshold) > diffTolerance ||
+		math.Abs(req.MachineCpuUsageThreshold-defaultSettings.MachineCpuUsageThreshold) > diffTolerance ||
+		math.Abs(req.MachineMemoryUsageThreshold-defaultSettings.MachineMemoryUsageThreshold) > diffTolerance
+
+	if !userInfo.PremiumPerks.NotificationsMachineCustomThreshold && isCustomThresholdUsed {
+		returnForbidden(w, r, errors.New("user does not have premium perks to set machine settings thresholds"))
+		return
+	}
+
+	err = h.getDataAccessor(r).UpdateNotificationSettingsGeneral(r.Context(), userId, req)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	response := types.InternalPutUserNotificationSettingsGeneralResponse{
+		Data: req,
+	}
+	returnOk(w, r, response)
+}
+
+// PublicPutUserNotificationSettingsNetworks godoc
+//
+// @Description Update network notification settings for the authenticated user.
+// @Security ApiKeyInHeader || ApiKeyInQuery
+// @Tags Notification Settings
+// @Accept json
+// @Produce json
+// @Param network path string true "The network's name or chain ID."
+// @Param request body handlers.PublicPutUserNotificationSettingsNetworks.request true "Description TODO"
+// @Success 200 {object} types.InternalPutUserNotificationSettingsNetworksResponse
+// @Failure 400 {object} types.ApiErrorResponse
+// @Router /users/me/notifications/settings/networks/{network} [put]
+func (h *HandlerService) PublicPutUserNotificationSettingsNetworks(w http.ResponseWriter, r *http.Request) {
+	var v validationError
+	userId, err := GetUserIdByContext(r)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	type request struct {
+		IsGasAboveSubscribed          bool    `json:"is_gas_above_subscribed"`
+		GasAboveThreshold             string  `json:"gas_above_threshold"`
+		IsGasBelowSubscribed          bool    `json:"is_gas_below_subscribed"`
+		GasBelowThreshold             string  `json:"gas_below_threshold"`
+		IsParticipationRateSubscribed bool    `json:"is_participation_rate_subscribed"`
+		ParticipationRateThreshold    float64 `json:"participation_rate_threshold"`
+		IsNewRewardRoundSubscribed    bool    `json:"is_new_reward_round_subscribed"`
+	}
+	var req request
+	if err := v.checkBody(&req, r); err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	checkMinMax(&v, req.ParticipationRateThreshold, 0, 1, "participation_rate_threshold")
+	chainId := v.checkNetworkParameter(mux.Vars(r)["network"])
+
+	minWei := decimal.New(1000000, 1)       // 1e7 wei = 0.01 Gwei
+	maxWei := decimal.New(1000000000000, 1) // 1e13 wei = 10000 Gwei
+	gasAboveThreshold := v.checkWeiMinMax(req.GasAboveThreshold, "gas_above_threshold", minWei, maxWei)
+	gasBelowThreshold := v.checkWeiMinMax(req.GasBelowThreshold, "gas_below_threshold", minWei, maxWei)
+	if v.hasErrors() {
+		handleErr(w, r, v)
+		return
+	}
+	settings := types.NotificationSettingsNetwork{
+		IsGasAboveSubscribed:          req.IsGasAboveSubscribed,
+		GasAboveThreshold:             gasAboveThreshold,
+		IsGasBelowSubscribed:          req.IsGasBelowSubscribed,
+		GasBelowThreshold:             gasBelowThreshold,
+		IsParticipationRateSubscribed: req.IsParticipationRateSubscribed,
+		ParticipationRateThreshold:    req.ParticipationRateThreshold,
+		IsNewRewardRoundSubscribed:    req.IsNewRewardRoundSubscribed,
+	}
+
+	err = h.getDataAccessor(r).UpdateNotificationSettingsNetworks(r.Context(), userId, chainId, settings)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	response := types.InternalPutUserNotificationSettingsNetworksResponse{
+		Data: types.NotificationNetwork{
+			ChainId:  chainId,
+			Settings: settings,
+		},
+	}
+	returnOk(w, r, response)
+}
+
+// PublicPutUserNotificationSettingsPairedDevices godoc
+//
+// @Description Update paired device notification settings for the authenticated user.
+// @Security ApiKeyInHeader || ApiKeyInQuery
+// @Tags Notification Settings
+// @Accept json
+// @Produce json
+// @Param paired_device_id path string true "The paired device ID."
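+// For the gas-threshold bounds in the networks handler above: assuming the
+// github.com/shopspring/decimal package (the `decimal` import used here),
+// New(value, exp) constructs value * 10^exp, so the accepted range works out to:
+//
+//	minWei := decimal.New(1000000, 1)       // 1,000,000 * 10^1 = 1e7 wei  = 0.01 Gwei
+//	maxWei := decimal.New(1000000000000, 1) // 1e12      * 10^1 = 1e13 wei = 10000 Gwei
+//	ok := w.GreaterThanOrEqual(minWei) && w.LessThanOrEqual(maxWei) // sketch of the bound
+//	                                                                // check; whether
+//	                                                                // checkWeiMinMax is
+//	                                                                // inclusive is an
+//	                                                                // assumption here
+//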
+// @Param request body handlers.PublicPutUserNotificationSettingsPairedDevices.request true "Description TODO" +// @Success 200 {object} types.InternalPutUserNotificationSettingsPairedDevicesResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/paired-devices/{paired_device_id} [put] +func (h *HandlerService) PublicPutUserNotificationSettingsPairedDevices(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + type request struct { + Name string `json:"name,omitempty"` + IsNotificationsEnabled bool `json:"is_notifications_enabled"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + // TODO use a better way to validate the paired device id + pairedDeviceId := v.checkUint(mux.Vars(r)["paired_device_id"], "paired_device_id") + name := v.checkNameNotEmpty(req.Name) + if v.hasErrors() { + handleErr(w, r, v) + return + } + pairedDeviceUserId, err := h.getDataAccessor(r).GetPairedDeviceUserId(r.Context(), pairedDeviceId) + if err != nil { + handleErr(w, r, err) + return + } + if userId != pairedDeviceUserId { + returnNotFound(w, r, fmt.Errorf("not found: paired device with id %d not found", pairedDeviceId)) // return 404 to not leak information + return + } + err = h.getDataAccessor(r).UpdateNotificationSettingsPairedDevice(r.Context(), pairedDeviceId, name, req.IsNotificationsEnabled) + if err != nil { + handleErr(w, r, err) + return + } + // TODO timestamp + response := types.InternalPutUserNotificationSettingsPairedDevicesResponse{ + Data: types.NotificationPairedDevice{ + Id: pairedDeviceId, + Name: req.Name, + IsNotificationsEnabled: req.IsNotificationsEnabled, + }, + } + + returnOk(w, r, response) +} + +// PublicDeleteUserNotificationSettingsPairedDevices godoc +// +// @Description Delete paired device notification settings for the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Produce json +// @Param paired_device_id path string true "The paired device ID." +// @Success 204 +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/paired-devices/{paired_device_id} [delete] +func (h *HandlerService) PublicDeleteUserNotificationSettingsPairedDevices(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + pairedDeviceId := v.checkUint(mux.Vars(r)["paired_device_id"], "paired_device_id") + if v.hasErrors() { + handleErr(w, r, v) + return + } + pairedDeviceUserId, err := h.getDataAccessor(r).GetPairedDeviceUserId(r.Context(), pairedDeviceId) + if err != nil { + handleErr(w, r, err) + return + } + if userId != pairedDeviceUserId { + returnNotFound(w, r, fmt.Errorf("not found: paired device with id %d not found", pairedDeviceId)) // return 404 to not leak information + return + } + err = h.getDataAccessor(r).DeleteNotificationSettingsPairedDevice(r.Context(), pairedDeviceId) + if err != nil { + handleErr(w, r, err) + return + } + returnNoContent(w, r) +} + +// PublicPutUserNotificationSettingsClient godoc +// +// @Description Update client notification settings for the authenticated user. When a client is subscribed, notifications will be sent when a new version is available. 
+// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param client_id path integer true "The ID of the client." +// @Param request body handlers.PublicPutUserNotificationSettingsClient.request true "`is_subscribed`: Set to `true` to subscribe to notifications; set to `false` to unsubscribe." +// @Success 200 {object} types.InternalPutUserNotificationSettingsClientResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/clients/{client_id} [put] +func (h *HandlerService) PublicPutUserNotificationSettingsClient(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + type request struct { + IsSubscribed bool `json:"is_subscribed"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + clientId := v.checkUint(mux.Vars(r)["client_id"], "client_id") + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, err := h.getDataAccessor(r).UpdateNotificationSettingsClients(r.Context(), userId, clientId, req.IsSubscribed) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalPutUserNotificationSettingsClientResponse{ + Data: *data, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationSettingsDashboards godoc +// +// @Description Get a list of notification settings for the dashboards of the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Produce json +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query integer false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums (dashboard_id, group_name) +// @Param search query string false "Search for Dashboard, Group" +// @Success 200 {object} types.InternalGetUserNotificationSettingsDashboardsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/dashboards [get] +func (h *HandlerService) PublicGetUserNotificationSettingsDashboards(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.NotificationSettingsDashboardColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, paging, err := h.getDataAccessor(r).GetNotificationSettingsDashboards(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + // if users premium perks do not allow subscriptions, set them to false in the response + // TODO: once stripe payments run in v2, this should be removed and the notification settings should be updated upon a tier change instead + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + defaultSettings, err := h.getDataAccessor(r).GetNotificationSettingsDefaultValues(r.Context()) + if err != nil { + handleErr(w, r, err) + return + } + for i, dashboard := range data { + if dashboard.IsAccountDashboard { + continue + } + settings, ok := dashboard.Settings.(types.NotificationSettingsValidatorDashboard) + if !ok { + handleErr(w, r, errors.New("invalid settings type")) + return + } + if !userInfo.PremiumPerks.NotificationsValidatorDashboardGroupEfficiency && settings.IsGroupEfficiencyBelowSubscribed { + settings.IsGroupEfficiencyBelowSubscribed = false + settings.GroupEfficiencyBelowThreshold = defaultSettings.GroupEfficiencyBelowThreshold + } + data[i].Settings = settings + } + response := types.InternalGetUserNotificationSettingsDashboardsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +// PublicPutUserNotificationSettingsValidatorDashboard godoc +// +// @Description Update the notification settings for a specific group of a validator dashboard for the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." 
+// @Param request body types.NotificationSettingsValidatorDashboard true "Notification settings"
+// @Success 200 {object} types.InternalPutUserNotificationSettingsValidatorDashboardResponse
+// @Failure 400 {object} types.ApiErrorResponse
+// @Router /users/me/notifications/settings/validator-dashboards/{dashboard_id}/groups/{group_id} [put]
+func (h *HandlerService) PublicPutUserNotificationSettingsValidatorDashboard(w http.ResponseWriter, r *http.Request) {
+	var v validationError
+	userId, err := GetUserIdByContext(r)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+
+	var req types.NotificationSettingsValidatorDashboard
+	if err := v.checkBody(&req, r); err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	checkMinMax(&v, req.GroupEfficiencyBelowThreshold, 0, 1, "group_efficiency_below_threshold")
+	vars := mux.Vars(r)
+	dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"])
+	groupId := v.checkExistingGroupId(vars["group_id"])
+
+	checkMinMax(&v, req.MaxCollateralThreshold, 0, 1, "max_collateral_threshold")
+	checkMinMax(&v, req.MinCollateralThreshold, 0, 1, "min_collateral_threshold")
+	if v.hasErrors() {
+		handleErr(w, r, v)
+		return
+	}
+	userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	if !userInfo.PremiumPerks.NotificationsValidatorDashboardGroupEfficiency && req.IsGroupEfficiencyBelowSubscribed {
+		returnForbidden(w, r, errors.New("user does not have premium perks to subscribe to the group efficiency event"))
+		return
+	}
+
+	err = h.getDataAccessor(r).UpdateNotificationSettingsValidatorDashboard(r.Context(), userId, dashboardId, groupId, req)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	response := types.InternalPutUserNotificationSettingsValidatorDashboardResponse{
+		Data: req,
+	}
+	returnOk(w, r, response)
+}
+
+// PublicPutUserNotificationSettingsAccountDashboard godoc
+//
+// @Description Update the notification settings for a specific group of an account dashboard for the authenticated user.
+// @Security ApiKeyInHeader || ApiKeyInQuery
+// @Tags Notification Settings
+// @Accept json
+// @Produce json
+// @Param dashboard_id path string true "The ID of the dashboard."
+// @Param group_id path integer true "The ID of the group."
+// @Param request body handlers.PublicPutUserNotificationSettingsAccountDashboard.request true "Notification settings"
+// @Success 200 {object} types.InternalPutUserNotificationSettingsAccountDashboardResponse
+// @Failure 400 {object} types.ApiErrorResponse
+// @Router /users/me/notifications/settings/account-dashboards/{dashboard_id}/groups/{group_id} [put]
+func (h *HandlerService) PublicPutUserNotificationSettingsAccountDashboard(w http.ResponseWriter, r *http.Request) {
+	var v validationError
+	userId, err := GetUserIdByContext(r)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+
+	// uses a different struct than the response because `subscribed_chain_ids` is a slice of intOrString in the payload but a slice of uint64 in the response
+	type request struct {
+		WebhookUrl                      string        `json:"webhook_url"`
+		IsWebhookDiscordEnabled         bool          `json:"is_webhook_discord_enabled"`
+		IsIgnoreSpamTransactionsEnabled bool          `json:"is_ignore_spam_transactions_enabled"`
+		SubscribedChainIds              []intOrString `json:"subscribed_chain_ids"`
+
+		IsIncomingTransactionsSubscribed  bool    `json:"is_incoming_transactions_subscribed"`
+		IsOutgoingTransactionsSubscribed  bool    `json:"is_outgoing_transactions_subscribed"`
+		IsERC20TokenTransfersSubscribed   bool    `json:"is_erc20_token_transfers_subscribed"`
+		ERC20TokenTransfersValueThreshold float64 `json:"erc20_token_transfers_value_threshold"` // 0 does not disable; is_erc20_token_transfers_subscribed determines whether it is enabled
+		IsERC721TokenTransfersSubscribed  bool    `json:"is_erc721_token_transfers_subscribed"`
+		IsERC1155TokenTransfersSubscribed bool    `json:"is_erc1155_token_transfers_subscribed"`
+	}
+	var req request
+	if err := v.checkBody(&req, r); err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	chainIds := v.checkNetworkSlice(req.SubscribedChainIds)
+	checkMinMax(&v, req.ERC20TokenTransfersValueThreshold, 0, math.MaxFloat64, "erc20_token_transfers_value_threshold")
+	vars := mux.Vars(r)
+	dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"])
+	groupId := v.checkExistingGroupId(vars["group_id"])
+	if v.hasErrors() {
+		handleErr(w, r, v)
+		return
+	}
+	settings := types.NotificationSettingsAccountDashboard{
+		WebhookUrl:                      req.WebhookUrl,
+		IsWebhookDiscordEnabled:         req.IsWebhookDiscordEnabled,
+		IsIgnoreSpamTransactionsEnabled: req.IsIgnoreSpamTransactionsEnabled,
+		SubscribedChainIds:              chainIds,
+
+		IsIncomingTransactionsSubscribed:  req.IsIncomingTransactionsSubscribed,
+		IsOutgoingTransactionsSubscribed:  req.IsOutgoingTransactionsSubscribed,
+		IsERC20TokenTransfersSubscribed:   req.IsERC20TokenTransfersSubscribed,
+		ERC20TokenTransfersValueThreshold: req.ERC20TokenTransfersValueThreshold,
+		IsERC721TokenTransfersSubscribed:  req.IsERC721TokenTransfersSubscribed,
+		IsERC1155TokenTransfersSubscribed: req.IsERC1155TokenTransfersSubscribed,
+	}
+	err = h.getDataAccessor(r).UpdateNotificationSettingsAccountDashboard(r.Context(), userId, dashboardId, groupId, settings)
+	if err != nil {
+		handleErr(w, r, err)
+		return
+	}
+	response := types.InternalPutUserNotificationSettingsAccountDashboardResponse{
+		Data: settings,
+	}
+	returnOk(w, r, response)
+}
+
+// PublicPostUserNotificationsTestEmail godoc
+//
+// @Description Send a test email notification to the authenticated user.
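+// A minimal client sketch for this endpoint (illustrative only; the base URL
+// and bearer-style API key header are assumptions, cf. the ApiKeyInHeader
+// security scheme):
+//
+//	req, _ := http.NewRequest(http.MethodPost, baseUrl+"/users/me/notifications/test-email", nil)
+//	req.Header.Set("Authorization", "Bearer "+apiKey) // assumed header form
+//	resp, err := http.DefaultClient.Do(req)           // a 204 response means the email was queued
+//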
+// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Produce json +// @Success 204 +// @Router /users/me/notifications/test-email [post] +func (h *HandlerService) PublicPostUserNotificationsTestEmail(w http.ResponseWriter, r *http.Request) { + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + err = h.getDataAccessor(r).QueueTestEmailNotification(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + returnNoContent(w, r) +} + +// PublicPostUserNotificationsTestPush godoc +// +// @Description Send a test push notification to the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Produce json +// @Success 204 +// @Router /users/me/notifications/test-push [post] +func (h *HandlerService) PublicPostUserNotificationsTestPush(w http.ResponseWriter, r *http.Request) { + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + err = h.getDataAccessor(r).QueueTestPushNotification(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + returnNoContent(w, r) +} + +// PublicPostUserNotificationsTestWebhook godoc +// +// @Description Send a test webhook notification from the authenticated user to the given URL. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param request body handlers.PublicPostUserNotificationsTestWebhook.request true "Request" +// @Success 204 +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/test-webhook [post] +func (h *HandlerService) PublicPostUserNotificationsTestWebhook(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + type request struct { + WebhookUrl string `json:"webhook_url"` + IsWebhookDiscordEnabled bool `json:"is_webhook_discord_enabled,omitempty"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + if v.hasErrors() { + handleErr(w, r, v) + return + } + err = h.getDataAccessor(r).QueueTestWebhookNotification(r.Context(), userId, req.WebhookUrl, req.IsWebhookDiscordEnabled) + if err != nil { + handleErr(w, r, err) + return + } + returnNoContent(w, r) +} + func (h *HandlerService) PublicGetNetworkValidators(w http.ResponseWriter, r *http.Request) { returnOk(w, r, nil) } diff --git a/backend/pkg/api/handlers/search_handlers.go b/backend/pkg/api/handlers/search_handlers.go index 6793e4ef0..49d43acdd 100644 --- a/backend/pkg/api/handlers/search_handlers.go +++ b/backend/pkg/api/handlers/search_handlers.go @@ -5,8 +5,10 @@ import ( "encoding/hex" "errors" "fmt" + "maps" "net/http" "regexp" + "slices" "strconv" "strings" @@ -38,7 +40,7 @@ var searchTypeToRegex = map[searchTypeKey]*regexp.Regexp{ validatorsByWithdrawalCredential: reWithdrawalCredential, validatorsByWithdrawalAddress: reEthereumAddress, validatorsByWithdrawalEns: reEnsName, - validatorsByGraffiti: reNonEmpty, + validatorsByGraffiti: reGraffiti, } // -------------------------------------- @@ -68,12 +70,12 @@ func (h *HandlerService) InternalPostSearch(w http.ResponseWriter, r *http.Reque searchResultChan := make(chan types.SearchResult) // iterate over all combinations of search types and networks - for searchType := range searchTypeSet { + for _, searchType := range searchTypeSet { // check if input matches the regex for the search type if 
!searchTypeToRegex[searchType].MatchString(req.Input) { continue } - for chainId := range chainIdSet { + for _, chainId := range chainIdSet { chainId := chainId searchType := searchType g.Go(func() error { @@ -118,222 +120,177 @@ func (h *HandlerService) InternalPostSearch(w http.ResponseWriter, r *http.Reque // Search Helper Functions func (h *HandlerService) handleSearch(ctx context.Context, input string, searchType searchTypeKey, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil + switch searchType { + case validatorByIndex: + return h.handleSearchValidatorByIndex(ctx, input, chainId) + case validatorByPublicKey: + return h.handleSearchValidatorByPublicKey(ctx, input, chainId) + case validatorsByDepositAddress: + return h.handleSearchValidatorsByDepositAddress(ctx, input, chainId) + case validatorsByDepositEnsName: + return h.handleSearchValidatorsByDepositEnsName(ctx, input, chainId) + case validatorsByWithdrawalCredential: + return h.handleSearchValidatorsByWithdrawalCredential(ctx, input, chainId) + case validatorsByWithdrawalAddress: + return h.handleSearchValidatorsByWithdrawalAddress(ctx, input, chainId) + case validatorsByWithdrawalEns: + return h.handleSearchValidatorsByWithdrawalEnsName(ctx, input, chainId) + case validatorsByGraffiti: + return h.handleSearchValidatorsByGraffiti(ctx, input, chainId) default: - switch searchType { - case validatorByIndex: - return h.handleSearchValidatorByIndex(ctx, input, chainId) - case validatorByPublicKey: - return h.handleSearchValidatorByPublicKey(ctx, input, chainId) - case validatorsByDepositAddress: - return h.handleSearchValidatorsByDepositAddress(ctx, input, chainId) - case validatorsByDepositEnsName: - return h.handleSearchValidatorsByDepositEnsName(ctx, input, chainId) - case validatorsByWithdrawalCredential: - return h.handleSearchValidatorsByWithdrawalCredential(ctx, input, chainId) - case validatorsByWithdrawalAddress: - return h.handleSearchValidatorsByWithdrawalAddress(ctx, input, chainId) - case validatorsByWithdrawalEns: - return h.handleSearchValidatorsByWithdrawalEnsName(ctx, input, chainId) - case validatorsByGraffiti: - return h.handleSearchValidatorsByGraffiti(ctx, input, chainId) - default: - return nil, errors.New("invalid search type") - } + return nil, errors.New("invalid search type") } } func (h *HandlerService) handleSearchValidatorByIndex(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - index, err := strconv.ParseUint(input, 10, 64) - if err != nil { - // input should've been checked by the regex before, this should never happen - return nil, err - } - result, err := h.dai.GetSearchValidatorByIndex(ctx, chainId, index) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorByIndex), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.PublicKey), - NumValue: &result.Index, - }, nil + index, err := strconv.ParseUint(input, 10, 64) + if err != nil { + // input should've been checked by the regex before, this should never happen + return nil, err + } + result, err := h.daService.GetSearchValidatorByIndex(ctx, chainId, index) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorByIndex), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.PublicKey), + NumValue: &result.Index, + }, nil } func (h *HandlerService) handleSearchValidatorByPublicKey(ctx context.Context, 
input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - publicKey, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) - if err != nil { - // input should've been checked by the regex before, this should never happen - return nil, err - } - result, err := h.dai.GetSearchValidatorByPublicKey(ctx, chainId, publicKey) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorByPublicKey), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.PublicKey), - NumValue: &result.Index, - }, nil + publicKey, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) + if err != nil { + // input should've been checked by the regex before, this should never happen + return nil, err } + result, err := h.daService.GetSearchValidatorByPublicKey(ctx, chainId, publicKey) + if err != nil { + return nil, err + } + + return &types.SearchResult{ + Type: string(validatorByPublicKey), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.PublicKey), + NumValue: &result.Index, + }, nil } func (h *HandlerService) handleSearchValidatorsByDepositAddress(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - address, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) - if err != nil { - return nil, err - } - result, err := h.dai.GetSearchValidatorsByDepositAddress(ctx, chainId, address) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByDepositAddress), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.Address), - NumValue: &result.Count, - }, nil + address, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) + if err != nil { + return nil, err + } + result, err := h.daService.GetSearchValidatorsByDepositAddress(ctx, chainId, address) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByDepositAddress), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.Address), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByDepositEnsName(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - result, err := h.dai.GetSearchValidatorsByDepositEnsName(ctx, chainId, input) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByDepositEnsName), - ChainId: chainId, - StrValue: result.EnsName, - HashValue: "0x" + hex.EncodeToString(result.Address), - NumValue: &result.Count, - }, nil + result, err := h.daService.GetSearchValidatorsByDepositEnsName(ctx, chainId, input) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByDepositEnsName), + ChainId: chainId, + StrValue: result.EnsName, + HashValue: "0x" + hex.EncodeToString(result.Address), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByWithdrawalCredential(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - withdrawalCredential, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) - if err != nil { - return nil, err - } - result, err := h.dai.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - 
Type: string(validatorsByWithdrawalCredential), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), - NumValue: &result.Count, - }, nil + withdrawalCredential, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) + if err != nil { + return nil, err } + result, err := h.daService.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) + if err != nil { + return nil, err + } + + return &types.SearchResult{ + Type: string(validatorsByWithdrawalCredential), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByWithdrawalAddress(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - withdrawalString := "010000000000000000000000" + strings.TrimPrefix(input, "0x") - withdrawalCredential, err := hex.DecodeString(withdrawalString) - if err != nil { - return nil, err - } - result, err := h.dai.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByWithdrawalAddress), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), - NumValue: &result.Count, - }, nil + withdrawalString := "010000000000000000000000" + strings.TrimPrefix(input, "0x") + withdrawalCredential, err := hex.DecodeString(withdrawalString) + if err != nil { + return nil, err + } + result, err := h.daService.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByWithdrawalAddress), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByWithdrawalEnsName(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - result, err := h.dai.GetSearchValidatorsByWithdrawalEnsName(ctx, chainId, input) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByWithdrawalEns), - ChainId: chainId, - StrValue: result.EnsName, - HashValue: "0x" + hex.EncodeToString(result.Address), - NumValue: &result.Count, - }, nil + result, err := h.daService.GetSearchValidatorsByWithdrawalEnsName(ctx, chainId, input) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByWithdrawalEns), + ChainId: chainId, + StrValue: result.EnsName, + HashValue: "0x" + hex.EncodeToString(result.Address), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByGraffiti(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - result, err := h.dai.GetSearchValidatorsByGraffiti(ctx, chainId, input) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByGraffiti), - ChainId: chainId, - StrValue: result.Graffiti, - NumValue: &result.Count, - }, nil + result, err := h.daService.GetSearchValidatorsByGraffiti(ctx, chainId, input) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByGraffiti), + ChainId: chainId, + StrValue: result.Graffiti, + NumValue: 
&result.Count, + }, nil } // -------------------------------------- // Input Validation // if the passed slice is empty, return a set with all chain IDs; otherwise check if the passed networks are valid -func (v *validationError) checkNetworkSlice(networks []intOrString) map[uint64]struct{} { +func (v *validationError) checkNetworkSlice(networks []intOrString) []uint64 { networkSet := map[uint64]struct{}{} // if the list is empty, query all networks if len(networks) == 0 { for _, n := range allNetworks { networkSet[n.ChainId] = struct{}{} } - return networkSet + return slices.Collect(maps.Keys(networkSet)) } // list not empty, check if networks are valid for _, network := range networks { @@ -344,18 +301,18 @@ func (v *validationError) checkNetworkSlice(networks []intOrString) map[uint64]s } networkSet[chainId] = struct{}{} } - return networkSet + return slices.Collect(maps.Keys(networkSet)) } // if the passed slice is empty, return a set with all search types; otherwise check if the passed types are valid -func (v *validationError) checkSearchTypes(types []searchTypeKey) map[searchTypeKey]struct{} { +func (v *validationError) checkSearchTypes(types []searchTypeKey) []searchTypeKey { typeSet := map[searchTypeKey]struct{}{} // if the list is empty, query all types if len(types) == 0 { for t := range searchTypeToRegex { typeSet[t] = struct{}{} } - return typeSet + return slices.Collect(maps.Keys(typeSet)) } // list not empty, check if types are valid for _, t := range types { @@ -365,5 +322,5 @@ func (v *validationError) checkSearchTypes(types []searchTypeKey) map[searchType } typeSet[t] = struct{}{} } - return typeSet + return slices.Collect(maps.Keys(typeSet)) } diff --git a/backend/pkg/api/router.go b/backend/pkg/api/router.go index 0b3d6a819..7e4f3699d 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -5,6 +5,7 @@ import ( "regexp" dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" + "github.com/gobitfly/beaconchain/pkg/api/docs" handlers "github.com/gobitfly/beaconchain/pkg/api/handlers" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/metrics" @@ -20,25 +21,34 @@ type endpoint struct { InternalHander func(w http.ResponseWriter, r *http.Request) } -func NewApiRouter(dataAccessor dataaccess.DataAccessor, cfg *types.Config) *mux.Router { +func NewApiRouter(dataAccessor dataaccess.DataAccessor, dummy dataaccess.DataAccessor, cfg *types.Config) *mux.Router { router := mux.NewRouter() apiRouter := router.PathPrefix("/api").Subrouter() publicRouter := apiRouter.PathPrefix("/v2").Subrouter() + legacyRouter := apiRouter.PathPrefix("/v1").Subrouter() internalRouter := apiRouter.PathPrefix("/i").Subrouter() sessionManager := newSessionManager(cfg) - internalRouter.Use(sessionManager.LoadAndSave) + internalRouter.Use(sessionManager.LoadAndSave, getSlidingSessionExpirationMiddleware(sessionManager)) if !(cfg.Frontend.CsrfInsecure || cfg.Frontend.Debug) { internalRouter.Use(getCsrfProtectionMiddleware(cfg), csrfInjecterMiddleware) } - handlerService := handlers.NewHandlerService(dataAccessor, sessionManager) + handlerService := handlers.NewHandlerService(dataAccessor, dummy, sessionManager, !cfg.Frontend.DisableStatsInserts) // store user id in context, if available - publicRouter.Use(handlers.GetUserIdStoreMiddleware(handlerService.GetUserIdByApiKey)) - internalRouter.Use(handlers.GetUserIdStoreMiddleware(handlerService.GetUserIdBySession)) + publicRouter.Use(handlerService.StoreUserIdByApiKeyMiddleware) + 
internalRouter.Use(handlerService.StoreUserIdBySessionMiddleware) + + if cfg.DeploymentType != "production" { + publicRouter.Use(handlerService.StoreIsMockedFlagMiddleware) + internalRouter.Use(handlerService.StoreIsMockedFlagMiddleware) + } addRoutes(handlerService, publicRouter, internalRouter, cfg) + addLegacyRoutes(handlerService, legacyRouter) + // serve static files + publicRouter.PathPrefix("/docs/").Handler(http.StripPrefix("/api/v2/docs/", http.FileServer(http.FS(docs.Files)))) router.Use(metrics.HttpMiddleware) return router @@ -88,6 +98,8 @@ func addRoutes(hs *handlers.HandlerService, publicRouter, internalRouter *mux.Ro {http.MethodGet, "/healthz", hs.PublicGetHealthz, nil}, {http.MethodGet, "/healthz-loadbalancer", hs.PublicGetHealthzLoadbalancer, nil}, + {http.MethodGet, "/ratelimit-weights", nil, hs.InternalGetRatelimitWeights}, + {http.MethodPost, "/login", nil, hs.InternalPostLogin}, {http.MethodGet, "/mobile/authorize", nil, hs.InternalPostMobileAuthorize}, @@ -118,6 +130,8 @@ func addRoutes(hs *handlers.HandlerService, publicRouter, internalRouter *mux.Ro {http.MethodGet, "/users/me/dashboards", hs.PublicGetUserDashboards, hs.InternalGetUserDashboards}, {http.MethodPut, "/users/me/notifications/settings/paired-devices/{client_id}/token", nil, hs.InternalPostUsersMeNotificationSettingsPairedDevicesToken}, + {http.MethodGet, "/users/me/machine-metrics", hs.PublicGetUserMachineMetrics, hs.InternalGetUserMachineMetrics}, + {http.MethodPost, "/search", nil, hs.InternalPostSearch}, {http.MethodPost, "/account-dashboards", hs.PublicPostAccountDashboards, hs.InternalPostAccountDashboards}, @@ -236,6 +250,11 @@ func addRoutes(hs *handlers.HandlerService, publicRouter, internalRouter *mux.Ro addEndpointsToRouters(endpoints, publicRouter, internalRouter) } +// Legacy routes are available behind the /v1 prefix and guarantee backwards compatibility with the old API +func addLegacyRoutes(hs *handlers.HandlerService, publicRouter *mux.Router) { + publicRouter.HandleFunc("/client/metrics", hs.LegacyPostUserMachineMetrics).Methods(http.MethodPost, http.MethodOptions) +} + func addValidatorDashboardRoutes(hs *handlers.HandlerService, publicRouter, internalRouter *mux.Router, cfg *types.Config) { vdbPath := "/validator-dashboards" publicRouter.HandleFunc(vdbPath, hs.PublicPostValidatorDashboards).Methods(http.MethodPost, http.MethodOptions) @@ -246,7 +265,7 @@ func addValidatorDashboardRoutes(hs *handlers.HandlerService, publicRouter, inte // add middleware to check if user has access to dashboard if !cfg.Frontend.Debug { - publicDashboardRouter.Use(hs.VDBAuthMiddleware, hs.ManageViaApiCheckMiddleware) + publicDashboardRouter.Use(hs.VDBAuthMiddleware, hs.ManageDashboardsViaApiCheckMiddleware) internalDashboardRouter.Use(hs.VDBAuthMiddleware) } @@ -297,8 +316,9 @@ func addValidatorDashboardRoutes(hs *handlers.HandlerService, publicRouter, inte {http.MethodGet, "/{dashboard_id}/total-withdrawals", hs.PublicGetValidatorDashboardTotalWithdrawals, hs.InternalGetValidatorDashboardTotalWithdrawals}, {http.MethodGet, "/{dashboard_id}/rocket-pool", hs.PublicGetValidatorDashboardRocketPool, hs.InternalGetValidatorDashboardRocketPool}, {http.MethodGet, "/{dashboard_id}/total-rocket-pool", hs.PublicGetValidatorDashboardTotalRocketPool, hs.InternalGetValidatorDashboardTotalRocketPool}, - {http.MethodGet, "/{dashboard_id}/rocket-pool/{node_address}", hs.PublicGetValidatorDashboardNodeRocketPool, hs.InternalGetValidatorDashboardNodeRocketPool}, {http.MethodGet, 
"/{dashboard_id}/rocket-pool/{node_address}/minipools", hs.PublicGetValidatorDashboardRocketPoolMinipools, hs.InternalGetValidatorDashboardRocketPoolMinipools}, + {http.MethodGet, "/{dashboard_id}/mobile/widget", nil, hs.InternalGetValidatorDashboardMobileWidget}, + {http.MethodGet, "/{dashboard_id}/mobile/validators", nil, hs.InternalGetValidatorDashboardMobileValidators}, } addEndpointsToRouters(endpoints, publicDashboardRouter, internalDashboardRouter) } @@ -309,38 +329,40 @@ func addNotificationRoutes(hs *handlers.HandlerService, publicRouter, internalRo internalNotificationRouter := internalRouter.PathPrefix(path).Subrouter() if !debug { - publicNotificationRouter.Use(hs.ManageViaApiCheckMiddleware) + publicNotificationRouter.Use(hs.ManageNotificationsViaApiCheckMiddleware) } endpoints := []endpoint{ - {http.MethodGet, "", nil, hs.InternalGetUserNotifications}, - {http.MethodGet, "/dashboards", nil, hs.InternalGetUserNotificationDashboards}, - {http.MethodGet, "/validator-dashboards/{notification_id}", nil, hs.InternalGetUserNotificationsValidatorDashboard}, - {http.MethodGet, "/account-dashboards/{notification_id}", nil, hs.InternalGetUserNotificationsAccountDashboard}, - {http.MethodGet, "/machines", nil, hs.InternalGetUserNotificationMachines}, - {http.MethodGet, "/clients", nil, hs.InternalGetUserNotificationClients}, - {http.MethodGet, "/rocket-pool", nil, hs.InternalGetUserNotificationRocketPool}, - {http.MethodGet, "/networks", nil, hs.InternalGetUserNotificationNetworks}, - {http.MethodGet, "/settings", nil, hs.InternalGetUserNotificationSettings}, - {http.MethodPut, "/settings/general", nil, hs.InternalPutUserNotificationSettingsGeneral}, - {http.MethodPut, "/settings/networks/{network}", nil, hs.InternalPutUserNotificationSettingsNetworks}, - {http.MethodPut, "/settings/paired-devices/{paired_device_id}", nil, hs.InternalPutUserNotificationSettingsPairedDevices}, - {http.MethodDelete, "/settings/paired-devices/{paired_device_id}", nil, hs.InternalDeleteUserNotificationSettingsPairedDevices}, - {http.MethodGet, "/settings/dashboards", nil, hs.InternalGetUserNotificationSettingsDashboards}, - {http.MethodPost, "/test-email", nil, hs.InternalPostUserNotificationsTestEmail}, - {http.MethodPost, "/test-push", nil, hs.InternalPostUserNotificationsTestPush}, - {http.MethodPost, "/test-webhook", nil, hs.InternalPostUserNotificationsTestWebhook}, + {http.MethodGet, "", hs.PublicGetUserNotifications, hs.InternalGetUserNotifications}, + {http.MethodGet, "/dashboards", hs.PublicGetUserNotificationDashboards, hs.InternalGetUserNotificationDashboards}, + {http.MethodGet, "/machines", hs.PublicGetUserNotificationMachines, hs.InternalGetUserNotificationMachines}, + {http.MethodGet, "/clients", hs.PublicGetUserNotificationClients, hs.InternalGetUserNotificationClients}, + {http.MethodGet, "/rocket-pool", hs.PublicGetUserNotificationRocketPool, hs.InternalGetUserNotificationRocketPool}, + {http.MethodGet, "/networks", hs.PublicGetUserNotificationNetworks, hs.InternalGetUserNotificationNetworks}, + {http.MethodGet, "/settings", hs.PublicGetUserNotificationSettings, hs.InternalGetUserNotificationSettings}, + {http.MethodPut, "/settings/general", hs.PublicPutUserNotificationSettingsGeneral, hs.InternalPutUserNotificationSettingsGeneral}, + {http.MethodPut, "/settings/networks/{network}", hs.PublicPutUserNotificationSettingsNetworks, hs.InternalPutUserNotificationSettingsNetworks}, + {http.MethodPut, "/settings/paired-devices/{paired_device_id}", hs.PublicPutUserNotificationSettingsPairedDevices, 
hs.InternalPutUserNotificationSettingsPairedDevices}, + {http.MethodDelete, "/settings/paired-devices/{paired_device_id}", hs.PublicDeleteUserNotificationSettingsPairedDevices, hs.InternalDeleteUserNotificationSettingsPairedDevices}, + {http.MethodPut, "/settings/clients/{client_id}", hs.PublicPutUserNotificationSettingsClient, hs.InternalPutUserNotificationSettingsClient}, + {http.MethodGet, "/settings/dashboards", hs.PublicGetUserNotificationSettingsDashboards, hs.InternalGetUserNotificationSettingsDashboards}, + {http.MethodPost, "/test-email", hs.PublicPostUserNotificationsTestEmail, hs.InternalPostUserNotificationsTestEmail}, + {http.MethodPost, "/test-push", hs.PublicPostUserNotificationsTestPush, hs.InternalPostUserNotificationsTestPush}, + {http.MethodPost, "/test-webhook", hs.PublicPostUserNotificationsTestWebhook, hs.InternalPostUserNotificationsTestWebhook}, } addEndpointsToRouters(endpoints, publicNotificationRouter, internalNotificationRouter) publicDashboardNotificationSettingsRouter := publicNotificationRouter.NewRoute().Subrouter() internalDashboardNotificationSettingsRouter := internalNotificationRouter.NewRoute().Subrouter() + // TODO add adb auth middleware to account dashboard endpoints once they are implemented if !debug { publicDashboardNotificationSettingsRouter.Use(hs.VDBAuthMiddleware) internalDashboardNotificationSettingsRouter.Use(hs.VDBAuthMiddleware) } dashboardSettingsEndpoints := []endpoint{ - {http.MethodPut, "/settings/validator-dashboards/{dashboard_id}/groups/{group_id}", nil, hs.InternalPutUserNotificationSettingsValidatorDashboard}, - {http.MethodPut, "/settings/account-dashboards/{dashboard_id}/groups/{group_id}", nil, hs.InternalPutUserNotificationSettingsAccountDashboard}, + {http.MethodGet, "/validator-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch}", hs.PublicGetUserNotificationsValidatorDashboard, hs.InternalGetUserNotificationsValidatorDashboard}, + {http.MethodGet, "/account-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch}", hs.PublicGetUserNotificationsAccountDashboard, hs.InternalGetUserNotificationsAccountDashboard}, + {http.MethodPut, "/settings/validator-dashboards/{dashboard_id}/groups/{group_id}", hs.PublicPutUserNotificationSettingsValidatorDashboard, hs.InternalPutUserNotificationSettingsValidatorDashboard}, + {http.MethodPut, "/settings/account-dashboards/{dashboard_id}/groups/{group_id}", hs.PublicPutUserNotificationSettingsAccountDashboard, hs.InternalPutUserNotificationSettingsAccountDashboard}, } addEndpointsToRouters(dashboardSettingsEndpoints, publicDashboardNotificationSettingsRouter, internalDashboardNotificationSettingsRouter) } diff --git a/backend/pkg/api/types/common.go b/backend/pkg/api/types/common.go index e658afa80..24e2b9522 100644 --- a/backend/pkg/api/types/common.go +++ b/backend/pkg/api/types/common.go @@ -34,14 +34,16 @@ type PubKey string type Hash string // blocks, txs etc. 
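// Background for the `swaggertype` overrides on LuckItem below: encoding/json
// marshals time.Duration as its underlying int64 nanosecond count, so the
// generated docs must describe the field as an integer, not an object. A
// self-contained demo relying only on standard-library behavior:
//
//	package main
//
//	import (
//		"encoding/json"
//		"fmt"
//		"time"
//	)
//
//	func main() {
//		b, _ := json.Marshal(struct {
//			Average time.Duration `json:"average"`
//		}{Average: 90 * time.Second})
//		fmt.Println(string(b)) // {"average":90000000000}
//	}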
type Address struct { - Hash Hash `json:"hash"` - Ens string `json:"ens,omitempty"` + Hash Hash `json:"hash"` + IsContract bool `json:"is_contract"` + Ens string `json:"ens,omitempty"` + Label string `json:"label,omitempty"` } type LuckItem struct { Percent float64 `json:"percent"` - Expected time.Time `json:"expected"` - Average time.Duration `json:"average"` + Expected time.Time `json:"expected" swaggertype:"string" format:"date-time"` + Average time.Duration `json:"average" swaggertype:"primitive,integer"` } type Luck struct { @@ -139,7 +141,25 @@ type ChartHistorySeconds struct { Weekly uint64 `json:"weekly"` } +type IndexEpoch struct { + Index uint64 `json:"index"` + Epoch uint64 `json:"epoch"` +} + type IndexBlocks struct { Index uint64 `json:"index"` Blocks []uint64 `json:"blocks"` } + +type IndexSlots struct { + Index uint64 `json:"index"` + Slots []uint64 `json:"slots"` +} + +type ValidatorStateCounts struct { + Online uint64 `json:"online"` + Offline uint64 `json:"offline"` + Pending uint64 `json:"pending"` + Exited uint64 `json:"exited"` + Slashed uint64 `json:"slashed"` +} diff --git a/backend/pkg/api/types/dashboard.go b/backend/pkg/api/types/dashboard.go index d3e334722..340a21b01 100644 --- a/backend/pkg/api/types/dashboard.go +++ b/backend/pkg/api/types/dashboard.go @@ -5,14 +5,14 @@ type AccountDashboard struct { Name string `json:"name"` } type ValidatorDashboard struct { - Id uint64 `json:"id"` - Name string `json:"name"` - Network uint64 `json:"network"` - PublicIds []VDBPublicId `json:"public_ids,omitempty"` - IsArchived bool `json:"is_archived"` - ArchivedReason string `json:"archived_reason,omitempty" tstype:"'user' | 'dashboard_limit' | 'validator_limit' | 'group_limit'"` - ValidatorCount uint64 `json:"validator_count"` - GroupCount uint64 `json:"group_count"` + Id uint64 `json:"id" extensions:"x-order=1"` + Name string `json:"name" extensions:"x-order=2"` + Network uint64 `json:"network" extensions:"x-order=3"` + PublicIds []VDBPublicId `json:"public_ids,omitempty" extensions:"x-order=4"` + IsArchived bool `json:"is_archived" extensions:"x-order=5"` + ArchivedReason string `json:"archived_reason,omitempty" tstype:"'user' | 'dashboard_limit' | 'validator_limit' | 'group_limit'" extensions:"x-order=6"` + ValidatorCount uint64 `json:"validator_count" extensions:"x-order=7"` + GroupCount uint64 `json:"group_count" extensions:"x-order=8"` } type UserDashboardsData struct { diff --git a/backend/pkg/api/types/data_access.go b/backend/pkg/api/types/data_access.go index 49d88b926..5a21aa0f9 100644 --- a/backend/pkg/api/types/data_access.go +++ b/backend/pkg/api/types/data_access.go @@ -1,6 +1,7 @@ package types import ( + "database/sql" "time" "github.com/gobitfly/beaconchain/pkg/api/enums" @@ -16,12 +17,20 @@ const DefaultGroupId = 0 const AllGroups = -1 const NetworkAverage = -2 const DefaultGroupName = "default" +const DefaultDashboardName = DefaultGroupName type Sort[T enums.Enum] struct { Column T Desc bool } +type SortColumn struct { + // defaults + Column enums.OrderableSortable + Desc bool + Offset any // nil to indicate null value +} + type VDBIdPrimary int type VDBIdPublic string type VDBIdValidatorSet []VDBValidator @@ -105,6 +114,47 @@ type WithdrawalsCursor struct { Amount uint64 } +type NotificationSettingsCursor struct { + GenericCursor + + IsAccountDashboard bool // if false it's a validator dashboard + DashboardId uint64 + GroupId uint64 +} + +type NotificationMachinesCursor struct { + GenericCursor + + MachineId uint64 + MachineName string + EventType 
string + EventThreshold float64 + Epoch uint64 +} + +type NotificationClientsCursor struct { + GenericCursor + + Client string + Epoch uint64 +} + +type NotificationRocketPoolsCursor struct { + GenericCursor + + NodeAddress []byte + EventType string + Epoch uint64 +} + +type NotificationNetworksCursor struct { + GenericCursor + + Network uint64 + Epoch uint64 + EventType string +} + type UserCredentialInfo struct { Id uint64 `db:"id"` Email string `db:"email"` @@ -116,18 +166,36 @@ type UserCredentialInfo struct { type BlocksCursor struct { GenericCursor - Slot uint64 // basically the same as Block, Epoch, Age; mandatory, used to index - // optional, max one of those (for now) Proposer uint64 - Group uint64 + Slot uint64 // same as Age + Block sql.NullInt64 Status uint64 Reward decimal.Decimal } +type NotificationsDashboardsCursor struct { + GenericCursor + + Epoch uint64 + ChainId uint64 + DashboardName string + DashboardId uint64 + GroupName string + GroupId uint64 +} + type NetworkInfo struct { - ChainId uint64 - Name string + ChainId uint64 + Name string + NotificationsName string +} + +type ClientInfo struct { + Id uint64 + Name string + DbName string + Category string } // ------------------------- @@ -212,7 +280,6 @@ type VDBValidatorSummaryChartRow struct { SyncScheduled float64 `db:"sync_scheduled"` } -// ------------------------- // healthz structs type HealthzResult struct { @@ -232,9 +299,35 @@ type HealthzData struct { // Mobile structs type MobileAppBundleStats struct { - LatestBundleVersion uint64 - BundleUrl string - TargetCount uint64 // coalesce to 0 if column is null - DeliveryCount uint64 - MaxNativeVersion uint64 // the max native version of the whole table for the given environment + LatestBundleVersion uint64 `db:"bundle_version"` + BundleUrl string `db:"bundle_url"` + TargetCount int64 `db:"target_count"` // coalesce to -1 if column is null + DeliveryCount int64 `db:"delivered_count"` + MaxNativeVersion uint64 `db:"max_native_version"` // the max native version of the whole table for the given environment } + +// Notification structs + +type NotificationSettingsDefaultValues struct { + GroupEfficiencyBelowThreshold float64 + MaxCollateralThreshold float64 + MinCollateralThreshold float64 + ERC20TokenTransfersValueThreshold float64 + + MachineStorageUsageThreshold float64 + MachineCpuUsageThreshold float64 + MachineMemoryUsageThreshold float64 + + GasAboveThreshold decimal.Decimal + GasBelowThreshold decimal.Decimal + NetworkParticipationRateThreshold float64 +} + +// ------------------------------ + +type CtxKey string + +const CtxUserIdKey CtxKey = "user_id" +const CtxIsMockedKey CtxKey = "is_mocked" +const CtxMockSeedKey CtxKey = "mock_seed" +const CtxDashboardIdKey CtxKey = "dashboard_id" diff --git a/backend/pkg/api/types/machine_metrics.go b/backend/pkg/api/types/machine_metrics.go new file mode 100644 index 000000000..dd0b05fab --- /dev/null +++ b/backend/pkg/api/types/machine_metrics.go @@ -0,0 +1,11 @@ +package types + +import "github.com/gobitfly/beaconchain/pkg/commons/types" + +type MachineMetricsData struct { + SystemMetrics []*types.MachineMetricSystem `json:"system_metrics" faker:"slice_len=30"` + ValidatorMetrics []*types.MachineMetricValidator `json:"validator_metrics" faker:"slice_len=30"` + NodeMetrics []*types.MachineMetricNode `json:"node_metrics" faker:"slice_len=30"` +} + +type GetUserMachineMetricsResponse ApiDataResponse[MachineMetricsData] diff --git a/backend/pkg/api/types/mobile.go b/backend/pkg/api/types/mobile.go index
7f84a999d..41a5e21d5 100644 --- a/backend/pkg/api/types/mobile.go +++ b/backend/pkg/api/types/mobile.go @@ -1,8 +1,46 @@ package types +import "github.com/shopspring/decimal" + type MobileBundleData struct { BundleUrl string `json:"bundle_url,omitempty"` HasNativeUpdateAvailable bool `json:"has_native_update_available"` } type GetMobileLatestBundleResponse ApiDataResponse[MobileBundleData] + +type MobileWidgetData struct { + ValidatorStateCounts ValidatorStateCounts `json:"validator_state_counts"` + Last24hIncome decimal.Decimal `json:"last_24h_income" faker:"eth"` + Last7dIncome decimal.Decimal `json:"last_7d_income" faker:"eth"` + Last30dApr float64 `json:"last_30d_apr"` + Last30dEfficiency float64 `json:"last_30d_efficiency"` + NetworkEfficiency float64 `json:"network_efficiency"` + RplPrice decimal.Decimal `json:"rpl_price" faker:"eth"` + RplApr float64 `json:"rpl_apr"` +} + +type InternalGetValidatorDashboardMobileWidgetResponse ApiDataResponse[MobileWidgetData] + +type MobileValidatorDashboardValidatorsRocketPool struct { + DepositAmount decimal.Decimal `json:"deposit_amount"` + Commission float64 `json:"commission"` // percentage, 0-1 + Status string `json:"status" tstype:"'Staking' | 'Dissolved' | 'Prelaunch' | 'Initialized' | 'Withdrawable'" faker:"oneof: Staking, Dissolved, Prelaunch, Initialized, Withdrawable"` + PenaltyCount uint64 `json:"penalty_count"` + IsInSmoothingPool bool `json:"is_in_smoothing_pool"` +} +type MobileValidatorDashboardValidatorsTableRow struct { + Index uint64 `json:"index"` + PublicKey PubKey `json:"public_key"` + GroupId uint64 `json:"group_id"` + Balance decimal.Decimal `json:"balance"` + Status string `json:"status" tstype:"'slashed' | 'exited' | 'deposited' | 'pending' | 'slashing_offline' | 'slashing_online' | 'exiting_offline' | 'exiting_online' | 'active_offline' | 'active_online'" faker:"oneof: slashed, exited, deposited, pending, slashing_offline, slashing_online, exiting_offline, exiting_online, active_offline, active_online"` + QueuePosition *uint64 `json:"queue_position,omitempty"` + WithdrawalCredential Hash `json:"withdrawal_credential"` + // additional mobile fields + IsInSyncCommittee bool `json:"is_in_sync_committee"` + Efficiency float64 `json:"efficiency"` + RocketPool *MobileValidatorDashboardValidatorsRocketPool `json:"rocket_pool,omitempty"` +} + +type InternalGetValidatorDashboardMobileValidatorsResponse ApiPagingResponse[MobileValidatorDashboardValidatorsTableRow] diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index 323f6ca00..777cad8b3 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -1,6 +1,9 @@ package types -import "github.com/shopspring/decimal" +import ( + "github.com/lib/pq" + "github.com/shopspring/decimal" +) // ------------------------------------------------------------ // Overview @@ -17,12 +20,11 @@ type NotificationOverviewData struct { Last24hWebhookCount uint64 `json:"last_24h_webhook_count"` // counts are shown in their respective tables - VDBSubscriptionsCount uint64 `json:"vdb_subscriptions_count"` - ADBSubscriptionsCount uint64 `json:"adb_subscriptions_count"` - MachinesSubscriptionCount uint64 `json:"machines_subscription_count"` - ClientsSubscriptionCount uint64 `json:"clients_subscription_count"` - RocketPoolSubscriptionCount uint64 `json:"rocket_pool_subscription_count"` - NetworksSubscriptionCount uint64 `json:"networks_subscription_count"` + VDBSubscriptionsCount uint64 `db:"vdb_subscriptions_count"
json:"vdb_subscriptions_count"` + ADBSubscriptionsCount uint64 `db:"adb_subscriptions_count" json:"adb_subscriptions_count"` + MachinesSubscriptionCount uint64 `db:"machines_subscription_count" json:"machines_subscription_count"` + ClientsSubscriptionCount uint64 `db:"clients_subscription_count" json:"clients_subscription_count"` + NetworksSubscriptionCount uint64 `db:"networks_subscription_count" json:"networks_subscription_count"` } type InternalGetUserNotificationsResponse ApiDataResponse[NotificationOverviewData] @@ -30,14 +32,15 @@ type InternalGetUserNotificationsResponse ApiDataResponse[NotificationOverviewDa // ------------------------------------------------------------ // Dashboards Table type NotificationDashboardsTableRow struct { - IsAccountDashboard bool `json:"is_account_dashboard"` // if false it's a validator dashboard - ChainId uint64 `json:"chain_id"` - Timestamp int64 `json:"timestamp"` - DashboardId uint64 `json:"dashboard_id"` - GroupName string `json:"group_name"` - NotificationId uint64 `json:"notification_id"` // may be string? db schema is not defined afaik - EntityCount uint64 `json:"entity_count"` - EventTypes []string `json:"event_types" tstype:"('validator_online' | 'validator_offline' | 'group_online' | 'group_offline' | 'attestation_missed' | 'proposal_success' | 'proposal_missed' | 'proposal_upcoming' | 'sync' | 'withdrawal' | 'got_slashed' | 'has_slashed' | 'incoming_tx' | 'outgoing_tx' | 'transfer_erc20' | 'transfer_erc721' | 'transfer_erc1155')[]" faker:"oneof: validator_offline, group_offline, attestation_missed, proposal_success, proposal_missed, proposal_upcoming, sync, withdrawal, slashed_own, incoming_tx, outgoing_tx, transfer_erc20, transfer_erc721, transfer_erc1155"` + IsAccountDashboard bool `db:"is_account_dashboard" json:"is_account_dashboard"` // if false it's a validator dashboard + ChainId uint64 `db:"chain_id" json:"chain_id"` + Epoch uint64 `db:"epoch" json:"epoch"` + DashboardId uint64 `db:"dashboard_id" json:"dashboard_id"` + DashboardName string `db:"dashboard_name" json:"dashboard_name"` + GroupId uint64 `db:"group_id" json:"group_id"` + GroupName string `db:"group_name" json:"group_name"` + EntityCount uint64 `db:"entity_count" json:"entity_count"` + EventTypes pq.StringArray `db:"event_types" json:"event_types" tstype:"('validator_online' | 'validator_offline' | 'group_efficiency_below' | 'attestation_missed' | 'proposal_success' | 'proposal_missed' | 'proposal_upcoming' | 'max_collateral' | 'min_collateral' | 'sync' | 'withdrawal' | 'validator_got_slashed' | 'validator_has_slashed' | 'incoming_tx' | 'outgoing_tx' | 'transfer_erc20' | 'transfer_erc721' | 'transfer_erc1155')[]" faker:"slice_len=2, oneof: validator_online, validator_offline, group_efficiency_below, attestation_missed, proposal_success, proposal_missed, proposal_upcoming, max_collateral, min_collateral, sync, withdrawal, validator_got_slashed, validator_has_slashed, incoming_tx, outgoing_tx, transfer_erc20, transfer_erc721, transfer_erc1155"` } type InternalGetUserNotificationDashboardsResponse ApiPagingResponse[NotificationDashboardsTableRow] @@ -45,35 +48,33 @@ type InternalGetUserNotificationDashboardsResponse ApiPagingResponse[Notificatio // ------------------------------------------------------------ // Validator Dashboard Notification Detail -type NotificationEventGroup struct { - GroupName string `json:"group_name"` - DashboardID uint64 `json:"dashboard_id"` -} -type NotificationEventGroupBackOnline struct { - GroupName string `json:"group_name"` - DashboardID 
uint64 `json:"dashboard_id"` - EpochCount uint64 `json:"epoch_count"` -} - type NotificationEventValidatorBackOnline struct { Index uint64 `json:"index"` EpochCount uint64 `json:"epoch_count"` } +type NotificationEventWithdrawal struct { + Index uint64 `json:"index"` + Amount decimal.Decimal `json:"amount"` + Address Address `json:"address"` +} + type NotificationValidatorDashboardDetail struct { - ValidatorOffline []uint64 `json:"validator_offline"` // validator indices - GroupOffline []NotificationEventGroup `json:"group_offline"` - ProposalMissed []IndexBlocks `json:"proposal_missed"` + DashboardName string `db:"dashboard_name" json:"dashboard_name"` + GroupName string `db:"group_name" json:"group_name"` + ValidatorOffline []uint64 `json:"validator_offline"` // validator indices + GroupEfficiencyBelow float64 `json:"group_efficiency_below,omitempty"` // fill with the `group_efficiency_below` threshold if event is present + ProposalMissed []IndexSlots `json:"proposal_missed"` ProposalDone []IndexBlocks `json:"proposal_done"` - UpcomingProposals []uint64 `json:"upcoming_proposals"` // slot numbers + UpcomingProposals []IndexSlots `json:"upcoming_proposals"` Slashed []uint64 `json:"slashed"` // validator indices SyncCommittee []uint64 `json:"sync_committee"` // validator indices - AttestationMissed []IndexBlocks `json:"attestation_missed"` - Withdrawal []IndexBlocks `json:"withdrawal"` - ValidatorOfflineReminder []uint64 `json:"validator_offline_reminder"` // validator indices - GroupOfflineReminder []NotificationEventGroup `json:"group_offline_reminder"` + AttestationMissed []IndexEpoch `json:"attestation_missed"` // index (epoch) + Withdrawal []NotificationEventWithdrawal `json:"withdrawal"` + ValidatorOfflineReminder []uint64 `json:"validator_offline_reminder"` // validator indices; TODO not filled yet ValidatorBackOnline []NotificationEventValidatorBackOnline `json:"validator_back_online"` - GroupBackOnline []NotificationEventGroupBackOnline `json:"group_back_online"` + MinimumCollateralReached []Address `json:"min_collateral_reached"` // node addresses + MaximumCollateralReached []Address `json:"max_collateral_reached"` // node addresses } type InternalGetUserNotificationsValidatorDashboardResponse ApiDataResponse[NotificationValidatorDashboardDetail] @@ -99,7 +100,7 @@ type InternalGetUserNotificationsAccountDashboardResponse ApiDataResponse[Notifi // Machines Table type NotificationMachinesTableRow struct { MachineName string `json:"machine_name"` - Threshold float64 `json:"threshold" faker:"boundary_start=0, boundary_end=1"` + Threshold float64 `json:"threshold,omitempty" faker:"boundary_start=0, boundary_end=1"` EventType string `json:"event_type" tstype:"'offline' | 'storage' | 'cpu' | 'memory'" faker:"oneof: offline, storage, cpu, memory"` Timestamp int64 `json:"timestamp"` } @@ -111,6 +112,7 @@ type InternalGetUserNotificationMachinesResponse ApiPagingResponse[NotificationM type NotificationClientsTableRow struct { ClientName string `json:"client_name"` Version string `json:"version"` + Url string `json:"url"` Timestamp int64 `json:"timestamp"` } @@ -119,10 +121,10 @@ type InternalGetUserNotificationClientsResponse ApiPagingResponse[NotificationCl // ------------------------------------------------------------ // Rocket Pool Table type NotificationRocketPoolTableRow struct { - Timestamp int64 `json:"timestamp"` - EventType string `json:"event_type" tstype:"'reward_round' | 'collateral_max' | 'collateral_min'" faker:"oneof: reward_round, collateral_max, collateral_min"` - 
AlertValue float64 `json:"alert_value,omitempty"` // only for some notification types, e.g. max collateral - NodeAddress Hash `json:"node_address"` + Timestamp int64 `json:"timestamp"` + EventType string `json:"event_type" tstype:"'reward_round' | 'collateral_max' | 'collateral_min'" faker:"oneof: reward_round, collateral_max, collateral_min"` + Threshold float64 `json:"threshold,omitempty"` // only for some notification types, e.g. max collateral + Node Address `json:"node"` } type InternalGetUserNotificationRocketPoolResponse ApiPagingResponse[NotificationRocketPoolTableRow] @@ -130,10 +132,10 @@ type InternalGetUserNotificationRocketPoolResponse ApiPagingResponse[Notificatio // ------------------------------------------------------------ // Networks Table type NotificationNetworksTableRow struct { - ChainId uint64 `json:"chain_id"` - Timestamp int64 `json:"timestamp"` - EventType string `json:"event_type" tstype:"'gas_above' | 'gas_below' | 'participation_rate'" faker:"oneof: gas_above, gas_below, participation_rate"` - AlertValue decimal.Decimal `json:"alert_value"` // wei string for gas alerts, otherwise percentage (0-1) for participation rate + ChainId uint64 `json:"chain_id"` + Timestamp int64 `json:"timestamp"` + EventType string `json:"event_type" tstype:"'new_reward_round' | 'gas_above' | 'gas_below' | 'participation_rate'" faker:"oneof: new_reward_round, gas_above, gas_below, participation_rate"` + Threshold decimal.Decimal `json:"threshold,omitempty"` // participation rate threshold should also be passed as decimal string } type InternalGetUserNotificationNetworksResponse ApiPagingResponse[NotificationNetworksTableRow] @@ -141,9 +143,13 @@ type InternalGetUserNotificationNetworksResponse ApiPagingResponse[NotificationN // ------------------------------------------------------------ // Notification Settings type NotificationSettingsNetwork struct { - GasAboveThreshold decimal.Decimal `json:"gas_above_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 is disabled - GasBelowThreshold decimal.Decimal `json:"gas_below_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 is disabled - ParticipationRateThreshold float64 `json:"participation_rate_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 is disabled + IsGasAboveSubscribed bool `json:"is_gas_above_subscribed"` + GasAboveThreshold decimal.Decimal `json:"gas_above_threshold" faker:"eth"` + IsGasBelowSubscribed bool `json:"is_gas_below_subscribed"` + GasBelowThreshold decimal.Decimal `json:"gas_below_threshold" faker:"eth"` + IsParticipationRateSubscribed bool `json:"is_participation_rate_subscribed"` + ParticipationRateThreshold float64 `json:"participation_rate_threshold" faker:"boundary_start=0, boundary_end=1"` + IsNewRewardRoundSubscribed bool `json:"is_new_reward_round_subscribed"` } type NotificationNetwork struct { ChainId uint64 `json:"chain_id"` @@ -152,49 +158,63 @@ type NotificationNetwork struct { type InternalPutUserNotificationSettingsNetworksResponse ApiDataResponse[NotificationNetwork] type NotificationPairedDevice struct { - Id string `json:"id"` + Id uint64 `json:"id"` PairedTimestamp int64 `json:"paired_timestamp"` Name string `json:"name,omitempty"` IsNotificationsEnabled bool `json:"is_notifications_enabled"` } type InternalPutUserNotificationSettingsPairedDevicesResponse ApiDataResponse[NotificationPairedDevice] +type NotificationSettingsClient struct { + Id uint64 `json:"id"` + Name string `json:"name"` + Category string `json:"category" tstype:"'execution_layer' | 'consensus_layer' | 
'other'" faker:"oneof: execution_layer, consensus_layer, other"` + IsSubscribed bool `json:"is_subscribed"` +} + +type InternalPutUserNotificationSettingsClientResponse ApiDataResponse[NotificationSettingsClient] + type NotificationSettingsGeneral struct { DoNotDisturbTimestamp int64 `json:"do_not_disturb_timestamp"` // notifications are disabled until this timestamp IsEmailNotificationsEnabled bool `json:"is_email_notifications_enabled"` IsPushNotificationsEnabled bool `json:"is_push_notifications_enabled"` - IsMachineOfflineSubscribed bool `json:"is_machine_offline_subscribed"` - MachineStorageUsageThreshold float64 `json:"machine_storage_usage_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 means disabled - MachineCpuUsageThreshold float64 `json:"machine_cpu_usage_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 means disabled - MachineMemoryUsageThreshold float64 `json:"machine_memory_usage_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 means disabled - - SubscribedClients []string `json:"subscribed_clients"` - IsRocketPoolNewRewardRoundSubscribed bool `json:"is_rocket_pool_new_reward_round_subscribed"` - RocketPoolMaxCollateralThreshold float64 `json:"rocket_pool_max_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 means disabled - RocketPoolMinCollateralThreshold float64 `json:"rocket_pool_min_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 means disabled + IsMachineOfflineSubscribed bool `json:"is_machine_offline_subscribed"` + IsMachineStorageUsageSubscribed bool `json:"is_machine_storage_usage_subscribed"` + MachineStorageUsageThreshold float64 `json:"machine_storage_usage_threshold" faker:"boundary_start=0, boundary_end=1"` + IsMachineCpuUsageSubscribed bool `json:"is_machine_cpu_usage_subscribed"` + MachineCpuUsageThreshold float64 `json:"machine_cpu_usage_threshold" faker:"boundary_start=0, boundary_end=1"` + IsMachineMemoryUsageSubscribed bool `json:"is_machine_memory_usage_subscribed"` + MachineMemoryUsageThreshold float64 `json:"machine_memory_usage_threshold" faker:"boundary_start=0, boundary_end=1"` } type InternalPutUserNotificationSettingsGeneralResponse ApiDataResponse[NotificationSettingsGeneral] type NotificationSettings struct { - GeneralSettings NotificationSettingsGeneral `json:"general_settings"` - Networks []NotificationNetwork `json:"networks"` - PairedDevices []NotificationPairedDevice `json:"paired_devices"` + GeneralSettings NotificationSettingsGeneral `json:"general_settings"` + HasMachines bool `json:"has_machines"` + Networks []NotificationNetwork `json:"networks"` + PairedDevices []NotificationPairedDevice `json:"paired_devices"` + Clients []NotificationSettingsClient `json:"clients" faker:"slice_len=10"` } type InternalGetUserNotificationSettingsResponse ApiDataResponse[NotificationSettings] type NotificationSettingsValidatorDashboard struct { WebhookUrl string `json:"webhook_url" faker:"url"` IsWebhookDiscordEnabled bool `json:"is_webhook_discord_enabled"` - IsRealTimeModeEnabled bool `json:"is_real_time_mode_enabled"` IsValidatorOfflineSubscribed bool `json:"is_validator_offline_subscribed"` - GroupOfflineThreshold float64 `json:"group_offline_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 is disabled + IsGroupEfficiencyBelowSubscribed bool `json:"is_group_efficiency_below_subscribed"` + GroupEfficiencyBelowThreshold float64 `json:"group_efficiency_below_threshold" faker:"boundary_start=0, boundary_end=1"` IsAttestationsMissedSubscribed bool 
`json:"is_attestations_missed_subscribed"` IsBlockProposalSubscribed bool `json:"is_block_proposal_subscribed"` IsUpcomingBlockProposalSubscribed bool `json:"is_upcoming_block_proposal_subscribed"` IsSyncSubscribed bool `json:"is_sync_subscribed"` IsWithdrawalProcessedSubscribed bool `json:"is_withdrawal_processed_subscribed"` IsSlashedSubscribed bool `json:"is_slashed_subscribed"` + + IsMaxCollateralSubscribed bool `json:"is_max_collateral_subscribed"` + MaxCollateralThreshold float64 `json:"max_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` + IsMinCollateralSubscribed bool `json:"is_min_collateral_subscribed"` + MinCollateralThreshold float64 `json:"min_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` } type InternalPutUserNotificationSettingsValidatorDashboardResponse ApiDataResponse[NotificationSettingsValidatorDashboard] @@ -208,7 +228,7 @@ type NotificationSettingsAccountDashboard struct { IsIncomingTransactionsSubscribed bool `json:"is_incoming_transactions_subscribed"` IsOutgoingTransactionsSubscribed bool `json:"is_outgoing_transactions_subscribed"` IsERC20TokenTransfersSubscribed bool `json:"is_erc20_token_transfers_subscribed"` - ERC20TokenTransfersValueThreshold float64 `json:"erc20_token_transfers_value_threshold" faker:"boundary_start=0, boundary_end=1000000"` // 0 does not disable, is_erc20_token_transfers_subscribed determines if it's enabled + ERC20TokenTransfersValueThreshold float64 `json:"erc20_token_transfers_value_threshold" faker:"boundary_start=0, boundary_end=1000000"` IsERC721TokenTransfersSubscribed bool `json:"is_erc721_token_transfers_subscribed"` IsERC1155TokenTransfersSubscribed bool `json:"is_erc1155_token_transfers_subscribed"` } @@ -217,9 +237,10 @@ type InternalPutUserNotificationSettingsAccountDashboardResponse ApiDataResponse type NotificationSettingsDashboardsTableRow struct { IsAccountDashboard bool `json:"is_account_dashboard"` // if false it's a validator dashboard DashboardId uint64 `json:"dashboard_id"` + DashboardName string `json:"dashboard_name"` GroupId uint64 `json:"group_id"` GroupName string `json:"group_name"` - // if it's a validator dashboard, SubscribedEvents is NotificationSettingsAccountDashboard, otherwise NotificationSettingsValidatorDashboard + // if it's an account dashboard, Settings is NotificationSettingsAccountDashboard, otherwise NotificationSettingsValidatorDashboard Settings interface{} `json:"settings" tstype:"NotificationSettingsAccountDashboard | NotificationSettingsValidatorDashboard" faker:"-"` ChainIds []uint64 `json:"chain_ids" faker:"chain_ids"` } diff --git a/backend/pkg/api/types/ratelimit.go b/backend/pkg/api/types/ratelimit.go new file mode 100644 index 000000000..6a1155096 --- /dev/null +++ b/backend/pkg/api/types/ratelimit.go @@ -0,0 +1,10 @@ +package types + +type ApiWeightItem struct { + Bucket string `db:"bucket"` + Endpoint string `db:"endpoint"` + Method string `db:"method"` + Weight int `db:"weight"` +} + +type InternalGetRatelimitWeightsResponse ApiDataResponse[[]ApiWeightItem] diff --git a/backend/pkg/api/types/rocketpool.go b/backend/pkg/api/types/rocketpool.go new file mode 100644 index 000000000..1d173bce8 --- /dev/null +++ b/backend/pkg/api/types/rocketpool.go @@ -0,0 +1,10 @@ +package types + +import "github.com/shopspring/decimal" + +type RPNetworkStats struct { + ClaimIntervalHours float64 `db:"claim_interval_hours"` + NodeOperatorRewards decimal.Decimal `db:"node_operator_rewards"` + EffectiveRPLStaked decimal.Decimal `db:"effective_rpl_staked"` + RPLPrice
decimal.Decimal `db:"rpl_price"` +} diff --git a/backend/pkg/api/types/user.go b/backend/pkg/api/types/user.go index 177159ad7..8e4a30dc7 100644 --- a/backend/pkg/api/types/user.go +++ b/backend/pkg/api/types/user.go @@ -1,6 +1,7 @@ package types const UserGroupAdmin = "ADMIN" +const UserGroupDev = "DEV" type UserInfo struct { Id uint64 `json:"id"` @@ -116,23 +117,24 @@ type ExtraDashboardValidatorsPremiumAddon struct { } type PremiumPerks struct { - AdFree bool `json:"ad_free"` // note that this is somhow redunant, since there is already ApiPerks.NoAds - ValidatorDashboards uint64 `json:"validator_dashboards"` - ValidatorsPerDashboard uint64 `json:"validators_per_dashboard"` - ValidatorGroupsPerDashboard uint64 `json:"validator_groups_per_dashboard"` - ShareCustomDashboards bool `json:"share_custom_dashboards"` - ManageDashboardViaApi bool `json:"manage_dashboard_via_api"` - BulkAdding bool `json:"bulk_adding"` - ChartHistorySeconds ChartHistorySeconds `json:"chart_history_seconds"` - EmailNotificationsPerDay uint64 `json:"email_notifications_per_day"` - ConfigureNotificationsViaApi bool `json:"configure_notifications_via_api"` - ValidatorGroupNotifications uint64 `json:"validator_group_notifications"` - WebhookEndpoints uint64 `json:"webhook_endpoints"` - MobileAppCustomThemes bool `json:"mobile_app_custom_themes"` - MobileAppWidget bool `json:"mobile_app_widget"` - MonitorMachines uint64 `json:"monitor_machines"` - MachineMonitoringHistorySeconds uint64 `json:"machine_monitoring_history_seconds"` - CustomMachineAlerts bool `json:"custom_machine_alerts"` + AdFree bool `json:"ad_free"` // note that this is somewhat redundant, since there is already ApiPerks.NoAds + ValidatorDashboards uint64 `json:"validator_dashboards"` + ValidatorsPerDashboard uint64 `json:"validators_per_dashboard"` + ValidatorGroupsPerDashboard uint64 `json:"validator_groups_per_dashboard"` + ShareCustomDashboards bool `json:"share_custom_dashboards"` + ManageDashboardViaApi bool `json:"manage_dashboard_via_api"` + BulkAdding bool `json:"bulk_adding"` + ChartHistorySeconds ChartHistorySeconds `json:"chart_history_seconds"` + EmailNotificationsPerDay uint64 `json:"email_notifications_per_day"` + ConfigureNotificationsViaApi bool `json:"configure_notifications_via_api"` + ValidatorGroupNotifications uint64 `json:"validator_group_notifications"` + WebhookEndpoints uint64 `json:"webhook_endpoints"` + MobileAppCustomThemes bool `json:"mobile_app_custom_themes"` + MobileAppWidget bool `json:"mobile_app_widget"` + MonitorMachines uint64 `json:"monitor_machines"` + MachineMonitoringHistorySeconds uint64 `json:"machine_monitoring_history_seconds"` + NotificationsMachineCustomThreshold bool `json:"notifications_machine_custom_threshold"` + NotificationsValidatorDashboardGroupEfficiency bool `json:"notifications_validator_dashboard_group_efficiency"` } // TODO @patrick post-beta StripeCreateCheckoutSession and StripeCustomerPortal are currently served from v1 (loadbalanced), Once V1 is not affected by this anymore, consider wrapping this with ApiDataResponse diff --git a/backend/pkg/api/types/validator_dashboard.go b/backend/pkg/api/types/validator_dashboard.go index 39d5e5d0d..d8de98b71 100644 --- a/backend/pkg/api/types/validator_dashboard.go +++ b/backend/pkg/api/types/validator_dashboard.go @@ -6,13 +6,6 @@ import ( // ------------------------------------------------------------ // Overview -type VDBOverviewValidators struct { - Online uint64 `json:"online"` - Offline uint64 `json:"offline"` - Pending uint64 `json:"pending"` -
Exited uint64 `json:"exited"` - Slashed uint64 `json:"slashed"` -} type VDBOverviewGroup struct { Id uint64 `json:"id"` @@ -27,10 +20,10 @@ type VDBOverviewBalances struct { } type VDBOverviewData struct { - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty" extensions:"x-order=1"` Network uint64 `json:"network"` Groups []VDBOverviewGroup `json:"groups"` - Validators VDBOverviewValidators `json:"validators"` + Validators ValidatorStateCounts `json:"validators"` Efficiency PeriodicValues[float64] `json:"efficiency"` Rewards PeriodicValues[ClElValue[decimal.Decimal]] `json:"rewards"` Apr PeriodicValues[ClElValue[float64]] `json:"apr"` @@ -60,7 +53,7 @@ type VDBSummaryValidators struct { } type VDBSummaryTableRow struct { - GroupId int64 `json:"group_id"` + GroupId int64 `json:"group_id" extensions:"x-order=1"` Status VDBSummaryStatus `json:"status"` Validators VDBSummaryValidators `json:"validators"` Efficiency float64 `json:"efficiency"` @@ -116,7 +109,7 @@ type GetValidatorDashboardSummaryChartResponse ApiDataResponse[ChartData[int, fl // ------------------------------------------------------------ // Summary Validators type VDBSummaryValidator struct { - Index uint64 `json:"index"` + Index uint64 `json:"index" extensions:"x-order=1"` DutyObjects []uint64 `json:"duty_objects,omitempty"` } type VDBSummaryValidatorsData struct { @@ -169,7 +162,7 @@ type GetValidatorDashboardRewardsChartResponse ApiDataResponse[ChartData[int, de // Duties Modal type VDBEpochDutiesTableRow struct { - Validator uint64 `json:"validator"` + Validator uint64 `json:"validator" extensions:"x-order=1"` Duties ValidatorHistoryDuties `json:"duties"` } type GetValidatorDashboardDutiesResponse ApiPagingResponse[VDBEpochDutiesTableRow] @@ -177,12 +170,12 @@ type GetValidatorDashboardDutiesResponse ApiPagingResponse[VDBEpochDutiesTableRo // ------------------------------------------------------------ // Blocks Tab type VDBBlocksTableRow struct { - Proposer uint64 `json:"proposer"` - GroupId uint64 `json:"group_id"` - Epoch uint64 `json:"epoch"` - Slot uint64 `json:"slot"` + Proposer uint64 `json:"proposer" extensions:"x-order=1"` + GroupId uint64 `json:"group_id" extensions:"x-order=2"` + Epoch uint64 `json:"epoch" extensions:"x-order=3"` + Slot uint64 `json:"slot" extensions:"x-order=4"` + Block *uint64 `json:"block,omitempty" extensions:"x-order=5"` Status string `json:"status" tstype:"'success' | 'missed' | 'orphaned' | 'scheduled'" faker:"oneof: success, missed, orphaned, scheduled"` - Block *uint64 `json:"block,omitempty"` RewardRecipient *Address `json:"reward_recipient,omitempty"` Reward *ClElValue[decimal.Decimal] `json:"reward,omitempty"` Graffiti *string `json:"graffiti,omitempty"` @@ -198,22 +191,22 @@ type VDBHeatmapEvents struct { Sync bool `json:"sync"` } type VDBHeatmapCell struct { - X int64 `json:"x"` // Timestamp - Y uint64 `json:"y"` // Group ID + X int64 `json:"x" extensions:"x-order=1"` // Timestamp + Y uint64 `json:"y" extensions:"x-order=2"` // Group ID - Value float64 `json:"value"` // Attestaton Rewards + Value float64 `json:"value" extensions:"x-order=3"` // Attestation Rewards Events *VDBHeatmapEvents `json:"events,omitempty"` } type VDBHeatmap struct { - Timestamps []int64 `json:"timestamps"` // X-Axis Categories (unix timestamp) - GroupIds []uint64 `json:"group_ids"` // Y-Axis Categories - Data []VDBHeatmapCell `json:"data"` + Timestamps []int64 `json:"timestamps" extensions:"x-order=1"` // X-Axis Categories (unix timestamp) + GroupIds []uint64 `json:"group_ids"
extensions:"x-order=2"` // Y-Axis Categories + Data []VDBHeatmapCell `json:"data" extensions:"x-order=3"` Aggregation string `json:"aggregation" tstype:"'epoch' | 'hourly' | 'daily' | 'weekly'" faker:"oneof: epoch, hourly, daily, weekly"` } type GetValidatorDashboardHeatmapResponse ApiDataResponse[VDBHeatmap] type VDBHeatmapTooltipData struct { - Timestamp int64 `json:"timestamp"` + Timestamp int64 `json:"timestamp" extensions:"x-order=1"` Proposers StatusCount `json:"proposers"` Syncs uint64 `json:"syncs"` @@ -290,7 +283,7 @@ type GetValidatorDashboardTotalWithdrawalsResponse ApiDataResponse[VDBTotalWithd // ------------------------------------------------------------ // Rocket Pool Tab type VDBRocketPoolTableRow struct { - Node Address `json:"node"` + Node Address `json:"node" extensions:"x-order=1"` Staked struct { Eth decimal.Decimal `json:"eth"` Rpl decimal.Decimal `json:"rpl"` @@ -315,12 +308,7 @@ type VDBRocketPoolTableRow struct { Claimed decimal.Decimal `json:"claimed"` Unclaimed decimal.Decimal `json:"unclaimed"` } `json:"smoothing_pool"` -} -type GetValidatorDashboardRocketPoolResponse ApiPagingResponse[VDBRocketPoolTableRow] - -type GetValidatorDashboardTotalRocketPoolResponse ApiDataResponse[VDBRocketPoolTableRow] -type VDBNodeRocketPoolData struct { Timezone string `json:"timezone"` RefundBalance decimal.Decimal `json:"refund_balance"` DepositCredit decimal.Decimal `json:"deposit_credit"` @@ -329,8 +317,9 @@ type VDBNodeRocketPoolData struct { Max decimal.Decimal `json:"max"` } `json:"rpl_stake"` } +type GetValidatorDashboardRocketPoolResponse ApiPagingResponse[VDBRocketPoolTableRow] -type GetValidatorDashboardNodeRocketPoolResponse ApiDataResponse[VDBNodeRocketPoolData] +type GetValidatorDashboardTotalRocketPoolResponse ApiDataResponse[VDBRocketPoolTableRow] type VDBRocketPoolMinipoolsTableRow struct { Node Address `json:"node"` diff --git a/backend/pkg/blobindexer/blobindexer.go b/backend/pkg/blobindexer/blobindexer.go new file mode 100644 index 000000000..a690fea16 --- /dev/null +++ b/backend/pkg/blobindexer/blobindexer.go @@ -0,0 +1,470 @@ +package blobindexer + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "sync" + "time" + + "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" + "github.com/gobitfly/beaconchain/pkg/commons/services" + "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/gobitfly/beaconchain/pkg/commons/version" + "github.com/gobitfly/beaconchain/pkg/consapi" + "go.uber.org/atomic" + + "github.com/gobitfly/beaconchain/pkg/consapi/network" + constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" + + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" + lru "github.com/hashicorp/golang-lru/v2" + "golang.org/x/sync/errgroup" +) + +var enableCheckingBeforePutting = false +var waitForOtherBlobIndexerDuration = time.Second * 60 + +type BlobIndexer struct { + S3Client *s3.Client + running bool + runningMu *sync.Mutex + clEndpoint string + cl consapi.Client + id string + networkID string + writtenBlobsCache *lru.Cache[string, bool] +} + +func NewBlobIndexer() (*BlobIndexer, error) { + initDB() + cfg, err := config.LoadDefaultConfig(context.TODO(), + 
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( + utils.Config.BlobIndexer.S3.AccessKeyId, + utils.Config.BlobIndexer.S3.AccessKeySecret, + "", + )), + config.WithRegion("auto"), + ) + if err != nil { + return nil, err + } + s3Client := s3.NewFromConfig(cfg, func(o *s3.Options) { + o.UsePathStyle = true + o.BaseEndpoint = aws.String(utils.Config.BlobIndexer.S3.Endpoint) + }) + + writtenBlobsCache, err := lru.New[string, bool](1000) + if err != nil { + return nil, err + } + + id := utils.GetUUID() + bi := &BlobIndexer{ + S3Client: s3Client, + runningMu: &sync.Mutex{}, + clEndpoint: "http://" + utils.Config.Indexer.Node.Host + ":" + utils.Config.Indexer.Node.Port, + cl: consapi.NewClient("http://" + utils.Config.Indexer.Node.Host + ":" + utils.Config.Indexer.Node.Port), + id: id, + writtenBlobsCache: writtenBlobsCache, + } + return bi, nil +} + +func initDB() { + if utils.Config.BlobIndexer.DisableStatusReports { + return + } + if db.WriterDb != nil && db.ReaderDb != nil { + return + } + db.WriterDb, db.ReaderDb = db.MustInitDB(&types.DatabaseConfig{ + Username: utils.Config.WriterDatabase.Username, + Password: utils.Config.WriterDatabase.Password, + Name: utils.Config.WriterDatabase.Name, + Host: utils.Config.WriterDatabase.Host, + Port: utils.Config.WriterDatabase.Port, + MaxOpenConns: utils.Config.WriterDatabase.MaxOpenConns, + MaxIdleConns: utils.Config.WriterDatabase.MaxIdleConns, + SSL: utils.Config.WriterDatabase.SSL, + }, &types.DatabaseConfig{ + Username: utils.Config.ReaderDatabase.Username, + Password: utils.Config.ReaderDatabase.Password, + Name: utils.Config.ReaderDatabase.Name, + Host: utils.Config.ReaderDatabase.Host, + Port: utils.Config.ReaderDatabase.Port, + MaxOpenConns: utils.Config.ReaderDatabase.MaxOpenConns, + MaxIdleConns: utils.Config.ReaderDatabase.MaxIdleConns, + SSL: utils.Config.ReaderDatabase.SSL, + }, "pgx", "postgres") +} + +func (bi *BlobIndexer) Start() { + bi.runningMu.Lock() + if bi.running { + bi.runningMu.Unlock() + return + } + bi.running = true + bi.runningMu.Unlock() + + log.InfoWithFields(log.Fields{"version": version.Version, "clEndpoint": bi.clEndpoint, "s3Endpoint": utils.Config.BlobIndexer.S3.Endpoint, "id": bi.id}, "starting blobindexer") + for { + err := bi.index() + if err != nil { + log.Error(err, "failed indexing blobs", 0) + } + time.Sleep(time.Second * 10) + } +} + +func (bi *BlobIndexer) index() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + headHeader := &constypes.StandardBeaconHeaderResponse{} + finalizedHeader := &constypes.StandardBeaconHeaderResponse{} + spec := &constypes.StandardSpecResponse{} + + g, gCtx := errgroup.WithContext(ctx) + g.SetLimit(3) + g.Go(func() error { + var err error + spec, err = bi.cl.GetSpec() + if err != nil { + return fmt.Errorf("error bi.cl.GetSpec: %w", err) + } + return nil + }) + g.Go(func() error { + var err error + headHeader, err = bi.cl.GetBlockHeader("head") + if err != nil { + return fmt.Errorf("error bi.cl.GetBlockHeader(head): %w", err) + } + return nil + }) + g.Go(func() error { + var err error + finalizedHeader, err = bi.cl.GetBlockHeader("finalized") + if err != nil { + return fmt.Errorf("error bi.cl.GetBlockHeader(finalized): %w", err) + } + return nil + }) + err := g.Wait() + if err != nil { + return err + } + + if spec.Data.DenebForkEpoch == nil { + return fmt.Errorf("DENEB_FORK_EPOCH not set in spec") + } + if spec.Data.MinEpochsForBlobSidecarsRequests == nil { + return 
fmt.Errorf("MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS not set in spec") + } + + nodeDepositNetworkId := uint64(spec.Data.DepositNetworkID) + if utils.Config.Chain.ClConfig.DepositNetworkID != nodeDepositNetworkId { + return fmt.Errorf("config.DepositNetworkId != node.DepositNetworkId: %v != %v", utils.Config.Chain.ClConfig.DepositNetworkID, nodeDepositNetworkId) + } + bi.networkID = fmt.Sprintf("%d", nodeDepositNetworkId) + + status, err := bi.GetIndexerStatus() + if err != nil { + return fmt.Errorf("error bi.GetIndexerStatus: %w", err) + } + + // skip if another blobIndexer is already indexing - it is ok if multiple blobIndexers are indexing the same finalized slot, this is just best effort to avoid duplicate work + if status.CurrentBlobIndexerId != bi.id && status.LastUpdate.After(time.Now().Add(-waitForOtherBlobIndexerDuration)) { + log.InfoWithFields(log.Fields{"lastIndexedFinalizedSlot": status.LastIndexedFinalizedSlot, "currentBlobIndexerId": status.CurrentBlobIndexerId, "finalizedSlot": finalizedHeader.Data.Header.Message.Slot, "lastUpdate": status.LastUpdate}, "found other blobIndexer indexing, skipping") + return nil + } + + // check if node still has last indexed blobs (if its outside the range defined by MAX_REQUEST_BLOCKS_DENEB), otherwise assume that the node has pruned too far and we would miss blobs + minBlobSlotRange := *spec.Data.MinEpochsForBlobSidecarsRequests * uint64(spec.Data.SlotsPerEpoch) + minBlobSlot := uint64(0) + if headHeader.Data.Header.Message.Slot > minBlobSlotRange { + minBlobSlot = headHeader.Data.Header.Message.Slot - minBlobSlotRange + } + pruneMarginSlotRange := utils.Config.BlobIndexer.PruneMarginEpochs * uint64(spec.Data.SlotsPerEpoch) + if minBlobSlot > pruneMarginSlotRange { + minBlobSlot = minBlobSlot - pruneMarginSlotRange + } + if status.LastIndexedFinalizedSlot < minBlobSlot && status.LastIndexedFinalizedBlobSlot > 0 { + bs, err := bi.cl.GetBlobSidecars(status.LastIndexedFinalizedBlobSlot) + if err != nil { + return err + } + if len(bs.Data) == 0 { + return fmt.Errorf("no blobs found at lastIndexedFinalizedBlobSlot: %v, node has pruned too far?", status.LastIndexedFinalizedBlobSlot) + } + } + + lastIndexedFinalizedBlobSlot := atomic.NewUint64(status.LastIndexedFinalizedBlobSlot) + + denebForkSlot := *spec.Data.DenebForkEpoch * uint64(spec.Data.SlotsPerEpoch) + startSlot := status.LastIndexedFinalizedSlot + 1 + if status.LastIndexedFinalizedSlot <= denebForkSlot { + startSlot = denebForkSlot + } + + if headHeader.Data.Header.Message.Slot <= startSlot { + return fmt.Errorf("headHeader.Data.Header.Message.Slot <= startSlot: %v < %v (denebForkEpoch: %v, denebForkSlot: %v, slotsPerEpoch: %v)", headHeader.Data.Header.Message.Slot, startSlot, utils.Config.Chain.ClConfig.DenebForkEpoch, denebForkSlot, utils.Config.Chain.ClConfig.SlotsPerEpoch) + } + + start := time.Now() + log.InfoWithFields(log.Fields{ + "lastIndexedFinalizedSlot": status.LastIndexedFinalizedSlot, + "headSlot": headHeader.Data.Header.Message.Slot, + "finalizedSlot": finalizedHeader.Data.Header.Message.Slot, + "startSlot": startSlot, + "networkID": bi.networkID, + }, "indexing blobs") + defer func() { + log.InfoWithFields(log.Fields{ + "startSlot": startSlot, + "headSlot": headHeader.Data.Header.Message.Slot, + "finalizedSlot": finalizedHeader.Data.Header.Message.Slot, + "duration": time.Since(start), + "networkID": bi.networkID, + }, "finished indexing blobs") + }() + + batchSize := uint64(100) + for batchStart := startSlot; batchStart <= headHeader.Data.Header.Message.Slot; batchStart += 
batchSize { + batchStartTs := time.Now() + batchBlobsIndexed := atomic.NewInt64(0) + batchEnd := batchStart + batchSize + if batchEnd > headHeader.Data.Header.Message.Slot { + batchEnd = headHeader.Data.Header.Message.Slot + } + g, gCtx = errgroup.WithContext(context.Background()) + g.SetLimit(4) + for slot := batchStart; slot <= batchEnd; slot++ { + slot := slot + g.Go(func() error { + select { + case <-gCtx.Done(): + return gCtx.Err() + default: + } + numBlobs, err := bi.indexBlobsAtSlot(slot) + if err != nil { + return fmt.Errorf("error bi.indexBlobsAtSlot(%v): %w", slot, err) + } + if numBlobs > 0 && slot <= finalizedHeader.Data.Header.Message.Slot && slot > lastIndexedFinalizedBlobSlot.Load() { + lastIndexedFinalizedBlobSlot.Store(slot) + } + batchBlobsIndexed.Add(int64(numBlobs)) + return nil + }) + } + err = g.Wait() + if err != nil { + return err + } + lastIndexedFinalizedSlot := uint64(0) + if batchEnd <= finalizedHeader.Data.Header.Message.Slot { + lastIndexedFinalizedSlot = batchEnd + } else { + lastIndexedFinalizedSlot = finalizedHeader.Data.Header.Message.Slot + } + newBlobIndexerStatus := BlobIndexerStatus{ + LastIndexedFinalizedSlot: lastIndexedFinalizedSlot, + LastIndexedFinalizedBlobSlot: lastIndexedFinalizedBlobSlot.Load(), + CurrentBlobIndexerId: bi.id, + LastUpdate: time.Now(), + BlobIndexerVersion: version.Version, + } + if status.LastIndexedFinalizedBlobSlot > newBlobIndexerStatus.LastIndexedFinalizedBlobSlot { + newBlobIndexerStatus.LastIndexedFinalizedBlobSlot = status.LastIndexedFinalizedBlobSlot + } + err := bi.putIndexerStatus(newBlobIndexerStatus) + if err != nil { + return fmt.Errorf("error updating indexer status at slot %v: %w", batchEnd, err) + } + slotsPerSecond := float64(batchEnd-batchStart) / time.Since(batchStartTs).Seconds() + blobsPerSecond := float64(batchBlobsIndexed.Load()) / time.Since(batchStartTs).Seconds() + estimatedTimeToHead := float64(headHeader.Data.Header.Message.Slot-batchStart) / slotsPerSecond + estimatedTimeToHeadDuration := time.Duration(estimatedTimeToHead) * time.Second + log.InfoWithFields(log.Fields{ + "lastIdxFinSlot": newBlobIndexerStatus.LastIndexedFinalizedSlot, + "lastIdxFinBlobSlot": newBlobIndexerStatus.LastIndexedFinalizedBlobSlot, + "batch": fmt.Sprintf("%d-%d", batchStart, batchEnd), + "duration": time.Since(batchStartTs), + "slotsPerSecond": fmt.Sprintf("%.3f", slotsPerSecond), + "blobsPerSecond": fmt.Sprintf("%.3f", blobsPerSecond), + "estimatedTimeToHead": estimatedTimeToHeadDuration, + "blobsIndexed": batchBlobsIndexed.Load(), + }, "updated indexer status") + if !utils.Config.BlobIndexer.DisableStatusReports { + services.ReportStatus("blobindexer", "Running", nil) + } + } + return nil +} + +func (bi *BlobIndexer) indexBlobsAtSlot(slot uint64) (int, error) { + tGetBlobSidecar := time.Now() + + blobSidecar, err := bi.cl.GetBlobSidecars(slot) + if err != nil { + httpErr := network.SpecificError(err) + if httpErr != nil && httpErr.StatusCode == http.StatusNotFound { + // no sidecar for this slot + return 0, nil + } + return 0, err + } + metrics.TaskDuration.WithLabelValues("blobindexer_get_blob_sidecars").Observe(time.Since(tGetBlobSidecar).Seconds()) + + if len(blobSidecar.Data) <= 0 { + return 0, nil + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*20) + defer cancel() + + g, gCtx := errgroup.WithContext(ctx) + g.SetLimit(4) + for _, d := range blobSidecar.Data { + d := d + versionedBlobHash := fmt.Sprintf("%#x", utils.VersionedBlobHash(d.KzgCommitment).Bytes()) + key :=
fmt.Sprintf("%s/blobs/%s", bi.networkID, versionedBlobHash) + + if bi.writtenBlobsCache.Contains(key) { + continue + } + + g.Go(func() error { + select { + case <-gCtx.Done(): + return gCtx.Err() + default: + } + + if enableCheckingBeforePutting { + tS3HeadObj := time.Now() + _, err = bi.S3Client.HeadObject(gCtx, &s3.HeadObjectInput{ + Bucket: &utils.Config.BlobIndexer.S3.Bucket, + Key: &key, + }) + metrics.TaskDuration.WithLabelValues("blobindexer_check_blob").Observe(time.Since(tS3HeadObj).Seconds()) + if err != nil { + // Only put the object if it does not exist yet + var httpResponseErr *awshttp.ResponseError + if errors.As(err, &httpResponseErr) && (httpResponseErr.HTTPStatusCode() == http.StatusNotFound || httpResponseErr.HTTPStatusCode() == 403) { + return nil + } + return fmt.Errorf("error getting headObject: %s (%v/%v): %w", key, d.SignedBlockHeader.Message.Slot, d.Index, err) + } + } + + tS3PutObj := time.Now() + _, putErr := bi.S3Client.PutObject(gCtx, &s3.PutObjectInput{ + Bucket: &utils.Config.BlobIndexer.S3.Bucket, + Key: &key, + Body: bytes.NewReader(d.Blob), + Metadata: map[string]string{ + "blob_index": fmt.Sprintf("%d", d.Index), + "block_slot": fmt.Sprintf("%d", d.SignedBlockHeader.Message.Slot), + "block_proposer": fmt.Sprintf("%d", d.SignedBlockHeader.Message.ProposerIndex), + "block_state_root": d.SignedBlockHeader.Message.StateRoot.String(), + "block_parent_root": d.SignedBlockHeader.Message.ParentRoot.String(), + "block_body_root": d.SignedBlockHeader.Message.BodyRoot.String(), + "kzg_commitment": d.KzgCommitment.String(), + "kzg_proof": d.KzgProof.String(), + }, + }) + metrics.TaskDuration.WithLabelValues("blobindexer_put_blob").Observe(time.Since(tS3PutObj).Seconds()) + if putErr != nil { + return fmt.Errorf("error putting object: %s (%v/%v): %w", key, d.SignedBlockHeader.Message.Slot, d.Index, putErr) + } + bi.writtenBlobsCache.Add(key, true) + + return nil + }) + } + err = g.Wait() + if err != nil { + return len(blobSidecar.Data), fmt.Errorf("error indexing blobs at slot %v: %w", slot, err) + } + + return len(blobSidecar.Data), nil +} + +func (bi *BlobIndexer) GetIndexerStatus() (*BlobIndexerStatus, error) { + start := time.Now() + defer func() { + metrics.TaskDuration.WithLabelValues("blobindexer_get_indexer_status").Observe(time.Since(start).Seconds()) + }() + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + key := fmt.Sprintf("%s/blob-indexer-status.json", bi.networkID) + obj, err := bi.S3Client.GetObject(ctx, &s3.GetObjectInput{ + Bucket: &utils.Config.BlobIndexer.S3.Bucket, + Key: &key, + }) + if err != nil { + // If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not Found) error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 ("access denied") error. 
+ var httpResponseErr *awshttp.ResponseError + if errors.As(err, &httpResponseErr) && (httpResponseErr.HTTPStatusCode() == 404 || httpResponseErr.HTTPStatusCode() == 403) { + return &BlobIndexerStatus{}, nil + } + return nil, err + } + status := &BlobIndexerStatus{} + err = json.NewDecoder(obj.Body).Decode(status) + return status, err +} + +func (bi *BlobIndexer) putIndexerStatus(status BlobIndexerStatus) error { + start := time.Now() + defer func() { + metrics.TaskDuration.WithLabelValues("blobindexer_put_indexer_status").Observe(time.Since(start).Seconds()) + }() + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + key := fmt.Sprintf("%s/blob-indexer-status.json", bi.networkID) + contentType := "application/json" + body, err := json.Marshal(&status) + if err != nil { + return err + } + _, err = bi.S3Client.PutObject(ctx, &s3.PutObjectInput{ + Bucket: &utils.Config.BlobIndexer.S3.Bucket, + Key: &key, + Body: bytes.NewReader(body), + ContentType: &contentType, + Metadata: map[string]string{ + "last_indexed_finalized_slot": fmt.Sprintf("%d", status.LastIndexedFinalizedSlot), + "last_indexed_finalized_blob_slot": fmt.Sprintf("%d", status.LastIndexedFinalizedBlobSlot), + "current_blob_indexer_id": status.CurrentBlobIndexerId, + "last_update": status.LastUpdate.Format(time.RFC3339), + "blob_indexer_version": status.BlobIndexerVersion, + }, + }) + if err != nil { + return err + } + return nil +} + +type BlobIndexerStatus struct { + LastIndexedFinalizedSlot uint64 `json:"last_indexed_finalized_slot"` // last finalized slot that was indexed + LastIndexedFinalizedBlobSlot uint64 `json:"last_indexed_finalized_blob_slot"` // last finalized slot that included a blob + CurrentBlobIndexerId string `json:"current_blob_indexer_id"` + LastUpdate time.Time `json:"last_update"` + BlobIndexerVersion string `json:"blob_indexer_version"` +} diff --git a/backend/pkg/blobindexer/blobs.go b/backend/pkg/blobindexer/blobs.go deleted file mode 100644 index ae99d2d1b..000000000 --- a/backend/pkg/blobindexer/blobs.go +++ /dev/null @@ -1,333 +0,0 @@ -package blobindexer - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "sync" - "time" - - "github.com/gobitfly/beaconchain/pkg/commons/log" - "github.com/gobitfly/beaconchain/pkg/commons/metrics" - "github.com/gobitfly/beaconchain/pkg/commons/utils" - "github.com/gobitfly/beaconchain/pkg/commons/version" - "github.com/gobitfly/beaconchain/pkg/consapi" - - "github.com/gobitfly/beaconchain/pkg/consapi/network" - constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" - - "github.com/aws/aws-sdk-go-v2/aws" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/coocood/freecache" - "golang.org/x/sync/errgroup" -) - -type BlobIndexer struct { - S3Client *s3.Client - running bool - runningMu *sync.Mutex - clEndpoint string - cache *freecache.Cache - cl consapi.Client -} - -func NewBlobIndexer() (*BlobIndexer, error) { - s3Resolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { - return aws.Endpoint{ - PartitionID: "aws", - URL: utils.Config.BlobIndexer.S3.Endpoint, - SigningRegion: "us-east-2", - HostnameImmutable: true, - }, nil - }) - s3Client := s3.NewFromConfig(aws.Config{ - Region: "us-east-2", - Credentials: credentials.NewStaticCredentialsProvider( - utils.Config.BlobIndexer.S3.AccessKeyId, - 
utils.Config.BlobIndexer.S3.AccessKeySecret, - "", - ), - EndpointResolverWithOptions: s3Resolver, - }, func(o *s3.Options) { - o.UsePathStyle = true - }) - bi := &BlobIndexer{ - S3Client: s3Client, - runningMu: &sync.Mutex{}, - clEndpoint: "http://" + utils.Config.Indexer.Node.Host + ":" + utils.Config.Indexer.Node.Port, - cache: freecache.NewCache(1024 * 1024), - cl: consapi.NewClient("http://" + utils.Config.Indexer.Node.Host + ":" + utils.Config.Indexer.Node.Port), - } - return bi, nil -} - -func (bi *BlobIndexer) Start() { - bi.runningMu.Lock() - if bi.running { - bi.runningMu.Unlock() - return - } - bi.running = true - bi.runningMu.Unlock() - - log.InfoWithFields(log.Fields{"version": version.Version, "clEndpoint": bi.clEndpoint, "s3Endpoint": utils.Config.BlobIndexer.S3.Endpoint}, "starting blobindexer") - for { - err := bi.Index() - if err != nil { - log.Error(err, "failed indexing blobs", 0) - } - time.Sleep(time.Second * 10) - } -} - -func (bi *BlobIndexer) Index() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - headHeader := &constypes.StandardBeaconHeaderResponse{} - finalizedHeader := &constypes.StandardBeaconHeaderResponse{} - spec := &constypes.StandardSpecResponse{} - - g, gCtx := errgroup.WithContext(ctx) - g.SetLimit(3) - g.Go(func() error { - var err error - spec, err = bi.cl.GetSpec() - if err != nil { - return err - } - return nil - }) - g.Go(func() error { - var err error - headHeader, err = bi.cl.GetBlockHeader("head") - if err != nil { - return err - } - return nil - }) - g.Go(func() error { - var err error - finalizedHeader, err = bi.cl.GetBlockHeader("finalized") - if err != nil { - return err - } - return nil - }) - err := g.Wait() - if err != nil { - return err - } - - nodeDepositNetworkId := uint64(spec.Data.DepositNetworkID) - if utils.Config.Chain.ClConfig.DepositNetworkID != nodeDepositNetworkId { - return fmt.Errorf("config.DepositNetworkId != node.DepositNetworkId: %v != %v", utils.Config.Chain.ClConfig.DepositNetworkID, nodeDepositNetworkId) - } - - status, err := bi.GetIndexerStatus() - if err != nil { - return err - } - - denebForkSlot := utils.Config.Chain.ClConfig.DenebForkEpoch * utils.Config.Chain.ClConfig.SlotsPerEpoch - startSlot := status.LastIndexedFinalizedSlot + 1 - if status.LastIndexedFinalizedSlot <= denebForkSlot { - startSlot = denebForkSlot - } - - if headHeader.Data.Header.Message.Slot <= startSlot { - return fmt.Errorf("headHeader.Data.Header.Message.Slot <= startSlot: %v < %v", headHeader.Data.Header.Message.Slot, startSlot) - } - - start := time.Now() - log.InfoWithFields(log.Fields{"lastIndexedFinalizedSlot": status.LastIndexedFinalizedSlot, "headSlot": headHeader.Data.Header.Message.Slot}, "indexing blobs") - defer func() { - log.InfoWithFields(log.Fields{ - "startSlot": startSlot, - "endSlot": headHeader.Data.Header.Message.Slot, - "duration": time.Since(start), - }, "finished indexing blobs") - }() - - batchSize := uint64(100) - for batchStart := startSlot; batchStart <= headHeader.Data.Header.Message.Slot; batchStart += batchSize { - batchEnd := batchStart + batchSize - if batchEnd > headHeader.Data.Header.Message.Slot { - batchEnd = headHeader.Data.Header.Message.Slot - } - g, gCtx = errgroup.WithContext(context.Background()) - g.SetLimit(4) - for slot := batchStart; slot <= batchEnd; slot++ { - slot := slot - g.Go(func() error { - select { - case <-gCtx.Done(): - return gCtx.Err() - default: - } - err := bi.IndexBlobsAtSlot(slot) - if err != nil { - return err - } - 
return nil - }) - } - err = g.Wait() - if err != nil { - return err - } - if batchEnd <= finalizedHeader.Data.Header.Message.Slot { - err := bi.PutIndexerStatus(BlobIndexerStatus{ - LastIndexedFinalizedSlot: batchEnd, - }) - if err != nil { - return fmt.Errorf("error updating indexer status at slot %v: %w", batchEnd, err) - } - log.InfoWithFields(log.Fields{"lastIndexedFinalizedSlot": batchEnd}, "updated indexer status") - } - } - return nil -} - -func (bi *BlobIndexer) IndexBlobsAtSlot(slot uint64) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - tGetBlobSidcar := time.Now() - - blobSidecar, err := bi.cl.GetBlobSidecars(slot) - if err != nil { - httpErr := network.SpecificError(err) - if httpErr != nil && httpErr.StatusCode == http.StatusNotFound { - // no sidecar for this slot - return nil - } - return err - } - metrics.TaskDuration.WithLabelValues("blobindexer_get_blob_sidecars").Observe(time.Since(tGetBlobSidcar).Seconds()) - - if len(blobSidecar.Data) <= 0 { - return nil - } - - g, gCtx := errgroup.WithContext(ctx) - g.SetLimit(4) - for _, d := range blobSidecar.Data { - d := d - g.Go(func() error { - select { - case <-gCtx.Done(): - return gCtx.Err() - default: - } - - versionedBlobHash := fmt.Sprintf("%#x", utils.VersionedBlobHash(d.KzgCommitment).Bytes()) - key := fmt.Sprintf("blobs/%s", versionedBlobHash) - - tS3HeadObj := time.Now() - _, err = bi.S3Client.HeadObject(gCtx, &s3.HeadObjectInput{ - Bucket: &utils.Config.BlobIndexer.S3.Bucket, - Key: &key, - }) - metrics.TaskDuration.WithLabelValues("blobindexer_check_blob").Observe(time.Since(tS3HeadObj).Seconds()) - if err != nil { - // Only put the object if it does not exist yet - var httpResponseErr *awshttp.ResponseError - if errors.As(err, &httpResponseErr) && (httpResponseErr.HTTPStatusCode() == http.StatusNotFound || httpResponseErr.HTTPStatusCode() == 403) { - tS3PutObj := time.Now() - _, putErr := bi.S3Client.PutObject(gCtx, &s3.PutObjectInput{ - Bucket: &utils.Config.BlobIndexer.S3.Bucket, - Key: &key, - Body: bytes.NewReader(d.Blob), - Metadata: map[string]string{ - "slot": fmt.Sprintf("%d", d.Slot), - "index": fmt.Sprintf("%d", d.Index), - "block_root": d.BlockRoot.String(), - "block_parent_root": d.BlockParentRoot.String(), - "proposer_index": fmt.Sprintf("%d", d.ProposerIndex), - "kzg_commitment": d.KzgCommitment.String(), - "kzg_proof": d.KzgProof.String(), - }, - }) - metrics.TaskDuration.WithLabelValues("blobindexer_put_blob").Observe(time.Since(tS3PutObj).Seconds()) - if putErr != nil { - return fmt.Errorf("error putting object: %s (%v/%v): %w", key, d.Slot, d.Index, putErr) - } - return nil - } - return fmt.Errorf("error getting headObject: %s (%v/%v): %w", key, d.Slot, d.Index, err) - } - return nil - }) - } - err = g.Wait() - if err != nil { - return fmt.Errorf("error indexing blobs at slot %v: %w", slot, err) - } - - return nil -} - -func (bi *BlobIndexer) GetIndexerStatus() (*BlobIndexerStatus, error) { - start := time.Now() - defer func() { - metrics.TaskDuration.WithLabelValues("blobindexer_get_indexer_status").Observe(time.Since(start).Seconds()) - }() - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - key := "blob-indexer-status.json" - obj, err := bi.S3Client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: &utils.Config.BlobIndexer.S3.Bucket, - Key: &key, - }) - if err != nil { - // If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket 
permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not Found) error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 ("access denied") error. - var httpResponseErr *awshttp.ResponseError - if errors.As(err, &httpResponseErr) && (httpResponseErr.HTTPStatusCode() == 404 || httpResponseErr.HTTPStatusCode() == 403) { - return &BlobIndexerStatus{}, nil - } - return nil, err - } - status := &BlobIndexerStatus{} - err = json.NewDecoder(obj.Body).Decode(status) - return status, err -} - -func (bi *BlobIndexer) PutIndexerStatus(status BlobIndexerStatus) error { - start := time.Now() - defer func() { - metrics.TaskDuration.WithLabelValues("blobindexer_put_indexer_status").Observe(time.Since(start).Seconds()) - }() - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - key := "blob-indexer-status.json" - contentType := "application/json" - body, err := json.Marshal(&status) - if err != nil { - return err - } - _, err = bi.S3Client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: &utils.Config.BlobIndexer.S3.Bucket, - Key: &key, - Body: bytes.NewReader(body), - ContentType: &contentType, - Metadata: map[string]string{ - "last_indexed_finalized_slot": fmt.Sprintf("%d", status.LastIndexedFinalizedSlot), - }, - }) - if err != nil { - return err - } - return nil -} - -type BlobIndexerStatus struct { - LastIndexedFinalizedSlot uint64 `json:"last_indexed_finalized_slot"` - // LastIndexedFinalizedRoot string `json:"last_indexed_finalized_root"` - // IndexedUnfinalized map[string]uint64 `json:"indexed_unfinalized"` -} diff --git a/backend/pkg/commons/db/bigtable.go b/backend/pkg/commons/db/bigtable.go index edd9b3c17..7b7cd147e 100644 --- a/backend/pkg/commons/db/bigtable.go +++ b/backend/pkg/commons/db/bigtable.go @@ -194,7 +194,7 @@ func (bigtable *Bigtable) GetClient() *gcp_bigtable.Client { return bigtable.client } -func (bigtable *Bigtable) SaveMachineMetric(process string, userID uint64, machine string, data []byte) error { +func (bigtable *Bigtable) SaveMachineMetric(process string, userID types.UserId, machine string, data []byte) error { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() @@ -234,7 +234,7 @@ func (bigtable *Bigtable) SaveMachineMetric(process string, userID uint64, machi return nil } -func (bigtable Bigtable) getMachineMetricNamesMap(userID uint64, searchDepth int) (map[string]bool, error) { +func (bigtable Bigtable) getMachineMetricNamesMap(userID types.UserId, searchDepth int) (map[string]bool, error) { ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*30)) defer cancel() @@ -265,7 +265,7 @@ func (bigtable Bigtable) getMachineMetricNamesMap(userID uint64, searchDepth int return machineNames, nil } -func (bigtable Bigtable) GetMachineMetricsMachineNames(userID uint64) ([]string, error) { +func (bigtable Bigtable) GetMachineMetricsMachineNames(userID types.UserId) ([]string, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -288,7 +288,7 @@ func (bigtable Bigtable) GetMachineMetricsMachineNames(userID uint64) ([]string, return result, nil } -func (bigtable Bigtable) GetMachineMetricsMachineCount(userID uint64) (uint64, error) { +func (bigtable Bigtable) GetMachineMetricsMachineCount(userID types.UserId) (uint64, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ 
-310,7 +310,7 @@ func (bigtable Bigtable) GetMachineMetricsMachineCount(userID uint64) (uint64, e return uint64(card), nil } -func (bigtable Bigtable) GetMachineMetricsNode(userID uint64, limit, offset int) ([]*types.MachineMetricNode, error) { +func (bigtable Bigtable) GetMachineMetricsNode(userID types.UserId, limit, offset int) ([]*types.MachineMetricNode, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -335,7 +335,7 @@ func (bigtable Bigtable) GetMachineMetricsNode(userID uint64, limit, offset int) ) } -func (bigtable Bigtable) GetMachineMetricsValidator(userID uint64, limit, offset int) ([]*types.MachineMetricValidator, error) { +func (bigtable Bigtable) GetMachineMetricsValidator(userID types.UserId, limit, offset int) ([]*types.MachineMetricValidator, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -360,7 +360,7 @@ func (bigtable Bigtable) GetMachineMetricsValidator(userID uint64, limit, offset ) } -func (bigtable Bigtable) GetMachineMetricsSystem(userID uint64, limit, offset int) ([]*types.MachineMetricSystem, error) { +func (bigtable Bigtable) GetMachineMetricsSystem(userID types.UserId, limit, offset int) ([]*types.MachineMetricSystem, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -385,7 +385,7 @@ func (bigtable Bigtable) GetMachineMetricsSystem(userID uint64, limit, offset in ) } -func getMachineMetrics[T types.MachineMetricSystem | types.MachineMetricNode | types.MachineMetricValidator](bigtable Bigtable, process string, userID uint64, limit, offset int, marshler func(data []byte, machine string) *T) ([]*T, error) { +func getMachineMetrics[T types.MachineMetricSystem | types.MachineMetricNode | types.MachineMetricValidator](bigtable Bigtable, process string, userID types.UserId, limit, offset int, marshler func(data []byte, machine string) *T) ([]*T, error) { ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*30)) defer cancel() @@ -429,7 +429,7 @@ func getMachineMetrics[T types.MachineMetricSystem | types.MachineMetricNode | t return res, nil } -func (bigtable Bigtable) GetMachineRowKey(userID uint64, process string, machine string) string { +func (bigtable Bigtable) GetMachineRowKey(userID types.UserId, process string, machine string) string { return fmt.Sprintf("u:%s:p:%s:m:%s", bigtable.reversePaddedUserID(userID), process, machine) } @@ -437,7 +437,7 @@ func (bigtable Bigtable) GetMachineRowKey(userID uint64, process string, machine // machineData contains the latest machine data in CurrentData // and 5 minute old data in fiveMinuteOldData (defined in limit) // as well as the insert timestamps of both -func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable.RowList) (map[uint64]map[string]*types.MachineMetricSystemUser, error) { +func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable.RowList) (map[types.UserId]map[string]*types.MachineMetricSystemUser, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "rowKeys": rowKeys, @@ -449,7 +449,7 @@ func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable. 
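+ // note: machine metric row keys follow the layout produced by GetMachineRowKey ("u:<reversed padded user id>:p:<process>:m:<machine>"); the result map below is keyed by the types.UserId parsed back out of those keys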
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*200)) defer cancel() - res := make(map[uint64]map[string]*types.MachineMetricSystemUser) // userID -> machine -> data + res := make(map[types.UserId]map[string]*types.MachineMetricSystemUser) // userID -> machine -> data limit := 5 @@ -509,7 +509,7 @@ func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable. } //nolint:unparam -func machineMetricRowParts(r string) (bool, uint64, string, string) { +func machineMetricRowParts(r string) (bool, types.UserId, string, string) { keySplit := strings.Split(r, ":") userID, err := strconv.ParseUint(keySplit[1], 10, 64) @@ -526,7 +526,7 @@ func machineMetricRowParts(r string) (bool, uint64, string, string) { process := keySplit[3] - return true, userID, machine, process + return true, types.UserId(userID), machine, process } func (bigtable *Bigtable) SaveValidatorBalances(epoch uint64, validators []*types.Validator) error { @@ -2678,8 +2678,8 @@ func GetCurrentDayClIncome(validator_indices []uint64) (map[uint64]int64, error) return dayIncome, nil } -func (bigtable *Bigtable) reversePaddedUserID(userID uint64) string { - return fmt.Sprintf("%09d", ^uint64(0)-userID) +func (bigtable *Bigtable) reversePaddedUserID(userID types.UserId) string { + return fmt.Sprintf("%09d", ^uint64(0)-uint64(userID)) } func (bigtable *Bigtable) reversedPaddedEpoch(epoch uint64) string { diff --git a/backend/pkg/commons/db/bigtable_eth1.go b/backend/pkg/commons/db/bigtable_eth1.go index e2fca3052..3861374f2 100644 --- a/backend/pkg/commons/db/bigtable_eth1.go +++ b/backend/pkg/commons/db/bigtable_eth1.go @@ -3138,7 +3138,7 @@ func (bigtable *Bigtable) GetAddressName(address []byte) (string, error) { add := common.Address{} add.SetBytes(address) - name, err := GetEnsNameForAddress(add) + name, err := GetEnsNameForAddress(add, time.Time{}) if err == nil && len(name) > 0 { return name, nil } @@ -3215,11 +3215,11 @@ type isContractInfo struct { ts gcp_bigtable.Timestamp } -type contractInteractionAtRequest struct { - address string - block int64 - txIdx int64 - traceIdx int64 +type ContractInteractionAtRequest struct { + Address string // expected all lowercase without 0x prefix + Block int64 + TxIdx int64 + TraceIdx int64 } func (bigtable *Bigtable) getAddressIsContractHistories(histories map[string][]isContractInfo) error { @@ -3270,7 +3270,7 @@ func (bigtable *Bigtable) getAddressIsContractHistories(histories map[string][]i // returns account state after the given execution state // -1 is latest (e.g. 
"txIdx" = -1 returns the contract state after execution of "block", "block" = -1 returns the state at chain head) -func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []contractInteractionAtRequest) ([]types.ContractInteractionType, error) { +func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []ContractInteractionAtRequest) ([]types.ContractInteractionType, error) { results := make([]types.ContractInteractionType, len(requests)) if len(requests) == 0 { return results, nil @@ -3279,7 +3279,7 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []contractIn // get histories histories := make(map[string][]isContractInfo, len(requests)) for _, request := range requests { - histories[request.address] = nil + histories[request.Address] = nil } err := bigtable.getAddressIsContractHistories(histories) if err != nil { @@ -3288,22 +3288,22 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []contractIn // evaluate requests; CONTRACT_NONE is default for i, request := range requests { - history, ok := histories[request.address] + history, ok := histories[request.Address] if !ok || history == nil || len(history) == 0 { continue } latestUpdateIdxBeforeReq := 0 - if request.block != -1 { + if request.Block != -1 { var block, tx, itx uint64 - if request.txIdx == -1 { - block = uint64(request.block + 1) - } else if request.traceIdx == -1 { - block = uint64(request.block) - tx = uint64(request.txIdx + 1) + if request.TxIdx == -1 { + block = uint64(request.Block + 1) + } else if request.TraceIdx == -1 { + block = uint64(request.Block) + tx = uint64(request.TxIdx + 1) } else { - block = uint64(request.block) - tx = uint64(request.txIdx) - itx = uint64(request.traceIdx + 1) + block = uint64(request.Block) + tx = uint64(request.TxIdx) + itx = uint64(request.TraceIdx + 1) } req_ts, err := encodeIsContractUpdateTs(block, tx, itx) if err != nil { @@ -3319,7 +3319,7 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []contractIn } b, tx, trace := decodeIsContractUpdateTs(history[latestUpdateIdxBeforeReq].ts) - exact_match := request.block == -1 || request.block == int64(b) && (request.txIdx == -1 || request.txIdx == int64(tx) && (request.traceIdx == -1 || request.traceIdx == int64(trace))) + exact_match := request.Block == -1 || request.Block == int64(b) && (request.TxIdx == -1 || request.TxIdx == int64(tx) && (request.TraceIdx == -1 || request.TraceIdx == int64(trace))) if exact_match { results[i] = types.CONTRACT_DESTRUCTION @@ -3343,17 +3343,17 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []contractIn // convenience function to get contract interaction status per transaction of a block func (bigtable *Bigtable) GetAddressContractInteractionsAtBlock(block *types.Eth1Block) ([]types.ContractInteractionType, error) { - requests := make([]contractInteractionAtRequest, len(block.GetTransactions())) + requests := make([]ContractInteractionAtRequest, len(block.GetTransactions())) for i, tx := range block.GetTransactions() { address := tx.GetTo() if len(address) == 0 { address = tx.GetContractAddress() } - requests[i] = contractInteractionAtRequest{ - address: fmt.Sprintf("%x", address), - block: int64(block.GetNumber()), - txIdx: int64(i), - traceIdx: -1, + requests[i] = ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", address), + Block: int64(block.GetNumber()), + TxIdx: int64(i), + TraceIdx: -1, } } @@ -3363,19 +3363,19 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAtBlock(block 
*types.Eth // convenience function to get contract interaction status per subtransaction of a transaction // 2nd parameter specifies [tx_idx, trace_idx] for each internal tx func (bigtable *Bigtable) GetAddressContractInteractionsAtITransactions(itransactions []*types.Eth1InternalTransactionIndexed, idxs [][2]int64) ([][2]types.ContractInteractionType, error) { - requests := make([]contractInteractionAtRequest, 0, len(itransactions)*2) + requests := make([]ContractInteractionAtRequest, 0, len(itransactions)*2) for i, tx := range itransactions { - requests = append(requests, contractInteractionAtRequest{ - address: fmt.Sprintf("%x", tx.GetFrom()), - block: int64(tx.GetBlockNumber()), - txIdx: idxs[i][0], - traceIdx: idxs[i][1], + requests = append(requests, ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", tx.GetFrom()), + Block: int64(tx.GetBlockNumber()), + TxIdx: idxs[i][0], + TraceIdx: idxs[i][1], }) - requests = append(requests, contractInteractionAtRequest{ - address: fmt.Sprintf("%x", tx.GetTo()), - block: int64(tx.GetBlockNumber()), - txIdx: idxs[i][0], - traceIdx: idxs[i][1], + requests = append(requests, ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", tx.GetTo()), + Block: int64(tx.GetBlockNumber()), + TxIdx: idxs[i][0], + TraceIdx: idxs[i][1], }) } results, err := bigtable.GetAddressContractInteractionsAt(requests) @@ -3392,20 +3392,20 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAtITransactions(itransac // convenience function to get contract interaction status per parity trace func (bigtable *Bigtable) GetAddressContractInteractionsAtParityTraces(traces []*rpc.ParityTraceResult) ([][2]types.ContractInteractionType, error) { - requests := make([]contractInteractionAtRequest, 0, len(traces)*2) + requests := make([]ContractInteractionAtRequest, 0, len(traces)*2) for i, itx := range traces { from, to, _, _ := itx.ConvertFields() - requests = append(requests, contractInteractionAtRequest{ - address: fmt.Sprintf("%x", from), - block: int64(itx.BlockNumber), - txIdx: int64(itx.TransactionPosition), - traceIdx: int64(i), + requests = append(requests, ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", from), + Block: int64(itx.BlockNumber), + TxIdx: int64(itx.TransactionPosition), + TraceIdx: int64(i), }) - requests = append(requests, contractInteractionAtRequest{ - address: fmt.Sprintf("%x", to), - block: int64(itx.BlockNumber), - txIdx: int64(itx.TransactionPosition), - traceIdx: int64(i), + requests = append(requests, ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", to), + Block: int64(itx.BlockNumber), + TxIdx: int64(itx.TransactionPosition), + TraceIdx: int64(i), }) } results, err := bigtable.GetAddressContractInteractionsAt(requests) @@ -3422,13 +3422,13 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAtParityTraces(traces [] // convenience function to get contract interaction status per transaction func (bigtable *Bigtable) GetAddressContractInteractionsAtTransactions(transactions []*types.Eth1TransactionIndexed, idxs []int64) ([]types.ContractInteractionType, error) { - requests := make([]contractInteractionAtRequest, len(transactions)) + requests := make([]ContractInteractionAtRequest, len(transactions)) for i, tx := range transactions { - requests[i] = contractInteractionAtRequest{ - address: fmt.Sprintf("%x", tx.GetTo()), - block: int64(tx.GetBlockNumber()), - txIdx: idxs[i], - traceIdx: -1, + requests[i] = ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", tx.GetTo()), + Block: int64(tx.GetBlockNumber()), 
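+ // a TraceIdx of -1 means "state after the whole transaction", following the -1 semantics documented on GetAddressContractInteractionsAt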
+ TxIdx: idxs[i], + TraceIdx: -1, } } return bigtable.GetAddressContractInteractionsAt(requests) diff --git a/backend/pkg/commons/db/db.go b/backend/pkg/commons/db/db.go index bc1489514..929728950 100644 --- a/backend/pkg/commons/db/db.go +++ b/backend/pkg/commons/db/db.go @@ -957,22 +957,8 @@ func GetTotalEligibleEther() (uint64, error) { } // GetValidatorsGotSlashed returns the validators that got slashed after `epoch` either by an attestation violation or a proposer violation -func GetValidatorsGotSlashed(epoch uint64) ([]struct { - Epoch uint64 `db:"epoch"` - SlasherIndex uint64 `db:"slasher"` - SlasherPubkey string `db:"slasher_pubkey"` - SlashedValidatorIndex uint64 `db:"slashedvalidator"` - SlashedValidatorPubkey []byte `db:"slashedvalidator_pubkey"` - Reason string `db:"reason"` -}, error) { - var dbResult []struct { - Epoch uint64 `db:"epoch"` - SlasherIndex uint64 `db:"slasher"` - SlasherPubkey string `db:"slasher_pubkey"` - SlashedValidatorIndex uint64 `db:"slashedvalidator"` - SlashedValidatorPubkey []byte `db:"slashedvalidator_pubkey"` - Reason string `db:"reason"` - } +func GetValidatorsGotSlashed(epoch uint64) ([]*types.SlashingInfo, error) { + var dbResult []*types.SlashingInfo err := ReaderDb.Select(&dbResult, ` WITH slashings AS ( diff --git a/backend/pkg/commons/db/ens.go b/backend/pkg/commons/db/ens.go index 46001df56..119f6c00b 100644 --- a/backend/pkg/commons/db/ens.go +++ b/backend/pkg/commons/db/ens.go @@ -436,7 +436,7 @@ func validateEnsAddress(client *ethclient.Client, address common.Address, alread err.Error() == "no resolution" || err.Error() == "execution reverted" || strings.HasPrefix(err.Error(), "name is not valid") { - log.Warnf("reverse resolving address [%v] resulted in a skippable error [%s], skipping it", address, err.Error()) + // log.Warnf("reverse resolving address [%v] resulted in a skippable error [%s], skipping it", address, err.Error()) } else { return fmt.Errorf("error could not reverse resolve address [%v]: %w", address, err) } @@ -475,7 +475,7 @@ func validateEnsName(client *ethclient.Client, name string, alreadyChecked *EnsC nameHash, err := go_ens.NameHash(name) if err != nil { - log.Warnf("error could not hash name [%v]: %v -> removing ens entry", name, err) + // log.Warnf("error could not hash name [%v]: %v -> removing ens entry", name, err) err = removeEnsName(name) if err != nil { return fmt.Errorf("error removing ens name [%v]: %w", name, err) @@ -488,12 +488,12 @@ func validateEnsName(client *ethclient.Client, name string, alreadyChecked *EnsC if err.Error() == "unregistered name" || err.Error() == "no address" || err.Error() == "no resolver" || - err.Error() == "abi: attempting to unmarshall an empty string while arguments are expected" || + err.Error() == "abi: attempting to unmarshal an empty string while arguments are expected" || strings.Contains(err.Error(), "execution reverted") || err.Error() == "invalid jump destination" || err.Error() == "invalid opcode: INVALID" { // the given name is not available anymore or resolving it did not work properly => we can remove it from the db (if it is there) - log.Warnf("could not resolve name [%v]: %v -> removing ens entry", name, err) + // log.Warnf("could not resolve name [%v]: %v -> removing ens entry", name, err) err = removeEnsName(name) if err != nil { return fmt.Errorf("error removing ens name after resolve failed [%v]: %w", name, err) @@ -516,7 +516,7 @@ func validateEnsName(client *ethclient.Client, name string, alreadyChecked *EnsC reverseName, err := go_ens.ReverseResolve(client, 
addr) if err != nil { if err.Error() == "not a resolver" || err.Error() == "no resolution" || err.Error() == "execution reverted" { - log.Warnf("reverse resolving address [%v] for name [%v] resulted in an error [%s], marking entry as not primary", addr, name, err.Error()) + // log.Warnf("reverse resolving address [%v] for name [%v] resulted in an error [%s], marking entry as not primary", addr, name, err.Error()) } else { return fmt.Errorf("error could not reverse resolve address [%v]: %w", addr, err) } @@ -549,12 +549,12 @@ func validateEnsName(client *ethclient.Client, name string, alreadyChecked *EnsC return fmt.Errorf("error writing ens data for name [%v]: %w", name, err) } - log.InfoWithFields(log.Fields{ - "name": name, - "address": addr, - "expires": expires, - "reverseName": reverseName, - }, "validated ens name") + // log.InfoWithFields(log.Fields{ + // "name": name, + // "address": addr, + // "expires": expires, + // "reverseName": reverseName, + // }, "validated ens name") return nil } @@ -609,15 +609,19 @@ func GetAddressForEnsName(name string) (address *common.Address, err error) { return address, err } -func GetEnsNameForAddress(address common.Address) (name string, err error) { +// pass invalid time to get latest data +func GetEnsNameForAddress(address common.Address, validUntil time.Time) (name string, err error) { + if validUntil.IsZero() { + validUntil = time.Now() + } err = ReaderDb.Get(&name, ` SELECT ens_name FROM ens WHERE address = $1 AND is_primary_name AND - valid_to >= now() - ;`, address.Bytes()) + valid_to >= $2 + ;`, address.Bytes(), validUntil) return name, err } diff --git a/backend/pkg/commons/db/migrations/postgres/20240822134034_add_address_tags.sql b/backend/pkg/commons/db/migrations/postgres/20240822134034_add_address_tags.sql new file mode 100644 index 000000000..3579f7ef6 --- /dev/null +++ b/backend/pkg/commons/db/migrations/postgres/20240822134034_add_address_tags.sql @@ -0,0 +1,15 @@ +-- +goose Up +-- +goose StatementBegin +SELECT('up SQL query - create address_names table'); +CREATE TABLE IF NOT EXISTS address_names ( + address bytea NOT NULL UNIQUE, + name TEXT NOT NULL, + PRIMARY KEY (address, name) +); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +SELECT('down SQL query - drop address_names table'); +DROP TABLE IF EXISTS address_names; +-- +goose StatementEnd diff --git a/backend/pkg/commons/db/migrations/postgres/20241003062452_validator_status_counts.sql b/backend/pkg/commons/db/migrations/postgres/20241003062452_validator_status_counts.sql new file mode 100644 index 000000000..dbb6d2ebc --- /dev/null +++ b/backend/pkg/commons/db/migrations/postgres/20241003062452_validator_status_counts.sql @@ -0,0 +1,11 @@ +-- +goose Up +-- +goose StatementBegin +SELECT 'up SQL query'; +CREATE TABLE IF NOT EXISTS validators_status_counts (status varchar(20) primary key, validator_count int not null); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +SELECT 'down SQL query'; +DROP TABLE IF EXISTS validators_status_counts; +-- +goose StatementEnd diff --git a/backend/pkg/commons/db/migrations/postgres/20241003111051_mobile_app_bundles.sql b/backend/pkg/commons/db/migrations/postgres/20241003111051_mobile_app_bundles.sql new file mode 100644 index 000000000..9541726c2 --- /dev/null +++ b/backend/pkg/commons/db/migrations/postgres/20241003111051_mobile_app_bundles.sql @@ -0,0 +1,19 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE TABLE IF NOT EXISTS mobile_app_bundles ( + bundle_version INT NOT NULL PRIMARY KEY, + 
bundle_url TEXT NOT NULL, + min_native_version INT NOT NULL, + target_count INT, + delivered_count INT NOT NULL DEFAULT 0 +); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +DROP TABLE IF EXISTS mobile_app_bundles; + +-- +goose StatementEnd diff --git a/backend/pkg/commons/db/migrations/postgres/20241010134000_dashboard_notifications.sql b/backend/pkg/commons/db/migrations/postgres/20241010134000_dashboard_notifications.sql new file mode 100644 index 000000000..8bb6e1fee --- /dev/null +++ b/backend/pkg/commons/db/migrations/postgres/20241010134000_dashboard_notifications.sql @@ -0,0 +1,120 @@ +-- +goose Up +-- +goose StatementBegin +SELECT + 'up SQL query'; + +SELECT + 'create users_val_dashboards_notifications_history table'; + +CREATE TABLE + IF NOT EXISTS users_val_dashboards_notifications_history ( + user_id INT NOT NULL, + dashboard_id INT NOT NULL, + group_id INT NOT NULL, + epoch INT NOT NULL, + event_type TEXT NOT NULL, + event_count INT NOT NULL, + details bytea NOT NULL, + PRIMARY KEY ( + user_id, + epoch, + dashboard_id, + group_id, + event_type + ) + ); + +/* On the users db */ +SELECT + 'create machine_notifications_history table'; + +CREATE TABLE + IF NOT EXISTS machine_notifications_history ( + user_id INT NOT NULL, + epoch INT NOT NULL, + machine_id BIGINT NOT NULL, + machine_name TEXT NOT NULL, + event_type TEXT NOT NULL, + event_threshold REAL NOT NULL, + PRIMARY KEY (user_id, epoch, machine_id, event_type) + ); + +/* On the users db */ +SELECT + 'create client_notifications_history table'; + +CREATE TABLE + IF NOT EXISTS client_notifications_history ( + user_id INT NOT NULL, + epoch INT NOT NULL, + client TEXT NOT NULL, + client_version TEXT NOT NULL, + client_url TEXT NOT NULL, + PRIMARY KEY (user_id, epoch, client) + ); + +/* On the users db */ +SELECT + 'create network_notifications_history table'; + +CREATE TABLE + IF NOT EXISTS network_notifications_history ( + user_id INT NOT NULL, + epoch INT NOT NULL, + network SMALLINT NOT NULL, + event_type TEXT NOT NULL, + event_threshold REAL NOT NULL, + PRIMARY KEY (user_id, epoch, network, event_type) + ); + +SELECT + 'create notifications_do_not_disturb_ts column'; + +ALTER TABLE users +ADD COLUMN IF NOT EXISTS notifications_do_not_disturb_ts TIMESTAMP WITHOUT TIME ZONE; + +SELECT + 'create webhook_target column'; + +ALTER TABLE users_val_dashboards_groups +ADD COLUMN IF NOT EXISTS webhook_target TEXT; + +SELECT + 'create webhook_format column'; + +ALTER TABLE users_val_dashboards_groups +ADD COLUMN IF NOT EXISTS webhook_format TEXT; + +SELECT + 'create realtime_notifications column'; + +ALTER TABLE users_val_dashboards_groups +ADD COLUMN IF NOT EXISTS realtime_notifications BOOL; + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT + 'down SQL query'; + +DROP TABLE IF EXISTS users_val_dashboards_notifications_history; + +DROP TABLE IF EXISTS machine_notifications_history; + +DROP TABLE IF EXISTS client_notifications_history; + +DROP TABLE IF EXISTS network_notifications_history; + +ALTER TABLE users +DROP COLUMN IF EXISTS notifications_do_not_disturb_ts; + +ALTER TABLE users_val_dashboards_groups +DROP COLUMN IF EXISTS webhook_target; + +ALTER TABLE users_val_dashboards_groups +DROP COLUMN IF EXISTS webhook_format; + +ALTER TABLE users_val_dashboards_groups +DROP COLUMN IF EXISTS realtime_notifications; + +-- +goose StatementEnd \ No newline at end of file diff --git
a/backend/pkg/commons/db/migrations/postgres/20241022072552_head_notification_status_tracking_table.sql b/backend/pkg/commons/db/migrations/postgres/20241022072552_head_notification_status_tracking_table.sql new file mode 100644 index 000000000..637704a10 --- /dev/null +++ b/backend/pkg/commons/db/migrations/postgres/20241022072552_head_notification_status_tracking_table.sql @@ -0,0 +1,16 @@ +-- +goose Up +-- +goose StatementBegin +SELECT 'creating epochs_notified_head table'; +CREATE TABLE IF NOT EXISTS epochs_notified_head ( + epoch INTEGER NOT NULL, + event_name VARCHAR(255) NOT NULL, + senton TIMESTAMP WITHOUT TIME ZONE NOT NULL, + PRIMARY KEY (epoch, event_name) +); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +SELECT 'dropping epochs_notified_head table'; +DROP TABLE IF EXISTS epochs_notified_head; +-- +goose StatementEnd diff --git a/backend/pkg/commons/db/subscriptions.go b/backend/pkg/commons/db/subscriptions.go index 3edbc3d5f..b732d2db6 100644 --- a/backend/pkg/commons/db/subscriptions.go +++ b/backend/pkg/commons/db/subscriptions.go @@ -1,7 +1,7 @@ package db import ( - "database/sql" + "context" "encoding/hex" "fmt" @@ -261,8 +261,8 @@ func GetSubscriptions(filter GetSubscriptionsFilter) ([]*types.Subscription, err } // UpdateSubscriptionsLastSent updates `last_sent_ts` column of the `users_subscriptions` table. -func UpdateSubscriptionsLastSent(subscriptionIDs []uint64, sent time.Time, epoch uint64, useDB *sqlx.DB) error { - _, err := useDB.Exec(` +func UpdateSubscriptionsLastSent(subscriptionIDs []uint64, sent time.Time, epoch uint64) error { + _, err := FrontendWriterDB.Exec(` UPDATE users_subscriptions SET last_sent_ts = TO_TIMESTAMP($1), last_sent_epoch = $2 WHERE id = ANY($3)`, sent.Unix(), epoch, pq.Array(subscriptionIDs)) @@ -278,22 +278,19 @@ func UpdateSubscriptionLastSent(tx *sqlx.Tx, ts uint64, epoch uint64, subID uint return err } -// CountSentMail increases the count of sent mails in the table `mails_sent` for this day. -func CountSentMail(email string) error { +// CountSentMessage increases the count of sent messages for this day given a specific prefix and userId +func CountSentMessage(prefix string, userId types.UserId) (int64, error) { day := time.Now().Truncate(utils.Day).Unix() - _, err := FrontendWriterDB.Exec(` - INSERT INTO mails_sent (email, ts, cnt) VALUES ($1, TO_TIMESTAMP($2), 1) - ON CONFLICT (email, ts) DO UPDATE SET cnt = mails_sent.cnt+1`, email, day) - return err -} + key := fmt.Sprintf("%s:%d:%d", prefix, userId, day) + + pipe := PersistentRedisDbClient.TxPipeline() + incr := pipe.Incr(context.Background(), key) + pipe.Expire(context.Background(), key, utils.Day) + _, err := pipe.Exec(context.Background()) -// GetMailsSentCount returns the number of sent mails for the day of the passed time. 
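The Redis pipeline above is the whole rate-limit bookkeeping: the day is baked into the key, so each counter lives at most one day and the EXPIRE is only a safety net. For illustration, a minimal self-contained sketch of the same pattern, assuming a go-redis v9 client (the rdb variable and the key layout here are illustrative, not the project's actual wiring):

package counter

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

// CountSent increments a per-user, per-day counter and returns the new value.
// The day is part of the key, so a one-day TTL is enough to garbage-collect it.
func CountSent(ctx context.Context, rdb *redis.Client, prefix string, userId uint64) (int64, error) {
	day := time.Now().Truncate(24 * time.Hour).Unix()
	key := fmt.Sprintf("%s:%d:%d", prefix, userId, day)

	pipe := rdb.TxPipeline()
	incr := pipe.Incr(ctx, key)
	pipe.Expire(ctx, key, 24*time.Hour) // safety net; the dated key is never reused
	if _, err := pipe.Exec(ctx); err != nil {
		return 0, err
	}
	return incr.Val(), nil
}

Because the INCR result is read only after Exec, the caller sees the post-increment count, which is what lets SendMailRateLimited later in this patch detect the exact message that crosses the daily limit.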
-func GetMailsSentCount(email string, t time.Time) (int, error) { - day := t.Truncate(utils.Day).Unix() - count := 0 - err := FrontendWriterDB.Get(&count, "SELECT cnt FROM mails_sent WHERE email = $1 AND ts = TO_TIMESTAMP($2)", email, day) - if err == sql.ErrNoRows { - return 0, nil + if incr.Err() != nil { + return 0, incr.Err() } - return count, err + + return incr.Val(), err } diff --git a/backend/pkg/commons/db/user.go b/backend/pkg/commons/db/user.go new file mode 100644 index 000000000..8ad275f8b --- /dev/null +++ b/backend/pkg/commons/db/user.go @@ -0,0 +1,466 @@ +package db + +import ( + "context" + "database/sql" + "errors" + "fmt" + "math" + "time" + + t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/jmoiron/sqlx" +) + +var ErrNotFound = errors.New("not found") + +const hour uint64 = 3600 +const day = 24 * hour +const week = 7 * day +const month = 30 * day +const maxJsInt uint64 = 9007199254740991 // 2^53-1 (max safe int in JS) + +var freeTierProduct t.PremiumProduct = t.PremiumProduct{ + ProductName: "Free", + PremiumPerks: t.PremiumPerks{ + AdFree: false, + ValidatorDashboards: 1, + ValidatorsPerDashboard: 20, + ValidatorGroupsPerDashboard: 1, + ShareCustomDashboards: false, + ManageDashboardViaApi: false, + BulkAdding: false, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 0, + Hourly: 12 * hour, + Daily: 0, + Weekly: 0, + }, + EmailNotificationsPerDay: 10, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 1, + WebhookEndpoints: 1, + MobileAppCustomThemes: false, + MobileAppWidget: false, + MonitorMachines: 1, + MachineMonitoringHistorySeconds: 3600 * 3, + NotificationsMachineCustomThreshold: false, + NotificationsValidatorDashboardGroupEfficiency: false, + }, + PricePerMonthEur: 0, + PricePerYearEur: 0, + ProductIdMonthly: "premium_free", + ProductIdYearly: "premium_free.yearly", +} + +var adminPerks = t.PremiumPerks{ + AdFree: false, // admins want to see ads to check ad configuration + ValidatorDashboards: maxJsInt, + ValidatorsPerDashboard: maxJsInt, + ValidatorGroupsPerDashboard: maxJsInt, + ShareCustomDashboards: true, + ManageDashboardViaApi: true, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: maxJsInt, + Hourly: maxJsInt, + Daily: maxJsInt, + Weekly: maxJsInt, + }, + EmailNotificationsPerDay: maxJsInt, + ConfigureNotificationsViaApi: true, + ValidatorGroupNotifications: maxJsInt, + WebhookEndpoints: maxJsInt, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: maxJsInt, + MachineMonitoringHistorySeconds: maxJsInt, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardGroupEfficiency: true, +} + +func GetUserInfo(ctx context.Context, userId uint64, userDbReader *sqlx.DB) (*t.UserInfo, error) { + // TODO @patrick post-beta improve and unmock + userInfo := &t.UserInfo{ + Id: userId, + ApiKeys: []string{}, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 10, + UnitsPerMonth: 10, + ApiKeys: 4, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + Subscriptions: []t.UserSubscription{}, + } + + productSummary, err := GetProductSummary(ctx) + if err != nil { + return nil, fmt.Errorf("error getting productSummary: %w", err) + } + + result := struct { + Email string `db:"email"` + UserGroup string `db:"user_group"` + }{} + err = userDbReader.GetContext(ctx, &result, `SELECT email, COALESCE(user_group, '') as user_group FROM users WHERE id = $1`, 
userId) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("%w: user not found", ErrNotFound) + } + return nil, err + } + userInfo.Email = result.Email + userInfo.UserGroup = result.UserGroup + + userInfo.Email = utils.CensorEmail(userInfo.Email) + + err = userDbReader.SelectContext(ctx, &userInfo.ApiKeys, `SELECT api_key FROM api_keys WHERE user_id = $1`, userId) + if err != nil && err != sql.ErrNoRows { + return nil, fmt.Errorf("error getting userApiKeys for user %v: %w", userId, err) + } + + premiumProduct := struct { + ProductId string `db:"product_id"` + Store string `db:"store"` + Start time.Time `db:"start"` + End time.Time `db:"end"` + }{} + err = userDbReader.GetContext(ctx, &premiumProduct, ` + SELECT + COALESCE(uas.product_id, '') AS product_id, + COALESCE(uas.store, '') AS store, + COALESCE(to_timestamp((uss.payload->>'current_period_start')::bigint),uas.created_at) AS start, + COALESCE(to_timestamp((uss.payload->>'current_period_end')::bigint),uas.expires_at) AS end + FROM users_app_subscriptions uas + LEFT JOIN users_stripe_subscriptions uss ON uss.subscription_id = uas.subscription_id + WHERE uas.user_id = $1 AND uas.active = true AND product_id IN ('orca.yearly', 'orca', 'dolphin.yearly', 'dolphin', 'guppy.yearly', 'guppy', 'whale', 'goldfish', 'plankton') + ORDER BY CASE uas.product_id + WHEN 'orca.yearly' THEN 1 + WHEN 'orca' THEN 2 + WHEN 'dolphin.yearly' THEN 3 + WHEN 'dolphin' THEN 4 + WHEN 'guppy.yearly' THEN 5 + WHEN 'guppy' THEN 6 + WHEN 'whale' THEN 7 + WHEN 'goldfish' THEN 8 + WHEN 'plankton' THEN 9 + ELSE 10 -- For any other product_id values + END, uas.id DESC + LIMIT 1`, userId) + if err != nil { + if err != sql.ErrNoRows { + return nil, fmt.Errorf("error getting premiumProduct for userId %v: %w", userId, err) + } + premiumProduct.ProductId = "premium_free" + premiumProduct.Store = "" + } + + foundProduct := false + for _, p := range productSummary.PremiumProducts { + effectiveProductId := premiumProduct.ProductId + productName := p.ProductName + switch premiumProduct.ProductId { + case "whale": + effectiveProductId = "dolphin" + productName = "Whale" + case "goldfish": + effectiveProductId = "guppy" + productName = "Goldfish" + case "plankton": + effectiveProductId = "guppy" + productName = "Plankton" + } + if p.ProductIdMonthly == effectiveProductId || p.ProductIdYearly == effectiveProductId { + userInfo.PremiumPerks = p.PremiumPerks + foundProduct = true + + store := t.ProductStoreStripe + switch premiumProduct.Store { + case "ios-appstore": + store = t.ProductStoreIosAppstore + case "android-playstore": + store = t.ProductStoreAndroidPlaystore + case "ethpool": + store = t.ProductStoreEthpool + case "manuall": + store = t.ProductStoreCustom + } + + if effectiveProductId != "premium_free" { + userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ + ProductId: premiumProduct.ProductId, + ProductName: productName, + ProductCategory: t.ProductCategoryPremium, + ProductStore: store, + Start: premiumProduct.Start.Unix(), + End: premiumProduct.End.Unix(), + }) + } + break + } + } + if !foundProduct { + return nil, fmt.Errorf("product %s not found", premiumProduct.ProductId) + } + + premiumAddons := []struct { + PriceId string `db:"price_id"` + Start time.Time `db:"start"` + End time.Time `db:"end"` + Quantity int `db:"quantity"` + }{} + err = userDbReader.SelectContext(ctx, &premiumAddons, ` + SELECT + price_id, + to_timestamp((uss.payload->>'current_period_start')::bigint) AS start, + 
to_timestamp((uss.payload->>'current_period_end')::bigint) AS end, + COALESCE((uss.payload->>'quantity')::int,1) AS quantity + FROM users_stripe_subscriptions uss + INNER JOIN users u ON u.stripe_customer_id = uss.customer_id + WHERE u.id = $1 AND uss.active = true AND uss.purchase_group = 'addon'`, userId) + if err != nil { + return nil, fmt.Errorf("error getting premiumAddons for userId %v: %w", userId, err) + } + for _, addon := range premiumAddons { + foundAddon := false + for _, p := range productSummary.ExtraDashboardValidatorsPremiumAddon { + if p.StripePriceIdMonthly == addon.PriceId || p.StripePriceIdYearly == addon.PriceId { + foundAddon = true + for i := 0; i < addon.Quantity; i++ { + userInfo.PremiumPerks.ValidatorsPerDashboard += p.ExtraDashboardValidators + userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ + ProductId: utils.PriceIdToProductId(addon.PriceId), + ProductName: p.ProductName, + ProductCategory: t.ProductCategoryPremiumAddon, + ProductStore: t.ProductStoreStripe, + Start: addon.Start.Unix(), + End: addon.End.Unix(), + }) + } + } + } + if !foundAddon { + return nil, fmt.Errorf("addon not found: %v", addon.PriceId) + } + } + + if productSummary.ValidatorsPerDashboardLimit < userInfo.PremiumPerks.ValidatorsPerDashboard { + userInfo.PremiumPerks.ValidatorsPerDashboard = productSummary.ValidatorsPerDashboardLimit + } + + if userInfo.UserGroup == t.UserGroupAdmin { + userInfo.PremiumPerks = adminPerks + } + + return userInfo, nil +} + +func GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { // TODO @patrick post-beta put into db instead of hardcoding here and make it configurable + return &t.ProductSummary{ + ValidatorsPerDashboardLimit: 102_000, + StripePublicKey: utils.Config.Frontend.Stripe.PublicKey, + ApiProducts: []t.ApiProduct{ // TODO @patrick post-beta this data is not final yet + { + ProductId: "api_free", + ProductName: "Free", + PricePerMonthEur: 0, + PricePerYearEur: 0 * 12, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 10, + UnitsPerMonth: 10_000_000, + ApiKeys: 2, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "iron", + ProductName: "Iron", + PricePerMonthEur: 1.99, + PricePerYearEur: math.Floor(1.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 20, + UnitsPerMonth: 20_000_000, + ApiKeys: 10, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "silver", + ProductName: "Silver", + PricePerMonthEur: 2.99, + PricePerYearEur: math.Floor(2.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 30, + UnitsPerMonth: 100_000_000, + ApiKeys: 20, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "gold", + ProductName: "Gold", + PricePerMonthEur: 3.99, + PricePerYearEur: math.Floor(3.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 40, + UnitsPerMonth: 200_000_000, + ApiKeys: 40, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + }, + PremiumProducts: []t.PremiumProduct{ + freeTierProduct, + { + ProductName: "Guppy", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 1, + ValidatorsPerDashboard: 100, + ValidatorGroupsPerDashboard: 3, + ShareCustomDashboards: true, + ManageDashboardViaApi: false, + BulkAdding: true, + 
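// Note on ChartHistorySeconds (here and in the other tiers below): each
// field presumably caps how many seconds of chart history that resolution
// may serve, with 0 disabling the resolution entirely; the hour/day/week/month
// constants from the top of this file keep the literals readable, e.g.
// Epoch: day means per-epoch charts reach back one day for this tier.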
ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: day, + Hourly: 7 * day, + Daily: month, + Weekly: 0, + }, + EmailNotificationsPerDay: 15, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 3, + WebhookEndpoints: 3, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 2, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardGroupEfficiency: true, + }, + PricePerMonthEur: 9.99, + PricePerYearEur: 107.88, + ProductIdMonthly: "guppy", + ProductIdYearly: "guppy.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Guppy, + StripePriceIdYearly: utils.Config.Frontend.Stripe.GuppyYearly, + }, + { + ProductName: "Dolphin", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 2, + ValidatorsPerDashboard: 300, + ValidatorGroupsPerDashboard: 10, + ShareCustomDashboards: true, + ManageDashboardViaApi: false, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 5 * day, + Hourly: month, + Daily: 2 * month, + Weekly: 8 * week, + }, + EmailNotificationsPerDay: 20, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 10, + WebhookEndpoints: 10, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 10, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardGroupEfficiency: true, + }, + PricePerMonthEur: 29.99, + PricePerYearEur: 311.88, + ProductIdMonthly: "dolphin", + ProductIdYearly: "dolphin.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Dolphin, + StripePriceIdYearly: utils.Config.Frontend.Stripe.DolphinYearly, + }, + { + ProductName: "Orca", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 2, + ValidatorsPerDashboard: 1000, + ValidatorGroupsPerDashboard: 30, + ShareCustomDashboards: true, + ManageDashboardViaApi: true, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 3 * week, + Hourly: 6 * month, + Daily: 12 * month, + Weekly: maxJsInt, + }, + EmailNotificationsPerDay: 50, + ConfigureNotificationsViaApi: true, + ValidatorGroupNotifications: 60, + WebhookEndpoints: 30, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 10, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardGroupEfficiency: true, + }, + PricePerMonthEur: 49.99, + PricePerYearEur: 479.88, + ProductIdMonthly: "orca", + ProductIdYearly: "orca.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Orca, + StripePriceIdYearly: utils.Config.Frontend.Stripe.OrcaYearly, + IsPopular: true, + }, + }, + ExtraDashboardValidatorsPremiumAddon: []t.ExtraDashboardValidatorsPremiumAddon{ + { + ProductName: "1k extra valis per dashboard", + ExtraDashboardValidators: 1000, + PricePerMonthEur: 74.99, + PricePerYearEur: 719.88, + ProductIdMonthly: "vdb_addon_1k", + ProductIdYearly: "vdb_addon_1k.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon1k, + StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon1kYearly, + }, + { + ProductName: "10k extra valis per dashboard", + ExtraDashboardValidators: 10000, + PricePerMonthEur: 449.99, + PricePerYearEur: 4319.88, + ProductIdMonthly: "vdb_addon_10k", + ProductIdYearly: "vdb_addon_10k.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon10k, + StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon10kYearly, + }, + }, + }, nil 
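// The yearly prices above follow two conventions: the ApiProducts derive
// theirs as math.Floor(monthly*12*0.9*100)/100, i.e. twelve months minus a
// 10% discount truncated to cents (Iron: 1.99*12 = 23.88, *0.9 = 21.492,
// floored to 21.49), while the premium tiers hardcode PricePerYearEur as
// twelve discounted months (Guppy: 107.88 = 8.99*12 against 9.99/month).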
+} + +func GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { + return &freeTierProduct.PremiumPerks, nil +} diff --git a/backend/pkg/commons/hexutil/hexutil.go b/backend/pkg/commons/hexutil/hexutil.go new file mode 100644 index 000000000..bd24d6ff9 --- /dev/null +++ b/backend/pkg/commons/hexutil/hexutil.go @@ -0,0 +1,56 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package hexutil + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "strings" ) + +// Bytes marshals/unmarshals as a JSON string with 0x prefix. +// The empty slice marshals as "0x". +type Bytes []byte + +// UnmarshalJSON implements json.Unmarshaler. +func (b *Bytes) UnmarshalJSON(input []byte) error { + var v string + if err := json.Unmarshal(input, &v); err != nil { + return err + } + + v = strings.Replace(v, "0x", "", 1) + + // make sure to have an even length hex string by prefixing odd strings with a single 0, 0x0 will become 0x00 for example + // while hashes and addresses always have an even length, numbers usually don't + if len(v)%2 != 0 { + v = "0" + v + } + + var err error + *b, err = hex.DecodeString(v) + + if err != nil { + return fmt.Errorf("error decoding %s: %v", string(input), err) + } + return err +} + +func (b *Bytes) String() string { + return fmt.Sprintf("0x%x", *b) +} diff --git a/backend/pkg/commons/log/log.go b/backend/pkg/commons/log/log.go index 60684f6cb..15f27eb10 100644 --- a/backend/pkg/commons/log/log.go +++ b/backend/pkg/commons/log/log.go @@ -14,17 +14,17 @@ import ( // Fatal logs a fatal error with callstack info that skips callerSkip many levels with arbitrarily many additional infos. // callerSkip equal to 0 gives you info directly where Fatal is called. func Fatal(err error, errorMsg interface{}, callerSkip int, additionalInfos ...Fields) { - logErrorInfo(err, callerSkip, additionalInfos...).Fatal(errorMsg) + logErrorInfo(err, callerSkip, false, additionalInfos...).Fatal(errorMsg) } // Error logs an error with callstack info that skips callerSkip many levels with arbitrarily many additional infos. // callerSkip equal to 0 gives you info directly where Error is called. func Error(err error, errorMsg interface{}, callerSkip int, additionalInfos ...Fields) { - logErrorInfo(err, callerSkip, additionalInfos...).Error(errorMsg) + logErrorInfo(err, callerSkip, false, additionalInfos...).Error(errorMsg) } func WarnWithStackTrace(err error, errorMsg interface{}, callerSkip int, additionalInfos ...Fields) { - logErrorInfo(err, callerSkip, additionalInfos...).Warn(errorMsg) + logErrorInfo(err, callerSkip, true, additionalInfos...).Warn(errorMsg) } func Info(args ...interface{}) { @@ -67,7 +67,7 @@ func Debugf(format string, args ...interface{}) { logrus.Debugf(format, args...)
} -func logErrorInfo(err error, callerSkip int, additionalInfos ...Fields) *logrus.Entry { +func logErrorInfo(err error, callerSkip int, isWarning bool, additionalInfos ...Fields) *logrus.Entry { logFields := logrus.NewEntry(logrus.New()) metricName := "unknown" @@ -88,7 +88,9 @@ func logErrorInfo(err error, callerSkip int, additionalInfos ...Fields) *logrus. if len(metricName) > 30 { metricName = metricName[len(metricName)-30:] } - metrics.Errors.WithLabelValues(metricName).Inc() + if !isWarning { + metrics.Errors.WithLabelValues(metricName).Inc() + } errColl := []string{} for { diff --git a/backend/pkg/commons/mail/mail.go b/backend/pkg/commons/mail/mail.go index df0ae9e3f..7e2491514 100644 --- a/backend/pkg/commons/mail/mail.go +++ b/backend/pkg/commons/mail/mail.go @@ -3,6 +3,7 @@ package mail import ( "bytes" "context" + "html/template" "fmt" "net/smtp" @@ -71,28 +72,40 @@ func createTextMessage(msg types.Email) string { // SendMailRateLimited sends an email to a given address with the given message. // It will return a ratelimit-error if the configured ratelimit is exceeded. -func SendMailRateLimited(to, subject string, msg types.Email, attachment []types.EmailAttachment) error { - if utils.Config.Frontend.MaxMailsPerEmailPerDay > 0 { - now := time.Now() - count, err := db.GetMailsSentCount(to, now) - if err != nil { - return err - } - if count >= utils.Config.Frontend.MaxMailsPerEmailPerDay { - timeLeft := now.Add(utils.Day).Truncate(utils.Day).Sub(now) - return &types.RateLimitError{TimeLeft: timeLeft} - } +func SendMailRateLimited(content types.TransitEmailContent, maxEmailsPerDay int64, bucket string) error { + sendThresholdReachedMail := false + count, err := db.CountSentMessage(bucket, content.UserId) + if err != nil { + return err } + timeLeft := time.Until(time.Now().Add(utils.Day).Truncate(utils.Day)) - err := db.CountSentMail(to) - if err != nil { - // only log if counting did not work - return fmt.Errorf("error counting sent email: %v", err) + log.Debugf("user %d has sent %d of %d emails today, time left is %v", content.UserId, count, maxEmailsPerDay, timeLeft) + if count > maxEmailsPerDay { + return &types.RateLimitError{TimeLeft: timeLeft} + } else if count == maxEmailsPerDay { + sendThresholdReachedMail = true } - err = SendHTMLMail(to, subject, msg, attachment) + err = SendHTMLMail(content.Address, content.Subject, content.Email, content.Attachments) if err != nil { - return err + log.Error(err, "error sending email", 0) + } + + // make sure the threshold reached email arrives last + if sendThresholdReachedMail { + // send an email if this was the last email for today + err := SendHTMLMail(content.Address, + "beaconcha.in - Email notification threshold limit reached", + types.Email{ + Title: "Email notification threshold limit reached", + //nolint: gosec + Body: template.HTML(fmt.Sprintf("You have reached the email notification threshold limit of %d emails per day. 
Further notification emails will be suppressed for %.1f hours.", maxEmailsPerDay, timeLeft.Hours())), + }, + []types.EmailAttachment{}) + if err != nil { + return err + } } return nil diff --git a/backend/pkg/commons/ratelimit/ratelimit.go b/backend/pkg/commons/ratelimit/ratelimit.go index 8a79d9a75..a57900bba 100644 --- a/backend/pkg/commons/ratelimit/ratelimit.go +++ b/backend/pkg/commons/ratelimit/ratelimit.go @@ -56,6 +56,7 @@ const ( FallbackRateLimitSecond = 20 // RateLimit per second for when redis is offline FallbackRateLimitBurst = 20 // RateLimit burst for when redis is offline + defaultWeight = 1 // if no weight is set for a route, use this one defaultBucket = "default" // if no bucket is set for a route, use this one statsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration @@ -951,7 +952,7 @@ func getWeight(r *http.Request) (cost int64, identifier, bucket string) { bucket, bucketOk := buckets[route] weightsMu.RUnlock() if !weightOk { - weight = 1 + weight = defaultWeight } if !bucketOk { bucket = defaultBucket diff --git a/backend/pkg/commons/types/config.go b/backend/pkg/commons/types/config.go index 5518cb131..c43910f39 100644 --- a/backend/pkg/commons/types/config.go +++ b/backend/pkg/commons/types/config.go @@ -61,11 +61,13 @@ type Config struct { } `yaml:"bigtable"` BlobIndexer struct { S3 struct { - Endpoint string `yaml:"endpoint" envconfig:"BLOB_INDEXER_S3_ENDPOINT"` - Bucket string `yaml:"bucket" envconfig:"BLOB_INDEXER_S3_BUCKET"` - AccessKeyId string `yaml:"accessKeyId" envconfig:"BLOB_INDEXER_S3_ACCESS_KEY_ID"` - AccessKeySecret string `yaml:"accessKeySecret" envconfig:"BLOB_INDEXER_S3_ACCESS_KEY_SECRET"` + Endpoint string `yaml:"endpoint" envconfig:"BLOB_INDEXER_S3_ENDPOINT"` // s3 endpoint + Bucket string `yaml:"bucket" envconfig:"BLOB_INDEXER_S3_BUCKET"` // s3 bucket + AccessKeyId string `yaml:"accessKeyId" envconfig:"BLOB_INDEXER_S3_ACCESS_KEY_ID"` // s3 access key id + AccessKeySecret string `yaml:"accessKeySecret" envconfig:"BLOB_INDEXER_S3_ACCESS_KEY_SECRET"` // s3 access key secret } `yaml:"s3"` + PruneMarginEpochs uint64 `yaml:"pruneMarginEpochs" envconfig:"BLOB_INDEXER_PRUNE_MARGIN_EPOCHS"` // PruneMarginEpochs helps the blobindexer decide whether the connected node has pruned so far back that the stored data would have holes; set it to the same value as the lighthouse flag --blob-prune-margin-epochs + DisableStatusReports bool `yaml:"disableStatusReports" envconfig:"BLOB_INDEXER_DISABLE_STATUS_REPORTS"` // disable status reports (no connection to db needed) } `yaml:"blobIndexer"` Chain struct { Name string `yaml:"name" envconfig:"CHAIN_NAME"` @@ -313,6 +315,8 @@ type Config struct { ApiKeySecret string `yaml:"apiKeySecret" envconfig:"API_KEY_SECRET"` CorsAllowedHosts []string `yaml:"corsAllowedHosts" envconfig:"CORS_ALLOWED_HOSTS"` + + SkipDataAccessServiceInitWait bool `yaml:"skipDataAccessServiceInitWait" envconfig:"SKIP_DATA_ACCESS_SERVICE_INIT_WAIT"` } type InternalAlertDiscord struct { diff --git a/backend/pkg/commons/types/eth1.go b/backend/pkg/commons/types/eth1.go index b9c0873cf..dbaa91f84 100644 --- a/backend/pkg/commons/types/eth1.go +++ b/backend/pkg/commons/types/eth1.go @@ -1,9 +1,114 @@ package types -import "time" +import ( + "time" + + "github.com/gobitfly/beaconchain/pkg/commons/hexutil" +) type GetBlockTimings struct { Headers time.Duration Receipts time.Duration Traces time.Duration } + +type Eth1RpcGetBlockResponse struct { + JsonRpc string `json:"jsonrpc"` + Id int `json:"id"` + Result struct { + BaseFeePerGas hexutil.Bytes 
`json:"baseFeePerGas"` + Difficulty hexutil.Bytes `json:"difficulty"` + ExtraData hexutil.Bytes `json:"extraData"` + GasLimit hexutil.Bytes `json:"gasLimit"` + GasUsed hexutil.Bytes `json:"gasUsed"` + Hash hexutil.Bytes `json:"hash"` + LogsBloom hexutil.Bytes `json:"logsBloom"` + Miner hexutil.Bytes `json:"miner"` + MixHash hexutil.Bytes `json:"mixHash"` + Nonce hexutil.Bytes `json:"nonce"` + Number hexutil.Bytes `json:"number"` + ParentHash hexutil.Bytes `json:"parentHash"` + ReceiptsRoot hexutil.Bytes `json:"receiptsRoot"` + Sha3Uncles hexutil.Bytes `json:"sha3Uncles"` + Size hexutil.Bytes `json:"size"` + StateRoot hexutil.Bytes `json:"stateRoot"` + Timestamp hexutil.Bytes `json:"timestamp"` + TotalDifficulty hexutil.Bytes `json:"totalDifficulty"` + Transactions []struct { + BlockHash hexutil.Bytes `json:"blockHash"` + BlockNumber hexutil.Bytes `json:"blockNumber"` + From hexutil.Bytes `json:"from"` + Gas hexutil.Bytes `json:"gas"` + GasPrice hexutil.Bytes `json:"gasPrice"` + Hash hexutil.Bytes `json:"hash"` + Input hexutil.Bytes `json:"input"` + Nonce hexutil.Bytes `json:"nonce"` + To hexutil.Bytes `json:"to"` + TransactionIndex hexutil.Bytes `json:"transactionIndex"` + Value hexutil.Bytes `json:"value"` + Type hexutil.Bytes `json:"type"` + V hexutil.Bytes `json:"v"` + R hexutil.Bytes `json:"r"` + S hexutil.Bytes `json:"s"` + ChainID hexutil.Bytes `json:"chainId"` + MaxFeePerGas hexutil.Bytes `json:"maxFeePerGas"` + MaxPriorityFeePerGas hexutil.Bytes `json:"maxPriorityFeePerGas"` + + AccessList []struct { + Address hexutil.Bytes `json:"address"` + StorageKeys []hexutil.Bytes `json:"storageKeys"` + } `json:"accessList"` + + // Optimism specific fields + YParity hexutil.Bytes `json:"yParity"` + Mint hexutil.Bytes `json:"mint"` // The ETH value to mint on L2. + SourceHash hexutil.Bytes `json:"sourceHash"` // the source-hash, uniquely identifies the origin of the deposit. 
+ + // Arbitrum specific fields + // Arbitrum Nitro + RequestId hexutil.Bytes `json:"requestId"` // On L1 to L2 transactions, this field is added to indicate position in the Inbox queue + RefundTo hexutil.Bytes `json:"refundTo"` // + L1BaseFee hexutil.Bytes `json:"l1BaseFee"` // + DepositValue hexutil.Bytes `json:"depositValue"` // + RetryTo hexutil.Bytes `json:"retryTo"` // nil means contract creation + RetryValue hexutil.Bytes `json:"retryValue"` // wei amount + RetryData hexutil.Bytes `json:"retryData"` // contract invocation input data + Beneficiary hexutil.Bytes `json:"beneficiary"` // + MaxSubmissionFee hexutil.Bytes `json:"maxSubmissionFee"` // + TicketId hexutil.Bytes `json:"ticketId"` // + MaxRefund hexutil.Bytes `json:"maxRefund"` // the maximum refund sent to RefundTo (the rest goes to From) + SubmissionFeeRefund hexutil.Bytes `json:"submissionFeeRefund"` // the submission fee to refund if successful (capped by MaxRefund) + + // Arbitrum Classic + L1SequenceNumber hexutil.Bytes `json:"l1SequenceNumber"` + ParentRequestId hexutil.Bytes `json:"parentRequestId"` + IndexInParent hexutil.Bytes `json:"indexInParent"` + ArbType hexutil.Bytes `json:"arbType"` + ArbSubType hexutil.Bytes `json:"arbSubType"` + L1BlockNumber hexutil.Bytes `json:"l1BlockNumber"` + } `json:"transactions"` + TransactionsRoot hexutil.Bytes `json:"transactionsRoot"` + + Withdrawals []struct { + Index hexutil.Bytes `json:"index"` + ValidatorIndex hexutil.Bytes `json:"validatorIndex"` + Address hexutil.Bytes `json:"address"` + Amount hexutil.Bytes `json:"amount"` + } `json:"withdrawals"` + WithdrawalsRoot hexutil.Bytes `json:"withdrawalsRoot"` + + Uncles []hexutil.Bytes `json:"uncles"` + + // Optimism specific fields + + // Arbitrum specific fields + L1BlockNumber hexutil.Bytes `json:"l1BlockNumber"` // An approximate L1 block number that occurred before this L2 block. 
+ SendCount hexutil.Bytes `json:"sendCount"` // The number of L2 to L1 messages since Nitro genesis + SendRoot hexutil.Bytes `json:"sendRoot"` // The Merkle root of the outbox tree state + } `json:"result"` + + Error struct { + Code int `json:"code"` + Message string `json:"message"` + } `json:"error"` +} diff --git a/backend/pkg/commons/types/exporter.go b/backend/pkg/commons/types/exporter.go index 5a3f3a397..197638472 100644 --- a/backend/pkg/commons/types/exporter.go +++ b/backend/pkg/commons/types/exporter.go @@ -709,3 +709,12 @@ type RedisCachedValidatorsMapping struct { Epoch Epoch Mapping []*CachedValidator } + +type SlashingInfo struct { + Epoch uint64 `db:"epoch"` + SlasherIndex uint64 `db:"slasher"` + SlasherPubkey string `db:"slasher_pubkey"` + SlashedValidatorIndex uint64 `db:"slashedvalidator"` + SlashedValidatorPubkey []byte `db:"slashedvalidator_pubkey"` + Reason string `db:"reason"` +} diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index ee589fcbb..60e0a7f48 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -4,14 +4,16 @@ import ( "database/sql" "database/sql/driver" "encoding/json" + "fmt" "html/template" "math/big" "strings" "time" - "firebase.google.com/go/messaging" + "firebase.google.com/go/v4/messaging" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/lib/pq" "github.com/pkg/errors" "golang.org/x/text/cases" @@ -19,141 +21,240 @@ import ( ) type EventName string +type EventFilter string + +type NotificationsPerUserId map[UserId]NotificationsPerDashboard +type NotificationsPerDashboard map[DashboardId]NotificationsPerDashboardGroup +type NotificationsPerDashboardGroup map[DashboardGroupId]NotificationsPerEventName +type NotificationsPerEventName map[EventName]NotificationsPerEventFilter +type NotificationsPerEventFilter map[EventFilter]Notification + +func (npui NotificationsPerUserId) AddNotification(n Notification) { + if n.GetUserId() == 0 { + log.Fatal(fmt.Errorf("Notification user id is 0"), fmt.Sprintf("Notification: %v", n), 1) + } + if n.GetEventName() == "" { + log.Fatal(fmt.Errorf("Notification event name is empty"), fmt.Sprintf("Notification: %v", n), 1) + } + + dashboardId := DashboardId(0) + dashboardGroupId := DashboardGroupId(0) + if n.GetDashboardId() != nil { + dashboardId = DashboardId(*n.GetDashboardId()) + dashboardGroupId = DashboardGroupId(*n.GetDashboardGroupId()) + } + + // next check is disabled as there are events that do not require a filter (rocketpool, network events) + // if n.GetEventFilter() == "" { + // log.Fatal(fmt.Errorf("Notification event filter is empty"), fmt.Sprintf("Notification: %v", n), 0) + // } + + if _, ok := npui[n.GetUserId()]; !ok { + npui[n.GetUserId()] = make(NotificationsPerDashboard) + } + if _, ok := npui[n.GetUserId()][dashboardId]; !ok { + npui[n.GetUserId()][dashboardId] = make(NotificationsPerDashboardGroup) + } + if _, ok := npui[n.GetUserId()][dashboardId][dashboardGroupId]; !ok { + npui[n.GetUserId()][dashboardId][dashboardGroupId] = make(NotificationsPerEventName) + } + if _, ok := npui[n.GetUserId()][dashboardId][dashboardGroupId][n.GetEventName()]; !ok { + npui[n.GetUserId()][dashboardId][dashboardGroupId][n.GetEventName()] = make(NotificationsPerEventFilter) + } + npui[n.GetUserId()][dashboardId][dashboardGroupId][n.GetEventName()][EventFilter(n.GetEventFilter())] = n +} const ( - 
ValidatorBalanceDecreasedEventName EventName = "validator_balance_decreased" - ValidatorMissedProposalEventName EventName = "validator_proposal_missed" - ValidatorExecutedProposalEventName EventName = "validator_proposal_submitted" - ValidatorMissedAttestationEventName EventName = "validator_attestation_missed" - ValidatorGotSlashedEventName EventName = "validator_got_slashed" - ValidatorDidSlashEventName EventName = "validator_did_slash" - ValidatorIsOfflineEventName EventName = "validator_is_offline" - ValidatorReceivedWithdrawalEventName EventName = "validator_withdrawal" - ValidatorReceivedDepositEventName EventName = "validator_received_deposit" - NetworkSlashingEventName EventName = "network_slashing" - NetworkValidatorActivationQueueFullEventName EventName = "network_validator_activation_queue_full" - NetworkValidatorActivationQueueNotFullEventName EventName = "network_validator_activation_queue_not_full" - NetworkValidatorExitQueueFullEventName EventName = "network_validator_exit_queue_full" - NetworkValidatorExitQueueNotFullEventName EventName = "network_validator_exit_queue_not_full" - NetworkLivenessIncreasedEventName EventName = "network_liveness_increased" - EthClientUpdateEventName EventName = "eth_client_update" - MonitoringMachineOfflineEventName EventName = "monitoring_machine_offline" - MonitoringMachineDiskAlmostFullEventName EventName = "monitoring_hdd_almostfull" - MonitoringMachineCpuLoadEventName EventName = "monitoring_cpu_load" - MonitoringMachineMemoryUsageEventName EventName = "monitoring_memory_usage" - MonitoringMachineSwitchedToETH2FallbackEventName EventName = "monitoring_fallback_eth2inuse" - MonitoringMachineSwitchedToETH1FallbackEventName EventName = "monitoring_fallback_eth1inuse" - TaxReportEventName EventName = "user_tax_report" - //nolint:misspell - RocketpoolCommissionThresholdEventName EventName = "rocketpool_commision_threshold" - RocketpoolNewClaimRoundStartedEventName EventName = "rocketpool_new_claimround" - //nolint:misspell - RocketpoolCollateralMinReached EventName = "rocketpool_colleteral_min" + ValidatorMissedProposalEventName EventName = "validator_proposal_missed" + ValidatorExecutedProposalEventName EventName = "validator_proposal_submitted" + + ValidatorDidSlashEventName EventName = "validator_did_slash" + + ValidatorReceivedDepositEventName EventName = "validator_received_deposit" + NetworkSlashingEventName EventName = "network_slashing" + NetworkValidatorActivationQueueFullEventName EventName = "network_validator_activation_queue_full" + NetworkValidatorActivationQueueNotFullEventName EventName = "network_validator_activation_queue_not_full" + NetworkValidatorExitQueueFullEventName EventName = "network_validator_exit_queue_full" + NetworkValidatorExitQueueNotFullEventName EventName = "network_validator_exit_queue_not_full" + NetworkLivenessIncreasedEventName EventName = "network_liveness_increased" + TaxReportEventName EventName = "user_tax_report" + SyncCommitteeSoonEventName EventName = "validator_synccommittee_soon" //nolint:misspell - RocketpoolCollateralMaxReached EventName = "rocketpool_colleteral_max" - SyncCommitteeSoon EventName = "validator_synccommittee_soon" + RocketpoolCommissionThresholdEventName EventName = "rocketpool_commision_threshold" + + // Validator dashboard events + ValidatorIsOfflineEventName EventName = "validator_is_offline" + ValidatorIsOnlineEventName EventName = "validator_is_online" + ValidatorGroupEfficiencyEventName EventName = "validator_group_efficiency" + ValidatorMissedAttestationEventName EventName 
= "validator_attestation_missed" + ValidatorProposalEventName EventName = "validator_proposal" + ValidatorUpcomingProposalEventName EventName = "validator_proposal_upcoming" + SyncCommitteeSoon EventName = "validator_synccommittee_soon" + ValidatorReceivedWithdrawalEventName EventName = "validator_withdrawal" + ValidatorGotSlashedEventName EventName = "validator_got_slashed" + RocketpoolCollateralMinReachedEventName EventName = "rocketpool_colleteral_min" //nolint:misspell + RocketpoolCollateralMaxReachedEventName EventName = "rocketpool_colleteral_max" //nolint:misspell + + // Account dashboard events + IncomingTransactionEventName EventName = "incoming_transaction" + OutgoingTransactionEventName EventName = "outgoing_transaction" + ERC20TokenTransferEventName EventName = "erc20_token_transfer" // #nosec G101 + ERC721TokenTransferEventName EventName = "erc721_token_transfer" // #nosec G101 + ERC1155TokenTransferEventName EventName = "erc1155_token_transfer" // #nosec G101 + + // Machine events + MonitoringMachineOfflineEventName EventName = "monitoring_machine_offline" + MonitoringMachineDiskAlmostFullEventName EventName = "monitoring_hdd_almostfull" + MonitoringMachineCpuLoadEventName EventName = "monitoring_cpu_load" + MonitoringMachineMemoryUsageEventName EventName = "monitoring_memory_usage" + + // Client events + EthClientUpdateEventName EventName = "eth_client_update" + + // Network events + RocketpoolNewClaimRoundStartedEventName EventName = "rocketpool_new_claimround" + NetworkGasAboveThresholdEventName EventName = "network_gas_above_threshold" + NetworkGasBelowThresholdEventName EventName = "network_gas_below_threshold" + NetworkParticipationRateThresholdEventName EventName = "network_participation_rate_threshold" ) +var EventSortOrder = []EventName{ + ValidatorUpcomingProposalEventName, + ValidatorGotSlashedEventName, + ValidatorDidSlashEventName, + ValidatorMissedProposalEventName, + ValidatorExecutedProposalEventName, + MonitoringMachineOfflineEventName, + MonitoringMachineDiskAlmostFullEventName, + MonitoringMachineCpuLoadEventName, + MonitoringMachineMemoryUsageEventName, + SyncCommitteeSoonEventName, + ValidatorIsOfflineEventName, + ValidatorIsOnlineEventName, + ValidatorGroupEfficiencyEventName, + ValidatorReceivedWithdrawalEventName, + NetworkLivenessIncreasedEventName, + EthClientUpdateEventName, + TaxReportEventName, + RocketpoolCommissionThresholdEventName, + RocketpoolNewClaimRoundStartedEventName, + RocketpoolCollateralMinReachedEventName, + RocketpoolCollateralMaxReachedEventName, + ValidatorMissedAttestationEventName, +} + var MachineEvents = []EventName{ MonitoringMachineCpuLoadEventName, MonitoringMachineOfflineEventName, MonitoringMachineDiskAlmostFullEventName, - MonitoringMachineCpuLoadEventName, MonitoringMachineMemoryUsageEventName, - MonitoringMachineSwitchedToETH2FallbackEventName, - MonitoringMachineSwitchedToETH1FallbackEventName, } var UserIndexEvents = []EventName{ EthClientUpdateEventName, MonitoringMachineCpuLoadEventName, - EthClientUpdateEventName, MonitoringMachineOfflineEventName, MonitoringMachineDiskAlmostFullEventName, - MonitoringMachineCpuLoadEventName, MonitoringMachineMemoryUsageEventName, - MonitoringMachineSwitchedToETH2FallbackEventName, - MonitoringMachineSwitchedToETH1FallbackEventName, +} + +var UserIndexEventsMap = map[EventName]struct{}{ + EthClientUpdateEventName: {}, + MonitoringMachineCpuLoadEventName: {}, + MonitoringMachineOfflineEventName: {}, + MonitoringMachineDiskAlmostFullEventName: {}, + 
MonitoringMachineMemoryUsageEventName: {}, +} + +var MachineEventsMap = map[EventName]struct{}{ + MonitoringMachineCpuLoadEventName: {}, + MonitoringMachineOfflineEventName: {}, + MonitoringMachineDiskAlmostFullEventName: {}, + MonitoringMachineMemoryUsageEventName: {}, +} + +var LegacyEventLabel map[EventName]string = map[EventName]string{ + ValidatorUpcomingProposalEventName: "Your validator(s) will soon propose a block", + ValidatorGroupEfficiencyEventName: "Your validator group efficiency is low", + ValidatorMissedProposalEventName: "Your validator(s) missed a proposal", + ValidatorExecutedProposalEventName: "Your validator(s) submitted a proposal", + ValidatorMissedAttestationEventName: "Your validator(s) missed an attestation", + ValidatorGotSlashedEventName: "Your validator(s) got slashed", + ValidatorDidSlashEventName: "Your validator(s) slashed another validator", + ValidatorIsOfflineEventName: "Your validator(s) went offline", + ValidatorIsOnlineEventName: "Your validator(s) came back online", + ValidatorReceivedWithdrawalEventName: "A withdrawal was initiated for your validators", + NetworkLivenessIncreasedEventName: "The network is experiencing liveness issues", + EthClientUpdateEventName: "An Ethereum client has a new update available", + MonitoringMachineOfflineEventName: "Your machine(s) might be offline", + MonitoringMachineDiskAlmostFullEventName: "Your machine(s) disk space is running low", + MonitoringMachineCpuLoadEventName: "Your machine(s) has a high CPU load", + MonitoringMachineMemoryUsageEventName: "Your machine(s) has a high memory load", + TaxReportEventName: "You have an available tax report", + RocketpoolCommissionThresholdEventName: "Your configured Rocket Pool commission threshold is reached", + RocketpoolNewClaimRoundStartedEventName: "Your Rocket Pool claim from last round is available", + RocketpoolCollateralMinReachedEventName: "You reached the Rocket Pool min RPL collateral", + RocketpoolCollateralMaxReachedEventName: "You reached the Rocket Pool max RPL collateral", + SyncCommitteeSoonEventName: "Your validator(s) will soon be part of the sync committee", } var EventLabel map[EventName]string = map[EventName]string{ - ValidatorBalanceDecreasedEventName: "Your validator(s) balance decreased", - ValidatorMissedProposalEventName: "Your validator(s) missed a proposal", - ValidatorExecutedProposalEventName: "Your validator(s) submitted a proposal", - ValidatorMissedAttestationEventName: "Your validator(s) missed an attestation", - ValidatorGotSlashedEventName: "Your validator(s) got slashed", - ValidatorDidSlashEventName: "Your validator(s) slashed another validator", - ValidatorIsOfflineEventName: "Your validator(s) state changed", - ValidatorReceivedDepositEventName: "Your validator(s) received a deposit", - ValidatorReceivedWithdrawalEventName: "A withdrawal was initiated for your validators", - NetworkSlashingEventName: "A slashing event has been registered by the network", - NetworkValidatorActivationQueueFullEventName: "The activation queue is full", - NetworkValidatorActivationQueueNotFullEventName: "The activation queue is empty", - NetworkValidatorExitQueueFullEventName: "The validator exit queue is full", - NetworkValidatorExitQueueNotFullEventName: "The validator exit queue is empty", - NetworkLivenessIncreasedEventName: "The network is experiencing liveness issues", - EthClientUpdateEventName: "An Ethereum client has a new update available", - MonitoringMachineOfflineEventName: "Your machine(s) might be offline", - 
MonitoringMachineDiskAlmostFullEventName: "Your machine(s) disk space is running low", - MonitoringMachineCpuLoadEventName: "Your machine(s) has a high CPU load", - MonitoringMachineMemoryUsageEventName: "Your machine(s) has a high memory load", - MonitoringMachineSwitchedToETH2FallbackEventName: "Your machine(s) is using its consensus client fallback", - MonitoringMachineSwitchedToETH1FallbackEventName: "Your machine(s) is using its execution client fallback", - TaxReportEventName: "You have an available tax report", - RocketpoolCommissionThresholdEventName: "Your configured Rocket Pool commission threshold is reached", - RocketpoolNewClaimRoundStartedEventName: "Your Rocket Pool claim from last round is available", - RocketpoolCollateralMinReached: "You reached the Rocket Pool min RPL collateral", - RocketpoolCollateralMaxReached: "You reached the Rocket Pool max RPL collateral", - SyncCommitteeSoon: "Your validator(s) will soon be part of the sync committee", + ValidatorUpcomingProposalEventName: "Upcoming block proposal", + ValidatorGroupEfficiencyEventName: "Low validator group efficiency", + ValidatorMissedProposalEventName: "Block proposal missed", + ValidatorExecutedProposalEventName: "Block proposal submitted", + ValidatorMissedAttestationEventName: "Attestation missed", + ValidatorGotSlashedEventName: "Validator slashed", + ValidatorDidSlashEventName: "Validator has slashed", + ValidatorIsOfflineEventName: "Validator offline", + ValidatorIsOnlineEventName: "Validator back online", + ValidatorReceivedWithdrawalEventName: "Withdrawal processed", + NetworkLivenessIncreasedEventName: "The network is experiencing liveness issues", + EthClientUpdateEventName: "An Ethereum client has a new update available", + MonitoringMachineOfflineEventName: "Machine offline", + MonitoringMachineDiskAlmostFullEventName: "Machine low disk space", + MonitoringMachineCpuLoadEventName: "Machine high CPU load", + MonitoringMachineMemoryUsageEventName: "Machine high memory load", + TaxReportEventName: "Tax report available", + RocketpoolCommissionThresholdEventName: "Rocket Pool commission threshold is reached", + RocketpoolNewClaimRoundStartedEventName: "Rocket Pool claim from last round is available", + RocketpoolCollateralMinReachedEventName: "Rocket Pool node min RPL collateral reached", + RocketpoolCollateralMaxReachedEventName: "Rocket Pool node max RPL collateral reached", + SyncCommitteeSoonEventName: "Upcoming sync committee", } func IsUserIndexed(event EventName) bool { - for _, ev := range UserIndexEvents { - if ev == event { - return true - } - } - return false + _, ok := UserIndexEventsMap[event] + return ok } func IsMachineNotification(event EventName) bool { - for _, ev := range MachineEvents { - if ev == event { - return true - } - } - return false + _, ok := MachineEventsMap[event] + return ok } var EventNames = []EventName{ - ValidatorBalanceDecreasedEventName, ValidatorExecutedProposalEventName, + ValidatorGroupEfficiencyEventName, ValidatorMissedProposalEventName, ValidatorMissedAttestationEventName, ValidatorGotSlashedEventName, ValidatorDidSlashEventName, ValidatorIsOfflineEventName, - ValidatorReceivedDepositEventName, + ValidatorIsOnlineEventName, ValidatorReceivedWithdrawalEventName, - NetworkSlashingEventName, - NetworkValidatorActivationQueueFullEventName, - NetworkValidatorActivationQueueNotFullEventName, - NetworkValidatorExitQueueFullEventName, - NetworkValidatorExitQueueNotFullEventName, NetworkLivenessIncreasedEventName, EthClientUpdateEventName, 
MonitoringMachineOfflineEventName, MonitoringMachineDiskAlmostFullEventName, MonitoringMachineCpuLoadEventName, - MonitoringMachineSwitchedToETH2FallbackEventName, - MonitoringMachineSwitchedToETH1FallbackEventName, MonitoringMachineMemoryUsageEventName, TaxReportEventName, RocketpoolCommissionThresholdEventName, RocketpoolNewClaimRoundStartedEventName, - RocketpoolCollateralMinReached, - RocketpoolCollateralMaxReached, - SyncCommitteeSoon, + RocketpoolCollateralMinReachedEventName, + RocketpoolCollateralMaxReachedEventName, + SyncCommitteeSoonEventName, } type EventNameDesc struct { @@ -164,7 +265,7 @@ type EventNameDesc struct { } type MachineMetricSystemUser struct { - UserID uint64 + UserID UserId Machine string CurrentData *MachineMetricSystem CurrentDataInsertTs int64 @@ -193,7 +294,7 @@ var AddWatchlistEvents = []EventNameDesc{ }, { Desc: "Sync committee", - Event: SyncCommitteeSoon, + Event: SyncCommitteeSoonEventName, }, { Desc: "Attestations missed", @@ -238,36 +339,114 @@ const ( ValidatorTagsWatchlist Tag = "watchlist" ) +type NotificationFormat string + +var NotifciationFormatHtml NotificationFormat = "html" +var NotifciationFormatText NotificationFormat = "text" +var NotifciationFormatMarkdown NotificationFormat = "markdown" + type Notification interface { GetLatestState() string GetSubscriptionID() uint64 GetEventName() EventName GetEpoch() uint64 - GetInfo(includeUrl bool) string + GetInfo(format NotificationFormat) string GetTitle() string + GetLegacyInfo() string + GetLegacyTitle() string GetEventFilter() string + SetEventFilter(filter string) GetEmailAttachment() *EmailAttachment - GetUnsubscribeHash() string - GetInfoMarkdown() string + GetUserId() UserId + GetDashboardId() *int64 + GetDashboardName() string + GetDashboardGroupId() *int64 + GetDashboardGroupName() string + GetEntitiyId() string } -// func UnMarschal +type NotificationBaseImpl struct { + LatestState string + SubscriptionID uint64 + EventName EventName + Epoch uint64 + Info string + Title string + EventFilter string + EmailAttachment *EmailAttachment + UserID UserId + DashboardId *int64 + DashboardName string + DashboardGroupId *int64 + DashboardGroupName string +} + +func (n *NotificationBaseImpl) GetLatestState() string { + return n.LatestState +} + +func (n *NotificationBaseImpl) GetSubscriptionID() uint64 { + return n.SubscriptionID +} + +func (n *NotificationBaseImpl) GetEventName() EventName { + return n.EventName +} + +func (n *NotificationBaseImpl) GetEpoch() uint64 { + return n.Epoch +} -type Subscription struct { - ID *uint64 `db:"id,omitempty"` - UserID *uint64 `db:"user_id,omitempty"` - EventName string `db:"event_name"` - EventFilter string `db:"event_filter"` - LastSent *time.Time `db:"last_sent_ts"` - LastEpoch *uint64 `db:"last_sent_epoch"` - // Channels pq.StringArray `db:"channels"` - CreatedTime time.Time `db:"created_ts"` - CreatedEpoch uint64 `db:"created_epoch"` - EventThreshold float64 `db:"event_threshold"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash" swaggertype:"string"` - State sql.NullString `db:"internal_state" swaggertype:"string"` +func (n *NotificationBaseImpl) GetInfo(format NotificationFormat) string { + return n.Info } +func (n *NotificationBaseImpl) GetTitle() string { + return n.Title +} + +func (n *NotificationBaseImpl) GetLegacyInfo() string { + return n.Info +} + +func (n *NotificationBaseImpl) GetLegacyTitle() string { + return n.Title +} + +func (n *NotificationBaseImpl) GetEventFilter() string { + return n.EventFilter +} + +func (n 
*NotificationBaseImpl) SetEventFilter(filter string) { + n.EventFilter = filter +} + +func (n *NotificationBaseImpl) GetEmailAttachment() *EmailAttachment { + return n.EmailAttachment +} + +func (n *NotificationBaseImpl) GetUserId() UserId { + return n.UserID +} + +func (n *NotificationBaseImpl) GetDashboardId() *int64 { + return n.DashboardId +} + +func (n *NotificationBaseImpl) GetDashboardName() string { + return n.DashboardName +} + +func (n *NotificationBaseImpl) GetDashboardGroupId() *int64 { + return n.DashboardGroupId +} + +func (n *NotificationBaseImpl) GetDashboardGroupName() string { + return n.DashboardGroupName +} + +// func UnMarschal + type TaggedValidators struct { UserID uint64 `db:"user_id"` Tag string `db:"tag"` @@ -342,6 +521,8 @@ type TransitEmailContent struct { Subject string `json:"subject,omitempty"` Email Email `json:"email,omitempty"` Attachments []EmailAttachment `json:"attachments,omitempty"` + UserId UserId `json:"userId,omitempty"` + CreatedTs time.Time `json:"-"` } func (e *TransitEmailContent) Scan(value interface{}) error { @@ -369,6 +550,7 @@ type TransitWebhook struct { type TransitWebhookContent struct { Webhook UserWebhook Event WebhookEvent `json:"event"` + UserId UserId `json:"userId"` } type WebhookEvent struct { @@ -405,6 +587,7 @@ type TransitDiscord struct { type TransitDiscordContent struct { Webhook UserWebhook DiscordRequest DiscordReq `json:"discordRequest"` + UserId UserId `json:"userId"` } func (e *TransitDiscordContent) Scan(value interface{}) error { @@ -431,6 +614,7 @@ type TransitPush struct { type TransitPushContent struct { Messages []*messaging.Message + UserId UserId `json:"userId"` } func (e *TransitPushContent) Scan(value interface{}) error { @@ -455,7 +639,6 @@ type Email struct { Title string Body template.HTML SubscriptionManageURL template.HTML - UnsubURL template.HTML } type UserWebhook struct { diff --git a/backend/pkg/commons/types/notifications.go b/backend/pkg/commons/types/notifications.go new file mode 100644 index 000000000..faceca92c --- /dev/null +++ b/backend/pkg/commons/types/notifications.go @@ -0,0 +1,43 @@ +package types + +import ( + "time" + + "github.com/gobitfly/beaconchain/pkg/consapi/types" +) + +type UserId uint64 +type DashboardId uint64 +type DashboardGroupId uint64 +type ValidatorDashboardConfig struct { + DashboardsById map[DashboardId]*ValidatorDashboard + RocketpoolNodeByPubkey map[string]string +} + +type Subscription struct { + ID *uint64 `db:"id,omitempty"` + UserID *UserId `db:"user_id,omitempty"` + EventName EventName `db:"event_name"` + EventFilter string `db:"event_filter"` + LastSent *time.Time `db:"last_sent_ts"` + LastEpoch *uint64 `db:"last_sent_epoch"` + // Channels pq.StringArray `db:"channels"` + CreatedTime time.Time `db:"created_ts"` + CreatedEpoch uint64 `db:"created_epoch"` + EventThreshold float64 `db:"event_threshold"` + // State sql.NullString `db:"internal_state" swaggertype:"string"` + DashboardId *int64 `db:"-"` + DashboardName string `db:"-"` + DashboardGroupId *int64 `db:"-"` + DashboardGroupName string `db:"-"` +} + +type ValidatorDashboard struct { + Name string `db:"name"` + Groups map[DashboardGroupId]*ValidatorDashboardGroup +} + +type ValidatorDashboardGroup struct { + Name string `db:"name"` + Validators []types.ValidatorIndex +} diff --git a/backend/pkg/commons/utils/config.go b/backend/pkg/commons/utils/config.go index 1cc3179eb..64a783b78 100644 --- a/backend/pkg/commons/utils/config.go +++ b/backend/pkg/commons/utils/config.go @@ -262,6 +262,7 @@ func 
ReadConfig(cfg *types.Config, path string) error { "mainCurrency": cfg.Frontend.MainCurrency, }, "did init config") + Config = cfg return nil } @@ -358,15 +359,19 @@ func setCLConfig(cfg *types.Config) error { maxForkEpoch := uint64(18446744073709551615) if jr.Data.AltairForkEpoch == nil { + log.Warnf("AltairForkEpoch not set, defaulting to maxForkEpoch") jr.Data.AltairForkEpoch = &maxForkEpoch } if jr.Data.BellatrixForkEpoch == nil { + log.Warnf("BellatrixForkEpoch not set, defaulting to maxForkEpoch") jr.Data.BellatrixForkEpoch = &maxForkEpoch } if jr.Data.CapellaForkEpoch == nil { + log.Warnf("CapellaForkEpoch not set, defaulting to maxForkEpoch") jr.Data.CapellaForkEpoch = &maxForkEpoch } if jr.Data.DenebForkEpoch == nil { + log.Warnf("DenebForkEpoch not set, defaulting to maxForkEpoch") jr.Data.DenebForkEpoch = &maxForkEpoch } diff --git a/backend/pkg/commons/utils/db.go b/backend/pkg/commons/utils/db.go index 55563405f..e078b1f11 100644 --- a/backend/pkg/commons/utils/db.go +++ b/backend/pkg/commons/utils/db.go @@ -14,7 +14,13 @@ var duplicateEntryError = &pgconn.PgError{Code: "23505"} func Rollback(tx *sqlx.Tx) { err := tx.Rollback() if err != nil && !errors.Is(err, sql.ErrTxDone) { - log.Error(err, "error rolling back transaction", 0) + log.Error(err, "error rolling back transaction", 1) + } +} +func ClosePreparedStatement(stmt *sqlx.Stmt) { + err := stmt.Close() + if err != nil { + log.Error(err, "error closing prepared statement", 1) } } diff --git a/backend/pkg/commons/utils/efficiency.go b/backend/pkg/commons/utils/efficiency.go new file mode 100644 index 000000000..5bb7cd57c --- /dev/null +++ b/backend/pkg/commons/utils/efficiency.go @@ -0,0 +1,25 @@ +package utils + +import "database/sql" + +func CalculateTotalEfficiency(attestationEff, proposalEff, syncEff sql.NullFloat64) float64 { + efficiency := float64(0) + + if !attestationEff.Valid && !proposalEff.Valid && !syncEff.Valid { + efficiency = 0 + } else if attestationEff.Valid && !proposalEff.Valid && !syncEff.Valid { + efficiency = attestationEff.Float64 * 100.0 + } else if attestationEff.Valid && proposalEff.Valid && !syncEff.Valid { + efficiency = ((56.0 / 64.0 * attestationEff.Float64) + (8.0 / 64.0 * proposalEff.Float64)) * 100.0 + } else if attestationEff.Valid && !proposalEff.Valid && syncEff.Valid { + efficiency = ((62.0 / 64.0 * attestationEff.Float64) + (2.0 / 64.0 * syncEff.Float64)) * 100.0 + } else { + efficiency = (((54.0 / 64.0) * attestationEff.Float64) + ((8.0 / 64.0) * proposalEff.Float64) + ((2.0 / 64.0) * syncEff.Float64)) * 100.0 + } + + if efficiency < 0 { + efficiency = 0 + } + + return efficiency +} diff --git a/backend/pkg/commons/utils/utils.go b/backend/pkg/commons/utils/utils.go index c84aca8d3..4a9e1538a 100644 --- a/backend/pkg/commons/utils/utils.go +++ b/backend/pkg/commons/utils/utils.go @@ -396,3 +396,10 @@ func Deduplicate(slice []uint64) []uint64 { } return list } + +func FirstN(input string, n int) string { + if len(input) <= n { + return input + } + return input[:n] +} diff --git a/backend/pkg/consapi/types/blobs.go b/backend/pkg/consapi/types/blobs.go index 236cefd8f..c8a94c60b 100644 --- a/backend/pkg/consapi/types/blobs.go +++ b/backend/pkg/consapi/types/blobs.go @@ -4,13 +4,20 @@ import "github.com/ethereum/go-ethereum/common/hexutil" type StandardBlobSidecarsResponse struct { Data []struct { - BlockRoot hexutil.Bytes `json:"block_root"` - Index uint64 `json:"index,string"` - Slot uint64 `json:"slot,string"` - BlockParentRoot hexutil.Bytes `json:"block_parent_root"` - ProposerIndex 
diff --git a/backend/pkg/exporter/db/db.go b/backend/pkg/exporter/db/db.go index b58c72c6a..033d652ea 100644 --- a/backend/pkg/exporter/db/db.go +++ b/backend/pkg/exporter/db/db.go @@ -523,6 +523,16 @@ func SaveValidators(epoch uint64, validators []*types.Validator, client rpc.Clie return fmt.Errorf("error preparing insert validator statement: %w", err) } + validatorStatusUpdateStmt, err := tx.Prepare(`UPDATE validators SET status = $1 WHERE validatorindex = $2;`) + if err != nil { + return fmt.Errorf("error preparing update validator status statement: %w", err) + } + + log.Info("updating validator status and metadata") + validatorUpdateTs := time.Now() + + validatorStatusCounts := make(map[string]int) + updates := 0 for _, v := range validators { // exchange farFutureEpoch with the corresponding max sql value @@ -564,6 +574,7 @@ func SaveValidators(epoch uint64, validators []*types.Validator, client rpc.Clie if err != nil { log.Error(err, "error saving new validator", 0, map[string]interface{}{"index": v.Index}) } + validatorStatusCounts[v.Status]++ } else { // status = // CASE @@ -609,11 +620,16 @@ func SaveValidators(epoch uint64, validators []*types.Validator, client rpc.Clie v.Status = string(constypes.DbActiveOnline) } + validatorStatusCounts[v.Status]++
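Aside: the SaveValidators hunk above replaces string-built batch UPDATEs with a statement that is prepared once inside the transaction, executed per changed validator, and closed before commit. A minimal sketch of that pattern with sqlx (connection string and data are illustrative):

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func updateStatuses(db *sqlx.DB, statusByIndex map[uint64]string) error {
	tx, err := db.Beginx()
	if err != nil {
		return err
	}
	defer tx.Rollback() // returns sql.ErrTxDone after a successful Commit, which is fine to ignore here

	// Prepared once, executed many times: the statement is parsed and planned a single time.
	stmt, err := tx.Prepare(`UPDATE validators SET status = $1 WHERE validatorindex = $2;`)
	if err != nil {
		return err
	}
	defer stmt.Close()

	for index, status := range statusByIndex {
		if _, err := stmt.Exec(status, index); err != nil {
			return err
		}
	}
	return tx.Commit()
}

func main() {
	db := sqlx.MustConnect("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
	if err := updateStatuses(db, map[uint64]string{42: "active_online"}); err != nil {
		log.Fatal(err)
	}
}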
if c.Status != v.Status { - log.Infof("Status changed for validator %v from %v to %v", v.Index, c.Status, v.Status) - log.Infof("v.ActivationEpoch %v, latestEpoch %v, lastAttestationSlots[v.Index] %v, thresholdSlot %v, lastGlobalAttestedEpoch: %v, lastValidatorAttestedEpoch: %v", v.ActivationEpoch, latestEpoch, lastAttestationSlot, thresholdSlot, lastGlobalAttestedEpoch, lastValidatorAttestedEpoch) - queries.WriteString(fmt.Sprintf("UPDATE validators SET status = '%s' WHERE validatorindex = %d;\n", v.Status, c.Index)) - updates++ + log.Debugf("Status changed for validator %v from %v to %v", v.Index, c.Status, v.Status) + log.Debugf("v.ActivationEpoch %v, latestEpoch %v, lastAttestationSlots[v.Index] %v, thresholdSlot %v, lastGlobalAttestedEpoch: %v, lastValidatorAttestedEpoch: %v", v.ActivationEpoch, latestEpoch, lastAttestationSlot, thresholdSlot, lastGlobalAttestedEpoch, lastValidatorAttestedEpoch) + //queries.WriteString(fmt.Sprintf("UPDATE validators SET status = '%s' WHERE validatorindex = %d;\n", v.Status, c.Index)) + _, err := validatorStatusUpdateStmt.Exec(v.Status, c.Index) + if err != nil { + return fmt.Errorf("error updating validator status: %w", err) + } + //updates++ } // if c.Balance != v.Balance { // // log.LogInfo("Balance changed for validator %v from %v to %v", v.Index, c.Balance, v.Balance) @@ -658,6 +674,11 @@ func SaveValidators(epoch uint64, validators []*types.Validator, client rpc.Clie } } + err = validatorStatusUpdateStmt.Close() + if err != nil { + return fmt.Errorf("error closing validator status update statement: %w", err) + } + err = insertStmt.Close() if err != nil { return fmt.Errorf("error closing insert validator statement: %w", err) @@ -673,6 +694,7 @@ func SaveValidators(epoch uint64, validators []*types.Validator, client rpc.Clie } log.Infof("validator table update completed, took %v", time.Since(updateStart)) } + log.Infof("updating validator status and metadata completed, took %v", time.Since(validatorUpdateTs)) s := time.Now() newValidators := []struct { @@ -734,9 +756,22 @@ func SaveValidators(epoch uint64, validators []*types.Validator, client rpc.Clie return fmt.Errorf("error updating activation epoch balance for validator %v: %w", newValidator.Validatorindex, err) } } - log.Infof("updating validator activation epoch balance completed, took %v", time.Since(s)) + log.Infof("updating validator status counts") + s = time.Now() + _, err = tx.Exec("TRUNCATE TABLE validators_status_counts;") + if err != nil { + return fmt.Errorf("error truncating validators_status_counts table: %w", err) + } + for status, count := range validatorStatusCounts { + _, err = tx.Exec("INSERT INTO validators_status_counts (status, validator_count) VALUES ($1, $2);", status, count) + if err != nil { + return fmt.Errorf("error updating validator status counts: %w", err) + } + } + log.Infof("updating validator status counts completed, took %v", time.Since(s)) + s = time.Now() _, err = tx.Exec("ANALYZE (SKIP_LOCKED) validators;") if err != nil { diff --git a/backend/pkg/exporter/modules/base.go b/backend/pkg/exporter/modules/base.go index 300f44229..1dba9ed01 100644 --- a/backend/pkg/exporter/modules/base.go +++ b/backend/pkg/exporter/modules/base.go @@ -32,8 +32,8 @@ type ModuleInterface interface { var Client *rpc.Client // Start will start the export of data from rpc into the database -func StartAll(context ModuleContext) { - if !utils.Config.JustV2 { +func StartAll(context ModuleContext, modules []ModuleInterface, justV2 bool) { + if !justV2 { go networkLivenessUpdater(context.ConsClient) go genesisDepositsExporter(context.ConsClient) go syncCommitteesExporter(context.ConsClient) @@ -65,19 +65,6 @@ } // start subscription modules - - modules := []ModuleInterface{} - - if utils.Config.JustV2 { - modules = append(modules, NewDashboardDataModule(context)) - } else { - modules = append(modules, - NewSlotExporter(context), - NewExecutionDepositsExporter(context), - NewExecutionPayloadsExporter(context), - ) - } - startSubscriptionModules(&context, modules) } diff --git a/backend/pkg/exporter/modules/execution_deposits_exporter.go b/backend/pkg/exporter/modules/execution_deposits_exporter.go index
41fab0f25..ef7d2129f 100644 --- a/backend/pkg/exporter/modules/execution_deposits_exporter.go +++ b/backend/pkg/exporter/modules/execution_deposits_exporter.go @@ -8,7 +8,7 @@ import ( "encoding/hex" "fmt" "math/big" - "sync" + "sync/atomic" "time" "github.com/attestantio/go-eth2-client/spec/phase0" @@ -19,13 +19,16 @@ import ( gethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" gethrpc "github.com/ethereum/go-ethereum/rpc" + "github.com/go-redis/redis/v8" "golang.org/x/exp/maps" + "golang.org/x/sync/errgroup" "github.com/gobitfly/beaconchain/pkg/commons/contracts/deposit_contract" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/metrics" "github.com/gobitfly/beaconchain/pkg/commons/rpc" + "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" @@ -38,37 +41,34 @@ import ( type executionDepositsExporter struct { ModuleContext - Client rpc.Client - ErigonClient *gethrpc.Client - GethClient *gethrpc.Client - LogClient *ethclient.Client - LogFilterer *deposit_contract.DepositContractFilterer - DepositContractAddress common.Address - LastExportedBlock uint64 - ExportMutex *sync.Mutex - StopEarlyMutex *sync.Mutex - StopEarly context.CancelFunc - Signer gethtypes.Signer - DepositMethod abi.Method + Client rpc.Client + ErigonClient *gethrpc.Client + GethClient *gethrpc.Client + LogClient *ethclient.Client + LogFilterer *deposit_contract.DepositContractFilterer + DepositContractAddress common.Address + LastExportedBlock uint64 + LastExportedFinalizedBlock uint64 + LastExportedFinalizedBlockRedisKey string + CurrentHeadBlock atomic.Uint64 + Signer gethtypes.Signer + DepositMethod abi.Method } func NewExecutionDepositsExporter(moduleContext ModuleContext) ModuleInterface { return &executionDepositsExporter{ - ModuleContext: moduleContext, - Client: moduleContext.ConsClient, - DepositContractAddress: common.HexToAddress(utils.Config.Chain.ClConfig.DepositContractAddress), - LastExportedBlock: 0, - ExportMutex: &sync.Mutex{}, - StopEarlyMutex: &sync.Mutex{}, + ModuleContext: moduleContext, + Client: moduleContext.ConsClient, + DepositContractAddress: common.HexToAddress(utils.Config.Chain.ClConfig.DepositContractAddress), + LastExportedBlock: 0, + LastExportedFinalizedBlock: 0, } } -func (d *executionDepositsExporter) OnHead(event *constypes.StandardEventHeadResponse) (err error) { - return nil // nop -} - func (d *executionDepositsExporter) Init() error { - d.Signer = gethtypes.NewCancunSigner(big.NewInt(int64(utils.Config.Chain.ClConfig.DepositChainID))) + d.Signer = gethtypes.NewCancunSigner(big.NewInt(0).SetUint64(utils.Config.Chain.ClConfig.DepositChainID)) + + d.LastExportedFinalizedBlockRedisKey = fmt.Sprintf("%d:execution_deposits_exporter:last_exported_finalized_block", utils.Config.Chain.ClConfig.DepositChainID) rpcClient, err := gethrpc.Dial(utils.Config.Eth1GethEndpoint) if err != nil { @@ -124,14 +124,29 @@ func (d *executionDepositsExporter) Init() error { d.LastExportedBlock = utils.Config.Indexer.ELDepositContractFirstBlock } - log.Infof("initialized execution deposits exporter with last exported block: %v", d.LastExportedBlock) + val, err := db.PersistentRedisDbClient.Get(context.Background(), d.LastExportedFinalizedBlockRedisKey).Uint64() + switch { + case err == redis.Nil: + 
log.Warnf("%v missing in redis, exporting from beginning", d.LastExportedFinalizedBlockRedisKey) + case err != nil: + log.Fatal(err, "error getting last exported finalized block from redis", 0) + } + + d.LastExportedFinalizedBlock = val + // avoid fetching old bocks on a chain without deposits + if d.LastExportedFinalizedBlock > d.LastExportedBlock { + d.LastExportedBlock = d.LastExportedFinalizedBlock + } + + log.Infof("initialized execution deposits exporter with last exported block/finalizedBlock: %v/%v", d.LastExportedBlock, d.LastExportedFinalizedBlock) - // quick kick-start go func() { - err := d.OnFinalizedCheckpoint(nil) + // quick kick-start + err = d.OnHead(nil) if err != nil { - log.Error(err, "error during kick-start", 0) + log.Error(err, "error kick-starting executionDepositsExporter", 0) } + d.exportLoop() }() return nil @@ -145,90 +160,119 @@ func (d *executionDepositsExporter) OnChainReorg(event *constypes.StandardEventC return nil // nop } -// can take however long it wants to run, is run in a separate goroutine, so no need to worry about blocking func (d *executionDepositsExporter) OnFinalizedCheckpoint(event *constypes.StandardFinalizedCheckpointResponse) (err error) { - // important: have to fetch the actual finalized epoch because even tho its called on finalized checkpoint it actually emits for each justified epoch - // so we have to do an extra request to get the actual latest finalized epoch - res, err := d.CL.GetFinalityCheckpoints("head") - if err != nil { - return err - } + return nil // nop +} - var nearestELBlock sql.NullInt64 - err = db.ReaderDb.Get(&nearestELBlock, "select exec_block_number from blocks where slot <= $1 and exec_block_number > 0 order by slot desc limit 1", res.Data.Finalized.Epoch*utils.Config.Chain.ClConfig.SlotsPerEpoch) - if err != nil { - return err - } - if !nearestELBlock.Valid { - return fmt.Errorf("no block found for finalized epoch %v", res.Data.Finalized.Epoch) +func (d *executionDepositsExporter) OnHead(event *constypes.StandardEventHeadResponse) (err error) { + return nil // nop +} + +func (d *executionDepositsExporter) exportLoop() { + ticker := time.NewTicker(time.Second * 10) + defer ticker.Stop() + for ; true; <-ticker.C { + err := d.export() + if err != nil { + log.Error(err, "error during export", 0) + services.ReportStatus("execution_deposits_exporter", err.Error(), nil) + } else { + services.ReportStatus("execution_deposits_exporter", "Running", nil) + } } - log.Debugf("exporting execution layer deposits till block %v", nearestELBlock.Int64) +} - err = d.exportTillBlock(uint64(nearestELBlock.Int64)) +func (d *executionDepositsExporter) export() (err error) { + var headBlock, finBlock uint64 + var g errgroup.Group + g.Go(func() error { + headSlot, err := d.CL.GetSlot("head") + if err != nil { + return fmt.Errorf("error getting head-slot: %w", err) + } + headBlock = headSlot.Data.Message.Body.ExecutionPayload.BlockNumber + return nil + }) + g.Go(func() error { + finSlot, err := d.CL.GetSlot("finalized") + if err != nil { + return fmt.Errorf("error getting finalized-slot: %w", err) + } + finBlock = finSlot.Data.Message.Body.ExecutionPayload.BlockNumber + return nil + }) + err = g.Wait() if err != nil { return err } - return nil -} - -// this is basically synchronous, each time it gets called it will kill the previous export and replace it with itself -func (d *executionDepositsExporter) exportTillBlock(block uint64) (err error) { - // following blocks if a previous function call is still waiting for an export to stop early - 
- return nil -} - -// this is basically synchronous, each time it gets called it will kill the previous export and replace it with itself -func (d *executionDepositsExporter) exportTillBlock(block uint64) (err error) { - // following blocks if a previous function call is still waiting for an export to stop early - d.StopEarlyMutex.Lock() - if d.StopEarly != nil { - // this will run even if the previous export has already finished - // preventing this would require an overly complex solution - log.Debugf("asking potentially running export to stop early") - d.StopEarly() + if d.LastExportedBlock >= headBlock && d.LastExportedFinalizedBlock >= finBlock { + log.Debugf("skip exporting execution layer deposits: last exported block/finalizedBlock: %v/%v, headBlock/finalizedBlock: %v/%v", d.LastExportedBlock, d.LastExportedFinalizedBlock, headBlock, finBlock) + return nil } - // following blocks as long as the running export hasn't finished yet - d.ExportMutex.Lock() - ctx, cancel := context.WithCancel(context.Background()) - d.StopEarly = cancel - // we have over taken and allow potentially newer function calls to signal us to stop early - d.StopEarlyMutex.Unlock() + nextFinalizedBlock := finBlock blockOffset := d.LastExportedBlock + 1 - blockTarget := block - - defer d.ExportMutex.Unlock() - - log.Infof("exporting execution layer deposits from %v to %v", blockOffset, blockTarget) - - depositsToSave := make([]*types.ELDeposit, 0) - + // make sure to re-export every block since the last exported finalized block to handle reorgs + if blockOffset > d.LastExportedFinalizedBlock { + blockOffset = d.LastExportedFinalizedBlock + 1 + } + blockTarget := headBlock + + log.InfoWithFields(log.Fields{ + "nextHeadBlock": headBlock, + "nextFinBlock": nextFinalizedBlock, + "lastHeadBlock": d.LastExportedBlock, + "lastFinBlock": d.LastExportedFinalizedBlock, + }, fmt.Sprintf("exporting execution layer deposits from %d to %d", blockOffset, blockTarget)) + + depositsToSaveBatchSize := 10_000 // limit how many deposits we save in one go + blockBatchSize := uint64(10_000) // limit how many blocks we fetch before updating the redis-key + depositsToSave := make([]*types.ELDeposit, 0, depositsToSaveBatchSize) for blockOffset < blockTarget { - tmpBlockTarget := blockOffset + 1000 - if tmpBlockTarget > blockTarget { - tmpBlockTarget = blockTarget + depositsToSave = depositsToSave[:0] + blockBatchStart := blockOffset + for blockOffset < blockTarget && len(depositsToSave) <= depositsToSaveBatchSize && blockOffset < blockBatchStart+blockBatchSize { + tmpBlockTarget := blockOffset + 1000 + if tmpBlockTarget > blockTarget { + tmpBlockTarget = blockTarget + } + log.Debugf("fetching deposits from %v to %v", blockOffset, tmpBlockTarget) + tmp, err := d.fetchDeposits(blockOffset, tmpBlockTarget) + if err != nil { + return err + } + depositsToSave = append(depositsToSave, tmp...) + blockOffset = tmpBlockTarget } - log.Debugf("fetching deposits from %v to %v", blockOffset, tmpBlockTarget) - tmp, err := d.fetchDeposits(blockOffset, tmpBlockTarget) + + log.Debugf("saving %v deposits", len(depositsToSave)) + err = d.saveDeposits(depositsToSave) if err != nil { return err } - depositsToSave = append(depositsToSave, tmp...)
- blockOffset = tmpBlockTarget - - select { - case <-ctx.Done(): // a newer function call has asked us to stop early - log.Warnf("stop early signal received, stopping export early") - blockTarget = tmpBlockTarget - default: - continue + d.LastExportedBlock = blockOffset + + prevLastExportedFinalizedBlock := d.LastExportedFinalizedBlock + if nextFinalizedBlock > d.LastExportedBlock && d.LastExportedBlock > d.LastExportedFinalizedBlock { + d.LastExportedFinalizedBlock = d.LastExportedBlock + } else if nextFinalizedBlock > d.LastExportedFinalizedBlock { + d.LastExportedFinalizedBlock = nextFinalizedBlock } - } - log.Debugf("saving %v deposits", len(depositsToSave)) - err = d.saveDeposits(depositsToSave) - if err != nil { - return err + // update redis to keep track of last exported finalized block persistently + if prevLastExportedFinalizedBlock != d.LastExportedFinalizedBlock { + log.Infof("updating %v: %v", d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + err := db.PersistentRedisDbClient.Set(ctx, d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock, 0).Err() + if err != nil { + log.Error(err, fmt.Sprintf("error setting redis %v = %v", d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock), 0) + } + } } - d.LastExportedBlock = blockTarget - start := time.Now() // update cached view err = d.updateCachedView() @@ -271,8 +315,8 @@ func (d *executionDepositsExporter) fetchDeposits(fromBlock, toBlock uint64) (de return nil, fmt.Errorf("nil deposit-log") } - depositLog := - depositLogIterator.Event + depositLog := depositLogIterator.Event + err = utils.VerifyDepositSignature(&phase0.DepositData{ PublicKey: phase0.BLSPubKey(depositLog.Pubkey), WithdrawalCredentials: depositLog.WithdrawalCredentials, @@ -387,6 +431,10 @@ func (d *executionDepositsExporter) fetchDeposits(fromBlock, toBlock uint64) (de } func (d *executionDepositsExporter) saveDeposits(depositsToSave []*types.ELDeposit) error { + if len(depositsToSave) == 0 { + return nil + } + tx, err := db.WriterDb.Beginx() if err != nil { return err diff --git a/backend/pkg/exporter/modules/relays.go b/backend/pkg/exporter/modules/relays.go index 205586792..78d068f93 100644 --- a/backend/pkg/exporter/modules/relays.go +++ b/backend/pkg/exporter/modules/relays.go @@ -107,21 +107,21 @@ func fetchDeliveredPayloads(r types.Relay, offset uint64) ([]BidTrace, error) { if offset != 0 { url += fmt.Sprintf("&cursor=%v", offset) } - - //nolint:gosec - resp, err := http.Get(url) + client := &http.Client{ + Timeout: time.Second * 30, + } + resp, err := client.Get(url) if err != nil { - log.Error(err, "error retrieving delivered payloads", 0, map[string]interface{}{"relay": r.ID}) - return nil, err + log.Error(err, "error retrieving delivered payloads", 0, map[string]interface{}{"relay": r.ID, "offset": offset, "url": url}) + return nil, fmt.Errorf("error retrieving delivered payloads for relay: %v, offset: %v, url: %v: %w", r.ID, offset, url, err) } defer resp.Body.Close() err = json.NewDecoder(resp.Body).Decode(&payloads) - if err != nil { - return nil, err + return nil, fmt.Errorf("error decoding json for delivered payloads for relay: %v, offset: %v, url: %v: %w", r.ID, offset, url, err) } return payloads, nil @@ -175,7 +175,7 @@ func retrieveAndInsertPayloadsFromRelay(r types.Relay, low_bound uint64, high_bo for { resp, err := fetchDeliveredPayloads(r, offset) if err != nil { - return err + return fmt.Errorf("error 
calling fetchDeliveredPayloads with offset: %v for relay: %v: %w", offset, r.ID, err) } if resp == nil { diff --git a/backend/pkg/exporter/modules/rocketpool.go b/backend/pkg/exporter/modules/rocketpool.go index 134d65143..e7e1f0693 100644 --- a/backend/pkg/exporter/modules/rocketpool.go +++ b/backend/pkg/exporter/modules/rocketpool.go @@ -13,6 +13,8 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" + "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/pkg/errors" @@ -195,6 +197,10 @@ func (rp *RocketpoolExporter) Run() error { continue } + services.ReportStatus("rocketpoolExporter", "Running", nil) + + metrics.TaskDuration.WithLabelValues("exporter_rocketpoolExporter").Observe(time.Since(t0).Seconds()) + log.InfoWithFields(log.Fields{"duration": time.Since(t0)}, "exported rocketpool-data") count++ <-t.C diff --git a/backend/pkg/monitoring/monitoring.go b/backend/pkg/monitoring/monitoring.go index b24c3cb9a..dbf021a77 100644 --- a/backend/pkg/monitoring/monitoring.go +++ b/backend/pkg/monitoring/monitoring.go @@ -1,6 +1,9 @@ package monitoring import ( + "sync" + "sync/atomic" + "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/metrics" @@ -11,11 +14,17 @@ import ( ) var monitoredServices []services.Service +var startedClickhouse atomic.Bool +var initMutex = sync.Mutex{} func Init(full bool) { + initMutex.Lock() + defer initMutex.Unlock() metrics.UUID.WithLabelValues(utils.GetUUID()).Set(1) // so we can find out where the uuid is set metrics.DeploymentType.WithLabelValues(utils.Config.DeploymentType).Set(1) if db.ClickHouseNativeWriter == nil { + log.Infof("initializing clickhouse writer") + startedClickhouse.Store(true) db.ClickHouseNativeWriter = db.MustInitClickhouseNative(&types.DatabaseConfig{ Username: utils.Config.ClickHouse.WriterDatabase.Username, Password: utils.Config.ClickHouse.WriterDatabase.Password, @@ -58,4 +67,7 @@ func Stop() { } // this prevents status reports that werent shut down cleanly from triggering alerts services.NewStatusReport(constants.CleanShutdownEvent, constants.Default, constants.Default)(constants.Success, nil) + if startedClickhouse.Load() { + db.ClickHouseNativeWriter.Close() + } } diff --git a/backend/pkg/monitoring/services/base.go b/backend/pkg/monitoring/services/base.go index 549b4d67f..04cbea094 100644 --- a/backend/pkg/monitoring/services/base.go +++ b/backend/pkg/monitoring/services/base.go @@ -65,8 +65,8 @@ func NewStatusReport(id string, timeout time.Duration, check_interval time.Durat if timeout != constants.Default { timeouts_at = now.Add(timeout) } - expires_at := timeouts_at.Add(5 * time.Minute) - if check_interval >= 5*time.Minute { + expires_at := timeouts_at.Add(1 * time.Minute) + if check_interval >= 1*time.Minute { expires_at = timeouts_at.Add(check_interval) } log.TraceWithFields(log.Fields{ @@ -83,7 +83,7 @@ func NewStatusReport(id string, timeout time.Duration, check_interval time.Durat err = db.ClickHouseNativeWriter.AsyncInsert( ctx, "INSERT INTO status_reports (emitter, event_id, deployment_type, insert_id, expires_at, timeouts_at, metadata) VALUES (?, ?, ?, ?, ?, ?, ?)", - true, + false, // true means wait for settlement, but we want to shoot and forget. 
false means we can't log any errors that occur during settlement utils.GetUUID(), id, utils.Config.DeploymentType, diff --git a/backend/pkg/notification/collection.go b/backend/pkg/notification/collection.go new file mode 100644 index 000000000..ba6b0568f --- /dev/null +++ b/backend/pkg/notification/collection.go @@ -0,0 +1,1977 @@ +package notification + +import ( + "database/sql" + "encoding/gob" + "encoding/hex" + "errors" + "fmt" + "strconv" + "strings" + "sync" + "time" + + gcp_bigtable "cloud.google.com/go/bigtable" + "github.com/ethereum/go-ethereum/common" + "github.com/gobitfly/beaconchain/pkg/commons/cache" + "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/ethclients" + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" + "github.com/gobitfly/beaconchain/pkg/commons/services" + "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" + "github.com/gobitfly/beaconchain/pkg/exporter/modules" + "github.com/lib/pq" + "github.com/rocket-pool/rocketpool-go/utils/eth" + "github.com/shopspring/decimal" +) + +func InitNotificationCollector(pubkeyCachePath string) { + err := initPubkeyCache(pubkeyCachePath) + if err != nil { + log.Fatal(err, "error initializing pubkey cache path for notifications", 0) + } + + go ethclients.Init() + + go notificationCollector() +} + +// the notificationCollector is responsible for collecting & queuing notifications +// it is epoch based and will only collect notifications for a given epoch once +// notifications are collected in ascending epoch order +// the epochs_notified sql table is used to keep track of already notified epochs +// before collecting notifications several db consistency checks are done +func notificationCollector() { + var once sync.Once + once.Do(func() { + gob.Register(&ValidatorProposalNotification{}) + gob.Register(&ValidatorUpcomingProposalNotification{}) + gob.Register(&ValidatorGroupEfficiencyNotification{}) + gob.Register(&ValidatorAttestationNotification{}) + gob.Register(&ValidatorIsOfflineNotification{}) + gob.Register(&ValidatorIsOnlineNotification{}) + gob.Register(&ValidatorGotSlashedNotification{}) + gob.Register(&ValidatorWithdrawalNotification{}) + gob.Register(&NetworkNotification{}) + gob.Register(&RocketpoolNotification{}) + gob.Register(&MonitorMachineNotification{}) + gob.Register(&TaxReportNotification{}) + gob.Register(&EthClientNotification{}) + gob.Register(&SyncCommitteeSoonNotification{}) + }) + + mc, err := modules.GetModuleContext() + if err != nil { + log.Fatal(err, "error getting module context", 0) + } + + go func() { + log.Infof("starting head notification collector") + for ; ; time.Sleep(time.Second * 30) { + // get the head epoch + head, err := mc.ConsClient.GetChainHead() + if err != nil { + log.Error(err, "error getting chain head", 0) + continue + } + + headEpoch := head.HeadEpoch + + var lastNotifiedEpoch uint64 + err = db.WriterDb.Get(&lastNotifiedEpoch, "SELECT COUNT(*) FROM epochs_notified_head WHERE epoch = $1 AND event_name = $2", headEpoch, types.ValidatorUpcomingProposalEventName) + + if err != nil { + log.Error(err, fmt.Sprintf("error checking if upcoming block proposal notifications for epoch %v have already been collected", headEpoch), 0) + continue + } + + if lastNotifiedEpoch > 0 { + log.Warnf("head epoch notifications for epoch %v have already been collected", headEpoch) + continue + }
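Aside: the head loop above stays idempotent by recording every handled epoch in epochs_notified_head and skipping epochs that are already marked. A minimal sketch of the check-then-mark pattern (connection string and event-name value are illustrative):

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func alreadyNotified(db *sqlx.DB, epoch uint64, eventName string) (bool, error) {
	var count uint64
	err := db.Get(&count, "SELECT COUNT(*) FROM epochs_notified_head WHERE epoch = $1 AND event_name = $2", epoch, eventName)
	return count > 0, err
}

func markNotified(db *sqlx.DB, epoch uint64, eventName string) error {
	_, err := db.Exec("INSERT INTO epochs_notified_head (epoch, event_name, senton) VALUES ($1, $2, NOW())", epoch, eventName)
	return err
}

func main() {
	db := sqlx.MustConnect("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
	const epoch, event = 123456, "validator_proposal_upcoming" // event name illustrative
	if done, err := alreadyNotified(db, epoch, event); err != nil || done {
		log.Println("skipping:", err)
		return
	}
	// ... collect and queue the notifications for this epoch here ...
	if err := markNotified(db, epoch, event); err != nil {
		log.Println(err)
	}
}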
+ + notifications, err := collectHeadNotifications(mc, headEpoch) + if err != nil { + log.Error(err, "error collecting head notifications", 0) + } + + _, err = db.WriterDb.Exec("INSERT INTO epochs_notified_head (epoch, event_name, senton) VALUES ($1, $2, NOW())", headEpoch, types.ValidatorUpcomingProposalEventName) + if err != nil { + log.Error(err, "error marking head notification status for epoch in db", 0) + continue + } + + if len(notifications) > 0 { + err = queueNotifications(headEpoch, notifications) + if err != nil { + log.Error(err, "error queuing head notifications", 0) + } + } + } + }() + + for { + latestFinalizedEpoch := cache.LatestFinalizedEpoch.Get() + + if latestFinalizedEpoch < 4 { + log.Error(nil, "pausing notifications until at least 5 epochs have been exported into the db", 0) + time.Sleep(time.Minute) + continue + } + + var lastNotifiedEpoch uint64 + err := db.WriterDb.Get(&lastNotifiedEpoch, "SELECT COALESCE(MAX(epoch), 0) FROM epochs_notified") + + if err != nil { + log.Error(err, "error retrieving last notified epoch from the db", 0) + time.Sleep(time.Minute) + continue + } + + log.Infof("latest finalized epoch is %v, latest notified epoch is %v", latestFinalizedEpoch, lastNotifiedEpoch) + + if latestFinalizedEpoch < lastNotifiedEpoch { + log.Error(nil, "notification consistency error, latest finalized epoch is lower than the last notified epoch!", 0) + time.Sleep(time.Minute) + continue + } + + if latestFinalizedEpoch-lastNotifiedEpoch > 5 { + log.Infof("last notified epoch is more than 5 epochs behind the last finalized epoch, limiting lookback to last 5 epochs") + lastNotifiedEpoch = latestFinalizedEpoch - 5 + } + + for epoch := lastNotifiedEpoch + 1; epoch <= latestFinalizedEpoch; epoch++ { + var exported uint64 + err := db.WriterDb.Get(&exported, "SELECT COUNT(*) FROM epochs WHERE epoch <= $1 AND epoch >= $2", epoch, epoch-3) + if err != nil { + log.Error(err, "error retrieving export status of epoch", 0, log.Fields{"epoch": epoch}) + services.ReportStatus("notification-collector", "Error", nil) + break + } + + if exported != 4 { + log.Error(nil, "epoch notification consistency error, epochs are not all yet exported into the db", 0, log.Fields{"epoch start": epoch, "epoch end": epoch - 3, "exported": exported, "wanted": 4}) + } + + start := time.Now() + log.Infof("collecting notifications for epoch %v", epoch) + + // Network DB Notifications (network related) + notifications, err := collectNotifications(epoch, mc) + + if err != nil { + log.Error(err, "error collecting notifications", 0) + services.ReportStatus("notification-collector", "Error", nil) + break + } + + _, err = db.WriterDb.Exec("INSERT INTO epochs_notified VALUES ($1, NOW())", epoch) + if err != nil { + log.Error(err, "error marking notification status for epoch in db", 0, log.Fields{"epoch": epoch}) + services.ReportStatus("notification-collector", "Error", nil) + break + } + + err = queueNotifications(epoch, notifications) // this causes the collected notifications to be queued and sent + if err != nil { + log.Error(err, fmt.Sprintf("error queuing notifications for epoch %v in db", epoch), 0) + services.ReportStatus("notification-collector", "Error", nil) + break + }
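Aside: the main loop above processes every epoch between the last notified and the latest finalized one, but caps the lookback at 5 epochs so a long outage cannot flood users with stale notifications. The control flow in isolation:

package main

import "fmt"

// epochsToProcess returns the epochs in (lastNotified, latestFinalized],
// never looking back more than maxLookback epochs.
func epochsToProcess(lastNotified, latestFinalized, maxLookback uint64) []uint64 {
	if latestFinalized < lastNotified {
		return nil // the real code treats this as a consistency error
	}
	if latestFinalized-lastNotified > maxLookback {
		lastNotified = latestFinalized - maxLookback
	}
	var epochs []uint64
	for e := lastNotified + 1; e <= latestFinalized; e++ {
		epochs = append(epochs, e)
	}
	return epochs
}

func main() {
	fmt.Println(epochsToProcess(100, 112, 5)) // [108 109 110 111 112]
}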
+ + // Network DB Notifications (user related, must only run on one instance ever!!!!) + if utils.Config.Notifications.UserDBNotifications { + log.Infof("collecting user db notifications") + userNotifications, err := collectUserDbNotifications(epoch) + if err != nil { + log.Error(err, "error collecting user db notifications", 0) + services.ReportStatus("notification-collector", "Error", nil) + time.Sleep(time.Minute * 2) + continue + } + + err = queueNotifications(epoch, userNotifications) + if err != nil { + log.Error(err, fmt.Sprintf("error queuing user notifications for epoch %v in db", epoch), 0) + services.ReportStatus("notification-collector", "Error", nil) + time.Sleep(time.Minute * 2) + continue + } + } + + log.InfoWithFields(log.Fields{"notifications": len(notifications), "duration": time.Since(start), "epoch": epoch}, "notifications completed") + + metrics.TaskDuration.WithLabelValues("service_notifications").Observe(time.Since(start).Seconds()) + } + + services.ReportStatus("notification-collector", "Running", nil) + time.Sleep(time.Second * 10) + } +} + +func collectHeadNotifications(mc modules.ModuleContext, headEpoch uint64) (types.NotificationsPerUserId, error) { + notificationsByUserID := types.NotificationsPerUserId{} + start := time.Now() + err := collectUpcomingBlockProposalNotifications(notificationsByUserID, mc, headEpoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_upcoming_block_proposal").Inc() + return nil, fmt.Errorf("error collecting upcoming block proposal notifications: %v", err) + } + log.Infof("collecting upcoming block proposal notifications took: %v", time.Since(start)) + + return notificationsByUserID, nil +} + +func collectUpcomingBlockProposalNotifications(notificationsByUserID types.NotificationsPerUserId, mc modules.ModuleContext, headEpoch uint64) (err error) { + nextEpoch := headEpoch + 1 + log.Infof("collecting upcoming block proposal notifications for epoch %v (head epoch is %d)", nextEpoch, headEpoch) + + if utils.EpochToTime(nextEpoch).Before(time.Now()) { + log.Error(fmt.Errorf("error upcoming block proposal notifications for epoch %v are already in the past", nextEpoch), "", 0) + return nil + } + + assignments, err := mc.CL.GetPropoalAssignments(nextEpoch) + if err != nil { + return fmt.Errorf("error getting proposal assignments: %w", err) + } + + subs, err := GetSubsForEventFilter(types.ValidatorUpcomingProposalEventName, "", nil, nil) + if err != nil { + return fmt.Errorf("error getting subscriptions for upcoming block proposal notifications: %w", err) + } + + log.Infof("retrieved %d subscriptions for upcoming block proposal notifications", len(subs)) + if len(subs) == 0 { + return nil + } + + for _, assignment := range assignments.Data { + log.Infof("upcoming block proposal for validator %d in slot %d", assignment.ValidatorIndex, assignment.Slot) + for _, sub := range subs[hex.EncodeToString(assignment.Pubkey)] { + if sub.UserID == nil || sub.ID == nil { + return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) + } + + log.Infof("creating %v notification for validator %v in epoch %v (dashboard: %v)", sub.EventName, assignment.ValidatorIndex, nextEpoch, sub.DashboardId != nil) + n := &ValidatorUpcomingProposalNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: nextEpoch, + EventName: sub.EventName, + EventFilter: hex.EncodeToString(assignment.Pubkey), + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, +
DashboardGroupName: sub.DashboardGroupName, + }, + ValidatorIndex: assignment.ValidatorIndex, + Slot: uint64(assignment.Slot), + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + return nil +} + +func collectNotifications(epoch uint64, mc modules.ModuleContext) (types.NotificationsPerUserId, error) { + notificationsByUserID := types.NotificationsPerUserId{} + start := time.Now() + var err error + var dbIsCoherent bool + + // do a consistency check to make sure that we have all the data we need in the db + err = db.WriterDb.Get(&dbIsCoherent, ` + SELECT + NOT (array[false] && array_agg(is_coherent)) AS is_coherent + FROM ( + SELECT + epoch - 1 = lead(epoch) OVER (ORDER BY epoch DESC) AS is_coherent + FROM epochs + ORDER BY epoch DESC + LIMIT 2^14 + ) coherency`) + + if err != nil { + log.Error(err, "error doing epochs table coherence check", 0) + return nil, err + } + if !dbIsCoherent { + log.Error(nil, "epochs coherence check failed, aborting", 0) + return nil, fmt.Errorf("epochs coherence check failed, aborting") + } + + log.Infof("started collecting notifications") + // The following functions will collect the notifications and add them to the + // notificationsByUserID map. The notifications will be queued and sent later + // by the notification sender process + err = collectGroupEfficiencyNotifications(notificationsByUserID, epoch, mc) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_group_efficiency").Inc() + return nil, fmt.Errorf("error collecting validator_group_efficiency notifications: %v", err) + } + log.Infof("collecting group efficiency notifications took: %v", time.Since(start)) + + err = collectAttestationAndOfflineValidatorNotifications(notificationsByUserID, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_missed_attestation").Inc() + return nil, fmt.Errorf("error collecting validator_attestation_missed notifications: %v", err) + } + log.Infof("collecting attestation & offline notifications took: %v", time.Since(start)) + + err = collectBlockProposalNotifications(notificationsByUserID, 1, types.ValidatorExecutedProposalEventName, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_executed_block_proposal").Inc() + return nil, fmt.Errorf("error collecting validator_proposal_submitted notifications: %v", err) + } + log.Infof("collecting block proposal proposed notifications took: %v", time.Since(start)) + + err = collectBlockProposalNotifications(notificationsByUserID, 2, types.ValidatorMissedProposalEventName, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_missed_block_proposal").Inc() + return nil, fmt.Errorf("error collecting validator_proposal_missed notifications: %v", err) + } + log.Infof("collecting block proposal missed notifications took: %v", time.Since(start)) + + err = collectBlockProposalNotifications(notificationsByUserID, 3, types.ValidatorMissedProposalEventName, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_missed_orphaned_block_proposal").Inc() + return nil, fmt.Errorf("error collecting validator_proposal_missed notifications for orphaned slots: %w", err) + } + log.Infof("collecting block proposal missed notifications for orphaned slots took: %v", time.Since(start)) + + err = collectValidatorGotSlashedNotifications(notificationsByUserID, epoch) + if err != nil { + 
metrics.Errors.WithLabelValues("notifications_collect_validator_got_slashed").Inc() + return nil, fmt.Errorf("error collecting validator_got_slashed notifications: %v", err) + } + log.Infof("collecting validator got slashed notifications took: %v", time.Since(start)) + + err = collectWithdrawalNotifications(notificationsByUserID, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_validator_withdrawal").Inc() + return nil, fmt.Errorf("error collecting withdrawal notifications: %v", err) + } + log.Infof("collecting withdrawal notifications took: %v", time.Since(start)) + + err = collectNetworkNotifications(notificationsByUserID) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_network").Inc() + return nil, fmt.Errorf("error collecting network notifications: %v", err) + } + log.Infof("collecting network notifications took: %v", time.Since(start)) + + // Rocketpool + { + var ts int64 + err = db.ReaderDb.Get(&ts, `SELECT id FROM rocketpool_network_stats LIMIT 1;`) + + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + log.Infof("skipped collecting rocketpool notifications because rocketpool_network_stats is empty") + } else { + metrics.Errors.WithLabelValues("notifications_collect_rocketpool_notifications").Inc() + return nil, fmt.Errorf("error collecting rocketpool notifications: %v", err) + } + } else { + err = collectRocketpoolComissionNotifications(notificationsByUserID) + if err != nil { + //nolint:misspell + metrics.Errors.WithLabelValues("notifications_collect_rocketpool_comission").Inc() + return nil, fmt.Errorf("error collecting rocketpool commission: %v", err) + } + log.Infof("collecting rocketpool commissions took: %v", time.Since(start)) + + err = collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_rocketpool_reward_claim").Inc() + return nil, fmt.Errorf("error collecting new rocketpool claim round: %v", err) + } + log.Infof("collecting rocketpool claim round took: %v", time.Since(start)) + + err = collectRocketpoolRPLCollateralNotifications(notificationsByUserID, types.RocketpoolCollateralMaxReachedEventName, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_rocketpool_rpl_collateral_max_reached").Inc() + return nil, fmt.Errorf("error collecting rocketpool max collateral: %v", err) + } + log.Infof("collecting rocketpool max collateral took: %v", time.Since(start)) + + err = collectRocketpoolRPLCollateralNotifications(notificationsByUserID, types.RocketpoolCollateralMinReachedEventName, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_rocketpool_rpl_collateral_min_reached").Inc() + return nil, fmt.Errorf("error collecting rocketpool min collateral: %v", err) + } + log.Infof("collecting rocketpool min collateral took: %v", time.Since(start)) + } + } + + err = collectSyncCommitteeNotifications(notificationsByUserID, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_sync_committee").Inc() + return nil, fmt.Errorf("error collecting sync committee: %v", err) + } + log.Infof("collecting sync committee took: %v", time.Since(start)) + + return notificationsByUserID, nil +} + +func collectUserDbNotifications(epoch uint64) (types.NotificationsPerUserId, error) { + notificationsByUserID := types.NotificationsPerUserId{} + var err error
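Aside: the rocketpool block above distinguishes an empty source table (sql.ErrNoRows, skip quietly) from a genuine query failure (abort the run). The gating pattern in isolation (table name as in the hunk, connection string illustrative):

package main

import (
	"database/sql"
	"errors"
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func shouldCollectRocketpool(db *sqlx.DB) (bool, error) {
	var id int64
	err := db.Get(&id, `SELECT id FROM rocketpool_network_stats LIMIT 1;`)
	if errors.Is(err, sql.ErrNoRows) {
		return false, nil // nothing exported yet, skip without failing the run
	}
	if err != nil {
		return false, err // real failure, surface it
	}
	return true, nil
}

func main() {
	db := sqlx.MustConnect("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
	ok, err := shouldCollectRocketpool(db)
	fmt.Println(ok, err)
}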
+ + // Monitoring (premium): machine offline + err = collectMonitoringMachineOffline(notificationsByUserID, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_monitoring_machine_offline").Inc() + return nil, fmt.Errorf("error collecting machine offline notifications: %v", err) + } + + // Monitoring (premium): disk full warnings + err = collectMonitoringMachineDiskAlmostFull(notificationsByUserID, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_monitoring_machine_disk_almost_full").Inc() + return nil, fmt.Errorf("error collecting machine disk full notifications: %v", err) + } + + // Monitoring (premium): cpu load + err = collectMonitoringMachineCPULoad(notificationsByUserID, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_monitoring_machine_cpu_load").Inc() + return nil, fmt.Errorf("error collecting machine cpu notifications: %v", err) + } + + // Monitoring (premium): ram + err = collectMonitoringMachineMemoryUsage(notificationsByUserID, epoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_monitoring_machine_memory_usage").Inc() + return nil, fmt.Errorf("error collecting machine memory notifications: %v", err) + } + + // New ETH clients + err = collectEthClientNotifications(notificationsByUserID) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_eth_client").Inc() + return nil, fmt.Errorf("error collecting Eth client notifications: %v", err) + } + + // Tax Report + err = collectTaxReportNotificationNotifications(notificationsByUserID) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_tax_report").Inc() + return nil, fmt.Errorf("error collecting tax report notifications: %v", err) + } + + return notificationsByUserID, nil +} + +func collectGroupEfficiencyNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64, mc modules.ModuleContext) error { + type dbResult struct { + ValidatorIndex uint64 `db:"validator_index"` + AttestationReward decimal.Decimal `db:"attestations_reward"` + AttestationIdealReward decimal.Decimal `db:"attestations_ideal_reward"` + BlocksProposed uint64 `db:"blocks_proposed"` + BlocksScheduled uint64 `db:"blocks_scheduled"` + SyncExecuted uint64 `db:"sync_executed"` + SyncScheduled uint64 `db:"sync_scheduled"` + } + + // retrieve rewards for the epoch + log.Info("retrieving validator metadata") + validators, err := mc.CL.GetValidators(epoch*utils.Config.Chain.ClConfig.SlotsPerEpoch, nil, []constypes.ValidatorStatus{constypes.Active}) + if err != nil { + return fmt.Errorf("error getting validators: %w", err) + } + effectiveBalanceMap := make(map[uint64]uint64) + activeValidatorsMap := make(map[uint64]struct{}) + for _, validator := range validators.Data { + effectiveBalanceMap[validator.Index] = validator.Validator.EffectiveBalance + activeValidatorsMap[validator.Index] = struct{}{} + } + log.Info("retrieving attestation reward data") + attestationRewards, err := mc.CL.GetAttestationRewards(epoch) + if err != nil { + return fmt.Errorf("error getting attestation rewards: %w", err) + } + + efficiencyMap := make(map[types.ValidatorIndex]*dbResult, len(attestationRewards.Data.TotalRewards)) + + idealRewardsMap := make(map[uint64]decimal.Decimal) + for _, reward := range attestationRewards.Data.IdealRewards { + idealRewardsMap[uint64(reward.EffectiveBalance)] = decimal.NewFromInt(int64(reward.Head) + int64(reward.Target) + int64(reward.Source) + int64(reward.InclusionDelay) + int64(reward.Inactivity)) + }
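Aside: attestation efficiency is the ratio of earned to ideal attestation rewards, and the ideal reward is looked up via the validator's effective balance, as built in the maps above. A compact sketch with simplified types and made-up numbers:

package main

import "fmt"

// attestationEfficiency divides the earned reward by the ideal reward for
// the validator's effective-balance tier; ok is false when no ideal value exists.
func attestationEfficiency(earned int64, effectiveBalance uint64, idealByEffBal map[uint64]int64) (float64, bool) {
	ideal, ok := idealByEffBal[effectiveBalance]
	if !ok || ideal == 0 {
		return 0, false
	}
	return float64(earned) / float64(ideal), true
}

func main() {
	idealByEffBal := map[uint64]int64{32_000_000_000: 14_000} // 32 ETH tier, values made up
	if eff, ok := attestationEfficiency(13_300, 32_000_000_000, idealByEffBal); ok {
		fmt.Printf("attestation efficiency: %.3f\n", eff) // 0.950
	}
}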
+ for _, reward := range attestationRewards.Data.TotalRewards { + efficiencyMap[types.ValidatorIndex(reward.ValidatorIndex)] = &dbResult{ + ValidatorIndex: reward.ValidatorIndex, + AttestationReward: decimal.NewFromInt(int64(reward.Head) + int64(reward.Target) + int64(reward.Source) + int64(reward.InclusionDelay) + int64(reward.Inactivity)), + AttestationIdealReward: idealRewardsMap[effectiveBalanceMap[reward.ValidatorIndex]], + } + } + + log.Info("retrieving block proposal data") + proposalAssignments, err := mc.CL.GetPropoalAssignments(epoch) + if err != nil { + return fmt.Errorf("error getting proposal assignments: %w", err) + } + for _, assignment := range proposalAssignments.Data { + efficiencyMap[types.ValidatorIndex(assignment.ValidatorIndex)].BlocksScheduled++ + } + + syncAssignments, err := mc.CL.GetSyncCommitteesAssignments(nil, epoch*utils.Config.Chain.ClConfig.SlotsPerEpoch) + if err != nil { + return fmt.Errorf("error getting sync committee assignments: %w", err) + } + + for slot := epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch; slot < (epoch+1)*utils.Config.Chain.ClConfig.SlotsPerEpoch; slot++ { + log.Infof("retrieving data for slot %v", slot) + s, err := mc.CL.GetSlot(slot) + if err != nil && strings.Contains(err.Error(), "NOT_FOUND") { + continue + } else if err != nil { + return fmt.Errorf("error getting block header for slot %v: %w", slot, err) + } + efficiencyMap[types.ValidatorIndex(s.Data.Message.ProposerIndex)].BlocksProposed++ + + for i, validatorIndex := range syncAssignments.Data.Validators { + efficiencyMap[types.ValidatorIndex(validatorIndex)].SyncScheduled++ + + if utils.BitAtVector(s.Data.Message.Body.SyncAggregate.SyncCommitteeBits, i) { + efficiencyMap[types.ValidatorIndex(validatorIndex)].SyncExecuted++ + } + } + } + + subMap, err := GetSubsForEventFilter(types.ValidatorGroupEfficiencyEventName, "", nil, nil) + if err != nil { + return fmt.Errorf("error getting subscriptions for group efficiency notifications: %w", err) + } + + // create a lookup map for the dashboard & groups + type groupDetails struct { + Validators []types.ValidatorIndex + Subscription *types.Subscription + } + dashboardMap := make(map[types.UserId]map[types.DashboardId]map[types.DashboardGroupId]*groupDetails) + + for _, subs := range subMap { + for _, sub := range subs { + if sub.DashboardId == nil || sub.DashboardGroupId == nil { + continue + } + userId := *sub.UserID + dashboardId := types.DashboardId(*sub.DashboardId) + groupId := types.DashboardGroupId(*sub.DashboardGroupId) + if _, ok := dashboardMap[userId]; !ok { + dashboardMap[userId] = make(map[types.DashboardId]map[types.DashboardGroupId]*groupDetails) + } + if _, ok := dashboardMap[userId][dashboardId]; !ok { + dashboardMap[userId][dashboardId] = make(map[types.DashboardGroupId]*groupDetails) + } + if _, ok := dashboardMap[userId][dashboardId][groupId]; !ok { + dashboardMap[userId][dashboardId][groupId] = &groupDetails{ + Validators: []types.ValidatorIndex{}, + } + } + if sub.EventFilter != "" { + pubkeyDecoded, err := hex.DecodeString(sub.EventFilter) + if err != nil { + return fmt.Errorf("error decoding pubkey %v: %w", sub.EventFilter, err) + } + validatorIndex, err := GetIndexForPubkey(pubkeyDecoded) + if err != nil { + return fmt.Errorf("error getting validator index for pubkey %v: %w", sub.EventFilter, err) + } + dashboardMap[userId][dashboardId][groupId].Validators = append(dashboardMap[*sub.UserID][dashboardId][groupId].Validators, types.ValidatorIndex(validatorIndex)) + } + dashboardMap[userId][dashboardId][groupId].Subscription = sub + } + }
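Aside: subscriptions are regrouped into a user -> dashboard -> group lookup before aggregation; the nested-map construction in isolation (types reduced to plain integers):

package main

import "fmt"

type groupDetails struct {
	Validators []uint64
}

// add inserts a validator under user/dashboard/group, creating the
// intermediate maps on first use, mirroring the dashboardMap built above.
func add(m map[uint64]map[uint64]map[uint64]*groupDetails, user, dash, group, validator uint64) {
	if _, ok := m[user]; !ok {
		m[user] = make(map[uint64]map[uint64]*groupDetails)
	}
	if _, ok := m[user][dash]; !ok {
		m[user][dash] = make(map[uint64]*groupDetails)
	}
	if _, ok := m[user][dash][group]; !ok {
		m[user][dash][group] = &groupDetails{}
	}
	d := m[user][dash][group]
	d.Validators = append(d.Validators, validator)
}

func main() {
	dashboards := make(map[uint64]map[uint64]map[uint64]*groupDetails)
	add(dashboards, 1, 10, 100, 42)
	add(dashboards, 1, 10, 100, 43)
	fmt.Println(dashboards[1][10][100].Validators) // [42 43]
}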
+ + // The commented code below can be used to validate data retrieved from the node against + // data in clickhouse + // var queryResult []*dbResult + // clickhouseTable := "validator_dashboard_data_epoch" + // // retrieve efficiency data for the epoch + // log.Infof("retrieving efficiency data for epoch %v", epoch) + // ds := goqu.Dialect("postgres"). + // From(goqu.L(fmt.Sprintf(`%s AS r`, clickhouseTable))). + // Select( + // goqu.L("validator_index"), + // goqu.L("COALESCE(r.attestations_reward, 0) AS attestations_reward"), + // goqu.L("COALESCE(r.attestations_ideal_reward, 0) AS attestations_ideal_reward"), + // goqu.L("COALESCE(r.blocks_proposed, 0) AS blocks_proposed"), + // goqu.L("COALESCE(r.blocks_scheduled, 0) AS blocks_scheduled"), + // goqu.L("COALESCE(r.sync_executed, 0) AS sync_executed"), + // goqu.L("COALESCE(r.sync_scheduled, 0) AS sync_scheduled")). + // Where(goqu.L("r.epoch_timestamp = ?", utils.EpochToTime(epoch))) + // query, args, err := ds.Prepared(true).ToSQL() + // if err != nil { + // return fmt.Errorf("error preparing query: %v", err) + // } + + // err = db.ClickHouseReader.Select(&queryResult, query, args...) + // if err != nil { + // return fmt.Errorf("error retrieving data from table %s: %v", clickhouseTable, err) + // } + + // if len(queryResult) == 0 { + // return fmt.Errorf("no efficiency data found for epoch %v", epoch) + // } + + // log.Infof("retrieved %v efficiency data rows", len(queryResult)) + + // for _, row := range queryResult { + // if _, ok := activeValidatorsMap[row.ValidatorIndex]; !ok { + // continue + // } + // existing := efficiencyMap[types.ValidatorIndex(row.ValidatorIndex)] + + // if existing == nil { + // existing = &dbResult{ + // ValidatorIndex: row.ValidatorIndex, + // AttestationReward: decimal.Decimal{}, + // AttestationIdealReward: decimal.Decimal{}, + // } + // } + // if !existing.AttestationIdealReward.Equal(row.AttestationIdealReward) { + // log.Fatal(fmt.Errorf("ideal reward mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.AttestationIdealReward, row.AttestationIdealReward), "ideal reward mismatch", 0) + // } + // if !existing.AttestationReward.Equal(row.AttestationReward) { + // log.Fatal(fmt.Errorf("attestation reward mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.AttestationReward, row.AttestationReward), "attestation reward mismatch", 0) + // } + // if existing.BlocksProposed != row.BlocksProposed { + // log.Fatal(fmt.Errorf("blocks proposed mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.BlocksProposed, row.BlocksProposed), "blocks proposed mismatch", 0) + // } + // if existing.BlocksScheduled != row.BlocksScheduled { + // log.Fatal(fmt.Errorf("blocks scheduled mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.BlocksScheduled, row.BlocksScheduled), "blocks scheduled mismatch", 0) + // } + // if existing.SyncExecuted != row.SyncExecuted { + // log.Fatal(fmt.Errorf("sync executed mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.SyncExecuted, row.SyncExecuted), "sync executed mismatch", 0) + // } + // if existing.SyncScheduled != row.SyncScheduled { + // log.Fatal(fmt.Errorf("sync scheduled mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.SyncScheduled, row.SyncScheduled), "sync scheduled mismatch", 0) + // } + // efficiencyMap[types.ValidatorIndex(row.ValidatorIndex)] = row + // } + + for userId, dashboards := range dashboardMap { + for dashboardId, groups := range dashboards { + for groupId, groupDetails := range groups { + attestationReward := 
decimal.Decimal{} + attestationIdealReward := decimal.Decimal{} + blocksProposed := uint64(0) + blocksScheduled := uint64(0) + syncExecuted := uint64(0) + syncScheduled := uint64(0) + + for _, validatorIndex := range groupDetails.Validators { + if row, ok := efficiencyMap[validatorIndex]; ok { + attestationReward = attestationReward.Add(row.AttestationReward) + attestationIdealReward = attestationIdealReward.Add(row.AttestationIdealReward) + blocksProposed += row.BlocksProposed + blocksScheduled += row.BlocksScheduled + syncExecuted += row.SyncExecuted + syncScheduled += row.SyncScheduled + } + } + + var attestationEfficiency, proposerEfficiency, syncEfficiency sql.NullFloat64 + + if !attestationIdealReward.IsZero() { + attestationEfficiency.Float64 = attestationReward.Div(attestationIdealReward).InexactFloat64() + attestationEfficiency.Valid = true + } + if blocksScheduled > 0 { + proposerEfficiency.Float64 = float64(blocksProposed) / float64(blocksScheduled) + proposerEfficiency.Valid = true + } + if syncScheduled > 0 { + syncEfficiency.Float64 = float64(syncExecuted) / float64(syncScheduled) + syncEfficiency.Valid = true + } + + efficiency := utils.CalculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) + + log.Infof("efficiency: %v, threshold: %v", efficiency, groupDetails.Subscription.EventThreshold*100) + + if efficiency < groupDetails.Subscription.EventThreshold*100 { + log.Infof("creating group efficiency notification for user %v, dashboard %v, group %v in epoch %v", userId, dashboardId, groupId, epoch) + n := &ValidatorGroupEfficiencyNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *groupDetails.Subscription.ID, + UserID: *groupDetails.Subscription.UserID, + Epoch: epoch, + EventName: groupDetails.Subscription.EventName, + EventFilter: "-", + DashboardId: groupDetails.Subscription.DashboardId, + DashboardName: groupDetails.Subscription.DashboardName, + DashboardGroupId: groupDetails.Subscription.DashboardGroupId, + DashboardGroupName: groupDetails.Subscription.DashboardGroupName, + }, + Threshold: groupDetails.Subscription.EventThreshold * 100, + Efficiency: efficiency, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + } + } + + log.Info("done collecting group efficiency notifications") + + return nil +} +func collectBlockProposalNotifications(notificationsByUserID types.NotificationsPerUserId, status uint64, eventName types.EventName, epoch uint64) error { + type dbResult struct { + Proposer uint64 `db:"proposer"` + Status uint64 `db:"status"` + Slot uint64 `db:"slot"` + ExecBlock uint64 `db:"exec_block_number"` + ExecRewardETH float64 + } + + subMap, err := GetSubsForEventFilter(eventName, "", nil, nil) + if err != nil { + return fmt.Errorf("error getting subscriptions for (missed) block proposals %w", err) + } + + events := make([]dbResult, 0) + err = db.WriterDb.Select(&events, "SELECT slot, proposer, status, COALESCE(exec_block_number, 0) AS exec_block_number FROM blocks WHERE epoch = $1 AND status = $2", epoch, fmt.Sprintf("%d", status)) + if err != nil { + return fmt.Errorf("error retrieving slots for epoch %v: %w", epoch, err) + } + + log.Infof("retrieved %v events", len(events)) + + // Get Execution reward for proposed blocks + if status == 1 { // if proposed + var blockList = []uint64{} + for _, data := range events { + if data.ExecBlock != 0 { + blockList = append(blockList, data.ExecBlock) + } + } + + if len(blockList) > 
0 { + blocks, err := db.BigtableClient.GetBlocksIndexedMultiple(blockList, 10000) + if err != nil { + log.Error(err, "error loading blocks from bigtable", 0, log.Fields{"blockList": blockList}) + return err + } + var execBlockNrToExecBlockMap = map[uint64]*types.Eth1BlockIndexed{} + for _, block := range blocks { + execBlockNrToExecBlockMap[block.GetNumber()] = block + } + relaysData, err := db.GetRelayDataForIndexedBlocks(blocks) + if err != nil { + return err + } + + for j := 0; j < len(events); j++ { + execData, found := execBlockNrToExecBlockMap[events[j].ExecBlock] + if found { + reward := utils.Eth1TotalReward(execData) + relayData, found := relaysData[common.BytesToHash(execData.Hash)] + if found { + reward = relayData.MevBribe.BigInt() + } + events[j].ExecRewardETH = float64(int64(eth.WeiToEth(reward)*100000)) / 100000 + } + } + } + } + + for _, event := range events { + pubkey, err := GetPubkeyForIndex(event.Proposer) + if err != nil { + log.Error(err, "error retrieving pubkey for validator", 0, map[string]interface{}{"validator": event.Proposer}) + continue + } + subscribers, ok := subMap[hex.EncodeToString(pubkey)] + if !ok { + continue + } + for _, sub := range subscribers { + if sub.UserID == nil || sub.ID == nil { + return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) + } + if sub.LastEpoch != nil { + lastSentEpoch := *sub.LastEpoch + if lastSentEpoch >= epoch || epoch < sub.CreatedEpoch { + continue + } + } + log.Infof("creating %v notification for validator %v in epoch %v (dashboard: %v)", sub.EventName, event.Proposer, epoch, sub.DashboardId != nil) + n := &ValidatorProposalNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventName: sub.EventName, + EventFilter: hex.EncodeToString(pubkey), + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + ValidatorIndex: event.Proposer, + Status: event.Status, + Reward: event.ExecRewardETH, + Slot: event.Slot, + Block: event.ExecBlock, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + + return nil +} + +// collectAttestationAndOfflineValidatorNotifications collects notifications for missed attestations and offline validators +func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { + // Retrieve subscriptions for missed attestations + subMapAttestationMissed, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName, "", nil, nil) + if err != nil { + return fmt.Errorf("error getting subscriptions for missed attestations: %w", err) + } + + type dbResult struct { + ValidatorIndex uint64 `db:"validatorindex"` + Epoch uint64 `db:"epoch"` + Status uint64 `db:"status"` + EventFilter []byte `db:"pubkey"` + } + + // get attestations for all validators for the last 4 epochs + // we need 4 epochs so that we can detect the online / offline status of validators + validators, err := db.GetValidatorIndices() + if err != nil { + return err + } + + // this reads the submitted attestations for the last 4 epochs + participationPerEpoch, err := db.GetValidatorAttestationHistoryForNotifications(epoch-3, epoch) + if err != nil { + return fmt.Errorf("error getting validator attestations from db: %w", err) + }
validator attestation history data") + + events := make([]dbResult, 0) + + epochAttested := make(map[types.Epoch]uint64) + epochTotal := make(map[types.Epoch]uint64) + for currentEpoch, participation := range participationPerEpoch { + for validatorIndex, participated := range participation { + epochTotal[currentEpoch] = epochTotal[currentEpoch] + 1 // count the total attestations for each epoch + + if !participated { + pubkey, err := GetPubkeyForIndex(uint64(validatorIndex)) + if err == nil { + if currentEpoch != types.Epoch(epoch) || subMapAttestationMissed[hex.EncodeToString(pubkey)] == nil { + continue + } + + events = append(events, dbResult{ + ValidatorIndex: uint64(validatorIndex), + Epoch: uint64(currentEpoch), + Status: 0, + EventFilter: pubkey, + }) + } else { + log.Error(err, "error retrieving pubkey for validator", 0, map[string]interface{}{"validatorIndex": validatorIndex}) + } + } else { + epochAttested[currentEpoch] = epochAttested[currentEpoch] + 1 // count the total attested attestation for each epoch (exlude missing) + } + } + } + + // process missed attestation events + for _, event := range events { + subscribers, ok := subMapAttestationMissed[hex.EncodeToString(event.EventFilter)] + if !ok { + return fmt.Errorf("error event returned that does not exist: %x", event.EventFilter) + } + for _, sub := range subscribers { + if sub.UserID == nil || sub.ID == nil { + return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) + } + if sub.LastEpoch != nil { + lastSentEpoch := *sub.LastEpoch + if lastSentEpoch >= event.Epoch || event.Epoch < sub.CreatedEpoch { + // log.Infof("skipping creating %v for validator %v (lastSentEpoch: %v, createdEpoch: %v)", types.ValidatorMissedAttestationEventName, event.ValidatorIndex, lastSentEpoch, sub.CreatedEpoch) + continue + } + } + + //log.Infof("creating %v notification for validator %v in epoch %v (dashboard: %v)", sub.EventName, event.ValidatorIndex, event.Epoch, sub.DashboardId != nil) + n := &ValidatorAttestationNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: event.Epoch, + EventName: sub.EventName, + EventFilter: hex.EncodeToString(event.EventFilter), + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + ValidatorIndex: event.ValidatorIndex, + Status: event.Status, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + + // detect online & offline validators + type indexPubkeyPair struct { + Index uint64 + Pubkey []byte + } + var offlineValidators []*indexPubkeyPair + var onlineValidators []*indexPubkeyPair + + epochNMinus1 := types.Epoch(epoch - 1) + epochNMinus2 := types.Epoch(epoch - 2) + epochNMinus3 := types.Epoch(epoch - 3) + + if epochTotal[types.Epoch(epoch)] == 0 { + return fmt.Errorf("consistency error, did not retrieve attestation data for epoch %v", epoch) + } + if epochTotal[epochNMinus1] == 0 { + return fmt.Errorf("consistency error, did not retrieve attestation data for epoch %v", epochNMinus1) + } + if epochTotal[epochNMinus2] == 0 { + return fmt.Errorf("consistency error, did not retrieve attestation data for epoch %v", epochNMinus2) + } + if epochTotal[epochNMinus3] == 0 { + return fmt.Errorf("consistency error, did not retrieve attestation data for epoch %v", epochNMinus3) + } + + if 
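// The detection further below boils down to two pure predicates over the last
// four epochs (hypothetical helpers, written out here for clarity only):
//
//	func wentOffline(p map[types.Epoch]map[types.ValidatorIndex]bool, v types.ValidatorIndex, n types.Epoch) bool {
//		return p[n-3][v] && !p[n-2][v] && !p[n-1][v] && !p[n][v] // attested, then missed three epochs in a row
//	}
//
//	func cameBackOnline(p map[types.Epoch]map[types.ValidatorIndex]bool, v types.ValidatorIndex, n types.Epoch) bool {
//		return !p[n-3][v] && !p[n-2][v] && !p[n-1][v] && p[n][v] // silent for three epochs, then attested again
//	}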
if epochAttested[types.Epoch(epoch)]*100/epochTotal[types.Epoch(epoch)] < 60 { + return fmt.Errorf("consistency error, less than 60%% of attestations were included in epoch %v (total: %v, attested: %v)", epoch, epochTotal[types.Epoch(epoch)], epochAttested[types.Epoch(epoch)]) + } + if epochAttested[epochNMinus1]*100/epochTotal[epochNMinus1] < 60 { + return fmt.Errorf("consistency error, less than 60%% of attestations were included in epoch %v (total: %v, attested: %v)", epochNMinus1, epochTotal[epochNMinus1], epochAttested[epochNMinus1]) + } + if epochAttested[epochNMinus2]*100/epochTotal[epochNMinus2] < 60 { + return fmt.Errorf("consistency error, less than 60%% of attestations were included in epoch %v (total: %v, attested: %v)", epochNMinus2, epochTotal[epochNMinus2], epochAttested[epochNMinus2]) + } + if epochAttested[epochNMinus3]*100/epochTotal[epochNMinus3] < 60 { + return fmt.Errorf("consistency error, less than 60%% of attestations were included in epoch %v (total: %v, attested: %v)", epochNMinus3, epochTotal[epochNMinus3], epochAttested[epochNMinus3]) + } + + for _, validator := range validators { + if participationPerEpoch[epochNMinus3][types.ValidatorIndex(validator)] && !participationPerEpoch[epochNMinus2][types.ValidatorIndex(validator)] && !participationPerEpoch[epochNMinus1][types.ValidatorIndex(validator)] && !participationPerEpoch[types.Epoch(epoch)][types.ValidatorIndex(validator)] { + //log.Infof("validator %v detected as offline in epoch %v (did not attest since epoch %v)", validator, epoch, epochNMinus2) + pubkey, err := GetPubkeyForIndex(validator) + if err != nil { + return err + } + offlineValidators = append(offlineValidators, &indexPubkeyPair{Index: validator, Pubkey: pubkey}) + } + + if !participationPerEpoch[epochNMinus3][types.ValidatorIndex(validator)] && !participationPerEpoch[epochNMinus2][types.ValidatorIndex(validator)] && !participationPerEpoch[epochNMinus1][types.ValidatorIndex(validator)] && participationPerEpoch[types.Epoch(epoch)][types.ValidatorIndex(validator)] { + //log.Infof("validator %v detected as online in epoch %v (attested again in epoch %v)", validator, epoch, epoch) + pubkey, err := GetPubkeyForIndex(validator) + if err != nil { + return err + } + onlineValidators = append(onlineValidators, &indexPubkeyPair{Index: validator, Pubkey: pubkey}) + } + } + + offlineValidatorsLimit := 5000 + if utils.Config.Notifications.OfflineDetectionLimit != 0 { + offlineValidatorsLimit = utils.Config.Notifications.OfflineDetectionLimit + } + + onlineValidatorsLimit := 5000 + if utils.Config.Notifications.OnlineDetectionLimit != 0 { + onlineValidatorsLimit = utils.Config.Notifications.OnlineDetectionLimit + } + + if len(offlineValidators) > offlineValidatorsLimit { + return fmt.Errorf("retrieved more than %v offline validator notifications: %v, exiting", offlineValidatorsLimit, len(offlineValidators)) + } + + if len(onlineValidators) > onlineValidatorsLimit { + return fmt.Errorf("retrieved more than %v online validator notifications: %v, exiting", onlineValidatorsLimit, len(onlineValidators)) + } + + subMapOnlineOffline, err := GetSubsForEventFilter(types.ValidatorIsOfflineEventName, "", nil, nil) + if err != nil { + return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorIsOfflineEventName, err) + } + + for _, validator := range offlineValidators { + t := hex.EncodeToString(validator.Pubkey) + subs := subMapOnlineOffline[t] + for _, sub := range subs { + if sub.UserID == nil || sub.ID == nil { + return fmt.Errorf("error expected 
userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) + } + log.Infof("new event: validator %v detected as offline since epoch %v", validator.Index, epoch) + + n := &ValidatorIsOfflineNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + Epoch: epoch, + EventName: sub.EventName, + LatestState: fmt.Sprint(epoch - 2), // first epoch the validator stopped attesting + EventFilter: hex.EncodeToString(validator.Pubkey), + UserID: *sub.UserID, + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + ValidatorIndex: validator.Index, + } + + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + + for _, validator := range onlineValidators { + t := hex.EncodeToString(validator.Pubkey) + subs := subMapOnlineOffline[t] + for _, sub := range subs { + if sub.UserID == nil || sub.ID == nil { + return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) + } + + log.Infof("new event: validator %v detected as online again at epoch %v", validator.Index, epoch) + + n := &ValidatorIsOnlineNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventName: types.ValidatorIsOnlineEventName, + EventFilter: hex.EncodeToString(validator.Pubkey), + LatestState: "-", + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + ValidatorIndex: validator.Index, + } + + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + + // subMapGroupOnlineOffline, err := GetSubsForEventFilter(types.ValidatorGroupIsOfflineEventName, "", nil, nil, validatorDashboardConfig) + // if err != nil { + // return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorGroupIsOfflineEventName, err) + // } + // type groupSummary struct { + // Online int + // Offline int + // Subscription types.Subscription + // } + // dashboardGroups := make(map[types.DashboardId]map[types.DashboardGroupId]*groupSummary) + + // for _, validator := range offlineValidators { + // t := hex.EncodeToString(validator.Pubkey) + // subs := subMapGroupOnlineOffline[t] + // for _, sub := range subs { + // if sub.UserID == nil || sub.ID == nil { + // return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) + // } + + // if sub.DashboardId == nil || sub.DashboardGroupId == nil { + // continue + // } + // dashboardId := types.DashboardId(*sub.DashboardId) + // dashboardGroupId := types.DashboardGroupId(*sub.DashboardGroupId) + + // if dashboardGroups[dashboardId] == nil { + // dashboardGroups[dashboardId] = make(map[types.DashboardGroupId]*groupSummary) + // dashboardGroups[dashboardId][dashboardGroupId] = &groupSummary{ + // Subscription: sub, + // } + // } + // dashboardGroups[dashboardId][dashboardGroupId].Offline++ + // } + // } + // for _, validator := range onlineValidators { + // t := hex.EncodeToString(validator.Pubkey) + // subs := subMapGroupOnlineOffline[t] + // for _, sub := range subs { + // if sub.UserID == nil || sub.ID == nil { + // return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) + // 
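// Intent of the commented-out block below (kept for a future change; sketch
// only, with illustrative numbers): per dashboard group, tally offline vs.
// total validators and notify once the offline share crosses the subscription
// threshold. Example: 12 of 200 group validators offline gives
// 12/200*100 = 6%, which fires for an EventThreshold of 6 or lower.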
// } + + // if sub.DashboardId == nil || sub.DashboardGroupId == nil { + // continue + // } + // dashboardId := types.DashboardId(*sub.DashboardId) + // dashboardGroupId := types.DashboardGroupId(*sub.DashboardGroupId) + + // if dashboardGroups[dashboardId] == nil { + // dashboardGroups[dashboardId] = make(map[types.DashboardGroupId]*groupSummary) + // dashboardGroups[dashboardId][dashboardGroupId] = &groupSummary{ + // Subscription: sub, + // } + // } + // dashboardGroups[dashboardId][dashboardGroupId].Online++ + // } + // } + + // for dashboardId, groups := range dashboardGroups { + // for groupId, data := range groups { + // totalGroupValidators := len(validatorDashboardConfig.DashboardsById[dashboardId].Groups[groupId].Validators) + + // offlinePercentage := float64(data.Offline) / float64(totalGroupValidators) * float64(100) + + // if offlinePercentage >= data.Subscription.EventThreshold { + // log.Infof("new event: group %v detected as offline in epoch %v (offline: %v, total: %v, percentage: %v, threshold: %v)", groupId, epoch, data.Offline, totalGroupValidators, offlinePercentage, data.Subscription.EventThreshold) + // } + // } + // } + return nil +} + +func collectValidatorGotSlashedNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { + dbResult, err := db.GetValidatorsGotSlashed(epoch) + if err != nil { + return fmt.Errorf("error getting slashed validators from database, err: %w", err) + } + pubkeyToSlashingInfoMap := make(map[string]*types.SlashingInfo) + for _, event := range dbResult { + pubkeyStr := hex.EncodeToString(event.SlashedValidatorPubkey) + pubkeyToSlashingInfoMap[pubkeyStr] = event + } + + subscribedUsers, err := GetSubsForEventFilter(types.ValidatorGotSlashedEventName, "", nil, nil) + if err != nil { + return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorGotSlashedEventName, err) + } + + for _, subs := range subscribedUsers { + for _, sub := range subs { + event := pubkeyToSlashingInfoMap[sub.EventFilter] + if event == nil { // pubkey has not been slashed + //log.Error(fmt.Errorf("error retrieving slashing info for public key %s", sub.EventFilter), "", 0) + continue + } + log.Infof("creating %v notification for validator %v in epoch %v", event.Reason, sub.EventFilter, epoch) + + n := &ValidatorGotSlashedNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + Slasher: event.SlasherIndex, + Reason: event.Reason, + ValidatorIndex: event.SlashedValidatorIndex, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + + return nil +} + +// collectWithdrawalNotifications collects all notifications for validator withdrawals +func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { + // get all users that are subscribed to this event (scale: a few thousand rows depending on how many users we have) + subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName, "", nil, nil) + if err != nil { + return fmt.Errorf("error getting subscriptions for validator withdrawals: %w", err) + } + + // get all the withdrawal events for a specific epoch. 
Will be at most X per slot (currently 16 on mainnet, which is 32 * 16 per epoch; 512 rows). + events, err := db.GetEpochWithdrawals(epoch) + if err != nil { + return fmt.Errorf("error getting withdrawals from database, err: %w", err) + } + + log.Infof("retrieved %v events", len(events)) + for _, event := range events { + subscribers, ok := subMap[hex.EncodeToString(event.Pubkey)] + if ok { + for _, sub := range subscribers { + if sub.UserID == nil || sub.ID == nil { + return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) + } + if sub.LastEpoch != nil { + lastSentEpoch := *sub.LastEpoch + if lastSentEpoch >= epoch || epoch < sub.CreatedEpoch { + continue + } + } + log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorReceivedWithdrawalEventName, event.ValidatorIndex, epoch) + n := &ValidatorWithdrawalNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + EventFilter: hex.EncodeToString(event.Pubkey), + EventName: sub.EventName, + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + Epoch: epoch, + }, + ValidatorIndex: event.ValidatorIndex, + Epoch: epoch, + Slot: event.Slot, + Amount: event.Amount, + Address: event.Address, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + } + + return nil +} + +func collectEthClientNotifications(notificationsByUserID types.NotificationsPerUserId) error { + updatedClients := ethclients.GetUpdatedClients() //only check if there are new updates + for _, client := range updatedClients { + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE + // us.event_name=$1 + // AND + // us.event_filter=$2 + // AND + // ((us.last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP($3) > us.last_sent_ts) OR us.last_sent_ts IS NULL) + // `, + // eventName, strings.ToLower(client.Name), client.Date.Unix()) // was last notification sent 2 days ago for this client + + dbResult, err := GetSubsForEventFilter( + types.EthClientUpdateEventName, + "((last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP(?) 
> last_sent_ts) OR last_sent_ts IS NULL)", + []interface{}{client.Date.Unix()}, + []string{strings.ToLower(client.Name)}) + if err != nil { + return err + } + + for _, subs := range dbResult { + for _, sub := range subs { + n := &EthClientNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + EthClient: client.Name, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + } + return nil +} + +func collectMonitoringMachineOffline(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { + nowTs := time.Now().Unix() + return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineOfflineEventName, 120, + // notify condition + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { + if machineData.CurrentDataInsertTs < nowTs-10*60 && machineData.CurrentDataInsertTs > nowTs-90*60 { + return true + } + return false + }, + epoch, + ) +} + +func isMachineDataRecent(machineData *types.MachineMetricSystemUser) bool { + nowTs := time.Now().Unix() + return machineData.CurrentDataInsertTs >= nowTs-60*60 +} + +func collectMonitoringMachineDiskAlmostFull(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { + return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineDiskAlmostFullEventName, 750, + // notify condition + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { + if !isMachineDataRecent(machineData) { + return false + } + + percentFree := float64(machineData.CurrentData.DiskNodeBytesFree) / float64(machineData.CurrentData.DiskNodeBytesTotal+1) + return percentFree < subscribeData.EventThreshold + }, + epoch, + ) +} + +func collectMonitoringMachineCPULoad(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { + return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineCpuLoadEventName, 10, + // notify condition + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { + if !isMachineDataRecent(machineData) { + return false + } + + if machineData.FiveMinuteOldData == nil { // no compare data found (5 min old data) + return false + } + + idle := float64(machineData.CurrentData.CpuNodeIdleSecondsTotal) - float64(machineData.FiveMinuteOldData.CpuNodeIdleSecondsTotal) + total := float64(machineData.CurrentData.CpuNodeSystemSecondsTotal) - float64(machineData.FiveMinuteOldData.CpuNodeSystemSecondsTotal) + percentLoad := float64(1) - (idle / total) + + return percentLoad > subscribeData.EventThreshold + }, + epoch, + ) +} + +func collectMonitoringMachineMemoryUsage(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { + return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineMemoryUsageEventName, 10, + // notify condition + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { + if !isMachineDataRecent(machineData) { + return false + } + + memFree := float64(machineData.CurrentData.MemoryNodeBytesFree) + float64(machineData.CurrentData.MemoryNodeBytesCached) + float64(machineData.CurrentData.MemoryNodeBytesBuffers) + 
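// Worked example (illustrative numbers, not part of this patch): free, cached
// and buffers all count as reclaimable memory. With free=2GiB, cached=3GiB,
// buffers=1GiB and total=16GiB: memUsage = 1 - (2+3+1)/16 = 0.625, so a
// subscription with EventThreshold 0.6 fires while one with 0.7 stays silent.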
memTotal := float64(machineData.CurrentData.MemoryNodeBytesTotal) + memUsage := float64(1) - (memFree / memTotal) + + return memUsage > subscribeData.EventThreshold + }, + epoch, + ) +} + +var isFirstNotificationCheck = true + +func collectMonitoringMachine( + notificationsByUserID types.NotificationsPerUserId, + eventName types.EventName, + epochWaitInBetween int, + notifyConditionFulfilled func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool, + epoch uint64, +) error { + // event_filter == machine name + + dbResult, err := GetSubsForEventFilter( + eventName, + "(created_epoch <= ? AND (last_sent_epoch < ? OR last_sent_epoch IS NULL))", + []interface{}{epoch, int64(epoch) - int64(epochWaitInBetween)}, + nil, + ) + + // TODO: clarify why we need grouping here?! + // err := db.FrontendWriterDB.Select(&allSubscribed, + // `SELECT + // us.user_id, + // max(us.id) AS id, + // ENCODE((array_agg(us.unsubscribe_hash))[1], 'hex') AS unsubscribe_hash, + // event_filter, + // COALESCE(event_threshold, 0) AS event_threshold + // FROM users_subscriptions us + // WHERE us.event_name = $1 AND us.created_epoch <= $2 + // AND (us.last_sent_epoch < ($2 - $3) OR us.last_sent_epoch IS NULL) + // group by us.user_id, event_filter, event_threshold`, + // eventName, epoch, epochWaitInBetween) + if err != nil { + return err + } + + rowKeys := gcp_bigtable.RowList{} + totalSubscribed := 0 + for _, data := range dbResult { + for _, sub := range data { + rowKeys = append(rowKeys, db.BigtableClient.GetMachineRowKey(*sub.UserID, "system", sub.EventFilter)) + totalSubscribed++ + } + } + + machineDataOfSubscribed, err := db.BigtableClient.GetMachineMetricsForNotifications(rowKeys) + if err != nil { + return err + } + + var result []*types.Subscription + for _, data := range dbResult { + for _, sub := range data { + localData := sub // Create a local copy of the data variable + machineMap, found := machineDataOfSubscribed[*localData.UserID] + if !found { + continue + } + currentMachineData, found := machineMap[localData.EventFilter] + if !found { + continue + } + + //logrus.Infof("currentMachineData %v | %v | %v | %v", currentMachine.CurrentDataInsertTs, currentMachine.CompareDataInsertTs, currentMachine.UserID, currentMachine.Machine) + if notifyConditionFulfilled(localData, currentMachineData) { + result = append(result, localData) + } + } + } + + subThreshold := uint64(10) + if utils.Config.Notifications.MachineEventThreshold != 0 { + subThreshold = utils.Config.Notifications.MachineEventThreshold + } + + subFirstRatioThreshold := 0.3 + if utils.Config.Notifications.MachineEventFirstRatioThreshold != 0 { + subFirstRatioThreshold = utils.Config.Notifications.MachineEventFirstRatioThreshold + } + + subSecondRatioThreshold := 0.9 + if utils.Config.Notifications.MachineEventSecondRatioThreshold != 0 { + subSecondRatioThreshold = utils.Config.Notifications.MachineEventSecondRatioThreshold + } + + var subScriptionCount uint64 + err = db.FrontendWriterDB.Get(&subScriptionCount, + `SELECT + COUNT(DISTINCT user_id) + FROM users_subscriptions + WHERE event_name = $1`, + eventName) + if err != nil { + return err + } + + // If there are too few users subscribed to this event, we always send the notifications + if subScriptionCount >= subThreshold { + subRatioThreshold := subSecondRatioThreshold + // For the machine offline check we do a low threshold check first and the next time a high threshold check + if isFirstNotificationCheck && eventName == types.MonitoringMachineOfflineEventName { + 
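// Two-stage guard, illustrated with made-up numbers: the first offline check
// after startup uses the 0.3 ratio, every later one 0.9. With 1000 machines
// subscribed and 400 currently matching, 400/1000 = 0.4 >= 0.3 suppresses the
// whole batch as a likely metrics outage rather than 400 real incidents; a
// later run with 50 matching (ratio 0.05) is delivered normally.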
subRatioThreshold = subFirstRatioThreshold + isFirstNotificationCheck = false + } + if float64(len(result))/float64(totalSubscribed) >= subRatioThreshold { + log.Error(nil, fmt.Errorf("error too many users would be notified concerning: %v", eventName), 0) + return nil + } + } + + for _, r := range result { + n := &MonitorMachineNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *r.ID, + UserID: *r.UserID, + EventName: r.EventName, + Epoch: epoch, + EventFilter: r.EventFilter, + DashboardId: r.DashboardId, + DashboardName: r.DashboardName, + DashboardGroupId: r.DashboardGroupId, + DashboardGroupName: r.DashboardGroupName, + }, + MachineName: r.EventFilter, + EventThreshold: r.EventThreshold, + } + //logrus.Infof("notify %v %v", eventName, n) + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + + if eventName == types.MonitoringMachineOfflineEventName { + // Notifications will be sent, reset the flag + isFirstNotificationCheck = true + } + + return nil +} + +func collectTaxReportNotificationNotifications(notificationsByUserID types.NotificationsPerUserId) error { + lastStatsDay, err := cache.LatestExportedStatisticDay.GetOrDefault(db.GetLastExportedStatisticDay) + + if err != nil { + return err + } + //Check that the last day of the month is already exported + tNow := time.Now() + firstDayOfMonth := time.Date(tNow.Year(), tNow.Month(), 1, 0, 0, 0, 0, time.UTC) + if utils.TimeToDay(uint64(firstDayOfMonth.Unix())) > lastStatsDay { + return nil + } + + // err = db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts < $2 OR (us.last_sent_ts IS NULL AND us.created_ts < $2)); + // `, + // name, firstDayOfMonth) + + dbResults, err := GetSubsForEventFilter( + types.TaxReportEventName, + "(last_sent_ts < ? 
OR (last_sent_ts IS NULL AND created_ts < ?))", + []interface{}{firstDayOfMonth, firstDayOfMonth}, + nil, + ) + if err != nil { + return err + } + + for _, subs := range dbResults { + for _, sub := range subs { + n := &TaxReportNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + + return nil +} + +func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUserId) error { + count := 0 + err := db.WriterDb.Get(&count, ` + SELECT count(ts) FROM network_liveness WHERE (headepoch-finalizedepoch) > 3 AND ts > now() - interval '60 minutes'; + `) + + if err != nil { + return err + } + + if count > 0 { + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '1 hour' OR us.last_sent_ts IS NULL); + // `, + // utils.GetNetwork()+":"+string(eventName)) + + dbResult, err := GetSubsForEventFilter( + types.NetworkLivenessIncreasedEventName, + "(last_sent_ts <= NOW() - INTERVAL '1 hour' OR last_sent_ts IS NULL)", + nil, + nil, + ) + if err != nil { + return err + } + + for _, subs := range dbResult { + for _, sub := range subs { + n := &NetworkNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + } + + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + } + + return nil +} + +func collectRocketpoolComissionNotifications(notificationsByUserID types.NotificationsPerUserId) error { + fee := 0.0 + err := db.WriterDb.Get(&fee, ` + select current_node_fee from rocketpool_network_stats order by id desc LIMIT 1; + `) + + if err != nil { + return err + } + + if fee > 0 { + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '8 hours' OR us.last_sent_ts IS NULL) AND (us.event_threshold <= $2 OR (us.event_threshold < 0 AND us.event_threshold * -1 >= $2)); + // `, + // utils.GetNetwork()+":"+string(eventName), fee) + + dbResult, err := GetSubsForEventFilter( + types.RocketpoolCommissionThresholdEventName, + "(last_sent_ts <= NOW() - INTERVAL '8 hours' OR last_sent_ts IS NULL) AND (event_threshold <= ? 
OR (event_threshold < 0 AND event_threshold * -1 >= ?))", + []interface{}{fee, fee}, + nil, + ) + if err != nil { + return err + } + + for _, subs := range dbResult { + for _, sub := range subs { + n := &RocketpoolNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", + } + + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + } + + return nil +} + +func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types.NotificationsPerUserId) error { + var ts int64 + err := db.WriterDb.Get(&ts, ` + select date_part('epoch', claim_interval_time_start)::int from rocketpool_network_stats order by id desc LIMIT 1; + `) + + if err != nil { + return err + } + + if ts+3*60*60 > time.Now().Unix() { + // var dbResult []*types.Subscription + + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '5 hours' OR us.last_sent_ts IS NULL); + // `, + // utils.GetNetwork()+":"+string(eventName)) + + dbResult, err := GetSubsForEventFilter( + types.RocketpoolNewClaimRoundStartedEventName, + "(last_sent_ts <= NOW() - INTERVAL '5 hours' OR last_sent_ts IS NULL)", + nil, + nil, + ) + if err != nil { + return err + } + + for _, subs := range dbResult { + for _, sub := range subs { + n := &RocketpoolNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + } + + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + } + + return nil +} + +func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epoch uint64) error { + subMap, err := GetSubsForEventFilter( + eventName, + "(last_sent_ts <= NOW() - INTERVAL '24 hours' OR last_sent_ts IS NULL)", // send out this notification type only once per day + nil, + nil) + if err != nil { + return fmt.Errorf("error getting subscriptions for RocketpoolRPLCollateral %w", err) + } + + type dbResult struct { + Address []byte + RPLStake BigFloat `db:"rpl_stake"` + RPLStakeMin BigFloat `db:"min_rpl_stake"` + RPLStakeMax BigFloat `db:"max_rpl_stake"` + } + + // filter nodes with no minipools (anymore) because they have min/max stake of 0 + // TODO properly remove notification entry from db + stakeInfoPerNode := make([]dbResult, 0) + batchSize := 5000 + keys := make([][]byte, 0, batchSize) + for pubkey := range subMap { + b, err := hex.DecodeString(pubkey) + if err != nil { + log.Error(err, fmt.Sprintf("error decoding pubkey %s", pubkey), 0) + continue + } + keys = append(keys, b) + + if len(keys) > batchSize { + var partial []dbResult + + err = 
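// The query below is fed in batches of roughly batchSize hex-decoded event
// filters (rocketpool node addresses as bytea), so the ANY($1) parameter stays
// bounded regardless of how many RPL subscriptions exist; partial results are
// appended to stakeInfoPerNode and the key buffer is reset between batches.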
db.WriterDb.Select(&partial, ` + SELECT address, rpl_stake, min_rpl_stake, max_rpl_stake + FROM rocketpool_nodes + WHERE address = ANY($1) AND min_rpl_stake != 0 AND max_rpl_stake != 0`, pq.ByteaArray(keys)) + if err != nil { + return err + } + stakeInfoPerNode = append(stakeInfoPerNode, partial...) + keys = make([][]byte, 0, batchSize) + } + } + if len(keys) > 0 { + var partial []dbResult + + // filter nodes with no minipools (anymore) because they have min/max stake of 0 + // TODO properly remove notification entry from db + err = db.WriterDb.Select(&partial, ` + SELECT address, rpl_stake, min_rpl_stake, max_rpl_stake + FROM rocketpool_nodes + WHERE address = ANY($1) AND min_rpl_stake != 0 AND max_rpl_stake != 0`, pq.ByteaArray(keys)) + if err != nil { + return err + } + stakeInfoPerNode = append(stakeInfoPerNode, partial...) + } + + // factor in network-wide min/max collat ratio. Since LEB8 they are not directly correlated anymore (ratio of bonded to borrowed ETH), so we need either min or max + // however this is dynamic and might be changed in the future; Should extend rocketpool_network_stats to include min/max collateral values! + minRPLCollatRatio := bigFloat(0.1) // bigFloat it to save some memory re-allocations + maxRPLCollatRatio := bigFloat(1.5) + // temporary helper (modifying values in dbResult directly would be bad style) + nodeCollatRatioHelper := bigFloat(0) + + for _, r := range stakeInfoPerNode { + subs, ok := subMap[hex.EncodeToString(r.Address)] + if !ok { + continue + } + sub := subs[0] // RPL min/max collateral notifications are always unique per user + var alertConditionMet bool + + // according to app logic, sub.EventThreshold can be +- [0.9 to 1.5] for CollateralMax after manually changed by the user + // this corresponds to a collateral range of 140% to 200% currently shown in the app UI; so +- 0.5 allows us to compare to the actual collat ratio + // for CollateralMin it can be 1.0 to 4.0 if manually changed, to represent 10% to 40% + // 0 in both cases if not modified + var threshold float64 = sub.EventThreshold + if threshold == 0 { + threshold = 1.0 // default case + } + inverse := false + if eventName == types.RocketpoolCollateralMaxReachedEventName { + if threshold < 0 { + threshold *= -1 + } else { + inverse = true + } + threshold += 0.5 + + // 100% (of bonded eth) + nodeCollatRatioHelper.Quo(r.RPLStakeMax.bigFloat(), maxRPLCollatRatio) + } else { + threshold /= 10.0 + + // 100% (of borrowed eth) + nodeCollatRatioHelper.Quo(r.RPLStakeMin.bigFloat(), minRPLCollatRatio) + } + + nodeCollatRatio, _ := nodeCollatRatioHelper.Quo(r.RPLStake.bigFloat(), nodeCollatRatioHelper).Float64() + + alertConditionMet = nodeCollatRatio <= threshold + if inverse { + // handle special case for max collateral: notify if *above* selected amount + alertConditionMet = !alertConditionMet + } + + if !alertConditionMet { + continue + } + + if sub.LastEpoch != nil { + lastSentEpoch := *sub.LastEpoch + if lastSentEpoch >= epoch-225 || epoch < sub.CreatedEpoch { + continue + } + } + + n := &RocketpoolNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + ExtraData: strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.2f", threshold*100), "0"), "."), + } + + 
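// Worked example for the min-collateral branch (illustrative values): the
// default threshold 1.0 becomes 1.0/10 = 0.1, i.e. 10% of borrowed ETH. For a
// node with min_rpl_stake = 100 RPL the helper is 100/0.1 = 1000, so
// rpl_stake = 95 RPL gives nodeCollatRatio = 0.095 <= 0.1 and the notification
// fires, while 110 RPL (ratio 0.11) stays silent.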
notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + + return nil +} + +func collectSyncCommitteeNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { + slotsPerSyncCommittee := utils.SlotsPerSyncCommittee() + currentPeriod := epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch / slotsPerSyncCommittee + nextPeriod := currentPeriod + 1 + + var validators []struct { + PubKey string `db:"pubkey"` + Index uint64 `db:"validatorindex"` + } + err := db.WriterDb.Select(&validators, `SELECT ENCODE(pubkey, 'hex') AS pubkey, validators.validatorindex FROM sync_committees LEFT JOIN validators ON validators.validatorindex = sync_committees.validatorindex WHERE period = $1`, nextPeriod) + + if err != nil { + return err + } + + if len(validators) <= 0 { + return nil + } + + var mapping map[string]uint64 = make(map[string]uint64) + for _, val := range validators { + mapping[val.PubKey] = val.Index + } + + dbResult, err := GetSubsForEventFilter(types.SyncCommitteeSoonEventName, "(last_sent_ts <= NOW() - INTERVAL '26 hours' OR last_sent_ts IS NULL)", nil, nil) + + if err != nil { + return err + } + + for pubkey := range mapping { + subs, ok := dbResult[pubkey] + if ok { + for _, sub := range subs { + n := &SyncCommitteeSoonNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + ValidatorIndex: mapping[sub.EventFilter], + StartEpoch: nextPeriod * utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, + EndEpoch: (nextPeriod + 1) * utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + } + + return nil +} + +func getSyncCommitteeSoonInfo(format types.NotificationFormat, ns map[types.EventFilter]types.Notification) string { + validators := []uint64{} + var startEpoch, endEpoch uint64 + var inTime time.Duration + + i := 0 + for _, n := range ns { + n, ok := n.(*SyncCommitteeSoonNotification) + if !ok { + log.Error(nil, "Sync committee notification not of type syncCommitteeSoonNotification", 0) + return "" + } + + validators = append(validators, n.ValidatorIndex) + if i == 0 { + // startEpoch, endEpoch and inTime must be the same for all validators + startEpoch = n.StartEpoch + endEpoch = n.EndEpoch + + inTime = time.Until(utils.EpochToTime(startEpoch)).Round(time.Second) + } + i++ + } + + if len(validators) > 0 { + startEpochFormatted := formatEpochLink(format, startEpoch) + endEpochFormatted := formatEpochLink(format, endEpoch) + validatorsInfo := "" + if len(validators) == 1 { + vali := formatValidatorLink(format, validators[0]) + validatorsInfo = fmt.Sprintf(`Your validator %s has been elected to be part of the next sync committee.`, vali) + } else { + validatorsText := "" + for i, validator := range validators { + vali := formatValidatorLink(format, validator) + if i < len(validators)-1 { + validatorsText += fmt.Sprintf("%v, ", vali) + } else { + validatorsText += fmt.Sprintf("and %v", vali) + } + } + validatorsInfo = fmt.Sprintf(`Your validators %s have been elected to be part of the next sync committee.`, validatorsText) + } + return fmt.Sprintf(`%s The additional 
duties start at epoch %v, which is in %v and will last for about a day until epoch %v.`, validatorsInfo, startEpochFormatted, inTime, endEpochFormatted) + } + + return "" +} diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index eed83b0f1..d7ab55394 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -2,57 +2,298 @@ package notification import ( "encoding/hex" + "fmt" + "strconv" + "strings" + "time" + "github.com/doug-martin/goqu/v9" "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/jmoiron/sqlx" "github.com/lib/pq" ) -func GetSubsForEventFilter(eventName types.EventName) ([][]byte, map[string][]types.Subscription, error) { - var subs []types.Subscription - subQuery := ` - SELECT id, user_id, event_filter, last_sent_epoch, created_epoch, event_threshold, ENCODE(unsubscribe_hash, 'hex') as unsubscribe_hash, internal_state from users_subscriptions where event_name = $1 - ` +// Retrieves all subscriptions for a given event filter. +// The map key corresponds to the event filter, which can be +// a validator pubkey or an eth1 address (for RPL notifications) +// or a list of validators for the tax report notifications +// or a machine name for machine notifications or an eth client name for Ethereum client update notifications. +// Optionally, a filter on the last sent ts and on the event filter +// fields can be set. +func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, lastSentFilterArgs []interface{}, eventFilters []string) (map[string][]*types.Subscription, error) { + var subs []*types.Subscription - subMap := make(map[string][]types.Subscription, 0) - err := db.FrontendWriterDB.Select(&subs, subQuery, utils.GetNetwork()+":"+string(eventName)) + // subQuery := ` + // SELECT + // id, + // user_id, + // event_filter, + // last_sent_epoch, + // created_epoch, + // event_threshold, + // ENCODE(unsubscribe_hash, 'hex') as unsubscribe_hash, + // internal_state + // from users_subscriptions + // where event_name = $1 + // ` + + eventNameForQuery := utils.GetNetwork() + ":" + string(eventName) + + if _, ok := types.UserIndexEventsMap[eventName]; ok { + eventNameForQuery = string(eventName) + } + ds := goqu.Dialect("postgres").From("users_subscriptions").Select( + goqu.T("users_subscriptions").Col("id"), + goqu.C("user_id"), + goqu.C("event_filter"), + goqu.C("last_sent_epoch"), + goqu.C("created_epoch"), + goqu.C("event_threshold"), + goqu.C("event_name"), + ).Join(goqu.T("users"), goqu.On(goqu.T("users").Col("id").Eq(goqu.T("users_subscriptions").Col("user_id")))). + Where(goqu.L("(event_name = ? AND user_id <> 0)", eventNameForQuery)). + Where(goqu.L("(users.notifications_do_not_disturb_ts IS NULL OR users.notifications_do_not_disturb_ts < NOW())")). 
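// For orientation (approximate, assuming goqu's postgres dialect renders the
// chain above unchanged): the final prepared statement is shaped roughly like
//
//	SELECT users_subscriptions.id, user_id, event_filter, last_sent_epoch,
//	       created_epoch, event_threshold, event_name
//	FROM users_subscriptions
//	INNER JOIN users ON users.id = users_subscriptions.user_id
//	WHERE (event_name = $1 AND user_id <> 0)
//	  AND (users.notifications_do_not_disturb_ts IS NULL OR users.notifications_do_not_disturb_ts < NOW())
//
// plus the channel-active subselect and any optional last-sent / event-filter
// predicates added below, before Prepared(true).ToSQL() renders it.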
+ // filter out users that have all notification channels disabled + Where(goqu.L("(select bool_or(active) from users_notification_channels where users_notification_channels.user_id = users_subscriptions.user_id)")) + + if lastSentFilter != "" { + if len(lastSentFilterArgs) > 0 { + ds = ds.Where(goqu.L(lastSentFilter, lastSentFilterArgs...)) + } else { + ds = ds.Where(goqu.L(lastSentFilter)) + } + } + if len(eventFilters) > 0 { + ds = ds.Where(goqu.L("(event_filter = ANY(?))", pq.StringArray(eventFilters))) + } + + query, args, err := ds.Prepared(true).ToSQL() + if err != nil { + return nil, err + } + + subMap := make(map[string][]*types.Subscription, 0) + err = db.FrontendWriterDB.Select(&subs, query, args...) if err != nil { - return nil, nil, err + return nil, err } - filtersEncode := make([][]byte, 0, len(subs)) + log.Infof("found %d subscriptions for event %s", len(subs), eventName) + + dashboardConfigsToFetch := make([]types.DashboardId, 0) for _, sub := range subs { - if _, ok := subMap[sub.EventFilter]; !ok { - subMap[sub.EventFilter] = make([]types.Subscription, 0) + // sub.LastEpoch = &zero + // sub.LastSent = &time.Time{} + sub.EventName = types.EventName(strings.Replace(string(sub.EventName), utils.GetNetwork()+":", "", 1)) // remove the network name from the event name + if strings.HasPrefix(sub.EventFilter, "vdb:") { + dashboardData := strings.Split(sub.EventFilter, ":") + if len(dashboardData) != 3 { + log.Error(fmt.Errorf("invalid dashboard subscription: %s", sub.EventFilter), "invalid dashboard subscription", 0) + continue + } + dashboardId, err := strconv.ParseInt(dashboardData[1], 10, 64) + if err != nil { + log.Error(err, "Invalid dashboard subscription", 0) + continue + } + sub.DashboardId = &dashboardId + + dashboardGroupId, err := strconv.ParseInt(dashboardData[2], 10, 64) + if err != nil { + log.Error(err, "Invalid dashboard subscription", 0) + continue + } + sub.DashboardGroupId = &dashboardGroupId + + dashboardConfigsToFetch = append(dashboardConfigsToFetch, types.DashboardId(dashboardId)) + } else { + if _, ok := subMap[sub.EventFilter]; !ok { + subMap[sub.EventFilter] = make([]*types.Subscription, 0) + } + subMap[sub.EventFilter] = append(subMap[sub.EventFilter], sub) } - subMap[sub.EventFilter] = append(subMap[sub.EventFilter], types.Subscription{ - UserID: sub.UserID, - ID: sub.ID, - LastEpoch: sub.LastEpoch, - EventFilter: sub.EventFilter, - CreatedEpoch: sub.CreatedEpoch, - EventThreshold: sub.EventThreshold, - State: sub.State, - }) - - b, _ := hex.DecodeString(sub.EventFilter) - filtersEncode = append(filtersEncode, b) } - return filtersEncode, subMap, nil + + if len(dashboardConfigsToFetch) > 0 { + log.Infof("fetching dashboard configurations for %d dashboards (%v)", len(dashboardConfigsToFetch), dashboardConfigsToFetch) + dashboardConfigRetrievalStartTs := time.Now() + type dashboardDefinitionRow struct { + DashboardId types.DashboardId `db:"dashboard_id"` + DashboardName string `db:"dashboard_name"` + UserId types.UserId `db:"user_id"` + GroupId types.DashboardGroupId `db:"group_id"` + GroupName string `db:"group_name"` + ValidatorIndex types.ValidatorIndex `db:"validator_index"` + WebhookTarget string `db:"webhook_target"` + WebhookFormat string `db:"webhook_format"` + } + var dashboardDefinitions []dashboardDefinitionRow + err = db.AlloyWriter.Select(&dashboardDefinitions, ` + SELECT + users_val_dashboards.id as dashboard_id, + users_val_dashboards.name as dashboard_name, + users_val_dashboards.user_id, + users_val_dashboards_groups.id as group_id, + 
users_val_dashboards_groups.name as group_name, + users_val_dashboards_validators.validator_index, + COALESCE(users_val_dashboards_groups.webhook_target, '') AS webhook_target, + COALESCE(users_val_dashboards_groups.webhook_format, '') AS webhook_format + FROM users_val_dashboards + LEFT JOIN users_val_dashboards_groups ON users_val_dashboards_groups.dashboard_id = users_val_dashboards.id + LEFT JOIN users_val_dashboards_validators ON users_val_dashboards_validators.dashboard_id = users_val_dashboards_groups.dashboard_id AND users_val_dashboards_validators.group_id = users_val_dashboards_groups.id + WHERE users_val_dashboards_validators.validator_index IS NOT NULL AND users_val_dashboards.id = ANY($1) + `, pq.Array(dashboardConfigsToFetch)) + if err != nil { + return nil, fmt.Errorf("error getting dashboard definitions: %v", err) + } + log.Infof("retrieved %d dashboard definitions", len(dashboardDefinitions)) + + // Now initialize the validator dashboard configuration map + validatorDashboardConfig := &types.ValidatorDashboardConfig{ + DashboardsById: make(map[types.DashboardId]*types.ValidatorDashboard), + RocketpoolNodeByPubkey: make(map[string]string), + } + for _, row := range dashboardDefinitions { + if validatorDashboardConfig.DashboardsById[row.DashboardId] == nil { + validatorDashboardConfig.DashboardsById[row.DashboardId] = &types.ValidatorDashboard{ + Name: row.DashboardName, + Groups: make(map[types.DashboardGroupId]*types.ValidatorDashboardGroup), + } + } + if validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId] == nil { + validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId] = &types.ValidatorDashboardGroup{ + Name: row.GroupName, + Validators: []uint64{}, + } + } + validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId].Validators = append(validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId].Validators, uint64(row.ValidatorIndex)) + } + + log.Infof("retrieving dashboard definitions took: %v", time.Since(dashboardConfigRetrievalStartTs)) + + // Now collect the mapping of rocketpool node addresses to validator pubkeys + // This is needed for the rocketpool notifications + type rocketpoolNodeRow struct { + Pubkey []byte `db:"pubkey"` + NodeAddress []byte `db:"node_address"` + } + + var rocketpoolNodes []rocketpoolNodeRow + err = db.AlloyWriter.Select(&rocketpoolNodes, ` + SELECT + pubkey, + node_address + FROM rocketpool_minipools;`) + if err != nil { + return nil, fmt.Errorf("error getting rocketpool node addresses: %v", err) + } + + for _, row := range rocketpoolNodes { + validatorDashboardConfig.RocketpoolNodeByPubkey[hex.EncodeToString(row.Pubkey)] = hex.EncodeToString(row.NodeAddress) + } + + //log.Infof("retrieved %d rocketpool node addresses", len(rocketpoolNodes)) + + for _, sub := range subs { + if strings.HasPrefix(sub.EventFilter, "vdb:") { + //log.Infof("hydrating subscription for dashboard %d and group %d for user %d", *sub.DashboardId, *sub.DashboardGroupId, *sub.UserID) + if dashboard, ok := validatorDashboardConfig.DashboardsById[types.DashboardId(*sub.DashboardId)]; ok { + if dashboard.Name == "" { + dashboard.Name = fmt.Sprintf("Dashboard %d", *sub.DashboardId) + } + if group, ok := dashboard.Groups[types.DashboardGroupId(*sub.DashboardGroupId)]; ok { + if group.Name == "" { + group.Name = "default" + } + + uniqueRPLNodes := make(map[string]struct{}) + + for _, validatorIndex := range group.Validators { + validatorEventFilterRaw, err := GetPubkeyForIndex(validatorIndex) 
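// Hydration fan-out (illustrative ids): one stored row with event_filter
// "vdb:17:2" whose group contains validators 101 and 102 becomes two synthetic
// *types.Subscription entries keyed by each validator's pubkey, or, for the
// two RPL collateral events, one entry per unique rocketpool node address, so
// downstream collectors can treat dashboard subscriptions exactly like classic
// per-pubkey ones.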
+ if err != nil { + log.Error(err, "error retrieving pubkey for validator", 0, map[string]interface{}{"validator": validatorIndex}) + continue + } + validatorEventFilter := hex.EncodeToString(validatorEventFilterRaw) + + if eventName == types.RocketpoolCollateralMaxReachedEventName || eventName == types.RocketpoolCollateralMinReachedEventName { + // Those two RPL notifications are not tied to a specific validator but to a node address, create a subscription for each + // node in the group + nodeAddress, ok := validatorDashboardConfig.RocketpoolNodeByPubkey[validatorEventFilter] + if !ok { + // Validator is not a rocketpool minipool + continue + } + if _, ok := uniqueRPLNodes[nodeAddress]; !ok { + if _, ok := subMap[nodeAddress]; !ok { + subMap[nodeAddress] = make([]*types.Subscription, 0) + } + hydratedSub := &types.Subscription{ + ID: sub.ID, + UserID: sub.UserID, + EventName: sub.EventName, + EventFilter: nodeAddress, + LastSent: sub.LastSent, + LastEpoch: sub.LastEpoch, + CreatedTime: sub.CreatedTime, + CreatedEpoch: sub.CreatedEpoch, + EventThreshold: sub.EventThreshold, + DashboardId: sub.DashboardId, + DashboardName: dashboard.Name, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: group.Name, + } + subMap[nodeAddress] = append(subMap[nodeAddress], hydratedSub) + //log.Infof("hydrated subscription for validator %v of dashboard %d and group %d for user %d", hydratedSub.EventFilter, *hydratedSub.DashboardId, *hydratedSub.DashboardGroupId, *hydratedSub.UserID) + } + uniqueRPLNodes[nodeAddress] = struct{}{} + } else { + if _, ok := subMap[validatorEventFilter]; !ok { + subMap[validatorEventFilter] = make([]*types.Subscription, 0) + } + hydratedSub := &types.Subscription{ + ID: sub.ID, + UserID: sub.UserID, + EventName: sub.EventName, + EventFilter: validatorEventFilter, + LastSent: sub.LastSent, + LastEpoch: sub.LastEpoch, + CreatedTime: sub.CreatedTime, + CreatedEpoch: sub.CreatedEpoch, + EventThreshold: sub.EventThreshold, + DashboardId: sub.DashboardId, + DashboardName: dashboard.Name, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: group.Name, + } + subMap[validatorEventFilter] = append(subMap[validatorEventFilter], hydratedSub) + //log.Infof("hydrated subscription for validator %v of dashboard %d and group %d for user %d", hydratedSub.EventFilter, *hydratedSub.DashboardId, *hydratedSub.DashboardGroupId, *hydratedSub.UserID) + } + } + } + } + } + } + //log.Infof("hydrated %d subscriptions for event %s", len(subMap), eventName) + } + + return subMap, nil } -func GetUserPushTokenByIds(ids []uint64) (map[uint64][]string, error) { - pushByID := map[uint64][]string{} +func GetUserPushTokenByIds(ids []types.UserId, userDbConn *sqlx.DB) (map[types.UserId][]string, error) { + pushByID := map[types.UserId][]string{} if len(ids) == 0 { return pushByID, nil } var rows []struct { - ID uint64 `db:"user_id"` - Token string `db:"notification_token"` + ID types.UserId `db:"user_id"` + Token string `db:"notification_token"` } - err := db.FrontendWriterDB.Select(&rows, "SELECT DISTINCT ON (user_id, notification_token) user_id, notification_token FROM users_devices WHERE (user_id = ANY($1) AND user_id NOT IN (SELECT user_id from users_notification_channels WHERE active = false and channel = $2)) AND notify_enabled = true AND active = true AND notification_token IS NOT NULL AND LENGTH(notification_token) > 20 ORDER BY user_id, notification_token, id DESC", pq.Array(ids), types.PushNotificationChannel) + err := userDbConn.Select(&rows, "SELECT DISTINCT ON (user_id, 
notification_token) user_id, notification_token FROM users_devices WHERE (user_id = ANY($1) AND user_id NOT IN (SELECT user_id from users_notification_channels WHERE active = false and channel = $2)) AND notify_enabled = true AND active = true AND notification_token IS NOT NULL AND LENGTH(notification_token) > 20 ORDER BY user_id, notification_token, id DESC", pq.Array(ids), types.PushNotificationChannel) if err != nil { return nil, err } @@ -69,14 +310,14 @@ func GetUserPushTokenByIds(ids []uint64) (map[uint64][]string, error) { } // GetUserEmailsByIds returns the emails of users. -func GetUserEmailsByIds(ids []uint64) (map[uint64]string, error) { - mailsByID := map[uint64]string{} +func GetUserEmailsByIds(ids []types.UserId) (map[types.UserId]string, error) { + mailsByID := map[types.UserId]string{} if len(ids) == 0 { return mailsByID, nil } var rows []struct { - ID uint64 `db:"id"` - Email string `db:"email"` + ID types.UserId `db:"id"` + Email string `db:"email"` } // err := db.FrontendWriterDB.Select(&rows, "SELECT id, email FROM users WHERE id = ANY($1) AND id NOT IN (SELECT user_id from users_notification_channels WHERE active = false and channel = $2)", pq.Array(ids), types.EmailNotificationChannel) diff --git a/backend/pkg/notification/firebase.go b/backend/pkg/notification/firebase.go index 91291da54..e23a538c0 100644 --- a/backend/pkg/notification/firebase.go +++ b/backend/pkg/notification/firebase.go @@ -2,13 +2,16 @@ package notification import ( "context" + "fmt" "strings" "time" - firebase "firebase.google.com/go" - "firebase.google.com/go/messaging" + firebase "firebase.google.com/go/v4" + "firebase.google.com/go/v4/messaging" + "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "google.golang.org/api/option" ) @@ -17,17 +20,19 @@ func isRelevantError(response *messaging.SendResponse) bool { if !response.Success && response.Error != nil { // Ignore https://stackoverflow.com/questions/58308835/using-firebase-for-notifications-getting-app-instance-has-been-unregistered // Errors since they indicate that the user token is expired - if !strings.Contains(response.Error.Error(), "registration-token-not-registered") { + if !strings.Contains(response.Error.Error(), "registration-token-not-registered") && + !strings.Contains(response.Error.Error(), "Requested entity was not found.") && + !strings.Contains(response.Error.Error(), "Request contains an invalid argument.") { return true } } return false } -func SendPushBatch(messages []*messaging.Message) error { +func SendPushBatch(userId types.UserId, messages []*messaging.Message, dryRun bool) error { credentialsPath := utils.Config.Notifications.FirebaseCredentialsPath if credentialsPath == "" { - log.Error(nil, "firebase credentials path not provided, disabling push notifications", 0) + log.Error(fmt.Errorf("firebase credentials path not provided, disabling push notifications"), "error initializing SendPushBatch", 0) return nil } @@ -42,29 +47,40 @@ func SendPushBatch(messages []*messaging.Message) error { app, err := firebase.NewApp(context.Background(), nil, opt) if err != nil { - log.Error(nil, "error initializing app", 0) + log.Error(err, "error initializing app", 0) return err } client, err := app.Messaging(ctx) if err != nil { - log.Error(nil, "error initializing messaging", 0) + log.Error(err, "error initializing messaging", 0) return err } - var waitBeforeTryInSeconds = 
[]time.Duration{0 * time.Second, 2 * time.Second, 4 * time.Second, 8 * time.Second, 16 * time.Second} + var waitBeforeTryInSeconds = []int{0, 2, 4, 8, 16} var resultSuccessCount, resultFailureCount int = 0, 0 var result *messaging.BatchResponse currentMessages := messages + + for range messages { + _, err := db.CountSentMessage("n_push", userId) + if err != nil { + log.Error(err, "error counting sent push", 0) + } + } + tries := 0 for _, s := range waitBeforeTryInSeconds { - time.Sleep(s) + time.Sleep(time.Duration(s) * time.Second) tries++ - - result, err = client.SendAll(context.Background(), currentMessages) + if dryRun { + result, err = client.SendEachDryRun(context.Background(), currentMessages) + } else { + result, err = client.SendEach(context.Background(), currentMessages) + } if err != nil { - log.Error(nil, "error sending push notifications", 0) + log.Error(err, "error sending push notifications", 0) return err } @@ -74,7 +90,9 @@ func SendPushBatch(messages []*messaging.Message) error { newMessages := make([]*messaging.Message, 0, result.FailureCount) if result.FailureCount > 0 { for i, response := range result.Responses { + log.Info(response) if isRelevantError(response) { + log.Infof("retrying message %d", i) newMessages = append(newMessages, currentMessages[i]) resultFailureCount-- } @@ -90,7 +108,7 @@ func SendPushBatch(messages []*messaging.Message) error { if len(currentMessages) > 0 { for _, response := range result.Responses { if isRelevantError(response) { - log.Error(nil, "firebase error", 0, log.Fields{"MessageID": response.MessageID, "response": response.Error}) + log.Error(fmt.Errorf("firebase error, message id: %s, error: %s", response.MessageID, response.Error), "error sending push notifications", 0) resultFailureCount++ } } diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 1ba2266bf..7bb71162a 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1,3254 +1,30 @@ package notification import ( - "bytes" - "context" - "crypto/sha256" - "database/sql" - "database/sql/driver" - "encoding/hex" - "encoding/json" - "errors" - - "fmt" - "html" - "html/template" - "io" - "math/big" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - gcp_bigtable "cloud.google.com/go/bigtable" - "firebase.google.com/go/messaging" - "github.com/ethereum/go-ethereum/common" - "github.com/gobitfly/beaconchain/pkg/commons/cache" - "github.com/gobitfly/beaconchain/pkg/commons/db" - "github.com/gobitfly/beaconchain/pkg/commons/ethclients" "github.com/gobitfly/beaconchain/pkg/commons/log" - "github.com/gobitfly/beaconchain/pkg/commons/mail" - "github.com/gobitfly/beaconchain/pkg/commons/metrics" - "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/types" - "github.com/gobitfly/beaconchain/pkg/commons/utils" - - "github.com/jmoiron/sqlx" - "github.com/lib/pq" - "github.com/rocket-pool/rocketpool-go/utils/eth" - "golang.org/x/text/cases" - "golang.org/x/text/language" + "github.com/gobitfly/beaconchain/pkg/exporter/modules" ) -func InitNotificationSender() { - log.Infof("starting notifications-sender") - go notificationSender() -} - -func InitNotificationCollector(pubkeyCachePath string) { - err := initPubkeyCache(pubkeyCachePath) - if err != nil { - log.Fatal(err, "error initializing pubkey cache path for notifications", 0) - } - - go ethclients.Init() - - go notificationCollector() -} - -// the notificationCollector is responsible 
for collecting & queuing notifications -// it is epoch based and will only collect notification for a given epoch once -// notifications are collected in ascending epoch order -// the epochs_notified sql table is used to keep track of already notified epochs -// before collecting notifications several db consistency checks are done -func notificationCollector() { - for { - latestFinalizedEpoch := cache.LatestFinalizedEpoch.Get() - - if latestFinalizedEpoch < 4 { - log.Error(nil, "pausing notifications until at least 5 epochs have been exported into the db", 0) - time.Sleep(time.Minute) - continue - } - - var lastNotifiedEpoch uint64 - err := db.WriterDb.Get(&lastNotifiedEpoch, "SELECT COALESCE(MAX(epoch), 0) FROM epochs_notified") - - if err != nil { - log.Error(err, "error retrieving last notified epoch from the db", 0) - time.Sleep(time.Minute) - continue - } - - log.Infof("latest finalized epoch is %v, latest notified epoch is %v", latestFinalizedEpoch, lastNotifiedEpoch) - - if latestFinalizedEpoch < lastNotifiedEpoch { - log.Error(nil, "notification consistency error, lastest finalized epoch is lower than the last notified epoch!", 0) - time.Sleep(time.Minute) - continue - } - - if latestFinalizedEpoch-lastNotifiedEpoch > 5 { - log.Infof("last notified epoch is more than 5 epochs behind the last finalized epoch, limiting lookback to last 5 epochs") - lastNotifiedEpoch = latestFinalizedEpoch - 5 - } - - for epoch := lastNotifiedEpoch + 1; epoch <= latestFinalizedEpoch; epoch++ { - var exported uint64 - err := db.WriterDb.Get(&exported, "SELECT COUNT(*) FROM epochs WHERE epoch <= $1 AND epoch >= $2", epoch, epoch-3) - if err != nil { - log.Error(err, "error retrieving export status of epoch", 0, log.Fields{"epoch": epoch}) - services.ReportStatus("notification-collector", "Error", nil) - break - } - - if exported != 4 { - log.Error(nil, "epoch notification consistency error, epochs are not all yet exported into the db", 0, log.Fields{"epoch start": epoch, "epoch end": epoch - 3, "exported": exported, "wanted": 4}) - } - - start := time.Now() - log.Infof("collecting notifications for epoch %v", epoch) - - // Network DB Notifications (network related) - notifications, err := collectNotifications(epoch) - - if err != nil { - log.Error(err, "error collection notifications", 0) - services.ReportStatus("notification-collector", "Error", nil) - break - } - - _, err = db.WriterDb.Exec("INSERT INTO epochs_notified VALUES ($1, NOW())", epoch) - if err != nil { - log.Error(err, "error marking notification status for epoch %v in db: %v", 0, log.Fields{"epoch": epoch}) - services.ReportStatus("notification-collector", "Error", nil) - break - } - - queueNotifications(notifications, db.FrontendWriterDB) // this caused the collected notifications to be queued and sent - - // Network DB Notifications (user related, must only run on one instance ever!!!!) 
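The comment above stresses that the user-DB notifications must only ever run on a single instance. The sender side enforces the same property below with a session-scoped Postgres advisory lock (key 500, taken from notificationSender); a hedged sketch of that pattern, assuming db.FrontendWriterDB is the shared sqlx handle and with an illustrative helper name:

```go
package notification

import (
	"context"

	"github.com/gobitfly/beaconchain/pkg/commons/db"
)

// runExclusively sketches the advisory-lock pattern used by notificationSender:
// whichever instance holds Postgres advisory lock 500 runs the critical
// section, so dispatching can never happen on two instances at once.
func runExclusively(ctx context.Context, critical func() error) error {
	// the lock is session-scoped, so acquire and release on one connection
	conn, err := db.FrontendWriterDB.Conn(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()

	// blocks until the lock is granted
	if _, err := conn.ExecContext(ctx, `SELECT pg_advisory_lock(500)`); err != nil {
		return err
	}
	defer func() {
		// release on the same session that acquired it
		_, _ = conn.ExecContext(ctx, `SELECT pg_advisory_unlock(500)`)
	}()

	return critical()
}
```

pg_try_advisory_lock would be the non-blocking variant, if skipping a cycle is preferable to waiting on the lock.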
- if utils.Config.Notifications.UserDBNotifications { - log.Infof("collecting user db notifications") - userNotifications, err := collectUserDbNotifications(epoch) - if err != nil { - log.Error(err, "error collection user db notifications", 0) - services.ReportStatus("notification-collector", "Error", nil) - time.Sleep(time.Minute * 2) - continue - } - - queueNotifications(userNotifications, db.FrontendWriterDB) - } - - log.InfoWithFields(log.Fields{"notifications": len(notifications), "duration": time.Since(start), "epoch": epoch}, "notifications completed") - - metrics.TaskDuration.WithLabelValues("service_notifications").Observe(time.Since(start).Seconds()) - } - - services.ReportStatus("notification-collector", "Running", nil) - time.Sleep(time.Second * 10) - } -} - -func notificationSender() { - for { - start := time.Now() - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) - - conn, err := db.FrontendWriterDB.Conn(ctx) - if err != nil { - log.Error(err, "error creating connection", 0) - cancel() - continue - } - - _, err = conn.ExecContext(ctx, `SELECT pg_advisory_lock(500)`) - if err != nil { - log.Error(err, "error getting advisory lock from db", 0) - - conn.Close() - if err != nil { - log.Error(err, "error returning connection to connection pool", 0) - } - cancel() - continue - } - - log.Infof("lock obtained") - err = dispatchNotifications(db.FrontendWriterDB) - if err != nil { - log.Error(err, "error dispatching notifications", 0) - } - - err = garbageCollectNotificationQueue(db.FrontendWriterDB) - if err != nil { - log.Error(err, "error garbage collecting notification queue", 0) - } - - log.InfoWithFields(log.Fields{"duration": time.Since(start)}, "notifications dispatched and garbage collected") - metrics.TaskDuration.WithLabelValues("service_notifications_sender").Observe(time.Since(start).Seconds()) - - unlocked := false - rows, err := conn.QueryContext(ctx, `SELECT pg_advisory_unlock(500)`) - if err != nil { - log.Error(err, "error executing advisory unlock", 0) - - err = conn.Close() - if err != nil { - log.WarnWithStackTrace(err, "error returning connection to connection pool", 0) - } - cancel() - continue - } - - for rows.Next() { - err = rows.Scan(&unlocked) - if err != nil { - log.Error(err, "error scanning advisory unlock result", 0) - } - } - - if !unlocked { - log.Error(nil, fmt.Errorf("error releasing advisory lock unlocked: %v", unlocked), 0) - } - - conn.Close() - if err != nil { - log.WarnWithStackTrace(err, "error returning connection to connection pool", 0) - } - cancel() - - services.ReportStatus("notification-sender", "Running", nil) - time.Sleep(time.Second * 30) - } -} - -func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types.Notification, error) { - notificationsByUserID := map[uint64]map[types.EventName][]types.Notification{} - start := time.Now() - var err error - var dbIsCoherent bool - - err = db.WriterDb.Get(&dbIsCoherent, ` - SELECT - NOT (array[false] && array_agg(is_coherent)) AS is_coherent - FROM ( - SELECT - epoch - 1 = lead(epoch) OVER (ORDER BY epoch DESC) AS is_coherent - FROM epochs - ORDER BY epoch DESC - LIMIT 2^14 - ) coherency`) - - if err != nil { - log.Error(err, "error doing epochs table coherence check", 0) - return nil, err - } - if !dbIsCoherent { - log.Error(nil, "epochs coherence check failed, aborting", 0) - return nil, fmt.Errorf("epochs coherence check failed, aborting") - } - - log.Infof("started collecting notifications") - - err = 
collectAttestationAndOfflineValidatorNotifications(notificationsByUserID, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_missed_attestation").Inc() - return nil, fmt.Errorf("error collecting validator_attestation_missed notifications: %v", err) - } - log.Infof("collecting attestation & offline notifications took: %v", time.Since(start)) - - err = collectBlockProposalNotifications(notificationsByUserID, 1, types.ValidatorExecutedProposalEventName, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_executed_block_proposal").Inc() - return nil, fmt.Errorf("error collecting validator_proposal_submitted notifications: %v", err) - } - log.Infof("collecting block proposal proposed notifications took: %v", time.Since(start)) - - err = collectBlockProposalNotifications(notificationsByUserID, 2, types.ValidatorMissedProposalEventName, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_missed_block_proposal").Inc() - return nil, fmt.Errorf("error collecting validator_proposal_missed notifications: %v", err) - } - log.Infof("collecting block proposal missed notifications took: %v", time.Since(start)) - - err = collectBlockProposalNotifications(notificationsByUserID, 3, types.ValidatorMissedProposalEventName, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_missed_orphaned_block_proposal").Inc() - return nil, fmt.Errorf("error collecting validator_proposal_missed notifications for orphaned slots: %w", err) - } - log.Infof("collecting block proposal missed notifications for orphaned slots took: %v", time.Since(start)) - - err = collectValidatorGotSlashedNotifications(notificationsByUserID, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_validator_got_slashed").Inc() - return nil, fmt.Errorf("error collecting validator_got_slashed notifications: %v", err) - } - log.Infof("collecting validator got slashed notifications took: %v", time.Since(start)) - - err = collectWithdrawalNotifications(notificationsByUserID, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_validator_withdrawal").Inc() - return nil, fmt.Errorf("error collecting withdrawal notifications: %v", err) - } - log.Infof("collecting withdrawal notifications took: %v", time.Since(start)) - - err = collectNetworkNotifications(notificationsByUserID, types.NetworkLivenessIncreasedEventName) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_network").Inc() - return nil, fmt.Errorf("error collecting network notifications: %v", err) - } - log.Infof("collecting network notifications took: %v", time.Since(start)) - - // Rocketpool - { - var ts int64 - err = db.ReaderDb.Get(&ts, `SELECT id FROM rocketpool_network_stats LIMIT 1;`) - - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - log.Infof("skipped the collecting of rocketpool notifications, because rocketpool_network_stats is empty") - } else { - metrics.Errors.WithLabelValues("notifications_collect_rocketpool_notifications").Inc() - return nil, fmt.Errorf("error collecting rocketpool notifications: %v", err) - } - } else { - err = collectRocketpoolComissionNotifications(notificationsByUserID, types.RocketpoolCommissionThresholdEventName) - if err != nil { - //nolint:misspell - metrics.Errors.WithLabelValues("notifications_collect_rocketpool_comission").Inc() - return nil, fmt.Errorf("error collecting rocketpool commission: %v", err) - } - log.Infof("collecting rocketpool commissions 
took: %v", time.Since(start)) - - err = collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID, types.RocketpoolNewClaimRoundStartedEventName) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_rocketpool_reward_claim").Inc() - return nil, fmt.Errorf("error collecting new rocketpool claim round: %v", err) - } - log.Infof("collecting rocketpool claim round took: %v", time.Since(start)) - - err = collectRocketpoolRPLCollateralNotifications(notificationsByUserID, types.RocketpoolCollateralMaxReached, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_rocketpool_rpl_collateral_max_reached").Inc() - return nil, fmt.Errorf("error collecting rocketpool max collateral: %v", err) - } - log.Infof("collecting rocketpool max collateral took: %v", time.Since(start)) - - err = collectRocketpoolRPLCollateralNotifications(notificationsByUserID, types.RocketpoolCollateralMinReached, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_rocketpool_rpl_collateral_min_reached").Inc() - return nil, fmt.Errorf("error collecting rocketpool min collateral: %v", err) - } - log.Infof("collecting rocketpool min collateral took: %v", time.Since(start)) - } - } - - err = collectSyncCommittee(notificationsByUserID, types.SyncCommitteeSoon, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_sync_committee").Inc() - return nil, fmt.Errorf("error collecting sync committee: %v", err) - } - log.Infof("collecting sync committee took: %v", time.Since(start)) - - return notificationsByUserID, nil -} - -func collectUserDbNotifications(epoch uint64) (map[uint64]map[types.EventName][]types.Notification, error) { - notificationsByUserID := map[uint64]map[types.EventName][]types.Notification{} - var err error - - // Monitoring (premium): machine offline - err = collectMonitoringMachineOffline(notificationsByUserID, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_monitoring_machine_offline").Inc() - return nil, fmt.Errorf("error collecting Eth client offline notifications: %v", err) - } - - // Monitoring (premium): disk full warnings - err = collectMonitoringMachineDiskAlmostFull(notificationsByUserID, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_monitoring_machine_disk_almost_full").Inc() - return nil, fmt.Errorf("error collecting Eth client disk full notifications: %v", err) - } - - // Monitoring (premium): cpu load - err = collectMonitoringMachineCPULoad(notificationsByUserID, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_monitoring_machine_cpu_load").Inc() - return nil, fmt.Errorf("error collecting Eth client cpu notifications: %v", err) - } - - // Monitoring (premium): ram - err = collectMonitoringMachineMemoryUsage(notificationsByUserID, epoch) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_monitoring_machine_memory_usage").Inc() - return nil, fmt.Errorf("error collecting Eth client memory notifications: %v", err) - } - - // New ETH clients - err = collectEthClientNotifications(notificationsByUserID, types.EthClientUpdateEventName) - if err != nil { - metrics.Errors.WithLabelValues("notifications_collect_eth_client").Inc() - return nil, fmt.Errorf("error collecting Eth client notifications: %v", err) - } - - //Tax Report - err = collectTaxReportNotificationNotifications(notificationsByUserID, types.TaxReportEventName) - if err != nil { - 
metrics.Errors.WithLabelValues("notifications_collect_tax_report").Inc() - return nil, fmt.Errorf("error collecting tax report notifications: %v", err) - } - - return notificationsByUserID, nil -} - -func queueNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) { - subByEpoch := map[uint64][]uint64{} - - // prevent multiple events being sent with the same subscription id - for user, notifications := range notificationsByUserID { - for eventType, events := range notifications { - filteredEvents := make([]types.Notification, 0) - - for _, ev := range events { - isDuplicate := false - for _, fe := range filteredEvents { - if fe.GetSubscriptionID() == ev.GetSubscriptionID() { - isDuplicate = true - } - } - if !isDuplicate { - filteredEvents = append(filteredEvents, ev) - } - } - notificationsByUserID[user][eventType] = filteredEvents - } - } - - err := queueEmailNotifications(notificationsByUserID, useDB) - if err != nil { - log.Error(err, "error queuing email notifications", 0) - } - - err = queuePushNotification(notificationsByUserID, useDB) - if err != nil { - log.Error(err, "error queuing push notifications", 0) - } - - err = queueWebhookNotifications(notificationsByUserID, useDB) - if err != nil { - log.Error(err, "error queuing webhook notifications", 0) - } - - for _, events := range notificationsByUserID { - for _, notifications := range events { - for _, n := range notifications { - e := n.GetEpoch() - if _, exists := subByEpoch[e]; !exists { - subByEpoch[e] = []uint64{n.GetSubscriptionID()} - } else { - subByEpoch[e] = append(subByEpoch[e], n.GetSubscriptionID()) - } - } - } - } - for epoch, subIDs := range subByEpoch { - // update that we've queued the subscription (last sent rather means last queued) - err := db.UpdateSubscriptionsLastSent(subIDs, time.Now(), epoch, useDB) - if err != nil { - log.Error(err, "error updating sent-time of sent notifications", 0) - metrics.Errors.WithLabelValues("notifications_updating_sent_time").Inc() - } - } - // update internal state of subscriptions - stateToSub := make(map[string]map[uint64]bool, 0) - - for _, notificationMap := range notificationsByUserID { // _ => user - for _, notifications := range notificationMap { // _ => eventname - for _, notification := range notifications { // _ => index - state := notification.GetLatestState() - if state == "" { - continue - } - if _, exists := stateToSub[state]; !exists { - stateToSub[state] = make(map[uint64]bool, 0) - } - if _, exists := stateToSub[state][notification.GetSubscriptionID()]; !exists { - stateToSub[state][notification.GetSubscriptionID()] = true - } - } - } - } - - for state, subs := range stateToSub { - subArray := make([]int64, 0) - for subID := range subs { - subArray = append(subArray, int64(subID)) - } - _, err := db.FrontendWriterDB.Exec(`UPDATE users_subscriptions SET internal_state = $1 WHERE id = ANY($2)`, state, pq.Int64Array(subArray)) - if err != nil { - log.Error(err, "error updating internal state of notifications", 0) - } - } -} - -func dispatchNotifications(useDB *sqlx.DB) error { - err := sendEmailNotifications(useDB) - if err != nil { - return fmt.Errorf("error sending email notifications, err: %w", err) - } - - err = sendPushNotifications(useDB) - if err != nil { - return fmt.Errorf("error sending push notifications, err: %w", err) - } - - err = sendWebhookNotifications(useDB) - if err != nil { - return fmt.Errorf("error sending webhook notifications, err: %w", err) - } - - err = sendDiscordNotifications(useDB) - 
if err != nil { - return fmt.Errorf("error sending webhook discord notifications, err: %w", err) - } - - return nil -} - -// garbageCollectNotificationQueue deletes entries from the notification queue that have been processed -func garbageCollectNotificationQueue(useDB *sqlx.DB) error { - rows, err := useDB.Exec(`DELETE FROM notification_queue WHERE (sent < now() - INTERVAL '30 minutes') OR (created < now() - INTERVAL '1 hour')`) - if err != nil { - return fmt.Errorf("error deleting from notification_queue %w", err) - } - - rowsAffected, _ := rows.RowsAffected() - - log.Infof("deleted %v rows from the notification_queue", rowsAffected) - - return nil -} - -func getNetwork() string { - domainParts := strings.Split(utils.Config.Frontend.SiteDomain, ".") - if len(domainParts) >= 3 { - return fmt.Sprintf("%s: ", cases.Title(language.English).String(domainParts[0])) - } - return "" -} - -func queuePushNotification(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { - userIDs := []uint64{} - for userID := range notificationsByUserID { - userIDs = append(userIDs, userID) - } - - tokensByUserID, err := GetUserPushTokenByIds(userIDs) - if err != nil { - metrics.Errors.WithLabelValues("notifications_send_push_notifications").Inc() - return fmt.Errorf("error when sending push-notifications: could not get tokens: %w", err) - } - - for userID, userNotifications := range notificationsByUserID { - userTokens, exists := tokensByUserID[userID] - if !exists { - continue - } - - go func(userTokens []string, userNotifications map[types.EventName][]types.Notification) { - var batch []*messaging.Message - for event, ns := range userNotifications { - for _, n := range ns { - added := false - for _, userToken := range userTokens { - notification := new(messaging.Notification) - notification.Title = fmt.Sprintf("%s%s", getNetwork(), n.GetTitle()) - notification.Body = n.GetInfo(false) - if notification.Body == "" { - continue - } - added = true - - message := new(messaging.Message) - message.Notification = notification - message.Token = userToken - - message.APNS = new(messaging.APNSConfig) - message.APNS.Payload = new(messaging.APNSPayload) - message.APNS.Payload.Aps = new(messaging.Aps) - message.APNS.Payload.Aps.Sound = "default" - - batch = append(batch, message) - } - if added { - metrics.NotificationsQueued.WithLabelValues("push", string(event)).Inc() - } - } - } - - transitPushContent := types.TransitPushContent{ - Messages: batch, - } - - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES ($1, 'push', $2)`, time.Now(), transitPushContent) - if err != nil { - log.Error(err, "error writing transit push notification to db", 0) - return - } - }(userTokens, userNotifications) - } - return nil -} - -func sendPushNotifications(useDB *sqlx.DB) error { - var notificationQueueItem []types.TransitPush - - err := useDB.Select(¬ificationQueueItem, `SELECT - id, - created, - sent, - channel, - content - FROM notification_queue WHERE sent IS null AND channel = 'push' ORDER BY created ASC`) - if err != nil { - return fmt.Errorf("error querying notification queue, err: %w", err) - } - - log.Infof("processing %v push notifications", len(notificationQueueItem)) - - batchSize := 500 - for _, n := range notificationQueueItem { - for b := 0; b < len(n.Content.Messages); b += batchSize { - start := b - end := b + batchSize - if len(n.Content.Messages) < end { - end = len(n.Content.Messages) - } - - err = 
SendPushBatch(n.Content.Messages[start:end]) - if err != nil { - metrics.Errors.WithLabelValues("notifications_send_push_batch").Inc() - log.Error(err, "error sending firebase batch job", 0) - } else { - metrics.NotificationsSent.WithLabelValues("push", "200").Add(float64(len(n.Content.Messages))) - } - - _, err = useDB.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) - if err != nil { - return fmt.Errorf("error updating sent status for push notification with id: %v, err: %w", n.Id, err) - } - } - } - return nil -} - -func queueEmailNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { - userIDs := []uint64{} - for userID := range notificationsByUserID { - userIDs = append(userIDs, userID) - } - emailsByUserID, err := GetUserEmailsByIds(userIDs) - if err != nil { - metrics.Errors.WithLabelValues("notifications_get_user_mail_by_id").Inc() - return fmt.Errorf("error when sending email-notifications: could not get emails: %w", err) - } - - for userID, userNotifications := range notificationsByUserID { - userEmail, exists := emailsByUserID[userID] - if !exists { - log.WarnWithStackTrace(nil, "email notification skipping user", 0, log.Fields{"user_id": userID}) - // we don't need this metrics as users can now deactivate email notifications and it would increment the counter - // metrics.Errors.WithLabelValues("notifications_mail_not_found").Inc() - continue - } - go func(userEmail string, userNotifications map[types.EventName][]types.Notification) { - attachments := []types.EmailAttachment{} - - var msg types.Email - - if utils.Config.Chain.Name != "mainnet" { - //nolint:gosec // this is a static string - msg.Body += template.HTML(fmt.Sprintf("Notice: This email contains notifications for the %s network!
", utils.Config.Chain.Name)) - } - - subject := "" - notificationTitlesMap := make(map[string]bool) - notificationTitles := []string{} - for event, ns := range userNotifications { - if len(msg.Body) > 0 { - msg.Body += "
" - } - event_title := event - if event == types.TaxReportEventName { - event_title = "income_history" - } - //nolint:gosec // this is a static string - msg.Body += template.HTML(fmt.Sprintf("%s
====

", types.EventLabel[event_title])) - unsubURL := "https://" + utils.Config.Frontend.SiteDomain + "/notifications/unsubscribe" - for i, n := range ns { - // Find all unique notification titles for the subject - title := n.GetTitle() - if _, ok := notificationTitlesMap[title]; !ok { - notificationTitlesMap[title] = true - notificationTitles = append(notificationTitles, title) - } - - unsubHash := n.GetUnsubscribeHash() - if unsubHash == "" { - id := n.GetSubscriptionID() - - tx, err := db.FrontendWriterDB.Beginx() - if err != nil { - log.Error(err, "error starting transaction", 0) - } - var sub types.Subscription - err = tx.Get(&sub, ` - SELECT - id, - user_id, - event_name, - event_filter, - last_sent_ts, - last_sent_epoch, - created_ts, - created_epoch, - event_threshold - FROM users_subscriptions - WHERE id = $1 - `, id) - if err != nil { - log.Error(err, "error getting user subscription by subscription id", 0) - err = tx.Rollback() - if err != nil { - log.Error(err, "error rolling back transaction", 0) - } - } - - raw := fmt.Sprintf("%v%v%v%v", sub.ID, sub.UserID, sub.EventName, sub.CreatedTime) - digest := sha256.Sum256([]byte(raw)) - - _, err = tx.Exec("UPDATE users_subscriptions set unsubscribe_hash = $1 WHERE id = $2", digest[:], id) - if err != nil { - log.Error(err, "error updating users subscriptions table with unsubscribe hash", 0) - err = tx.Rollback() - if err != nil { - log.Error(err, "error rolling back transaction", 0) - } - } - - err = tx.Commit() - if err != nil { - log.Error(err, "error committing transaction to update users subscriptions with an unsubscribe hash", 0) - err = tx.Rollback() - if err != nil { - log.Error(err, "error rolling back transaction", 0) - } - } - - unsubHash = hex.EncodeToString(digest[:]) - } - if i == 0 { - unsubURL += "?hash=" + html.EscapeString(unsubHash) - } else { - unsubURL += "&hash=" + html.EscapeString(unsubHash) - } - //nolint:gosec // this is a static string - msg.UnsubURL = template.HTML(fmt.Sprintf(`Unsubscribe`, unsubURL)) - - if event != types.SyncCommitteeSoon { - // SyncCommitteeSoon notifications are summed up in getEventInfo for all validators - //nolint:gosec // this is a static string - msg.Body += template.HTML(fmt.Sprintf("%s
", n.GetInfo(true))) - } - - if att := n.GetEmailAttachment(); att != nil { - attachments = append(attachments, *att) - } - - metrics.NotificationsQueued.WithLabelValues("email", string(event)).Inc() - } - - eventInfo := getEventInfo(event, ns) - if eventInfo != "" { - //nolint:gosec // this is a static string - msg.Body += template.HTML(fmt.Sprintf("%s
", eventInfo)) - } - } - - if len(notificationTitles) > 2 { - subject = fmt.Sprintf("%s: %s,... and %d other notifications", utils.Config.Frontend.SiteDomain, notificationTitles[0], len(notificationTitles)-1) - } else if len(notificationTitles) == 2 { - subject = fmt.Sprintf("%s: %s and %s", utils.Config.Frontend.SiteDomain, notificationTitles[0], notificationTitles[1]) - } else if len(notificationTitles) == 1 { - subject = fmt.Sprintf("%s: %s", utils.Config.Frontend.SiteDomain, notificationTitles[0]) - } - - // msg.Body += template.HTML(fmt.Sprintf("
Best regards
\n%s", utils.Config.Frontend.SiteDomain)) - //nolint:gosec // this is a static string - msg.SubscriptionManageURL = template.HTML(fmt.Sprintf(`Manage`, "https://"+utils.Config.Frontend.SiteDomain+"/user/notifications")) - - transitEmailContent := types.TransitEmailContent{ - Address: userEmail, - Subject: subject, - Email: msg, - Attachments: attachments, - } - - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES ($1, 'email', $2)`, time.Now(), transitEmailContent) - if err != nil { - log.Error(err, "error writing transit email to db", 0) - } - }(userEmail, userNotifications) - } - return nil -} - -func sendEmailNotifications(useDb *sqlx.DB) error { - var notificationQueueItem []types.TransitEmail - - err := useDb.Select(¬ificationQueueItem, `SELECT - id, - created, - sent, - channel, - content - FROM notification_queue WHERE sent IS null AND channel = 'email' ORDER BY created ASC`) - if err != nil { - return fmt.Errorf("error querying notification queue, err: %w", err) - } - - log.Infof("processing %v email notifications", len(notificationQueueItem)) - - for _, n := range notificationQueueItem { - err = mail.SendMailRateLimited(n.Content.Address, n.Content.Subject, n.Content.Email, n.Content.Attachments) - if err != nil { - if !strings.Contains(err.Error(), "rate limit has been exceeded") { - metrics.Errors.WithLabelValues("notifications_send_email").Inc() - log.Error(err, "error sending email notification", 0) - } else { - metrics.NotificationsSent.WithLabelValues("email", "200").Inc() - } - } - _, err = useDb.Exec(`UPDATE notification_queue set sent = now() where id = $1`, n.Id) - if err != nil { - return fmt.Errorf("error updating sent status for email notification with id: %v, err: %w", n.Id, err) - } - } - return nil -} - -func queueWebhookNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { - for userID, userNotifications := range notificationsByUserID { - var webhooks []types.UserWebhook - err := useDB.Select(&webhooks, ` - SELECT - id, - user_id, - url, - retries, - event_names, - destination - FROM - users_webhooks - WHERE - user_id = $1 AND user_id NOT IN (SELECT user_id from users_notification_channels WHERE active = false and channel = $2) - `, userID, types.WebhookNotificationChannel) - // continue if the user does not have a webhook - if err == sql.ErrNoRows { - continue - } - if err != nil { - return fmt.Errorf("error quering users_webhooks, err: %w", err) - } - // webhook => [] notifications - discordNotifMap := make(map[uint64][]types.TransitDiscordContent) - notifs := make([]types.TransitWebhook, 0) - // send the notifications to each registered webhook - for _, w := range webhooks { - for event, notifications := range userNotifications { - eventSubscribed := false - // check if the webhook is subscribed to the type of event - for _, w := range w.EventNames { - if w == string(event) { - eventSubscribed = true - break - } - } - if eventSubscribed { - if len(notifications) > 0 { - // reset Retries - if w.Retries > 5 && w.LastSent.Valid && w.LastSent.Time.Add(time.Hour).Before(time.Now()) { - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = 0 WHERE id = $1;`, w.ID) - if err != nil { - log.Error(err, "error updating users_webhooks table; setting retries to zero", 0) - continue - } - } else if w.Retries > 5 && !w.LastSent.Valid { - log.Warnf("webhook '%v' has more than 5 retries and does not have a valid last_sent timestamp", w.Url) - continue - } - - if w.Retries >= 5 { - 
// early return - continue - } - } - - for _, n := range notifications { - if w.Destination.Valid && w.Destination.String == "webhook_discord" { - if _, exists := discordNotifMap[w.ID]; !exists { - discordNotifMap[w.ID] = make([]types.TransitDiscordContent, 0) - } - l_notifs := len(discordNotifMap[w.ID]) - if l_notifs == 0 || len(discordNotifMap[w.ID][l_notifs-1].DiscordRequest.Embeds) >= 10 { - discordNotifMap[w.ID] = append(discordNotifMap[w.ID], types.TransitDiscordContent{ - Webhook: w, - DiscordRequest: types.DiscordReq{ - Username: utils.Config.Frontend.SiteDomain, - }, - }) - l_notifs++ - } - - fields := []types.DiscordEmbedField{ - { - Name: "Epoch", - Value: fmt.Sprintf("[%[1]v](https://%[2]s/%[1]v)", n.GetEpoch(), utils.Config.Frontend.SiteDomain+"/epoch"), - Inline: false, - }, - } - - if strings.HasPrefix(string(n.GetEventName()), "monitoring") || n.GetEventName() == types.EthClientUpdateEventName || n.GetEventName() == types.RocketpoolCollateralMaxReached || n.GetEventName() == types.RocketpoolCollateralMinReached { - fields = append(fields, - types.DiscordEmbedField{ - Name: "Target", - Value: fmt.Sprintf("%v", n.GetEventFilter()), - Inline: false, - }) - } - discordNotifMap[w.ID][l_notifs-1].DiscordRequest.Embeds = append(discordNotifMap[w.ID][l_notifs-1].DiscordRequest.Embeds, types.DiscordEmbed{ - Type: "rich", - Color: "16745472", - Description: n.GetInfoMarkdown(), - Title: n.GetTitle(), - Fields: fields, - }) - } else { - notifs = append(notifs, types.TransitWebhook{ - Channel: w.Destination.String, - Content: types.TransitWebhookContent{ - Webhook: w, - Event: types.WebhookEvent{ - Network: utils.GetNetwork(), - Name: string(n.GetEventName()), - Title: n.GetTitle(), - Description: n.GetInfo(false), - Epoch: n.GetEpoch(), - Target: n.GetEventFilter(), - }, - }, - }) - } - } - } - } - } - // process notifs - for _, n := range notifs { - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), $1, $2);`, n.Channel, n.Content) - if err != nil { - log.Error(err, "error inserting into webhooks_queue", 0) - } else { - metrics.NotificationsQueued.WithLabelValues(n.Channel, n.Content.Event.Name).Inc() - } - } - // process discord notifs - for _, dNotifs := range discordNotifMap { - for _, n := range dNotifs { - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), 'webhook_discord', $1);`, n) - if err != nil { - log.Error(err, "error inserting into webhooks_queue (discord)", 0) - continue - } else { - metrics.NotificationsQueued.WithLabelValues("webhook_discord", "multi").Inc() - } - } - } - } - return nil -} - -func sendWebhookNotifications(useDB *sqlx.DB) error { - var notificationQueueItem []types.TransitWebhook - - err := useDB.Select(¬ificationQueueItem, `SELECT - id, - created, - sent, - channel, - content - FROM notification_queue WHERE sent IS null AND channel = 'webhook' ORDER BY created ASC`) +// Used for isolated testing +func GetNotificationsForEpoch(pubkeyCachePath string, epoch uint64) (types.NotificationsPerUserId, error) { + mc, err := modules.GetModuleContext() if err != nil { - return fmt.Errorf("error querying notification queue, err: %w", err) - } - client := &http.Client{Timeout: time.Second * 30} - - log.Infof("processing %v webhook notifications", len(notificationQueueItem)) - - for _, n := range notificationQueueItem { - // do not retry after 5 attempts - if n.Content.Webhook.Retries > 5 { - _, err := db.FrontendWriterDB.Exec(`DELETE FROM notification_queue WHERE id = $1`, 
n.Id) - if err != nil { - return fmt.Errorf("error deleting from notification queue: %w", err) - } - continue - } - - reqBody := new(bytes.Buffer) - - err := json.NewEncoder(reqBody).Encode(n.Content) - if err != nil { - log.Error(err, "error marshalling webhook event", 0) - } - - _, err = url.Parse(n.Content.Webhook.Url) - if err != nil { - _, err := db.FrontendWriterDB.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) - if err != nil { - return fmt.Errorf("error deleting from notification queue: %w", err) - } - continue - } - - go func(n types.TransitWebhook) { - if n.Content.Webhook.Retries > 0 { - time.Sleep(time.Duration(n.Content.Webhook.Retries) * time.Second) - } - resp, err := client.Post(n.Content.Webhook.Url, "application/json", reqBody) - if err != nil { - log.Error(err, "error sending webhook request", 0) - return - } else { - metrics.NotificationsSent.WithLabelValues("webhook", resp.Status).Inc() - } - defer resp.Body.Close() - - _, err = useDB.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) - if err != nil { - log.Error(err, "error updating notification_queue table", 0) - return - } - - if resp != nil && resp.StatusCode < 400 { - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = 0, last_sent = now() WHERE id = $1;`, n.Content.Webhook.ID) - if err != nil { - log.Error(err, "error updating users_webhooks table", 0) - return - } - } else { - var errResp types.ErrorResponse - - if resp != nil { - b, err := io.ReadAll(resp.Body) - if err != nil { - log.Error(err, "error reading body", 0) - } - - errResp.Status = resp.Status - errResp.Body = string(b) - } - - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = retries + 1, last_sent = now(), request = $2, response = $3 WHERE id = $1;`, n.Content.Webhook.ID, n.Content, errResp) - if err != nil { - log.Error(err, "error updating users_webhooks table", 0) - return - } - } - }(n) + log.Fatal(err, "error getting module context", 0) } - return nil -} - -func sendDiscordNotifications(useDB *sqlx.DB) error { - var notificationQueueItem []types.TransitDiscord - err := useDB.Select(¬ificationQueueItem, `SELECT - id, - created, - sent, - channel, - content - FROM notification_queue WHERE sent IS null AND channel = 'webhook_discord' ORDER BY created ASC`) + err = initPubkeyCache(pubkeyCachePath) if err != nil { - return fmt.Errorf("error querying notification queue, err: %w", err) - } - client := &http.Client{Timeout: time.Second * 30} - - log.Infof("processing %v discord webhook notifications", len(notificationQueueItem)) - webhookMap := make(map[uint64]types.UserWebhook) - - notifMap := make(map[uint64][]types.TransitDiscord) - // generate webhook id => discord req - // while mapping. 
aggregate embeds while doing so, up to 10 per req can be sent - for _, n := range notificationQueueItem { - // purge the event from existence if the retry counter is over 5 - if n.Content.Webhook.Retries > 5 { - _, err = db.FrontendWriterDB.Exec(`DELETE FROM notification_queue where id = $1`, n.Id) - if err != nil { - log.Warnf("failed to delete notification from queue: %v", err) - } - continue - } - if _, exists := webhookMap[n.Content.Webhook.ID]; !exists { - webhookMap[n.Content.Webhook.ID] = n.Content.Webhook - } - if _, exists := notifMap[n.Content.Webhook.ID]; !exists { - notifMap[n.Content.Webhook.ID] = make([]types.TransitDiscord, 0) - } - notifMap[n.Content.Webhook.ID] = append(notifMap[n.Content.Webhook.ID], n) - } - for _, webhook := range webhookMap { - go func(webhook types.UserWebhook, reqs []types.TransitDiscord) { - defer func() { - // update retries counters in db based on end result - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = $1, last_sent = now() WHERE id = $2;`, webhook.Retries, webhook.ID) - if err != nil { - log.Warnf("failed to update retries counter to %v for webhook %v: %v", webhook.Retries, webhook.ID, err) - } - - // mark notifcations as sent in db - ids := make([]uint64, 0) - for _, req := range reqs { - ids = append(ids, req.Id) - } - _, err = db.FrontendWriterDB.Exec(`UPDATE notification_queue SET sent = now() where id = ANY($1)`, pq.Array(ids)) - if err != nil { - log.Warnf("failed to update sent for notifcations in queue: %v", err) - } - }() - - _, err = url.Parse(webhook.Url) - if err != nil { - log.Error(err, "error parsing url", 0, log.Fields{"webhook_id": webhook.ID}) - return - } - - for i := 0; i < len(reqs); i++ { - if webhook.Retries > 5 { - break // stop - } - // sleep between retries - time.Sleep(time.Duration(webhook.Retries) * time.Second) - - reqBody := new(bytes.Buffer) - err := json.NewEncoder(reqBody).Encode(reqs[i].Content.DiscordRequest) - if err != nil { - log.Error(err, "error marshalling discord webhook event", 0) - continue // skip - } - - resp, err := client.Post(webhook.Url, "application/json", reqBody) - if err != nil { - log.Error(err, "error sending discord webhook request", 0) - } else { - metrics.NotificationsSent.WithLabelValues("webhook_discord", resp.Status).Inc() - } - if resp != nil && resp.StatusCode < 400 { - webhook.Retries = 0 - } else { - webhook.Retries++ - var errResp types.ErrorResponse - - if resp != nil { - b, err := io.ReadAll(resp.Body) - if err != nil { - log.Error(err, "error reading body", 0) - } else { - errResp.Body = string(b) - } - errResp.Status = resp.Status - resp.Body.Close() - } - - if strings.Contains(errResp.Body, "You are being rate limited") { - log.Warnf("could not push to discord webhook due to rate limit. %v url: %v", errResp.Body, webhook.Url) - } else { - log.Error(nil, "error pushing discord webhook", 0, map[string]interface{}{"errResp.Body": errResp.Body, "webhook.Url": webhook.Url}) - } - _, err = useDB.Exec(`UPDATE users_webhooks SET request = $2, response = $3 WHERE id = $1;`, webhook.ID, reqs[i].Content.DiscordRequest, errResp) - if err != nil { - log.Error(err, "error storing failure data in users_webhooks table", 0) - } - - i-- // retry, IMPORTANT to be at the END of the ELSE, otherwise the wrong index will be used in the commands above! 
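As the comment above notes, Discord accepts at most 10 embeds per webhook request, which is why queueWebhookNotifications starts a new TransitDiscordContent whenever the current request is full. The same batching rule in isolation, with an illustrative helper name:

```go
package notification

import "github.com/gobitfly/beaconchain/pkg/commons/types"

// chunkEmbeds isolates the batching rule above: Discord accepts at most 10
// embeds per webhook request, so a flat list of embeds is cut into requests
// of at most ten.
func chunkEmbeds(embeds []types.DiscordEmbed, username string) []types.DiscordReq {
	reqs := make([]types.DiscordReq, 0, (len(embeds)+9)/10)
	for start := 0; start < len(embeds); start += 10 {
		end := start + 10
		if end > len(embeds) {
			end = len(embeds)
		}
		reqs = append(reqs, types.DiscordReq{
			Username: username,
			Embeds:   embeds[start:end],
		})
	}
	return reqs
}
```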
- } - } - }(webhook, notifMap[webhook.ID]) + log.Fatal(err, "error initializing pubkey cache path for notifications", 0) } - - return nil -} - -func getUrlPart(validatorIndex uint64) string { - return fmt.Sprintf(` For more information visit: https://%s/validator/%v.`, utils.Config.Frontend.SiteDomain, validatorIndex, utils.Config.Frontend.SiteDomain, validatorIndex) + return collectNotifications(epoch, mc) } -func collectBlockProposalNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, status uint64, eventName types.EventName, epoch uint64) error { - type dbResult struct { - Proposer uint64 `db:"proposer"` - Status uint64 `db:"status"` - Slot uint64 `db:"slot"` - ExecBlock uint64 `db:"exec_block_number"` - ExecRewardETH float64 - } - - _, subMap, err := GetSubsForEventFilter(eventName) - if err != nil { - return fmt.Errorf("error getting subscriptions for (missed) block proposals %w", err) - } - - events := make([]dbResult, 0) - err = db.WriterDb.Select(&events, "SELECT slot, proposer, status, COALESCE(exec_block_number, 0) AS exec_block_number FROM blocks WHERE epoch = $1 AND status = $2", epoch, fmt.Sprintf("%d", status)) +// Used for isolated testing +func GetUserNotificationsForEpoch(pubkeyCachePath string, epoch uint64) (types.NotificationsPerUserId, error) { + err := initPubkeyCache(pubkeyCachePath) if err != nil { - return fmt.Errorf("error retrieving slots for epoch %v: %w", epoch, err) + log.Fatal(err, "error initializing pubkey cache path for notifications", 0) } - - log.Infof("retrieved %v events", len(events)) - - // Get Execution reward for proposed blocks - if status == 1 { // if proposed - var blockList = []uint64{} - for _, data := range events { - if data.ExecBlock != 0 { - blockList = append(blockList, data.ExecBlock) - } - } - - if len(blockList) > 0 { - blocks, err := db.BigtableClient.GetBlocksIndexedMultiple(blockList, 10000) - if err != nil { - log.Error(err, "error loading blocks from bigtable", 0, log.Fields{"blockList": blockList}) - return err - } - var execBlockNrToExecBlockMap = map[uint64]*types.Eth1BlockIndexed{} - for _, block := range blocks { - execBlockNrToExecBlockMap[block.GetNumber()] = block - } - relaysData, err := db.GetRelayDataForIndexedBlocks(blocks) - if err != nil { - return err - } - - for j := 0; j < len(events); j++ { - execData, found := execBlockNrToExecBlockMap[events[j].ExecBlock] - if found { - reward := utils.Eth1TotalReward(execData) - relayData, found := relaysData[common.BytesToHash(execData.Hash)] - if found { - reward = relayData.MevBribe.BigInt() - } - events[j].ExecRewardETH = float64(int64(eth.WeiToEth(reward)*100000)) / 100000 - } - } - } - } - - for _, event := range events { - pubkey, err := GetPubkeyForIndex(event.Proposer) - if err != nil { - log.Error(err, "error retrieving pubkey for validator", 0, map[string]interface{}{"validator": event.Proposer}) - continue - } - subscribers, ok := subMap[hex.EncodeToString(pubkey)] - if !ok { - continue - } - for _, sub := range subscribers { - if sub.UserID == nil || sub.ID == nil { - return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) - } - if sub.LastEpoch != nil { - lastSentEpoch := *sub.LastEpoch - if lastSentEpoch >= epoch || epoch < sub.CreatedEpoch { - continue - } - } - log.Infof("creating %v notification for validator %v in epoch %v", eventName, event.Proposer, epoch) - n := &validatorProposalNotification{ - SubscriptionID: *sub.ID, - ValidatorIndex: event.Proposer, - Epoch: 
epoch, - Status: event.Status, - EventName: eventName, - Reward: event.ExecRewardETH, - EventFilter: hex.EncodeToString(pubkey), - Slot: event.Slot, - } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - } - - return nil -} - -type validatorProposalNotification struct { - SubscriptionID uint64 - ValidatorIndex uint64 - ValidatorPublicKey string - Epoch uint64 - Slot uint64 - Status uint64 // * Can be 0 = scheduled, 1 executed, 2 missed */ - EventName types.EventName - EventFilter string - Reward float64 - UnsubscribeHash sql.NullString -} - -func (n *validatorProposalNotification) GetLatestState() string { - return "" -} - -func (n *validatorProposalNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *validatorProposalNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorProposalNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorProposalNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *validatorProposalNotification) GetEventName() types.EventName { - return n.EventName -} - -func (n *validatorProposalNotification) GetInfo(includeUrl bool) string { - var generalPart, suffix string - vali := strconv.FormatUint(n.ValidatorIndex, 10) - slot := strconv.FormatUint(n.Slot, 10) - if includeUrl { - vali = fmt.Sprintf(`%[2]v`, utils.Config.Frontend.SiteDomain, n.ValidatorIndex) - slot = fmt.Sprintf(`%[2]v`, utils.Config.Frontend.SiteDomain, n.Slot) - suffix = getUrlPart(n.ValidatorIndex) - } - switch n.Status { - case 0: - generalPart = fmt.Sprintf(`New scheduled block proposal at slot %s for Validator %s.`, slot, vali) - case 1: - generalPart = fmt.Sprintf(`Validator %s proposed block at slot %s with %v %v execution reward.`, vali, slot, n.Reward, utils.Config.Frontend.ElCurrency) - case 2: - generalPart = fmt.Sprintf(`Validator %s missed a block proposal at slot %s.`, vali, slot) - } - return generalPart + suffix -} - -func (n *validatorProposalNotification) GetTitle() string { - switch n.Status { - case 0: - return "Block Proposal Scheduled" - case 1: - return "New Block Proposal" - case 2: - return "Block Proposal Missed" - } - return "-" -} - -func (n *validatorProposalNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *validatorProposalNotification) GetInfoMarkdown() string { - var generalPart = "" - switch n.Status { - case 0: - generalPart = fmt.Sprintf(`New scheduled block proposal at slot [%[3]v](https://%[1]v/slot/%[3]v) for Validator [%[2]v](https://%[1]v/validator/%[2]v).`, utils.Config.Frontend.SiteDomain, n.ValidatorIndex, n.Slot) - case 1: - generalPart = fmt.Sprintf(`Validator [%[2]v](https://%[1]v/validator/%[2]v) proposed a new block at slot [%[3]v](https://%[1]v/slot/%[3]v) with %[4]v %[5]v execution reward.`, utils.Config.Frontend.SiteDomain, n.ValidatorIndex, n.Slot, n.Reward, utils.Config.Frontend.ElCurrency) - case 2: - generalPart = fmt.Sprintf(`Validator [%[2]v](https://%[1]v/validator/%[2]v) missed 
a block proposal at slot [%[3]v](https://%[1]v/slot/%[3]v).`, utils.Config.Frontend.SiteDomain, n.ValidatorIndex, n.Slot) - } - - return generalPart -} - -func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { - _, subMap, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName) - if err != nil { - return fmt.Errorf("error getting subscriptions for missted attestations %w", err) - } - - type dbResult struct { - ValidatorIndex uint64 `db:"validatorindex"` - Epoch uint64 `db:"epoch"` - Status uint64 `db:"status"` - EventFilter []byte `db:"pubkey"` - } - - // get attestations for all validators for the last 4 epochs - - validators, err := db.GetValidatorIndices() - if err != nil { - return err - } - - participationPerEpoch, err := db.GetValidatorAttestationHistoryForNotifications(epoch-3, epoch) - if err != nil { - return fmt.Errorf("error getting validator attestations from db %w", err) - } - - log.Infof("retrieved validator attestation history data") - - events := make([]dbResult, 0) - - epochAttested := make(map[types.Epoch]uint64) - epochTotal := make(map[types.Epoch]uint64) - for currentEpoch, participation := range participationPerEpoch { - for validatorIndex, participated := range participation { - epochTotal[currentEpoch] = epochTotal[currentEpoch] + 1 // count the total attestations for each epoch - - if !participated { - pubkey, err := GetPubkeyForIndex(uint64(validatorIndex)) - if err == nil { - if currentEpoch != types.Epoch(epoch) || subMap[hex.EncodeToString(pubkey)] == nil { - continue - } - - events = append(events, dbResult{ - ValidatorIndex: uint64(validatorIndex), - Epoch: uint64(currentEpoch), - Status: 0, - EventFilter: pubkey, - }) - } else { - log.Error(err, "error retrieving pubkey for validator", 0, map[string]interface{}{"validatorIndex": validatorIndex}) - } - } else { - epochAttested[currentEpoch] = epochAttested[currentEpoch] + 1 // count the total attested attestation for each epoch (exlude missing) - } - } - } - - // process missed attestation events - for _, event := range events { - subscribers, ok := subMap[hex.EncodeToString(event.EventFilter)] - if !ok { - return fmt.Errorf("error event returned that does not exist: %x", event.EventFilter) - } - for _, sub := range subscribers { - if sub.UserID == nil || sub.ID == nil { - return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) - } - if sub.LastEpoch != nil { - lastSentEpoch := *sub.LastEpoch - if lastSentEpoch >= event.Epoch || event.Epoch < sub.CreatedEpoch { - // log.Infof("skipping creating %v for validator %v (lastSentEpoch: %v, createdEpoch: %v)", types.ValidatorMissedAttestationEventName, event.ValidatorIndex, lastSentEpoch, sub.CreatedEpoch) - continue - } - } - - log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorMissedAttestationEventName, event.ValidatorIndex, event.Epoch) - n := &validatorAttestationNotification{ - SubscriptionID: *sub.ID, - ValidatorIndex: event.ValidatorIndex, - Epoch: event.Epoch, - Status: event.Status, - EventName: types.ValidatorMissedAttestationEventName, - EventFilter: hex.EncodeToString(event.EventFilter), - } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - 
notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - isDuplicate := false - for _, userEvent := range notificationsByUserID[*sub.UserID][n.GetEventName()] { - if userEvent.GetSubscriptionID() == n.SubscriptionID { - isDuplicate = true - } - } - if isDuplicate { - continue - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - } - - // detect online & offline validators - type indexPubkeyPair struct { - Index uint64 - Pubkey []byte - } - var offlineValidators []*indexPubkeyPair - var onlineValidators []*indexPubkeyPair - - epochNMinus1 := types.Epoch(epoch - 1) - epochNMinus2 := types.Epoch(epoch - 2) - epochNMinus3 := types.Epoch(epoch - 3) - - if epochTotal[types.Epoch(epoch)] == 0 { - return fmt.Errorf("consistency error, did not retrieve attestation data for epoch %v", epoch) - } - if epochTotal[epochNMinus1] == 0 { - return fmt.Errorf("consistency error, did not retrieve attestation data for epoch %v", epochNMinus1) - } - if epochTotal[epochNMinus2] == 0 { - return fmt.Errorf("consistency error, did not retrieve attestation data for epoch %v", epochNMinus2) - } - if epochTotal[epochNMinus3] == 0 { - return fmt.Errorf("consistency error, did not retrieve attestation data for epoch %v", epochNMinus3) - } - - if epochAttested[types.Epoch(epoch)]*100/epochTotal[types.Epoch(epoch)] < 60 { - return fmt.Errorf("consistency error, did receive more than 60%% of missed attestation in epoch %v (total: %v, attested: %v)", epoch, epochTotal[types.Epoch(epoch)], epochAttested[types.Epoch(epoch)]) - } - if epochAttested[epochNMinus1]*100/epochTotal[epochNMinus1] < 60 { - return fmt.Errorf("consistency error, did receive more than 60%% of missed attestation in epoch %v (total: %v, attested: %v)", epochNMinus1, epochTotal[epochNMinus1], epochAttested[epochNMinus1]) - } - if epochAttested[epochNMinus2]*100/epochTotal[epochNMinus2] < 60 { - return fmt.Errorf("consistency error, did receive more than 60%% of missed attestation in epoch %v (total: %v, attested: %v)", epochNMinus2, epochTotal[epochNMinus2], epochAttested[epochNMinus2]) - } - if epochAttested[epochNMinus3]*100/epochTotal[epochNMinus3] < 60 { - return fmt.Errorf("consistency error, did receive more than 60%% of missed attestation in epoch %v (total: %v, attested: %v)", epochNMinus3, epochTotal[epochNMinus3], epochAttested[epochNMinus3]) - } - - for _, validator := range validators { - if participationPerEpoch[epochNMinus3][types.ValidatorIndex(validator)] && !participationPerEpoch[epochNMinus2][types.ValidatorIndex(validator)] && !participationPerEpoch[epochNMinus1][types.ValidatorIndex(validator)] && !participationPerEpoch[types.Epoch(epoch)][types.ValidatorIndex(validator)] { - log.Infof("validator %v detected as offline in epoch %v (did not attest since epoch %v)", validator, epoch, epochNMinus2) - pubkey, err := GetPubkeyForIndex(validator) - if err != nil { - return err - } - offlineValidators = append(offlineValidators, &indexPubkeyPair{Index: validator, Pubkey: pubkey}) - } - - if !participationPerEpoch[epochNMinus3][types.ValidatorIndex(validator)] && !participationPerEpoch[epochNMinus2][types.ValidatorIndex(validator)] && !participationPerEpoch[epochNMinus1][types.ValidatorIndex(validator)] && participationPerEpoch[types.Epoch(epoch)][types.ValidatorIndex(validator)] { - log.Infof("validator %v detected as online in epoch %v (attested again in epoch 
%v)", validator, epoch, epoch) - pubkey, err := GetPubkeyForIndex(validator) - if err != nil { - return err - } - onlineValidators = append(onlineValidators, &indexPubkeyPair{Index: validator, Pubkey: pubkey}) - } - } - - offlineValidatorsLimit := 5000 - if utils.Config.Notifications.OfflineDetectionLimit != 0 { - offlineValidatorsLimit = utils.Config.Notifications.OfflineDetectionLimit - } - - onlineValidatorsLimit := 5000 - if utils.Config.Notifications.OnlineDetectionLimit != 0 { - onlineValidatorsLimit = utils.Config.Notifications.OnlineDetectionLimit - } - - if len(offlineValidators) > offlineValidatorsLimit { - return fmt.Errorf("retrieved more than %v offline validators notifications: %v, exiting", offlineValidatorsLimit, len(offlineValidators)) - } - - if len(onlineValidators) > onlineValidatorsLimit { - return fmt.Errorf("retrieved more than %v online validators notifications: %v, exiting", onlineValidatorsLimit, len(onlineValidators)) - } - - _, subMap, err = GetSubsForEventFilter(types.ValidatorIsOfflineEventName) - if err != nil { - return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorIsOfflineEventName, err) - } - - for _, validator := range offlineValidators { - t := hex.EncodeToString(validator.Pubkey) - subs := subMap[t] - for _, sub := range subs { - if sub.UserID == nil || sub.ID == nil { - return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) - } - log.Infof("new event: validator %v detected as offline since epoch %v", validator.Index, epoch) - - n := validatorIsOfflineNotification{ - SubscriptionID: *sub.ID, - ValidatorIndex: validator.Index, - IsOffline: true, - EventEpoch: epoch, - EventName: types.ValidatorIsOfflineEventName, - InternalState: fmt.Sprint(epoch - 2), // first epoch the validator stopped attesting - EventFilter: hex.EncodeToString(validator.Pubkey), - } - - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - isDuplicate := false - for _, userEvent := range notificationsByUserID[*sub.UserID][n.GetEventName()] { - if userEvent.GetSubscriptionID() == n.SubscriptionID { - isDuplicate = true - break - } - } - if isDuplicate { - log.Infof("duplicate offline notification detected") - continue - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], &n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - } - - for _, validator := range onlineValidators { - t := hex.EncodeToString(validator.Pubkey) - subs := subMap[t] - for _, sub := range subs { - if sub.State.String == "" || sub.State.String == "-" { // discard online notifications that do not have a corresponding offline notification - continue - } - - originalLastSeenEpoch, err := strconv.ParseUint(sub.State.String, 10, 64) - if err != nil { - // i have no idea what just happened. - return fmt.Errorf("this should never happen. 
couldn't parse state as uint64: %v", err) - } - - epochsSinceOffline := epoch - originalLastSeenEpoch - - if epochsSinceOffline > epoch { // fix overflow - epochsSinceOffline = 4 - } - - if sub.UserID == nil || sub.ID == nil { - return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) - } - - log.Infof("new event: validator %v detected as online again at epoch %v", validator.Index, epoch) - - n := validatorIsOfflineNotification{ - SubscriptionID: *sub.ID, - ValidatorIndex: validator.Index, - IsOffline: false, - EventEpoch: epoch, - EventName: types.ValidatorIsOfflineEventName, - InternalState: "-", - EventFilter: hex.EncodeToString(validator.Pubkey), - EpochsOffline: epochsSinceOffline, - } - - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - isDuplicate := false - for _, userEvent := range notificationsByUserID[*sub.UserID][n.GetEventName()] { - if userEvent.GetSubscriptionID() == n.SubscriptionID { - isDuplicate = true - break - } - } - if isDuplicate { - log.Infof("duplicate online notification detected") - continue - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], &n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - } - - return nil -} - -type validatorIsOfflineNotification struct { - SubscriptionID uint64 - ValidatorIndex uint64 - EventEpoch uint64 - EpochsOffline uint64 - IsOffline bool - EventName types.EventName - EventFilter string - UnsubscribeHash sql.NullString - InternalState string -} - -func (n *validatorIsOfflineNotification) GetLatestState() string { - return n.InternalState -} - -func (n *validatorIsOfflineNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorIsOfflineNotification) GetEventName() types.EventName { - return n.EventName -} - -func (n *validatorIsOfflineNotification) GetEpoch() uint64 { - return n.EventEpoch -} - -func (n *validatorIsOfflineNotification) GetInfo(includeUrl bool) string { - if n.IsOffline { - if includeUrl { - return fmt.Sprintf(`Validator %[1]v is offline since epoch %[2]s).`, n.ValidatorIndex, n.InternalState, utils.Config.Frontend.SiteDomain) - } else { - return fmt.Sprintf(`Validator %v is offline since epoch %s.`, n.ValidatorIndex, n.InternalState) - } - } else { - if includeUrl { - return fmt.Sprintf(`Validator %[1]v is back online since epoch %[2]v (was offline for %[4]v epoch(s)).`, n.ValidatorIndex, n.EventEpoch, utils.Config.Frontend.SiteDomain, n.EpochsOffline) - } else { - return fmt.Sprintf(`Validator %v is back online since epoch %v (was offline for %v epoch(s)).`, n.ValidatorIndex, n.EventEpoch, n.EpochsOffline) - } - } -} - -func (n *validatorIsOfflineNotification) GetTitle() string { - if n.IsOffline { - return "Validator is Offline" - } else { - return "Validator Back Online" - } -} - -func (n *validatorIsOfflineNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *validatorIsOfflineNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorIsOfflineNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n 
*validatorIsOfflineNotification) GetInfoMarkdown() string { - if n.IsOffline { - return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is offline since epoch [%[2]v](https://%[3]v/epoch/%[2]v).`, n.ValidatorIndex, n.EventEpoch, utils.Config.Frontend.SiteDomain) - } else { - return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is back online since epoch [%[2]v](https://%[3]v/epoch/%[2]v) (was offline for %[4]v epoch(s)).`, n.ValidatorIndex, n.EventEpoch, utils.Config.Frontend.SiteDomain, n.EpochsOffline) - } -} - -type validatorAttestationNotification struct { - SubscriptionID uint64 - ValidatorIndex uint64 - ValidatorPublicKey string - Epoch uint64 - Status uint64 // * Can be 0 = scheduled | missed, 1 executed - EventName types.EventName - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *validatorAttestationNotification) GetLatestState() string { - return "" -} - -func (n *validatorAttestationNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorAttestationNotification) GetEventName() types.EventName { - return n.EventName -} - -func (n *validatorAttestationNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *validatorAttestationNotification) GetInfo(includeUrl bool) string { - var generalPart = "" - if includeUrl { - switch n.Status { - case 0: - generalPart = fmt.Sprintf(`Validator %[1]v missed an attestation in epoch %[2]v.`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain) - case 1: - generalPart = fmt.Sprintf(`Validator %[1]v submitted a successful attestation for epoch %[2]v.`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain) - } - // return generalPart + getUrlPart(n.ValidatorIndex) - } else { - switch n.Status { - case 0: - generalPart = fmt.Sprintf(`Validator %v missed an attestation in epoch %v.`, n.ValidatorIndex, n.Epoch) - case 1: - generalPart = fmt.Sprintf(`Validator %v submitted a successful attestation in epoch %v.`, n.ValidatorIndex, n.Epoch) - } - } - return generalPart -} - -func (n *validatorAttestationNotification) GetTitle() string { - switch n.Status { - case 0: - return "Attestation Missed" - case 1: - return "Attestation Submitted" - } - return "-" -} - -func (n *validatorAttestationNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *validatorAttestationNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorAttestationNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *validatorAttestationNotification) GetInfoMarkdown() string { - var generalPart = "" - switch n.Status { - case 0: - generalPart = fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) missed an attestation in epoch [%[2]v](https://%[3]v/epoch/%[2]v).`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain) - case 1: - generalPart = fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) submitted a successful attestation in epoch [%[2]v](https://%[3]v/epoch/%[2]v).`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain) - } - return generalPart -} - -type validatorGotSlashedNotification struct { - SubscriptionID uint64 - ValidatorIndex uint64 - Epoch uint64 - Slasher uint64 - Reason string - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *validatorGotSlashedNotification) GetLatestState() string { - return "" -} - -func (n *validatorGotSlashedNotification) GetUnsubscribeHash() 
string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *validatorGotSlashedNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorGotSlashedNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorGotSlashedNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *validatorGotSlashedNotification) GetEventName() types.EventName { - return types.ValidatorGotSlashedEventName -} - -func (n *validatorGotSlashedNotification) GetInfo(includeUrl bool) string { - generalPart := fmt.Sprintf(`Validator %v has been slashed at epoch %v by validator %v for %s.`, n.ValidatorIndex, n.Epoch, n.Slasher, n.Reason) - if includeUrl { - return generalPart + getUrlPart(n.ValidatorIndex) - } - return generalPart -} - -func (n *validatorGotSlashedNotification) GetTitle() string { - return "Validator got Slashed" -} - -func (n *validatorGotSlashedNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *validatorGotSlashedNotification) GetInfoMarkdown() string { - generalPart := fmt.Sprintf(`Validator [%[1]v](https://%[5]v/validator/%[1]v) has been slashed at epoch [%[2]v](https://%[5]v/epoch/%[2]v) by validator [%[3]v](https://%[5]v/validator/%[3]v) for %[4]s.`, n.ValidatorIndex, n.Epoch, n.Slasher, n.Reason, utils.Config.Frontend.SiteDomain) - return generalPart -} - -func collectValidatorGotSlashedNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { - dbResult, err := db.GetValidatorsGotSlashed(epoch) - if err != nil { - return fmt.Errorf("error getting slashed validators from database, err: %w", err) - } - query := "" - resultsLen := len(dbResult) - for i, event := range dbResult { - query += fmt.Sprintf(`SELECT %d AS ref, id, user_id, ENCODE(unsubscribe_hash, 'hex') AS unsubscribe_hash from users_subscriptions where event_name = $1 AND event_filter = '%x'`, i, event.SlashedValidatorPubkey) - if i < resultsLen-1 { - query += " UNION " - } - } - - if query == "" { - return nil - } - - var subscribers []struct { - Ref uint64 `db:"ref"` - Id uint64 `db:"id"` - UserId uint64 `db:"user_id"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - name := string(types.ValidatorGotSlashedEventName) - if utils.Config.Chain.ClConfig.ConfigName != "" { - name = utils.Config.Chain.ClConfig.ConfigName + ":" + name - } - err = db.FrontendWriterDB.Select(&subscribers, query, name) - if err != nil { - return fmt.Errorf("error querying subscribers, err: %w", err) - } - - for _, sub := range subscribers { - event := dbResult[sub.Ref] - - log.Infof("creating %v notification for validator %v in epoch %v", event.SlashedValidatorPubkey, event.Reason, epoch) - - n := &validatorGotSlashedNotification{ - SubscriptionID: sub.Id, - Slasher: event.SlasherIndex, - Epoch: event.Epoch, - Reason: event.Reason, - ValidatorIndex: event.SlashedValidatorIndex, - EventFilter: hex.EncodeToString(event.SlashedValidatorPubkey), - UnsubscribeHash: sub.UnsubscribeHash, - } - - if _, exists := notificationsByUserID[sub.UserId]; !exists { - notificationsByUserID[sub.UserId] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[sub.UserId][n.GetEventName()]; !exists { - notificationsByUserID[sub.UserId][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[sub.UserId][n.GetEventName()] = append(notificationsByUserID[sub.UserId][n.GetEventName()], n) - 
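Every collector in this section repeats the same three-step sequence to register a notification: ensure the per-user map exists, append, and bump the metric. A helper along these lines could fold that boilerplate into one place (a sketch only — the name is illustrative and not part of this patch; it relies on the package's existing types and metrics imports):

func addNotification(byUserID map[uint64]map[types.EventName][]types.Notification, userID uint64, n types.Notification) {
	// only the outer map needs explicit initialization; append handles a nil slice
	if _, exists := byUserID[userID]; !exists {
		byUserID[userID] = map[types.EventName][]types.Notification{}
	}
	event := n.GetEventName()
	byUserID[userID][event] = append(byUserID[userID][event], n)
	metrics.NotificationsCollected.WithLabelValues(string(event)).Inc()
}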
metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - - return nil -} - -type validatorWithdrawalNotification struct { - SubscriptionID uint64 - ValidatorIndex uint64 - Epoch uint64 - Slot uint64 - Amount uint64 - Address []byte - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *validatorWithdrawalNotification) GetLatestState() string { - return "" -} - -func (n *validatorWithdrawalNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *validatorWithdrawalNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorWithdrawalNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorWithdrawalNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *validatorWithdrawalNotification) GetEventName() types.EventName { - return types.ValidatorReceivedWithdrawalEventName -} - -func (n *validatorWithdrawalNotification) GetInfo(includeUrl bool) string { - generalPart := fmt.Sprintf(`An automatic withdrawal of %v has been processed for validator %v.`, utils.FormatClCurrencyString(n.Amount, utils.Config.Frontend.MainCurrency, 6, true, false, false), n.ValidatorIndex) - if includeUrl { - return generalPart + getUrlPart(n.ValidatorIndex) - } - return generalPart -} - -func (n *validatorWithdrawalNotification) GetTitle() string { - return "Withdrawal Processed" -} - -func (n *validatorWithdrawalNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *validatorWithdrawalNotification) GetInfoMarkdown() string { - generalPart := fmt.Sprintf(`An automatic withdrawal of %[2]v has been processed for validator [%[1]v](https://%[6]v/validator/%[1]v) during slot [%[3]v](https://%[6]v/slot/%[3]v). The funds have been sent to: [%[4]v](https://%[6]v/address/0x%[5]x).`, n.ValidatorIndex, utils.FormatClCurrencyString(n.Amount, utils.Config.Frontend.MainCurrency, 6, true, false, false), n.Slot, utils.FormatHashRaw(n.Address), n.Address, utils.Config.Frontend.SiteDomain) - return generalPart -} - -// collectWithdrawalNotifications collects all notifications validator withdrawals -func collectWithdrawalNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { - // get all users that are subscribed to this event (scale: a few thousand rows depending on how many users we have) - _, subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName) - if err != nil { - return fmt.Errorf("error getting subscriptions for missed attestations %w", err) - } - - // get all the withdrawal events for a specific epoch. Will be at most X per slot (currently 16 on mainnet, which is 32 * 16 per epoch; 512 rows). 
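The per-subscription guard used in the loop below can be read as a standalone predicate: skip if a notification was already sent for this epoch (or a later one), or if the epoch predates the subscription. A sketch with illustrative naming, mirroring the checks that follow:

func alreadyNotified(lastSentEpoch *uint64, createdEpoch, epoch uint64) bool {
	if lastSentEpoch == nil {
		// never notified before, so nothing suppresses this event
		return false
	}
	return *lastSentEpoch >= epoch || epoch < createdEpoch
}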
- events, err := db.GetEpochWithdrawals(epoch) - if err != nil { - return fmt.Errorf("error getting withdrawals from database, err: %w", err) - } - - // log.Infof("retrieved %v events", len(events)) - for _, event := range events { - subscribers, ok := subMap[hex.EncodeToString(event.Pubkey)] - if ok { - for _, sub := range subscribers { - if sub.UserID == nil || sub.ID == nil { - return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) - } - if sub.LastEpoch != nil { - lastSentEpoch := *sub.LastEpoch - if lastSentEpoch >= epoch || epoch < sub.CreatedEpoch { - continue - } - } - // log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorReceivedWithdrawalEventName, event.ValidatorIndex, epoch) - n := &validatorWithdrawalNotification{ - SubscriptionID: *sub.ID, - ValidatorIndex: event.ValidatorIndex, - Epoch: epoch, - Slot: event.Slot, - Amount: event.Amount, - Address: event.Address, - EventFilter: hex.EncodeToString(event.Pubkey), - UnsubscribeHash: sub.UnsubscribeHash, - } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - } - } - - return nil -} - -type ethClientNotification struct { - SubscriptionID uint64 - UserID uint64 - Epoch uint64 - EthClient string - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *ethClientNotification) GetLatestState() string { - return "" -} - -func (n *ethClientNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *ethClientNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *ethClientNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *ethClientNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *ethClientNotification) GetEventName() types.EventName { - return types.EthClientUpdateEventName -} - -func (n *ethClientNotification) GetInfo(includeUrl bool) string { - generalPart := fmt.Sprintf(`A new version for %s is available.`, n.EthClient) - if includeUrl { - url := "" - switch n.EthClient { - case "Geth": - url = "https://github.com/ethereum/go-ethereum/releases" - case "Nethermind": - url = "https://github.com/NethermindEth/nethermind/releases" - case "Teku": - url = "https://github.com/ConsenSys/teku/releases" - case "Prysm": - url = "https://github.com/prysmaticlabs/prysm/releases" - case "Nimbus": - url = "https://github.com/status-im/nimbus-eth2/releases" - case "Lighthouse": - url = "https://github.com/sigp/lighthouse/releases" - case "Erigon": - url = "https://github.com/erigontech/erigon/releases" - case "Rocketpool": - url = "https://github.com/rocket-pool/smartnode-install/releases" - case "MEV-Boost": - url = "https://github.com/flashbots/mev-boost/releases" - case "Lodestar": - url = "https://github.com/chainsafe/lodestar/releases" - default: - url = "https://beaconcha.in/ethClients" - } - - return generalPart + " " + url - } - return generalPart -} - -func (n *ethClientNotification) GetTitle() string { - return 
fmt.Sprintf("New %s update", n.EthClient) -} - -func (n *ethClientNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *ethClientNotification) GetInfoMarkdown() string { - url := "" - switch n.EthClient { - case "Geth": - url = "https://github.com/ethereum/go-ethereum/releases" - case "Nethermind": - url = "https://github.com/NethermindEth/nethermind/releases" - case "Teku": - url = "https://github.com/ConsenSys/teku/releases" - case "Prysm": - url = "https://github.com/prysmaticlabs/prysm/releases" - case "Nimbus": - url = "https://github.com/status-im/nimbus-eth2/releases" - case "Lighthouse": - url = "https://github.com/sigp/lighthouse/releases" - case "Erigon": - url = "https://github.com/erigontech/erigon/releases" - case "Rocketpool": - url = "https://github.com/rocket-pool/smartnode-install/releases" - case "MEV-Boost": - url = "https://github.com/flashbots/mev-boost/releases" - case "Lodestar": - url = "https://github.com/chainsafe/lodestar/releases" - default: - url = "https://beaconcha.in/ethClients" - } - - generalPart := fmt.Sprintf(`A new version for [%s](%s) is available.`, n.EthClient, url) - - return generalPart -} - -func collectEthClientNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { - updatedClients := ethclients.GetUpdatedClients() //only check if there are new updates - for _, client := range updatedClients { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE - us.event_name=$1 - AND - us.event_filter=$2 - AND - ((us.last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP($3) > us.last_sent_ts) OR us.last_sent_ts IS NULL) - `, - eventName, strings.ToLower(client.Name), client.Date.Unix()) // was last notification sent 2 days ago for this client - - if err != nil { - return err - } - - for _, r := range dbResult { - n := ðClientNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EthClient: client.Name, - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - } - return nil -} - -type MachineEvents struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - MachineName string `db:"machine"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - EventThreshold float64 `db:"event_threshold"` -} - -func collectMonitoringMachineOffline(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { - nowTs := time.Now().Unix() - return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineOfflineEventName, 120, - // notify condition - func(_ *MachineEvents, machineData 
*types.MachineMetricSystemUser) bool { - if machineData.CurrentDataInsertTs < nowTs-10*60 && machineData.CurrentDataInsertTs > nowTs-90*60 { - return true - } - return false - }, - epoch, - ) -} - -func isMachineDataRecent(machineData *types.MachineMetricSystemUser) bool { - nowTs := time.Now().Unix() - return machineData.CurrentDataInsertTs >= nowTs-60*60 -} - -func collectMonitoringMachineDiskAlmostFull(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { - return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineDiskAlmostFullEventName, 750, - // notify condition - func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { - if !isMachineDataRecent(machineData) { - return false - } - - percentFree := float64(machineData.CurrentData.DiskNodeBytesFree) / float64(machineData.CurrentData.DiskNodeBytesTotal+1) - return percentFree < subscribeData.EventThreshold - }, - epoch, - ) -} - -func collectMonitoringMachineCPULoad(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { - return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineCpuLoadEventName, 10, - // notify condition - func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { - if !isMachineDataRecent(machineData) { - return false - } - - if machineData.FiveMinuteOldData == nil { // no compare data found (5 min old data) - return false - } - - idle := float64(machineData.CurrentData.CpuNodeIdleSecondsTotal) - float64(machineData.FiveMinuteOldData.CpuNodeIdleSecondsTotal) - total := float64(machineData.CurrentData.CpuNodeSystemSecondsTotal) - float64(machineData.FiveMinuteOldData.CpuNodeSystemSecondsTotal) - percentLoad := float64(1) - (idle / total) - - return percentLoad > subscribeData.EventThreshold - }, - epoch, - ) -} - -func collectMonitoringMachineMemoryUsage(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { - return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineMemoryUsageEventName, 10, - // notify condition - func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { - if !isMachineDataRecent(machineData) { - return false - } - - memFree := float64(machineData.CurrentData.MemoryNodeBytesFree) + float64(machineData.CurrentData.MemoryNodeBytesCached) + float64(machineData.CurrentData.MemoryNodeBytesBuffers) - memTotal := float64(machineData.CurrentData.MemoryNodeBytesTotal) - memUsage := float64(1) - (memFree / memTotal) - - return memUsage > subscribeData.EventThreshold - }, - epoch, - ) -} - -var isFirstNotificationCheck = true - -func collectMonitoringMachine( - notificationsByUserID map[uint64]map[types.EventName][]types.Notification, - eventName types.EventName, - epochWaitInBetween int, - notifyConditionFulfilled func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool, - epoch uint64, -) error { - var allSubscribed []MachineEvents - err := db.FrontendWriterDB.Select(&allSubscribed, - `SELECT - us.user_id, - max(us.id) AS id, - ENCODE((array_agg(us.unsubscribe_hash))[1], 'hex') AS unsubscribe_hash, - event_filter AS machine, - COALESCE(event_threshold, 0) AS event_threshold - FROM users_subscriptions us - WHERE us.event_name = $1 AND us.created_epoch <= $2 - AND (us.last_sent_epoch < ($2 - $3) OR us.last_sent_epoch IS NULL) - group by us.user_id, machine, event_threshold`, - eventName, epoch, epochWaitInBetween) - if 
err != nil { - return err - } - - rowKeys := gcp_bigtable.RowList{} - for _, data := range allSubscribed { - rowKeys = append(rowKeys, db.BigtableClient.GetMachineRowKey(data.UserID, "system", data.MachineName)) - } - - machineDataOfSubscribed, err := db.BigtableClient.GetMachineMetricsForNotifications(rowKeys) - if err != nil { - return err - } - - var result []MachineEvents - for _, data := range allSubscribed { - localData := data // Create a local copy of the data variable - machineMap, found := machineDataOfSubscribed[localData.UserID] - if !found { - continue - } - currentMachineData, found := machineMap[localData.MachineName] - if !found { - continue - } - - //logrus.Infof("currentMachineData %v | %v | %v | %v", currentMachine.CurrentDataInsertTs, currentMachine.CompareDataInsertTs, currentMachine.UserID, currentMachine.Machine) - if notifyConditionFulfilled(&localData, currentMachineData) { - result = append(result, localData) - } - } - - subThreshold := uint64(10) - if utils.Config.Notifications.MachineEventThreshold != 0 { - subThreshold = utils.Config.Notifications.MachineEventThreshold - } - - subFirstRatioThreshold := 0.3 - if utils.Config.Notifications.MachineEventFirstRatioThreshold != 0 { - subFirstRatioThreshold = utils.Config.Notifications.MachineEventFirstRatioThreshold - } - - subSecondRatioThreshold := 0.9 - if utils.Config.Notifications.MachineEventSecondRatioThreshold != 0 { - subSecondRatioThreshold = utils.Config.Notifications.MachineEventSecondRatioThreshold - } - - var subScriptionCount uint64 - err = db.FrontendWriterDB.Get(&subScriptionCount, - `SELECT - COUNT(DISTINCT user_id) - FROM users_subscriptions - WHERE event_name = $1`, - eventName) - if err != nil { - return err - } - - // If there are too few users subscribed to this event, we always send the notifications - if subScriptionCount >= subThreshold { - subRatioThreshold := subSecondRatioThreshold - // For the machine offline check we do a low threshold check first and the next time a high threshold check - if isFirstNotificationCheck && eventName == types.MonitoringMachineOfflineEventName { - subRatioThreshold = subFirstRatioThreshold - isFirstNotificationCheck = false - } - if float64(len(result))/float64(len(allSubscribed)) >= subRatioThreshold { - log.Error(nil, fmt.Errorf("error too many users would be notified concerning: %v", eventName), 0) - return nil - } - } - - for _, r := range result { - n := &monitorMachineNotification{ - SubscriptionID: r.SubscriptionID, - MachineName: r.MachineName, - UserID: r.UserID, - EventName: eventName, - Epoch: epoch, - UnsubscribeHash: r.UnsubscribeHash, - } - //logrus.Infof("notify %v %v", eventName, n) - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - - if eventName == types.MonitoringMachineOfflineEventName { - // Notifications will be sent, reset the flag - isFirstNotificationCheck = true - } - - return nil -} - -type monitorMachineNotification struct { - SubscriptionID uint64 - MachineName string - UserID uint64 - Epoch uint64 - EventName types.EventName - UnsubscribeHash sql.NullString -} - -func (n 
*monitorMachineNotification) GetLatestState() string { - return "" -} - -func (n *monitorMachineNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *monitorMachineNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *monitorMachineNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *monitorMachineNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *monitorMachineNotification) GetEventName() types.EventName { - return n.EventName -} - -func (n *monitorMachineNotification) GetInfo(includeUrl bool) string { - switch n.EventName { - case types.MonitoringMachineDiskAlmostFullEventName: - return fmt.Sprintf(`Your staking machine "%v" is running low on storage space.`, n.MachineName) - case types.MonitoringMachineOfflineEventName: - return fmt.Sprintf(`Your staking machine "%v" might be offline. It has not been seen for a couple minutes now.`, n.MachineName) - case types.MonitoringMachineCpuLoadEventName: - return fmt.Sprintf(`Your staking machine "%v" has reached your configured CPU usage threshold.`, n.MachineName) - case types.MonitoringMachineSwitchedToETH1FallbackEventName: - return fmt.Sprintf(`Your staking machine "%v" has switched to your configured ETH1 fallback`, n.MachineName) - case types.MonitoringMachineSwitchedToETH2FallbackEventName: - return fmt.Sprintf(`Your staking machine "%v" has switched to your configured ETH2 fallback`, n.MachineName) - case types.MonitoringMachineMemoryUsageEventName: - return fmt.Sprintf(`Your staking machine "%v" has reached your configured RAM threshold.`, n.MachineName) - } - return "" -} - -func (n *monitorMachineNotification) GetTitle() string { - switch n.EventName { - case types.MonitoringMachineDiskAlmostFullEventName: - return "Storage Warning" - case types.MonitoringMachineOfflineEventName: - return "Staking Machine Offline" - case types.MonitoringMachineCpuLoadEventName: - return "High CPU Load" - case types.MonitoringMachineSwitchedToETH1FallbackEventName: - return "ETH1 Fallback Active" - case types.MonitoringMachineSwitchedToETH2FallbackEventName: - return "ETH2 Fallback Active" - case types.MonitoringMachineMemoryUsageEventName: - return "Memory Warning" - } - return "" -} - -func (n *monitorMachineNotification) GetEventFilter() string { - return n.MachineName -} - -func (n *monitorMachineNotification) GetInfoMarkdown() string { - return n.GetInfo(false) -} - -type taxReportNotification struct { - SubscriptionID uint64 - UserID uint64 - Epoch uint64 - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *taxReportNotification) GetLatestState() string { - return "" -} - -func (n *taxReportNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *taxReportNotification) GetEmailAttachment() *types.EmailAttachment { - tNow := time.Now() - lastDay := time.Date(tNow.Year(), tNow.Month(), 1, 0, 0, 0, 0, time.UTC) - firstDay := lastDay.AddDate(0, -1, 0) - - q, err := url.ParseQuery(n.EventFilter) - - if err != nil { - log.Warnf("Failed to parse rewards report eventfilter: %v", err) - return nil - } - - currency := q.Get("currency") - - validators := []uint64{} - valSlice := strings.Split(q.Get("validators"), ",") - if len(valSlice) > 0 { - for _, val := range valSlice { - v, err := strconv.ParseUint(val, 10, 64) - if err != nil { - continue - } - validators = append(validators, v) - } - } 
else { - log.Warnf("Validators Not found in rewards report eventfilter") - return nil - } - - pdf := services.GetPdfReport(validators, currency, uint64(firstDay.Unix()), uint64(lastDay.Unix())) - - return &types.EmailAttachment{Attachment: pdf, Name: fmt.Sprintf("income_history_%v_%v.pdf", firstDay.Format("20060102"), lastDay.Format("20060102"))} -} - -func (n *taxReportNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *taxReportNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *taxReportNotification) GetEventName() types.EventName { - return types.TaxReportEventName -} - -func (n *taxReportNotification) GetInfo(includeUrl bool) string { - generalPart := `Please find attached the income history of your selected validators.` - return generalPart -} - -func (n *taxReportNotification) GetTitle() string { - return "Income Report" -} - -func (n *taxReportNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *taxReportNotification) GetInfoMarkdown() string { - return n.GetInfo(false) -} - -func collectTaxReportNotificationNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { - lastStatsDay, err := cache.LatestExportedStatisticDay.GetOrDefault(db.GetLastExportedStatisticDay) - - if err != nil { - return err - } - //Check that the last day of the month is already exported - tNow := time.Now() - firstDayOfMonth := time.Date(tNow.Year(), tNow.Month(), 1, 0, 0, 0, 0, time.UTC) - if utils.TimeToDay(uint64(firstDayOfMonth.Unix())) > lastStatsDay { - return nil - } - - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - name := string(eventName) - if utils.Config.Chain.ClConfig.ConfigName != "" { - name = utils.Config.Chain.ClConfig.ConfigName + ":" + name - } - - err = db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts < $2 OR (us.last_sent_ts IS NULL AND us.created_ts < $2)); - `, - name, firstDayOfMonth) - - if err != nil { - return err - } - - for _, r := range dbResult { - n := &taxReportNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - - return nil -} - -type networkNotification struct { - SubscriptionID uint64 - UserID uint64 - Epoch uint64 - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *networkNotification) GetLatestState() string { - return "" -} - -func (n *networkNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *networkNotification) GetEmailAttachment() *types.EmailAttachment { - 
return nil -} - -func (n *networkNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *networkNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *networkNotification) GetEventName() types.EventName { - return types.NetworkLivenessIncreasedEventName -} - -func (n *networkNotification) GetInfo(includeUrl bool) string { - generalPart := fmt.Sprintf(`Network experienced finality issues. Learn more at https://%v/charts/network_liveness`, utils.Config.Frontend.SiteDomain) - return generalPart -} - -func (n *networkNotification) GetTitle() string { - return "Beaconchain Network Issues" -} - -func (n *networkNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *networkNotification) GetInfoMarkdown() string { - generalPart := fmt.Sprintf(`Network experienced finality issues ([view chart](https://%v/charts/network_liveness)).`, utils.Config.Frontend.SiteDomain) - return generalPart -} - -func collectNetworkNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { - count := 0 - err := db.WriterDb.Get(&count, ` - SELECT count(ts) FROM network_liveness WHERE (headepoch-finalizedepoch) > 3 AND ts > now() - interval '60 minutes'; - `) - - if err != nil { - return err - } - - if count > 0 { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '1 hour' OR us.last_sent_ts IS NULL); - `, - utils.GetNetwork()+":"+string(eventName)) - - if err != nil { - return err - } - - for _, r := range dbResult { - n := &networkNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - } - - return nil -} - -type rocketpoolNotification struct { - SubscriptionID uint64 - UserID uint64 - Epoch uint64 - EventFilter string - EventName types.EventName - ExtraData string - UnsubscribeHash sql.NullString -} - -func (n *rocketpoolNotification) GetLatestState() string { - return "" -} - -func (n *rocketpoolNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *rocketpoolNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *rocketpoolNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *rocketpoolNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *rocketpoolNotification) GetEventName() types.EventName { - return n.EventName -} - -func (n *rocketpoolNotification) GetInfo(includeUrl bool) string { - switch 
n.EventName { - case types.RocketpoolCommissionThresholdEventName: - return fmt.Sprintf(`The current RPL commission rate of %v has reached your configured threshold.`, n.ExtraData) - case types.RocketpoolNewClaimRoundStartedEventName: - return `A new reward round has started. You can now claim your rewards from the previous round.` - case types.RocketpoolCollateralMaxReached: - return fmt.Sprintf(`Your RPL collateral has reached your configured threshold at %v%%.`, n.ExtraData) - case types.RocketpoolCollateralMinReached: - return fmt.Sprintf(`Your RPL collateral has reached your configured threshold at %v%%.`, n.ExtraData) - case types.SyncCommitteeSoon: - return getSyncCommitteeSoonInfo([]types.Notification{n}) - } - - return "" -} - -func (n *rocketpoolNotification) GetTitle() string { - switch n.EventName { - case types.RocketpoolCommissionThresholdEventName: - return `Rocketpool Commission` - case types.RocketpoolNewClaimRoundStartedEventName: - return `Rocketpool Claim Available` - case types.RocketpoolCollateralMaxReached: - return `Rocketpool Max Collateral` - case types.RocketpoolCollateralMinReached: - return `Rocketpool Min Collateral` - case types.SyncCommitteeSoon: - return `Sync Committee Duty` - } - return "" -} - -func (n *rocketpoolNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *rocketpoolNotification) GetInfoMarkdown() string { - return n.GetInfo(false) -} - -func collectRocketpoolComissionNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { - fee := 0.0 - err := db.WriterDb.Get(&fee, ` - select current_node_fee from rocketpool_network_stats order by id desc LIMIT 1; - `) - - if err != nil { - return err - } - - if fee > 0 { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '8 hours' OR us.last_sent_ts IS NULL) AND (us.event_threshold <= $2 OR (us.event_threshold < 0 AND us.event_threshold * -1 >= $2)); - `, - utils.GetNetwork()+":"+string(eventName), fee) - - if err != nil { - return err - } - - for _, r := range dbResult { - n := &rocketpoolNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EventName: eventName, - ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - } - - return nil -} - -func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { - var ts int64 - err := db.WriterDb.Get(&ts, ` - select date_part('epoch', 
claim_interval_time_start)::int from rocketpool_network_stats order by id desc LIMIT 1; - `) - - if err != nil { - return err - } - - if ts+3*60*60 > time.Now().Unix() { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '5 hours' OR us.last_sent_ts IS NULL); - `, - utils.GetNetwork()+":"+string(eventName)) - - if err != nil { - return err - } - - for _, r := range dbResult { - n := &rocketpoolNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EventName: eventName, - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - } - - return nil -} - -func collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { - pubkeys, subMap, err := GetSubsForEventFilter(eventName) - if err != nil { - return fmt.Errorf("error getting subscriptions for RocketpoolRPLCollateral %w", err) - } - - type dbResult struct { - Address []byte - RPLStake BigFloat `db:"rpl_stake"` - RPLStakeMin BigFloat `db:"min_rpl_stake"` - RPLStakeMax BigFloat `db:"max_rpl_stake"` - } - - stakeInfoPerNode := make([]dbResult, 0) - batchSize := 5000 - dataLen := len(pubkeys) - for i := 0; i < dataLen; i += batchSize { - var keys [][]byte - start := i - end := i + batchSize - - if dataLen < end { - end = dataLen - } - - keys = pubkeys[start:end] - - var partial []dbResult - - // filter nodes with no minipools (anymore) because they have min/max stake of 0 - // TODO properly remove notification entry from db - err = db.WriterDb.Select(&partial, ` - SELECT address, rpl_stake, min_rpl_stake, max_rpl_stake - FROM rocketpool_nodes - WHERE address = ANY($1) AND min_rpl_stake != 0 AND max_rpl_stake != 0`, pq.ByteaArray(keys)) - if err != nil { - return err - } - stakeInfoPerNode = append(stakeInfoPerNode, partial...) - } - - // factor in network-wide min/max collat ratio. Since LEB8 they are not directly correlated anymore (ratio of bonded to borrowed ETH), so we need either min or max - // however this is dynamic and might be changed in the future; Should extend rocketpool_network_stats to include min/max collateral values! 
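Since the ratio arithmetic below is easy to misread, here is the min-collateral case worked through with illustrative numbers in a standalone sketch (not part of the patch): a min_rpl_stake of 1000 RPL at the 10% network minimum implies 10000 RPL-worth of borrowed ETH, so an actual stake of 1500 RPL is a 15% collateral ratio.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	rplStake := big.NewFloat(1500)    // node's actual RPL stake
	rplStakeMin := big.NewFloat(1000) // stake at the 10% network-wide minimum
	// 100% of borrowed ETH expressed in RPL: the minimum stake divided by the 0.1 ratio
	hundredPercent := new(big.Float).Quo(rplStakeMin, big.NewFloat(0.1))
	ratio, _ := new(big.Float).Quo(rplStake, hundredPercent).Float64()
	fmt.Printf("node collateral ratio: %.2f\n", ratio) // 0.15, i.e. 15%
}

With the default min-collateral threshold (1.0, scaled below to 0.10), this node would not trigger an alert, since 0.15 is above the 10% line.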
- minRPLCollatRatio := bigFloat(0.1) // bigFloat it to save some memory re-allocations - maxRPLCollatRatio := bigFloat(1.5) - // temporary helper (modifying values in dbResult directly would be bad style) - nodeCollatRatioHelper := bigFloat(0) - - for _, r := range stakeInfoPerNode { - subs, ok := subMap[hex.EncodeToString(r.Address)] - if !ok { - continue - } - sub := subs[0] // RPL min/max collateral notifications are always unique per user - var alertConditionMet bool - - // according to app logic, sub.EventThreshold can be +- [0.9 to 1.5] for CollateralMax after manually changed by the user - // this corresponds to a collateral range of 140% to 200% currently shown in the app UI; so +- 0.5 allows us to compare to the actual collat ratio - // for CollateralMin it can be 1.0 to 4.0 if manually changed, to represent 10% to 40% - // 0 in both cases if not modified - var threshold float64 = sub.EventThreshold - if threshold == 0 { - threshold = 1.0 // default case - } - inverse := false - if eventName == types.RocketpoolCollateralMaxReached { - if threshold < 0 { - threshold *= -1 - } else { - inverse = true - } - threshold += 0.5 - - // 100% (of bonded eth) - nodeCollatRatioHelper.Quo(r.RPLStakeMax.bigFloat(), maxRPLCollatRatio) - } else { - threshold /= 10.0 - - // 100% (of borrowed eth) - nodeCollatRatioHelper.Quo(r.RPLStakeMin.bigFloat(), minRPLCollatRatio) - } - - nodeCollatRatio, _ := nodeCollatRatioHelper.Quo(r.RPLStake.bigFloat(), nodeCollatRatioHelper).Float64() - - alertConditionMet = nodeCollatRatio <= threshold - if inverse { - // handle special case for max collateral: notify if *above* selected amount - alertConditionMet = !alertConditionMet - } - - if !alertConditionMet { - continue - } - - if sub.LastEpoch != nil { - lastSentEpoch := *sub.LastEpoch - if lastSentEpoch >= epoch-225 || epoch < sub.CreatedEpoch { - continue - } - } - - n := &rocketpoolNotification{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - Epoch: epoch, - EventFilter: sub.EventFilter, - EventName: eventName, - ExtraData: strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.2f", threshold*100), "0"), "."), - UnsubscribeHash: sub.UnsubscribeHash, - } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - - return nil -} - -type BigFloat big.Float - -func (b *BigFloat) Value() (driver.Value, error) { - if b != nil { - return (*big.Float)(b).String(), nil - } - return nil, nil -} - -func (b *BigFloat) Scan(value interface{}) error { - if value == nil { - return errors.New("can not cast nil to BigFloat") - } - - switch t := value.(type) { - case float64: - (*big.Float)(b).SetFloat64(value.(float64)) - case []uint8: - _, ok := (*big.Float)(b).SetString(string(value.([]uint8))) - if !ok { - return fmt.Errorf("failed to load value to []uint8: %v", value) - } - case string: - _, ok := (*big.Float)(b).SetString(value.(string)) - if !ok { - return fmt.Errorf("failed to load value to []uint8: %v", value) - } - default: - return fmt.Errorf("could not scan type %T into BigFloat", t) - } - - return nil -} - -func (b *BigFloat) bigFloat() *big.Float { - return 
(*big.Float)(b) -} -func bigFloat(x float64) *big.Float { - return new(big.Float).SetFloat64(x) -} - -func collectSyncCommittee(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { - slotsPerSyncCommittee := utils.SlotsPerSyncCommittee() - currentPeriod := epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch / slotsPerSyncCommittee - nextPeriod := currentPeriod + 1 - - var validators []struct { - PubKey string `db:"pubkey"` - Index uint64 `db:"validatorindex"` - } - err := db.WriterDb.Select(&validators, `SELECT ENCODE(pubkey, 'hex') AS pubkey, validators.validatorindex FROM sync_committees LEFT JOIN validators ON validators.validatorindex = sync_committees.validatorindex WHERE period = $1`, nextPeriod) - - if err != nil { - return err - } - - if len(validators) <= 0 { - return nil - } - - var pubKeys []string - var mapping map[string]uint64 = make(map[string]uint64) - for _, val := range validators { - mapping[val.PubKey] = val.Index - pubKeys = append(pubKeys, val.PubKey) - } - - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err = db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') as unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '26 hours' OR us.last_sent_ts IS NULL) AND event_filter = ANY($2); - `, - utils.GetNetwork()+":"+string(eventName), pq.StringArray(pubKeys), - ) - - if err != nil { - return err - } - - for _, r := range dbResult { - n := &rocketpoolNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: epoch, - EventFilter: r.EventFilter, - EventName: eventName, - ExtraData: fmt.Sprintf("%v|%v|%v", mapping[r.EventFilter], nextPeriod*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, (nextPeriod+1)*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod), - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() - } - - return nil -} - -type WebhookQueue struct { - NotificationID uint64 `db:"id"` - Url string `db:"url"` - Retries uint64 `db:"retries"` - LastSent time.Time `db:"last_retry"` - Destination sql.NullString `db:"destination"` - Payload []byte `db:"payload"` - LastTry time.Time `db:"last_try"` -} - -func getEventInfo(event types.EventName, ns []types.Notification) string { - switch event { - case types.SyncCommitteeSoon: - return getSyncCommitteeSoonInfo(ns) - case "validator_balance_decreased": - return "
You will not receive any further balance decrease mails for these validators until the balance of a validator is increasing again." - } - - return "" -} - -func getSyncCommitteeSoonInfo(ns []types.Notification) string { - validators := []string{} - var startEpoch, endEpoch string - var inTime time.Duration - - for i, n := range ns { - n, ok := n.(*rocketpoolNotification) - if !ok { - log.Error(nil, "Sync committee notification not of type rocketpoolNotification", 0) - return "" - } - extras := strings.Split(n.ExtraData, "|") - if len(extras) != 3 { - log.Error(nil, "Invalid number of arguments passed to sync committee extra data. Notification will not be sent until code is corrected.", 0) - return "" - } - - validators = append(validators, extras[0]) - if i == 0 { - // startEpoch, endEpoch and inTime must be the same for all validators - startEpoch = extras[1] - endEpoch = extras[2] - - syncStartEpoch, err := strconv.ParseUint(startEpoch, 10, 64) - if err != nil { - inTime = utils.Day - } else { - inTime = time.Until(utils.EpochToTime(syncStartEpoch)) - } - inTime = inTime.Round(time.Second) - } - } - - if len(validators) > 0 { - validatorsInfo := "" - if len(validators) == 1 { - validatorsInfo = fmt.Sprintf(`Your validator %s has been elected to be part of the next sync committee.`, validators[0]) - } else { - validatorsText := "" - for i, validator := range validators { - if i < len(validators)-1 { - validatorsText += fmt.Sprintf("%s, ", validator) - } else { - validatorsText += fmt.Sprintf("and %s", validator) - } - } - validatorsInfo = fmt.Sprintf(`Your validators %s have been elected to be part of the next sync committee.`, validatorsText) - } - return fmt.Sprintf(`%s The additional duties start at epoch %s, which is in %s and will last for about a day until epoch %s.`, validatorsInfo, startEpoch, inTime, endEpoch) - } - - return "" + return collectUserDbNotifications(epoch) } diff --git a/backend/pkg/notification/pubkey_cache.go b/backend/pkg/notification/pubkey_cache.go index 39c4f9834..37f3f5ea2 100644 --- a/backend/pkg/notification/pubkey_cache.go +++ b/backend/pkg/notification/pubkey_cache.go @@ -43,7 +43,7 @@ func GetPubkeyForIndex(index uint64) ([]byte, error) { if err != nil { return nil, err } - log.Infof("serving pubkey %x for validator %v from db", pubkey, index) + //log.Infof("serving pubkey %x for validator %v from db", pubkey, index) return pubkey, nil } else if err != nil { return nil, err @@ -72,7 +72,7 @@ func GetIndexForPubkey(pubkey []byte) (uint64, error) { if err != nil { return 0, err } - log.Infof("serving index %d for validator %x from db", index, pubkey) + // log.Infof("serving index %d for validator %x from db", index, pubkey) return index, nil } else if err != nil { return 0, err diff --git a/backend/pkg/notification/queuing.go b/backend/pkg/notification/queuing.go new file mode 100644 index 000000000..0a93b7846 --- /dev/null +++ b/backend/pkg/notification/queuing.go @@ -0,0 +1,795 @@ +package notification + +import ( + "bytes" + "compress/gzip" + "context" + "database/sql" + "encoding/gob" + "fmt" + "html/template" + "maps" + "slices" + "strings" + "time" + + "firebase.google.com/go/v4/messaging" + "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" + "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/jmoiron/sqlx" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +func 
queueNotifications(epoch uint64, notificationsByUserID types.NotificationsPerUserId) error { + tx, err := db.WriterDb.Beginx() + if err != nil { + return fmt.Errorf("error starting transaction: %w", err) + } + defer utils.Rollback(tx) + + err = QueueEmailNotifications(epoch, notificationsByUserID, tx) + if err != nil { + return fmt.Errorf("error queuing email notifications: %w", err) + } + + err = QueuePushNotification(epoch, notificationsByUserID, tx) + if err != nil { + return fmt.Errorf("error queuing push notifications: %w", err) + } + + err = QueueWebhookNotifications(notificationsByUserID, tx) + if err != nil { + return fmt.Errorf("error queuing webhook notifications: %w", err) + } + + err = tx.Commit() + if err != nil { + return fmt.Errorf("error committing transaction: %w", err) + } + + err = ExportNotificationHistory(epoch, notificationsByUserID) + if err != nil { + log.Error(err, "error exporting notification history", 0) + } + + subByEpoch := map[uint64][]uint64{} + for _, notificationsPerDashboard := range notificationsByUserID { + for _, notificationsPerGroup := range notificationsPerDashboard { + for _, events := range notificationsPerGroup { + for _, notifications := range events { + for _, n := range notifications { + e := n.GetEpoch() + if _, exists := subByEpoch[e]; !exists { + subByEpoch[e] = []uint64{n.GetSubscriptionID()} + } else { + subByEpoch[e] = append(subByEpoch[e], n.GetSubscriptionID()) + } + } + } + } + } + } + + // obsolete as notifications are anyway sent on a per-epoch basis + for epoch, subIDs := range subByEpoch { + // update that we've queued the subscription (last sent here effectively means last queued) + err := db.UpdateSubscriptionsLastSent(subIDs, time.Now(), epoch) + if err != nil { + log.Error(err, "error updating sent-time of sent notifications", 0) + metrics.Errors.WithLabelValues("notifications_updating_sent_time").Inc() + } + } + // update internal state of subscriptions + // stateToSub := make(map[string]map[uint64]bool, 0) + + // for _, notificationMap := range notificationsByUserID { // _ => user + // for _, notifications := range notificationMap { // _ => eventname + // for _, notification := range notifications { // _ => index + // state := notification.GetLatestState() + // if state == "" { + // continue + // } + // if _, exists := stateToSub[state]; !exists { + // stateToSub[state] = make(map[uint64]bool, 0) + // } + // if _, exists := stateToSub[state][notification.GetSubscriptionID()]; !exists { + // stateToSub[state][notification.GetSubscriptionID()] = true + // } + // } + // } + // } + + // no need to batch here as the internal state will become obsolete + // for state, subs := range stateToSub { + // subArray := make([]int64, 0) + // for subID := range subs { + // subArray = append(subArray, int64(subID)) + // } + // _, err := db.FrontendWriterDB.Exec(`UPDATE users_subscriptions SET internal_state = $1 WHERE id = ANY($2)`, state, pq.Int64Array(subArray)) + // if err != nil { + // log.Error(err, "error updating internal state of notifications", 0) + // } + // } + return nil +} + +func ExportNotificationHistory(epoch uint64, notificationsByUserID types.NotificationsPerUserId) error { + dashboardNotificationHistoryInsertStmt, err := db.WriterDb.Preparex(` + INSERT INTO users_val_dashboards_notifications_history + (user_id, dashboard_id, group_id, epoch, event_type, event_count, details) + VALUES ($1, $2, $3, $4, $5, $6, $7) + `) + if err != nil { + return fmt.Errorf("error preparing insert statement for dashboard notifications history: %w",
err) + } + defer utils.ClosePreparedStatement(dashboardNotificationHistoryInsertStmt) + + machineNotificationHistoryInsertStmt, err := db.FrontendWriterDB.Preparex(` + INSERT INTO machine_notifications_history + (user_id, epoch, machine_id, machine_name, event_type, event_threshold) + VALUES ($1, $2, $3, $4, $5, $6) + `) + if err != nil { + return fmt.Errorf("error preparing insert statement for machine notifications history: %w", err) + } + defer utils.ClosePreparedStatement(machineNotificationHistoryInsertStmt) + + clientNotificationHistoryInsertStmt, err := db.FrontendWriterDB.Preparex(` + INSERT INTO client_notifications_history + (user_id, epoch, client, client_version, client_url) + VALUES ($1, $2, $3, $4, $5) + `) + if err != nil { + return fmt.Errorf("error preparing insert statement for client notifications history: %w", err) + } + defer utils.ClosePreparedStatement(clientNotificationHistoryInsertStmt) + + networkNotificationHistoryInsertStmt, err := db.FrontendWriterDB.Preparex(` + INSERT INTO network_notifications_history + (user_id, epoch, network, event_type, event_threshold) + VALUES ($1, $2, $3, $4, $5) + `) + if err != nil { + return fmt.Errorf("error preparing insert statement for network notifications history: %w", err) + } + defer utils.ClosePreparedStatement(networkNotificationHistoryInsertStmt) + + for userID, notificationsPerDashboard := range notificationsByUserID { + for dashboardID, notificationsPerGroup := range notificationsPerDashboard { + for group, notifications := range notificationsPerGroup { + for eventName, notifications := range notifications { + // handle all dashboard related notifications + if eventName != types.NetworkLivenessIncreasedEventName && !types.IsUserIndexed(eventName) && !types.IsMachineNotification(eventName) { + details, err := GetNotificationDetails(notifications) + if err != nil { + log.Error(err, "error getting notification details", 0) + continue + } + _, err = dashboardNotificationHistoryInsertStmt.Exec( + userID, + dashboardID, + group, + epoch, + eventName, + len(notifications), + details, + ) + if err != nil { + log.Error(err, "error inserting into dashboard notifications history", 0) + } + } else if types.IsMachineNotification(eventName) { // handle machine monitoring related events + for _, n := range notifications { + nTyped, ok := n.(*MonitorMachineNotification) + if !ok { + log.Error(err, "error casting machine notification", 0) + continue + } + _, err := machineNotificationHistoryInsertStmt.Exec( + userID, + epoch, + 0, + nTyped.MachineName, + eventName, + nTyped.EventThreshold, + ) + if err != nil { + log.Error(err, "error inserting into machine notifications history", 0) + } + } + } else if eventName == types.EthClientUpdateEventName { // handle client update events + for _, n := range notifications { + nTyped, ok := n.(*EthClientNotification) + if !ok { + log.Error(err, "error casting client update notification", 0) + continue + } + _, err := clientNotificationHistoryInsertStmt.Exec( + userID, + epoch, + nTyped.EthClient, + "", + "", + ) + if err != nil { + log.Error(err, "error inserting into client notifications history", 0) + } + } + } else if eventName == types.NetworkLivenessIncreasedEventName { // handle network liveness increased events + for range notifications { + _, err := networkNotificationHistoryInsertStmt.Exec( + userID, + epoch, + utils.Config.Chain.Name, + eventName, + 0, + ) + if err != nil { + log.Error(err, "error inserting into network notifications history", 0) + } + } + } + } + } + } + } + return nil +}
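Reading a details blob back out of the history table is the mirror image of GetNotificationDetails below: gunzip, then gob-decode. A sketch of the counterpart (illustrative name; it assumes the concrete notification types are gob-registered, which this hunk does not show):

func decodeNotificationDetails(details []byte) ([]types.Notification, error) {
	gz, err := gzip.NewReader(bytes.NewReader(details))
	if err != nil {
		return nil, fmt.Errorf("error decompressing notification details: %w", err)
	}
	defer gz.Close()
	var notifications []types.Notification
	if err := gob.NewDecoder(gz).Decode(&notifications); err != nil {
		return nil, fmt.Errorf("error decoding notification details: %w", err)
	}
	return notifications, nil
}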
nil
+}
+
+func GetNotificationDetails(notificationsPerEventFilter types.NotificationsPerEventFilter) ([]byte, error) {
+ // get the notifications as array
+ notifications := make([]types.Notification, 0, len(notificationsPerEventFilter))
+ for _, ns := range notificationsPerEventFilter {
+ ns.SetEventFilter("") // zero out the event filter as it is not needed in the details
+ notifications = append(notifications, ns)
+ }
+ // gob encode and gzip compress the notifications; readers reverse this with gzip.NewReader followed by gob.Decoder.Decode
+ var buf bytes.Buffer
+ gz := gzip.NewWriter(&buf)
+ enc := gob.NewEncoder(gz)
+ err := enc.Encode(notifications)
+ if err != nil {
+ return nil, fmt.Errorf("error encoding notifications: %w", err)
+ }
+ err = gz.Close()
+ if err != nil {
+ return nil, fmt.Errorf("error compressing notifications: %w", err)
+ }
+ return buf.Bytes(), nil
+}
+
+func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.NotificationsPerUserId) (emails []types.TransitEmailContent, err error) {
+ emails = make([]types.TransitEmailContent, 0, 50)
+
+ createdTs := time.Now()
+
+ userIDs := slices.Collect(maps.Keys(notificationsByUserID))
+
+ emailsByUserID, err := GetUserEmailsByIds(userIDs)
+ if err != nil {
+ metrics.Errors.WithLabelValues("notifications_get_user_mail_by_id").Inc()
+ return nil, fmt.Errorf("error when sending email-notifications: could not get emails: %w", err)
+ }
+
+ for userID, notificationsPerDashboard := range notificationsByUserID {
+ userEmail, exists := emailsByUserID[userID]
+ if !exists {
+ log.WarnWithStackTrace(nil, "email notification skipping user", 0, log.Fields{"user_id": userID})
+ // we don't need this metric as users can now deactivate email notifications, which would otherwise increment the counter
+ // metrics.Errors.WithLabelValues("notifications_mail_not_found").Inc()
+ continue
+ }
+ attachments := []types.EmailAttachment{}
+
+ var msg types.Email
+
+ if utils.Config.Chain.Name != "mainnet" {
+ //nolint:gosec // this is a static string
+ msg.Body += template.HTML(fmt.Sprintf("Notice: This email contains notifications for the %s network!<br>", utils.Config.Chain.Name))
+ }
+
+ subject := ""
+ notificationTypesMap := make(map[types.EventName]int)
+ uniqueNotificationTypes := []types.EventName{}
+
+ bodyDetails := template.HTML("")
+
+ totalBlockReward := float64(0)
+
+ for _, event := range types.EventSortOrder {
+ for _, notificationsPerGroup := range notificationsPerDashboard {
+ for _, userNotifications := range notificationsPerGroup {
+ ns, ok := userNotifications[event]
+ if !ok { // nothing to do for this event type
+ continue
+ }
+ //nolint:gosec // this is a static string
+ bodyDetails += template.HTML(fmt.Sprintf("%s<br>", types.EventLabel[event]))
+ i := 0
+ for _, n := range ns {
+ // Find all unique notification titles for the subject
+ if _, ok := notificationTypesMap[event]; !ok {
+ uniqueNotificationTypes = append(uniqueNotificationTypes, event)
+ }
+ notificationTypesMap[event]++
+
+ if i <= 10 {
+ if event != types.SyncCommitteeSoonEventName {
+ // SyncCommitteeSoon notifications are summed up in getEventInfo for all validators
+ //nolint:gosec // this is a static string
+ bodyDetails += template.HTML(fmt.Sprintf("%s<br>", n.GetInfo(types.NotifciationFormatHtml)))
+ }
+
+ if att := n.GetEmailAttachment(); att != nil {
+ attachments = append(attachments, *att)
+ }
+ }
+
+ if event == types.ValidatorExecutedProposalEventName {
+ proposalNotification, ok := n.(*ValidatorProposalNotification)
+ if !ok {
+ log.Error(fmt.Errorf("error casting proposal notification"), "", 0)
+ continue
+ }
+ totalBlockReward += proposalNotification.Reward
+ }
+
+ metrics.NotificationsQueued.WithLabelValues("email", string(event)).Inc()
+ i++
+
+ if i == 11 {
+ //nolint:gosec // this is a static string
+ bodyDetails += template.HTML(fmt.Sprintf("... and %d more notifications<br>", len(ns)-i))
+ continue
+ }
+ }
+
+ eventInfo := getEventInfo(event, types.NotifciationFormatHtml, ns)
+ if eventInfo != "" {
+ //nolint:gosec // this is a static string
+ bodyDetails += template.HTML(fmt.Sprintf("%s<br>", eventInfo))
+ }
+ bodyDetails += "<br>"
+ }
+ }
+ }
+
+ //nolint:gosec // this is a static string
+ bodySummary := template.HTML(fmt.Sprintf("<br><br>Summary for epoch %d:<br><br>", epoch))
+ for _, event := range types.EventSortOrder {
+ count, ok := notificationTypesMap[event]
+ if !ok {
+ continue
+ }
+ plural := ""
+ if count > 1 {
+ plural = "s"
+ }
+ switch event {
+ case types.RocketpoolCollateralMaxReachedEventName, types.RocketpoolCollateralMinReachedEventName:
+ //nolint:gosec // this is a static string
+ bodySummary += template.HTML(fmt.Sprintf("%s: %d node%s", types.EventLabel[event], count, plural))
+ case types.TaxReportEventName, types.NetworkLivenessIncreasedEventName:
+ //nolint:gosec // this is a static string
+ bodySummary += template.HTML(fmt.Sprintf("%s: %d event%s", types.EventLabel[event], count, plural))
+ case types.EthClientUpdateEventName:
+ //nolint:gosec // this is a static string
+ bodySummary += template.HTML(fmt.Sprintf("%s: %d client%s", types.EventLabel[event], count, plural))
+ case types.ValidatorExecutedProposalEventName:
+ //nolint:gosec // this is a static string
+ bodySummary += template.HTML(fmt.Sprintf("%s: %d validator%s, Reward: %.3f ETH", types.EventLabel[event], count, plural, totalBlockReward))
+ case types.ValidatorGroupEfficiencyEventName:
+ //nolint:gosec // this is a static string
+ bodySummary += template.HTML(fmt.Sprintf("%s: %d Group%s", types.EventLabel[event], count, plural))
+ default:
+ //nolint:gosec // this is a static string
+ bodySummary += template.HTML(fmt.Sprintf("%s: %d Validator%s", types.EventLabel[event], count, plural))
+ }
+ bodySummary += "<br>"
+ }
+ msg.Body += bodySummary
+ msg.Body += template.HTML("<br><br>Details:<br><br>")
+ msg.Body += bodyDetails
+
+ if len(uniqueNotificationTypes) > 2 {
+ subject = fmt.Sprintf("%s: %s, ... and %d other notifications", utils.Config.Frontend.SiteDomain, types.EventLabel[uniqueNotificationTypes[0]], len(uniqueNotificationTypes)-1)
+ } else if len(uniqueNotificationTypes) == 2 {
+ subject = fmt.Sprintf("%s: %s and %s", utils.Config.Frontend.SiteDomain, types.EventLabel[uniqueNotificationTypes[0]], types.EventLabel[uniqueNotificationTypes[1]])
+ } else if len(uniqueNotificationTypes) == 1 {
+ subject = fmt.Sprintf("%s: %s", utils.Config.Frontend.SiteDomain, types.EventLabel[uniqueNotificationTypes[0]])
+ }
+ //nolint:gosec // this is a static string
+ msg.SubscriptionManageURL = template.HTML(fmt.Sprintf(`<a href="%v">Manage</a>`, "https://"+utils.Config.Frontend.SiteDomain+"/user/notifications"))
+
+ emails = append(emails, types.TransitEmailContent{
+ Address: userEmail,
+ Subject: subject,
+ Email: msg,
+ Attachments: attachments,
+ CreatedTs: createdTs,
+ UserId: userID,
+ })
+ }
+ return emails, nil
+}
+
+func QueueEmailNotifications(epoch uint64, notificationsByUserID types.NotificationsPerUserId, tx *sqlx.Tx) error {
+ // for emails multiple notifications will be rendered to one email per user for each run
+ emails, err := RenderEmailsForUserEvents(epoch, notificationsByUserID)
+ if err != nil {
+ return fmt.Errorf("error rendering emails: %w", err)
+ }
+
+ // now batch insert the emails in one go
+ log.Infof("queueing %v email notifications", len(emails))
+ if len(emails) == 0 {
+ return nil
+ }
+ type insertData struct {
+ Content types.TransitEmailContent `db:"content"`
+ }
+
+ insertRows := make([]insertData, 0, len(emails))
+ for _, email := range emails {
+ insertRows = append(insertRows, insertData{
+ Content: email,
+ })
+ }
+
+ _, err = tx.NamedExec(`INSERT INTO notification_queue (created, channel, content) VALUES (NOW(), 'email', :content)`, insertRows)
+ if err != nil {
+ return fmt.Errorf("error writing transit email to db: %w", err)
+ }
+ return nil
+}
+
+func RenderPushMessagesForUserEvents(epoch uint64, notificationsByUserID types.NotificationsPerUserId) ([]types.TransitPushContent, error) {
+ pushMessages := make([]types.TransitPushContent, 0, 50)
+
+ userIDs := slices.Collect(maps.Keys(notificationsByUserID))
+
+ tokensByUserID, err := GetUserPushTokenByIds(userIDs, db.FrontendReaderDB)
+ if err != nil {
+ metrics.Errors.WithLabelValues("notifications_send_push_notifications").Inc()
+ return nil, fmt.Errorf("error when sending push-notifications: could not get tokens: %w", err)
+ }
+
+ for userID, notificationsPerDashboard := range notificationsByUserID {
+ userTokens, exists := tokensByUserID[userID]
+ if !exists {
+ continue
+ }
+ for dashboardId, notificationsPerGroup := range notificationsPerDashboard {
+ for groupId, userNotifications := range notificationsPerGroup {
+ log.Infof("generating push notification for user %d, dashboard %d, group %d", userID, dashboardId, groupId)
+
+ notificationTypesMap := make(map[types.EventName][]string)
+
+ totalBlockReward := float64(0)
+ for _, event := range types.EventSortOrder {
+ ns, ok := userNotifications[event]
+ if !ok { // nothing to do for this event type
+ continue
+ }
+ if _, ok := notificationTypesMap[event]; !ok {
+ notificationTypesMap[event] = make([]string, 0)
+ }
+ for _, n := range ns {
+ notificationTypesMap[event] = append(notificationTypesMap[event], n.GetEntitiyId())
+
+ if event == types.ValidatorExecutedProposalEventName {
+ proposalNotification, ok := n.(*ValidatorProposalNotification)
+ if !ok {
+ 
log.Error(fmt.Errorf("error casting proposal notification"), "", 0) + continue + } + totalBlockReward += proposalNotification.Reward + } + } + metrics.NotificationsQueued.WithLabelValues("push", string(event)).Inc() + } + + bodySummary := "" + for _, event := range types.EventSortOrder { + events := notificationTypesMap[event] + if len(events) == 0 { + continue + } + count := len(events) + if len(bodySummary) > 0 { + bodySummary += "\n" + } + plural := "" + if count > 1 { + plural = "s" + } + switch event { + case types.RocketpoolCollateralMaxReachedEventName, types.RocketpoolCollateralMinReachedEventName: + bodySummary += fmt.Sprintf("%s: %d node%s", types.EventLabel[event], count, plural) + case types.TaxReportEventName, types.NetworkLivenessIncreasedEventName: + bodySummary += fmt.Sprintf("%s: %d event%s", types.EventLabel[event], count, plural) + case types.EthClientUpdateEventName: + bodySummary += fmt.Sprintf("%s: %d client%s", types.EventLabel[event], count, plural) + case types.MonitoringMachineCpuLoadEventName, types.MonitoringMachineMemoryUsageEventName, types.MonitoringMachineDiskAlmostFullEventName, types.MonitoringMachineOfflineEventName: + bodySummary += fmt.Sprintf("%s: %d machine%s", types.EventLabel[event], count, plural) + case types.ValidatorExecutedProposalEventName: + bodySummary += fmt.Sprintf("%s: %d validator%s, Reward: %.3f ETH", types.EventLabel[event], count, plural, totalBlockReward) + case types.ValidatorGroupEfficiencyEventName: + bodySummary += fmt.Sprintf("%s: %d group%s", types.EventLabel[event], count, plural) + default: + bodySummary += fmt.Sprintf("%s: %d validator%s", types.EventLabel[event], count, plural) + } + if len(events) < 3 { + bodySummary += fmt.Sprintf(" (%s)", strings.Join(events, ",")) + } + } + + if len(bodySummary) > 1000 { // cap the notification body to 1000 characters (firebase limit) + bodySummary = bodySummary[:1000] + } + for _, userToken := range userTokens { + message := new(messaging.Message) + message.Token = userToken + message.APNS = new(messaging.APNSConfig) + message.APNS.Payload = new(messaging.APNSPayload) + message.APNS.Payload.Aps = new(messaging.Aps) + message.APNS.Payload.Aps.Sound = "default" + + notification := new(messaging.Notification) + notification.Title = fmt.Sprintf("%sInfo for epoch %d", getNetwork(), epoch) + notification.Body = bodySummary + message.Notification = notification + message.Data = map[string]string{ + "epoch": fmt.Sprintf("%d", epoch), + } + transitPushContent := types.TransitPushContent{ + Messages: []*messaging.Message{message}, + UserId: userID, + } + + pushMessages = append(pushMessages, transitPushContent) + } + } + } + } + + return pushMessages, nil +} + +func QueuePushNotification(epoch uint64, notificationsByUserID types.NotificationsPerUserId, tx *sqlx.Tx) error { + pushMessages, err := RenderPushMessagesForUserEvents(epoch, notificationsByUserID) + if err != nil { + return fmt.Errorf("error rendering push messages: %w", err) + } + + // now batch insert the push messages in one go + log.Infof("queueing %v push notifications", len(pushMessages)) + if len(pushMessages) == 0 { + return nil + } + type insertData struct { + Content types.TransitPushContent `db:"content"` + } + + insertRows := make([]insertData, 0, len(pushMessages)) + for _, pushMessage := range pushMessages { + insertRows = append(insertRows, insertData{ + Content: pushMessage, + }) + } + + _, err = tx.NamedExec(`INSERT INTO notification_queue (created, channel, content) VALUES (NOW(), 'push', :content)`, insertRows) + if 
err != nil {
+ return fmt.Errorf("error writing transit push to db: %w", err)
+ }
+ return nil
+}
+
+func QueueTestPushNotification(ctx context.Context, userId types.UserId, userDbConn *sqlx.DB, networkDbConn *sqlx.DB) error {
+ count, err := db.CountSentMessage("n_test_push", userId)
+ if err != nil {
+ return err
+ }
+ if count > 10 {
+ return fmt.Errorf("rate limit has been exceeded")
+ }
+ tokens, err := GetUserPushTokenByIds([]types.UserId{userId}, userDbConn)
+ if err != nil {
+ return err
+ }
+
+ messages := []*messaging.Message{}
+ for _, tokensOfUser := range tokens {
+ for _, token := range tokensOfUser {
+ log.Infof("sending test push to user %d with token %v", userId, token)
+ messages = append(messages, &messaging.Message{
+ Notification: &messaging.Notification{
+ Title: "Test Push",
+ Body: "This is a test push from beaconcha.in",
+ },
+ Token: token,
+ })
+ }
+ }
+
+ if len(messages) == 0 {
+ return fmt.Errorf("no push tokens found for user %v", userId)
+ }
+
+ transit := types.TransitPushContent{
+ Messages: messages,
+ UserId: userId,
+ }
+
+ _, err = networkDbConn.ExecContext(ctx, `INSERT INTO notification_queue (created, channel, content) VALUES (NOW(), 'push', $1)`, transit)
+
+ return err
+}
+
+func QueueWebhookNotifications(notificationsByUserID types.NotificationsPerUserId, tx *sqlx.Tx) error {
+ for userID, userNotifications := range notificationsByUserID {
+ var webhooks []types.UserWebhook
+ err := db.FrontendWriterDB.Select(&webhooks, `
+ SELECT
+ id,
+ user_id,
+ url,
+ retries,
+ event_names,
+ last_sent,
+ destination
+ FROM
+ users_webhooks
+ WHERE
+ user_id = $1 AND user_id NOT IN (SELECT user_id from users_notification_channels WHERE active = false and channel = $2)
+ `, userID, types.WebhookNotificationChannel)
+ // continue if the user does not have a webhook
+ if err == sql.ErrNoRows {
+ continue
+ }
+ if err != nil {
+ return fmt.Errorf("error querying users_webhooks, err: %w", err)
+ }
+ // webhook => [] notifications
+ discordNotifMap := make(map[uint64][]types.TransitDiscordContent)
+ notifs := make([]types.TransitWebhook, 0)
+ // send the notifications to each registered webhook
+ for _, w := range webhooks {
+ for dashboardId, notificationsPerDashboard := range userNotifications {
+ if dashboardId != 0 { // disable webhooks for dashboard notifications for now
+ continue
+ }
+ for _, notificationsPerGroup := range notificationsPerDashboard {
+ for event, notifications := range notificationsPerGroup {
+ // check if the webhook is subscribed to the type of event
+ eventSubscribed := slices.Contains(w.EventNames, string(event))
+
+ if eventSubscribed {
+ if len(notifications) > 0 {
+ // reset Retries
+ if w.Retries > 5 && w.LastSent.Valid && w.LastSent.Time.Add(time.Hour).Before(time.Now()) {
+ _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = 0 WHERE id = $1;`, w.ID)
+ if err != nil {
+ log.Error(err, "error updating users_webhooks table; setting retries to zero", 0)
+ continue
+ }
+ } else if w.Retries > 5 && !w.LastSent.Valid {
+ log.Warnf("webhook '%v' has more than 5 retries and does not have a valid last_sent timestamp", w.Url)
+ continue
+ }
+
+ if w.Retries >= 5 {
+ // skip this webhook until its retry counter is reset
+ continue
+ }
+ }
+
+ for _, n := range notifications {
+ if w.Destination.Valid && w.Destination.String == "webhook_discord" {
+ if _, exists := discordNotifMap[w.ID]; !exists {
+ discordNotifMap[w.ID] = make([]types.TransitDiscordContent, 0)
+ }
+ l_notifs := len(discordNotifMap[w.ID])
+ if l_notifs == 0 || 
len(discordNotifMap[w.ID][l_notifs-1].DiscordRequest.Embeds) >= 10 {
+ discordNotifMap[w.ID] = append(discordNotifMap[w.ID], types.TransitDiscordContent{
+ Webhook: w,
+ DiscordRequest: types.DiscordReq{
+ Username: utils.Config.Frontend.SiteDomain,
+ },
+ UserId: userID,
+ })
+ l_notifs++
+ }
+
+ fields := []types.DiscordEmbedField{
+ {
+ Name: "Epoch",
+ Value: fmt.Sprintf("[%[1]v](https://%[2]s/%[1]v)", n.GetEpoch(), utils.Config.Frontend.SiteDomain+"/epoch"),
+ Inline: false,
+ },
+ }
+
+ if strings.HasPrefix(string(n.GetEventName()), "monitoring") || n.GetEventName() == types.EthClientUpdateEventName || n.GetEventName() == types.RocketpoolCollateralMaxReachedEventName || n.GetEventName() == types.RocketpoolCollateralMinReachedEventName {
+ fields = append(fields,
+ types.DiscordEmbedField{
+ Name: "Target",
+ Value: fmt.Sprintf("%v", n.GetEventFilter()),
+ Inline: false,
+ })
+ }
+ discordNotifMap[w.ID][l_notifs-1].DiscordRequest.Embeds = append(discordNotifMap[w.ID][l_notifs-1].DiscordRequest.Embeds, types.DiscordEmbed{
+ Type: "rich",
+ Color: "16745472",
+ Description: n.GetLegacyInfo(),
+ Title: n.GetLegacyTitle(),
+ Fields: fields,
+ })
+ } else {
+ notifs = append(notifs, types.TransitWebhook{
+ Channel: w.Destination.String,
+ Content: types.TransitWebhookContent{
+ Webhook: w,
+ Event: types.WebhookEvent{
+ Network: utils.GetNetwork(),
+ Name: string(n.GetEventName()),
+ Title: n.GetLegacyTitle(),
+ Description: n.GetLegacyInfo(),
+ Epoch: n.GetEpoch(),
+ Target: n.GetEventFilter(),
+ },
+ UserId: userID,
+ },
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+ // process notifs
+ for _, n := range notifs {
+ _, err = tx.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), $1, $2);`, n.Channel, n.Content)
+ if err != nil {
+ log.Error(err, "error inserting into webhooks_queue", 0)
+ } else {
+ metrics.NotificationsQueued.WithLabelValues(n.Channel, n.Content.Event.Name).Inc()
+ }
+ }
+ // process discord notifs
+ for _, dNotifs := range discordNotifMap {
+ for _, n := range dNotifs {
+ _, err = tx.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), 'webhook_discord', $1);`, n)
+ if err != nil {
+ log.Error(err, "error inserting into webhooks_queue (discord)", 0)
+ continue
+ }
+ metrics.NotificationsQueued.WithLabelValues("webhook_discord", "multi").Inc()
+ }
+ }
+ }
+ return nil
+}
+
+func getNetwork() string {
+ domainParts := strings.Split(utils.Config.Frontend.SiteDomain, ".")
+ if len(domainParts) >= 3 {
+ return fmt.Sprintf("%s: ", cases.Title(language.English).String(domainParts[0]))
+ }
+ return ""
+}
+
+func getEventInfo(event types.EventName, format types.NotificationFormat, ns map[types.EventFilter]types.Notification) string {
+ switch event {
+ case types.SyncCommitteeSoonEventName:
+ return getSyncCommitteeSoonInfo(format, ns)
+ case "validator_balance_decreased":
+ return "<br>You will not receive any further balance decrease mails for these validators until the balance of a validator is increasing again."
+ }
+
+ return ""
+}
diff --git a/backend/pkg/notification/sending.go b/backend/pkg/notification/sending.go
new file mode 100644
index 000000000..5d280bfae
--- /dev/null
+++ b/backend/pkg/notification/sending.go
@@ -0,0 +1,506 @@
+package notification
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/gobitfly/beaconchain/pkg/commons/db"
+ "github.com/gobitfly/beaconchain/pkg/commons/log"
+ "github.com/gobitfly/beaconchain/pkg/commons/mail"
+ "github.com/gobitfly/beaconchain/pkg/commons/metrics"
+ "github.com/gobitfly/beaconchain/pkg/commons/services"
+ "github.com/gobitfly/beaconchain/pkg/commons/types"
+ "github.com/gobitfly/beaconchain/pkg/commons/utils"
+ "github.com/jmoiron/sqlx"
+ "github.com/lib/pq"
+)
+
+func InitNotificationSender() {
+ log.Infof("starting notifications-sender")
+ go notificationSender()
+}
+
+func notificationSender() {
+ for {
+ start := time.Now()
+ ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
+
+ conn, err := db.FrontendWriterDB.Conn(ctx)
+ if err != nil {
+ log.Error(err, "error creating connection", 0)
+ cancel()
+ continue
+ }
+
+ _, err = conn.ExecContext(ctx, `SELECT pg_advisory_lock(500)`)
+ if err != nil {
+ log.Error(err, "error getting advisory lock from db", 0)
+
+ err := conn.Close()
+ if err != nil {
+ log.Error(err, "error returning connection to connection pool", 0)
+ }
+ cancel()
+ continue
+ }
+
+ log.Infof("lock obtained")
+ err = dispatchNotifications()
+ if err != nil {
+ log.Error(err, "error dispatching notifications", 0)
+ }
+
+ err = garbageCollectNotificationQueue()
+ if err != nil {
+ log.Error(err, "error garbage collecting notification queue", 0)
+ }
+
+ log.InfoWithFields(log.Fields{"duration": time.Since(start)}, "notifications dispatched and garbage collected")
+ metrics.TaskDuration.WithLabelValues("service_notifications_sender").Observe(time.Since(start).Seconds())
+
+ unlocked := false
+ rows, err := conn.QueryContext(ctx, `SELECT pg_advisory_unlock(500)`)
+ if err != nil {
+ log.Error(err, "error executing advisory unlock", 0)
+
+ err = conn.Close()
+ if err != nil {
+ log.WarnWithStackTrace(err, "error returning connection to connection pool", 0)
+ }
+ cancel()
+ continue
+ }
+
+ for rows.Next() {
+ err = rows.Scan(&unlocked)
+ if err != nil {
+ log.Error(err, "error scanning advisory unlock result", 0)
+ }
+ }
+
+ if !unlocked {
+ log.Error(fmt.Errorf("pg_advisory_unlock returned %v", unlocked), "error releasing advisory lock", 0)
+ }
+
+ err = conn.Close()
+ if err != nil {
+ log.WarnWithStackTrace(err, "error returning connection to connection pool", 0)
+ }
+ cancel()
+
+ services.ReportStatus("notification-sender", "Running", nil)
+ time.Sleep(time.Second * 30)
+ }
+}
+
+// garbageCollectNotificationQueue deletes entries from the notification queue that have been processed
+func garbageCollectNotificationQueue() error {
+ rows, err := db.WriterDb.Exec(`DELETE FROM notification_queue WHERE (sent < now() - INTERVAL '30 minutes') OR (created < now() - INTERVAL '1 hour')`)
+ if err != nil {
+ return fmt.Errorf("error deleting from notification_queue: %w", err)
+ }
+
+ rowsAffected, _ := rows.RowsAffected()
+
+ log.Infof("deleted %v rows from the notification_queue", rowsAffected)
+
+ return nil
+}
+
+func dispatchNotifications() error {
+ err := sendEmailNotifications()
+ if err != nil {
+ return fmt.Errorf("error 
sending email notifications, err: %w", err) + } + + err = sendPushNotifications() + if err != nil { + return fmt.Errorf("error sending push notifications, err: %w", err) + } + + err = sendWebhookNotifications() + if err != nil { + return fmt.Errorf("error sending webhook notifications, err: %w", err) + } + + err = sendDiscordNotifications() + if err != nil { + return fmt.Errorf("error sending webhook discord notifications, err: %w", err) + } + + return nil +} + +func sendEmailNotifications() error { + var notificationQueueItem []types.TransitEmail + + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT + id, + created, + sent, + channel, + content + FROM notification_queue WHERE sent IS null AND channel = 'email' ORDER BY created ASC`) + if err != nil { + return fmt.Errorf("error querying notification queue, err: %w", err) + } + + log.Infof("processing %v email notifications", len(notificationQueueItem)) + + for _, n := range notificationQueueItem { + userInfo, err := db.GetUserInfo(context.Background(), uint64(n.Content.UserId), db.FrontendReaderDB) + if err != nil { + return err + } + err = mail.SendMailRateLimited(n.Content, int64(userInfo.PremiumPerks.EmailNotificationsPerDay), "n_emails") + if err != nil { + if !strings.Contains(err.Error(), "rate limit has been exceeded") { + metrics.Errors.WithLabelValues("notifications_send_email").Inc() + log.Error(err, "error sending email notification", 0) + } else { + metrics.NotificationsSent.WithLabelValues("email", "429").Inc() + } + } else { + metrics.NotificationsSent.WithLabelValues("email", "200").Inc() + } + _, err = db.WriterDb.Exec(`UPDATE notification_queue set sent = now() where id = $1`, n.Id) + if err != nil { + return fmt.Errorf("error updating sent status for email notification with id: %v, err: %w", n.Id, err) + } + } + return nil +} + +func sendPushNotifications() error { + var notificationQueueItem []types.TransitPush + + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT + id, + created, + sent, + channel, + content + FROM notification_queue WHERE sent IS null AND channel = 'push' ORDER BY created ASC`) + if err != nil { + return fmt.Errorf("error querying notification queue, err: %w", err) + } + + log.Infof("processing %v push notifications", len(notificationQueueItem)) + + batchSize := 500 + for _, n := range notificationQueueItem { + for b := 0; b < len(n.Content.Messages); b += batchSize { + start := b + end := b + batchSize + if len(n.Content.Messages) < end { + end = len(n.Content.Messages) + } + + err = SendPushBatch(n.Content.UserId, n.Content.Messages[start:end], false) + if err != nil { + metrics.Errors.WithLabelValues("notifications_send_push_batch").Inc() + log.Error(err, "error sending firebase batch job", 0) + } else { + metrics.NotificationsSent.WithLabelValues("push", "200").Add(float64(len(n.Content.Messages))) + } + + _, err = db.WriterDb.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) + if err != nil { + return fmt.Errorf("error updating sent status for push notification with id: %v, err: %w", n.Id, err) + } + } + } + return nil +} + +func sendWebhookNotifications() error { + var notificationQueueItem []types.TransitWebhook + + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT + id, + created, + sent, + channel, + content + FROM notification_queue WHERE sent IS null AND channel = 'webhook' ORDER BY created ASC`) + if err != nil { + return fmt.Errorf("error querying notification queue, err: %w", err) + } + client := &http.Client{Timeout: time.Second * 30} + + 
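+ // each queued webhook below is dispatched in its own goroutine: the POST is bounded by the client's 30s timeout,
+ // delayed by one second per already-failed attempt, and entries whose retry counter exceeds 5 are dropped from the
+ // queue instead of being sent again.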
log.Infof("processing %v webhook notifications", len(notificationQueueItem)) + + for _, n := range notificationQueueItem { + _, err := db.CountSentMessage("n_webhooks", n.Content.UserId) + if err != nil { + log.Error(err, "error counting sent webhook", 0) + } + + // do not retry after 5 attempts + if n.Content.Webhook.Retries > 5 { + _, err := db.WriterDb.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) + if err != nil { + return fmt.Errorf("error deleting from notification queue: %w", err) + } + continue + } + + reqBody := new(bytes.Buffer) + + err = json.NewEncoder(reqBody).Encode(n.Content) + if err != nil { + log.Error(err, "error marshalling webhook event", 0) + } + + _, err = url.Parse(n.Content.Webhook.Url) + if err != nil { + _, err := db.WriterDb.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) + if err != nil { + return fmt.Errorf("error deleting from notification queue: %w", err) + } + continue + } + + go func(n types.TransitWebhook) { + if n.Content.Webhook.Retries > 0 { + time.Sleep(time.Duration(n.Content.Webhook.Retries) * time.Second) + } + resp, err := client.Post(n.Content.Webhook.Url, "application/json", reqBody) + if err != nil { + log.Warnf("error sending webhook request: %v", err) + metrics.NotificationsSent.WithLabelValues("webhook", "error").Inc() + return + } else { + metrics.NotificationsSent.WithLabelValues("webhook", resp.Status).Inc() + } + defer resp.Body.Close() + + _, err = db.WriterDb.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) + if err != nil { + log.Error(err, "error updating notification_queue table", 0) + return + } + + if resp != nil && resp.StatusCode < 400 { + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = 0, last_sent = now() WHERE id = $1;`, n.Content.Webhook.ID) + if err != nil { + log.Error(err, "error updating users_webhooks table", 0) + return + } + } else { + var errResp types.ErrorResponse + + if resp != nil { + b, err := io.ReadAll(resp.Body) + if err != nil { + log.Error(err, "error reading body", 0) + } + + errResp.Status = resp.Status + errResp.Body = string(b) + } + + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = retries + 1, last_sent = now(), request = $2, response = $3 WHERE id = $1;`, n.Content.Webhook.ID, n.Content, errResp) + if err != nil { + log.Error(err, "error updating users_webhooks table", 0) + return + } + } + }(n) + } + return nil +} + +func sendDiscordNotifications() error { + var notificationQueueItem []types.TransitDiscord + + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT + id, + created, + sent, + channel, + content + FROM notification_queue WHERE sent IS null AND channel = 'webhook_discord' ORDER BY created ASC`) + if err != nil { + return fmt.Errorf("error querying notification queue, err: %w", err) + } + client := &http.Client{Timeout: time.Second * 30} + + log.Infof("processing %v discord webhook notifications", len(notificationQueueItem)) + webhookMap := make(map[uint64]types.UserWebhook) + + notifMap := make(map[uint64][]types.TransitDiscord) + // generate webhook id => discord req + // while mapping. 
aggregate embeds while doing so, up to 10 per req can be sent
+ for _, n := range notificationQueueItem {
+ // purge the event from existence if the retry counter is over 5
+ if n.Content.Webhook.Retries > 5 {
+ _, err = db.WriterDb.Exec(`DELETE FROM notification_queue where id = $1`, n.Id)
+ if err != nil {
+ log.Warnf("failed to delete notification from queue: %v", err)
+ }
+ continue
+ }
+ if _, exists := webhookMap[n.Content.Webhook.ID]; !exists {
+ webhookMap[n.Content.Webhook.ID] = n.Content.Webhook
+ }
+ if _, exists := notifMap[n.Content.Webhook.ID]; !exists {
+ notifMap[n.Content.Webhook.ID] = make([]types.TransitDiscord, 0)
+ }
+ notifMap[n.Content.Webhook.ID] = append(notifMap[n.Content.Webhook.ID], n)
+ }
+ for _, webhook := range webhookMap {
+ // todo: this has the potential to spin up thousands of go routines
+ // should use an errgroup instead if we decide to keep the approach
+ go func(webhook types.UserWebhook, reqs []types.TransitDiscord) {
+ defer func() {
+ // update retries counters in db based on end result
+ _, err := db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = $1, last_sent = now() WHERE id = $2;`, webhook.Retries, webhook.ID)
+ if err != nil {
+ log.Warnf("failed to update retries counter to %v for webhook %v: %v", webhook.Retries, webhook.ID, err)
+ }
+
+ // mark notifications as sent in db
+ ids := make([]uint64, 0)
+ for _, req := range reqs {
+ ids = append(ids, req.Id)
+ }
+ _, err = db.WriterDb.Exec(`UPDATE notification_queue SET sent = now() where id = ANY($1)`, pq.Array(ids))
+ if err != nil {
+ log.Warnf("failed to update sent for notifications in queue: %v", err)
+ }
+ }()
+
+ _, err := url.Parse(webhook.Url)
+ if err != nil {
+ log.Error(err, "error parsing url", 0, log.Fields{"webhook_id": webhook.ID})
+ return
+ }
+
+ for i := 0; i < len(reqs); i++ {
+ if webhook.Retries > 5 {
+ break // stop
+ }
+ // sleep between retries
+ time.Sleep(time.Duration(webhook.Retries) * time.Second)
+
+ reqBody := new(bytes.Buffer)
+ err := json.NewEncoder(reqBody).Encode(reqs[i].Content.DiscordRequest)
+ if err != nil {
+ log.Error(err, "error marshalling discord webhook event", 0)
+ continue // skip
+ }
+
+ resp, err := client.Post(webhook.Url, "application/json", reqBody)
+ if err != nil {
+ log.Warnf("failed sending discord webhook request %v: %v", webhook.ID, err)
+ metrics.NotificationsSent.WithLabelValues("webhook_discord", "error").Inc()
+ } else {
+ metrics.NotificationsSent.WithLabelValues("webhook_discord", resp.Status).Inc()
+ }
+ if resp != nil && resp.StatusCode < 400 {
+ webhook.Retries = 0
+ } else {
+ webhook.Retries++
+ var errResp types.ErrorResponse
+
+ if resp != nil {
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Error(err, "error reading body", 0)
+ } else {
+ errResp.Body = string(b)
+ }
+ errResp.Status = resp.Status
+ resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ log.WarnWithFields(map[string]interface{}{"errResp.Body": utils.FirstN(errResp.Body, 1000), "webhook.Url": webhook.Url}, "error pushing discord webhook")
+ }
+ _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET request = $2, response = $3 WHERE id = $1;`, webhook.ID, reqs[i].Content.DiscordRequest, errResp)
+ if err != nil {
+ log.Error(err, "error storing failure data in users_webhooks table", 0)
+ }
+ }
+
+ i-- // retry, IMPORTANT to be at the END of the ELSE, otherwise the wrong index will be used in the commands above!
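+ // together with the sleep at the top of this loop, decrementing i retries the same request with a linearly
+ // growing delay until webhook.Retries exceeds 5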
+ }
+ }
+ }(webhook, notifMap[webhook.ID])
+ }
+
+ return nil
+}
+
+func SendTestEmail(ctx context.Context, userId types.UserId, dbConn *sqlx.DB) error {
+ var email string
+ err := dbConn.GetContext(ctx, &email, `SELECT email FROM users WHERE id = $1`, userId)
+ if err != nil {
+ return err
+ }
+ content := types.TransitEmailContent{
+ UserId: userId,
+ Address: email,
+ Subject: "Test Email",
+ Email: types.Email{
+ Title: "beaconcha.in - Test Email",
+ Body: "This is a test email from beaconcha.in",
+ },
+ Attachments: []types.EmailAttachment{},
+ CreatedTs: time.Now(),
+ }
+ err = mail.SendMailRateLimited(content, 10, "n_test_emails")
+ if err != nil {
+ return fmt.Errorf("error sending test email, err: %w", err)
+ }
+
+ return nil
+}
+
+func SendTestWebhookNotification(ctx context.Context, userId types.UserId, webhookUrl string, isDiscordWebhook bool) error {
+ count, err := db.CountSentMessage("n_test_push", userId)
+ if err != nil {
+ return err
+ }
+ if count > 10 {
+ return fmt.Errorf("rate limit has been exceeded")
+ }
+
+ client := http.Client{Timeout: time.Second * 5}
+
+ if isDiscordWebhook {
+ req := types.DiscordReq{
+ Content: "This is a test notification from beaconcha.in",
+ }
+ reqBody := new(bytes.Buffer)
+ err := json.NewEncoder(reqBody).Encode(req)
+ if err != nil {
+ return fmt.Errorf("error marshalling discord webhook event: %w", err)
+ }
+ resp, err := client.Post(webhookUrl, "application/json", reqBody)
+ if err != nil {
+ return fmt.Errorf("error sending discord webhook request: %w", err)
+ }
+ defer resp.Body.Close()
+ } else {
+ // send a test webhook notification with the text "TEST" in the post body
+ reqBody := new(bytes.Buffer)
+ err := json.NewEncoder(reqBody).Encode(`{data: "TEST"}`)
+ if err != nil {
+ return fmt.Errorf("error marshalling webhook event: %w", err)
+ }
+ resp, err := client.Post(webhookUrl, "application/json", reqBody)
+ if err != nil {
+ return fmt.Errorf("error sending webhook request: %w", err)
+ }
+ defer resp.Body.Close()
+ }
+ return nil
+}
diff --git a/backend/pkg/notification/types.go b/backend/pkg/notification/types.go
new file mode 100644
index 000000000..754955cda
--- /dev/null
+++ b/backend/pkg/notification/types.go
@@ -0,0 +1,813 @@
+package notification
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "math/big"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/gobitfly/beaconchain/pkg/commons/log"
+ "github.com/gobitfly/beaconchain/pkg/commons/services"
+ "github.com/gobitfly/beaconchain/pkg/commons/types"
+ "github.com/gobitfly/beaconchain/pkg/commons/utils"
+)
+
+func formatValidatorLink(format types.NotificationFormat, validatorIndex interface{}) string {
+ switch format {
+ case types.NotifciationFormatHtml:
+ return fmt.Sprintf(`<a href="https://%v/validator/%v">%v</a>`, utils.Config.Frontend.SiteDomain, validatorIndex, validatorIndex)
+ case types.NotifciationFormatText:
+ return fmt.Sprintf(`%v`, validatorIndex)
+ case types.NotifciationFormatMarkdown:
+ return fmt.Sprintf(`[%d](https://%s/validator/%v)`, validatorIndex, utils.Config.Frontend.SiteDomain, validatorIndex)
+ }
+ return ""
+}
+func formatEpochLink(format types.NotificationFormat, epoch interface{}) string {
+ switch format {
+ case types.NotifciationFormatHtml:
+ return fmt.Sprintf(`<a href="https://%v/epoch/%v">%v</a>`, utils.Config.Frontend.SiteDomain, epoch, epoch)
+ case types.NotifciationFormatText:
+ return fmt.Sprintf(`%v`, epoch)
+ case types.NotifciationFormatMarkdown:
+ return fmt.Sprintf(`[%v](https://%s/epoch/%v)`, epoch, utils.Config.Frontend.SiteDomain, epoch)
+ }
+ return ""
+}
+func formatSlotLink(format types.NotificationFormat, slot interface{}) string {
+ switch format {
+ case types.NotifciationFormatHtml:
+ return fmt.Sprintf(`<a href="https://%v/slot/%v">%v</a>`, utils.Config.Frontend.SiteDomain, slot, slot)
+ case types.NotifciationFormatText:
+ return fmt.Sprintf(`%v`, slot)
+ case types.NotifciationFormatMarkdown:
+ return fmt.Sprintf(`[%v](https://%s/slot/%v)`, slot, utils.Config.Frontend.SiteDomain, slot)
+ }
+ return ""
+}
+
+func formatValidatorPrefixedDashboardAndGroupLink(format types.NotificationFormat, n types.Notification) string {
+ dashboardAndGroupInfo := ""
+ if n.GetDashboardId() != nil {
+ switch format {
+ case types.NotifciationFormatHtml:
+ dashboardAndGroupInfo = fmt.Sprintf(` of Group <b>%[2]v</b> in Dashboard <a href="https://%[1]v/dashboard/%[4]v">%[3]v</a>`, utils.Config.Frontend.SiteDomain, n.GetDashboardGroupName(), n.GetDashboardName(), *n.GetDashboardId())
+ case types.NotifciationFormatText:
+ dashboardAndGroupInfo = fmt.Sprintf(` of Group %[1]v in Dashboard %[2]v`, n.GetDashboardGroupName(), n.GetDashboardName())
+ case types.NotifciationFormatMarkdown:
+ dashboardAndGroupInfo = fmt.Sprintf(` of Group **%[1]v** in Dashboard [%[2]v](https://%[3]v/dashboard/%[4]v)`, n.GetDashboardGroupName(), n.GetDashboardName(), utils.Config.Frontend.SiteDomain, *n.GetDashboardId())
+ }
+ }
+ return dashboardAndGroupInfo
+}
+
+func formatPureDashboardAndGroupLink(format types.NotificationFormat, n types.Notification) string {
+ dashboardAndGroupInfo := ""
+ if n.GetDashboardId() != nil {
+ switch format {
+ case types.NotifciationFormatHtml:
+ dashboardAndGroupInfo = fmt.Sprintf(`Group <b>%[2]v</b> in Dashboard <a href="https://%[1]v/dashboard/%[4]v">%[3]v</a>`, utils.Config.Frontend.SiteDomain, n.GetDashboardGroupName(), n.GetDashboardName(), *n.GetDashboardId())
+ case types.NotifciationFormatText:
+ dashboardAndGroupInfo = fmt.Sprintf(`Group %[1]v in Dashboard %[2]v`, n.GetDashboardGroupName(), n.GetDashboardName())
+ case types.NotifciationFormatMarkdown:
+ dashboardAndGroupInfo = fmt.Sprintf(`Group **%[1]v** in Dashboard [%[2]v](https://%[3]v/dashboard/%[4]v)`, n.GetDashboardGroupName(), n.GetDashboardName(), utils.Config.Frontend.SiteDomain, *n.GetDashboardId())
+ }
+ }
+ return dashboardAndGroupInfo
+}
+
+type ValidatorProposalNotification struct {
+ types.NotificationBaseImpl
+
+ ValidatorIndex uint64
+ Slot uint64
+ Block uint64
+ Status uint64 // 0 = scheduled, 1 = executed, 2 = missed, 3 = orphaned
+ Reward float64
+}
+
+func (n *ValidatorProposalNotification) GetEntitiyId() string {
+ return fmt.Sprintf("%d", n.ValidatorIndex)
+}
+
+func (n *ValidatorProposalNotification) GetInfo(format types.NotificationFormat) string {
+ vali := formatValidatorLink(format, n.ValidatorIndex)
+ slot := formatSlotLink(format, n.Slot)
+ dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n)
+
+ switch n.Status {
+ case 0:
+ return fmt.Sprintf(`New scheduled block proposal at slot %s for Validator %s%s.`, slot, vali, dashboardAndGroupInfo)
+ case 1:
+ return fmt.Sprintf(`Validator %s%s proposed block at slot %s with %v %v execution reward.`, vali, dashboardAndGroupInfo, slot, n.Reward, utils.Config.Frontend.ElCurrency)
+ case 2:
+ return fmt.Sprintf(`Validator %s%s missed a block proposal at slot %s.`, vali, dashboardAndGroupInfo, slot)
+ case 3:
+ return fmt.Sprintf(`Validator %s%s had an orphaned block proposal at slot %s.`, vali, dashboardAndGroupInfo, slot)
+ default:
+ return "-"
+ }
+}
+
+func (n *ValidatorProposalNotification) GetLegacyInfo() string {
+ var generalPart, suffix string
+ vali := strconv.FormatUint(n.ValidatorIndex, 10)
+ slot := 
strconv.FormatUint(n.Slot, 10) + + switch n.Status { + case 0: + generalPart = fmt.Sprintf(`New scheduled block proposal at slot %s for Validator %s.`, slot, vali) + case 1: + generalPart = fmt.Sprintf(`Validator %s proposed block at slot %s with %v %v execution reward.`, vali, slot, n.Reward, utils.Config.Frontend.ElCurrency) + case 2: + generalPart = fmt.Sprintf(`Validator %s missed a block proposal at slot %s.`, vali, slot) + case 3: + generalPart = fmt.Sprintf(`Validator %s had an orphaned block proposal at slot %s.`, vali, slot) + } + return generalPart + suffix +} + +func (n *ValidatorProposalNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *ValidatorProposalNotification) GetLegacyTitle() string { + switch n.Status { + case 0: + return "Block Proposal Scheduled" + case 1: + return "New Block Proposal" + case 2: + return "Block Proposal Missed" + case 3: + return "Block Proposal Missed (Orphaned)" + } + return "-" +} + +type ValidatorUpcomingProposalNotification struct { + types.NotificationBaseImpl + + ValidatorIndex uint64 + Slot uint64 +} + +func (n *ValidatorUpcomingProposalNotification) GetEntitiyId() string { + return fmt.Sprintf("%d", n.ValidatorIndex) +} + +func (n *ValidatorUpcomingProposalNotification) GetInfo(format types.NotificationFormat) string { + vali := formatValidatorLink(format, n.ValidatorIndex) + slot := formatSlotLink(format, n.Slot) + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) + return fmt.Sprintf(`New scheduled block proposal at slot %s for Validator %s%s.`, slot, vali, dashboardAndGroupInfo) +} + +func (n *ValidatorUpcomingProposalNotification) GetLegacyInfo() string { + var generalPart, suffix string + vali := strconv.FormatUint(n.ValidatorIndex, 10) + slot := strconv.FormatUint(n.Slot, 10) + generalPart = fmt.Sprintf(`New scheduled block proposal at slot %s for Validator %s.`, slot, vali) + + return generalPart + suffix +} + +func (n *ValidatorUpcomingProposalNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *ValidatorUpcomingProposalNotification) GetLegacyTitle() string { + return "Upcoming Block Proposal" +} + +type ValidatorIsOfflineNotification struct { + types.NotificationBaseImpl + + ValidatorIndex uint64 +} + +func (n *ValidatorIsOfflineNotification) GetEntitiyId() string { + return fmt.Sprintf("%d", n.ValidatorIndex) +} + +// Overwrite specific methods +func (n *ValidatorIsOfflineNotification) GetInfo(format types.NotificationFormat) string { + vali := formatValidatorLink(format, n.ValidatorIndex) + epoch := formatEpochLink(format, n.LatestState) + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) + return fmt.Sprintf(`Validator %v%v is offline since epoch %s.`, vali, dashboardAndGroupInfo, epoch) +} + +func (n *ValidatorIsOfflineNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *ValidatorIsOfflineNotification) GetLegacyInfo() string { + return fmt.Sprintf(`Validator %v is offline since epoch %d.`, n.ValidatorIndex, n.Epoch) +} + +func (n *ValidatorIsOfflineNotification) GetLegacyTitle() string { + return "Validator is Offline" +} + +type ValidatorIsOnlineNotification struct { + types.NotificationBaseImpl + + ValidatorIndex uint64 +} + +func (n *ValidatorIsOnlineNotification) GetEntitiyId() string { + return fmt.Sprintf("%d", n.ValidatorIndex) +} + +// Overwrite specific methods +func (n *ValidatorIsOnlineNotification) GetInfo(format types.NotificationFormat) string { + vali := 
formatValidatorLink(format, n.ValidatorIndex)
+ epoch := formatEpochLink(format, n.Epoch)
+ dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n)
+ return fmt.Sprintf(`Validator %v%v is back online since epoch %v.`, vali, dashboardAndGroupInfo, epoch)
+}
+
+func (n *ValidatorIsOnlineNotification) GetTitle() string {
+ return n.GetLegacyTitle()
+}
+
+func (n *ValidatorIsOnlineNotification) GetLegacyInfo() string {
+ return fmt.Sprintf(`Validator %v is back online since epoch %v.`, n.ValidatorIndex, n.Epoch)
+}
+
+func (n *ValidatorIsOnlineNotification) GetLegacyTitle() string {
+ return "Validator Back Online"
+}
+
+type ValidatorGroupEfficiencyNotification struct {
+ types.NotificationBaseImpl
+
+ Threshold float64
+ Efficiency float64
+}
+
+func (n *ValidatorGroupEfficiencyNotification) GetEntitiyId() string {
+ return fmt.Sprintf("%s - %s", n.GetDashboardName(), n.GetDashboardGroupName())
+}
+
+// Overwrite specific methods
+func (n *ValidatorGroupEfficiencyNotification) GetInfo(format types.NotificationFormat) string {
+ dashboardAndGroupInfo := formatPureDashboardAndGroupLink(format, n)
+ epoch := formatEpochLink(format, n.Epoch)
+ return fmt.Sprintf(`%s efficiency of %.2f%% was below the threshold of %.2f%% in epoch %s.`, dashboardAndGroupInfo, n.Efficiency, n.Threshold, epoch)
+}
+
+func (n *ValidatorGroupEfficiencyNotification) GetTitle() string {
+ return "Low group efficiency"
+}
+
+func (n *ValidatorGroupEfficiencyNotification) GetLegacyInfo() string {
+ return n.GetInfo(types.NotifciationFormatText)
+}
+
+func (n *ValidatorGroupEfficiencyNotification) GetLegacyTitle() string {
+ return n.GetTitle()
+}
+
+type ValidatorAttestationNotification struct {
+ types.NotificationBaseImpl
+
+ ValidatorIndex uint64
+ ValidatorPublicKey string
+ Status uint64 // 0 = scheduled | missed, 1 = executed
+}
+
+func (n *ValidatorAttestationNotification) GetEntitiyId() string {
+ return fmt.Sprintf("%d", n.ValidatorIndex)
+}
+
+func (n *ValidatorAttestationNotification) GetInfo(format types.NotificationFormat) string {
+ dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n)
+ vali := formatValidatorLink(format, n.ValidatorIndex)
+ epoch := formatEpochLink(format, n.Epoch)
+
+ // the helpers above already render format-specific links, so the message itself is identical for all formats
+ switch n.Status {
+ case 0:
+ return fmt.Sprintf(`Validator %s%s missed an attestation in epoch %s.`, vali, dashboardAndGroupInfo, epoch)
+ case 1:
+ return fmt.Sprintf(`Validator %s%s submitted a successful attestation for epoch %s.`, vali, dashboardAndGroupInfo, epoch)
+ default:
+ return "-"
+ }
+}
+
+func (n *ValidatorAttestationNotification) GetTitle() string {
+ return n.GetLegacyTitle()
+}
+
+func (n *ValidatorAttestationNotification) GetLegacyInfo() string {
+ var generalPart = ""
+ switch n.Status {
+ case 0:
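+ // attestation status: 0 = missed, 1 = submitted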
generalPart = fmt.Sprintf(`Validator %v missed an attestation in epoch %v.`, n.ValidatorIndex, n.Epoch) + case 1: + generalPart = fmt.Sprintf(`Validator %v submitted a successful attestation in epoch %v.`, n.ValidatorIndex, n.Epoch) + } + return generalPart +} + +func (n *ValidatorAttestationNotification) GetLegacyTitle() string { + switch n.Status { + case 0: + return "Attestation Missed" + case 1: + return "Attestation Submitted" + } + return "-" +} + +type ValidatorGotSlashedNotification struct { + types.NotificationBaseImpl + + ValidatorIndex uint64 + Slasher uint64 + Reason string +} + +func (n *ValidatorGotSlashedNotification) GetEntitiyId() string { + return fmt.Sprintf("%d", n.ValidatorIndex) +} + +func (n *ValidatorGotSlashedNotification) GetInfo(format types.NotificationFormat) string { + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) + vali := formatValidatorLink(format, n.ValidatorIndex) + epoch := formatEpochLink(format, n.Epoch) + slasher := formatValidatorLink(format, n.Slasher) + + return fmt.Sprintf(`Validator %v%v has been slashed at epoch %v by validator %v for %s.`, vali, dashboardAndGroupInfo, epoch, slasher, n.Reason) +} + +func (n *ValidatorGotSlashedNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *ValidatorGotSlashedNotification) GetLegacyInfo() string { + generalPart := fmt.Sprintf(`Validator %v has been slashed at epoch %v by validator %v for %s.`, n.ValidatorIndex, n.Epoch, n.Slasher, n.Reason) + return generalPart +} + +func (n *ValidatorGotSlashedNotification) GetLegacyTitle() string { + return "Validator got Slashed" +} + +type ValidatorWithdrawalNotification struct { + types.NotificationBaseImpl + + ValidatorIndex uint64 + Epoch uint64 + Slot uint64 + Amount uint64 + Address []byte +} + +func (n *ValidatorWithdrawalNotification) GetEntitiyId() string { + return fmt.Sprintf("%v", n.ValidatorIndex) +} + +func (n *ValidatorWithdrawalNotification) GetInfo(format types.NotificationFormat) string { + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) + vali := formatValidatorLink(format, n.ValidatorIndex) + amount := utils.FormatClCurrencyString(n.Amount, utils.Config.Frontend.MainCurrency, 6, true, false, false) + generalPart := fmt.Sprintf(`An automatic withdrawal of %s has been processed for validator %s%s.`, amount, vali, dashboardAndGroupInfo) + + return generalPart +} + +func (n *ValidatorWithdrawalNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *ValidatorWithdrawalNotification) GetLegacyInfo() string { + generalPart := fmt.Sprintf(`An automatic withdrawal of %v has been processed for validator %v.`, utils.FormatClCurrencyString(n.Amount, utils.Config.Frontend.MainCurrency, 6, true, false, false), n.ValidatorIndex) + return generalPart +} + +func (n *ValidatorWithdrawalNotification) GetLegacyTitle() string { + return "Withdrawal Processed" +} + +type EthClientNotification struct { + types.NotificationBaseImpl + + EthClient string +} + +func (n *EthClientNotification) GetEntitiyId() string { + return n.EthClient +} + +func (n *EthClientNotification) GetInfo(format types.NotificationFormat) string { + clientUrls := map[string]string{ + "Geth": "https://github.com/ethereum/go-ethereum/releases", + "Nethermind": "https://github.com/NethermindEth/nethermind/releases", + "Teku": "https://github.com/ConsenSys/teku/releases", + "Prysm": "https://github.com/prysmaticlabs/prysm/releases", + "Nimbus": 
"https://github.com/status-im/nimbus-eth2/releases", + "Lighthouse": "https://github.com/sigp/lighthouse/releases", + "Erigon": "https://github.com/erigontech/erigon/releases", + "Rocketpool": "https://github.com/rocket-pool/smartnode-install/releases", + "MEV-Boost": "https://github.com/flashbots/mev-boost/releases", + "Lodestar": "https://github.com/chainsafe/lodestar/releases", + } + defaultUrl := "https://beaconcha.in/ethClients" + + switch format { + case types.NotifciationFormatHtml: + generalPart := fmt.Sprintf(`A new version for %s is available.`, n.EthClient) + url := clientUrls[n.EthClient] + if url == "" { + url = defaultUrl + } + return generalPart + " " + url + case types.NotifciationFormatText: + return fmt.Sprintf(`A new version for %s is available.`, n.EthClient) + case types.NotifciationFormatMarkdown: + url := clientUrls[n.EthClient] + if url == "" { + url = defaultUrl + } + + generalPart := fmt.Sprintf(`A new version for [%s](%s) is available.`, n.EthClient, url) + + return generalPart + } + return "" +} + +func (n *EthClientNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *EthClientNotification) GetLegacyInfo() string { + generalPart := fmt.Sprintf(`A new version for %s is available.`, n.EthClient) + return generalPart +} + +func (n *EthClientNotification) GetLegacyTitle() string { + return fmt.Sprintf("New %s update", n.EthClient) +} + +type MachineEvents struct { + SubscriptionID uint64 `db:"id"` + UserID types.UserId `db:"user_id"` + MachineName string `db:"machine"` + UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` + EventThreshold float64 `db:"event_threshold"` +} + +type MonitorMachineNotification struct { + types.NotificationBaseImpl + + MachineName string + EventThreshold float64 +} + +func (n *MonitorMachineNotification) GetEntitiyId() string { + return n.MachineName +} + +func (n *MonitorMachineNotification) GetInfo(format types.NotificationFormat) string { + return n.GetLegacyInfo() +} + +func (n *MonitorMachineNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *MonitorMachineNotification) GetLegacyInfo() string { + switch n.EventName { + case types.MonitoringMachineDiskAlmostFullEventName: + return fmt.Sprintf(`Your staking machine "%v" is running low on storage space.`, n.MachineName) + case types.MonitoringMachineOfflineEventName: + return fmt.Sprintf(`Your staking machine "%v" might be offline. 
It has not been seen for a couple minutes now.`, n.MachineName) + case types.MonitoringMachineCpuLoadEventName: + return fmt.Sprintf(`Your staking machine "%v" has reached your configured CPU usage threshold.`, n.MachineName) + case types.MonitoringMachineMemoryUsageEventName: + return fmt.Sprintf(`Your staking machine "%v" has reached your configured RAM threshold.`, n.MachineName) + } + return "" +} + +func (n *MonitorMachineNotification) GetLegacyTitle() string { + switch n.EventName { + case types.MonitoringMachineDiskAlmostFullEventName: + return "Storage Warning" + case types.MonitoringMachineOfflineEventName: + return "Staking Machine Offline" + case types.MonitoringMachineCpuLoadEventName: + return "High CPU Load" + case types.MonitoringMachineMemoryUsageEventName: + return "Memory Warning" + } + return "" +} + +func (n *MonitorMachineNotification) GetEventFilter() string { + return n.MachineName +} + +type TaxReportNotification struct { + types.NotificationBaseImpl +} + +func (n *TaxReportNotification) GetEntitiyId() string { + return "" +} + +func (n *TaxReportNotification) GetEmailAttachment() *types.EmailAttachment { + tNow := time.Now() + lastDay := time.Date(tNow.Year(), tNow.Month(), 1, 0, 0, 0, 0, time.UTC) + firstDay := lastDay.AddDate(0, -1, 0) + + q, err := url.ParseQuery(n.EventFilter) + + if err != nil { + log.Warnf("Failed to parse rewards report eventfilter: %v", err) + return nil + } + + currency := q.Get("currency") + + validators := []uint64{} + valSlice := strings.Split(q.Get("validators"), ",") + if len(valSlice) > 0 { + for _, val := range valSlice { + v, err := strconv.ParseUint(val, 10, 64) + if err != nil { + continue + } + validators = append(validators, v) + } + } else { + log.Warnf("Validators Not found in rewards report eventfilter") + return nil + } + + pdf := services.GetPdfReport(validators, currency, uint64(firstDay.Unix()), uint64(lastDay.Unix())) + + return &types.EmailAttachment{Attachment: pdf, Name: fmt.Sprintf("income_history_%v_%v.pdf", firstDay.Format("20060102"), lastDay.Format("20060102"))} +} + +func (n *TaxReportNotification) GetInfo(format types.NotificationFormat) string { + return n.GetLegacyInfo() +} + +func (n *TaxReportNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *TaxReportNotification) GetLegacyInfo() string { + generalPart := `Please find attached the income history of your selected validators.` + return generalPart +} + +func (n *TaxReportNotification) GetLegacyTitle() string { + return "Income Report" +} + +func (n *TaxReportNotification) GetEventFilter() string { + return n.EventFilter +} + +type NetworkNotification struct { + types.NotificationBaseImpl +} + +func (n *NetworkNotification) GetEntitiyId() string { + return "" +} + +func (n *NetworkNotification) GetInfo(format types.NotificationFormat) string { + switch format { + case types.NotifciationFormatHtml, types.NotifciationFormatText: + return fmt.Sprintf(`Network experienced finality issues. Learn more at https://%v/charts/network_liveness`, utils.Config.Frontend.SiteDomain) + case types.NotifciationFormatMarkdown: + return fmt.Sprintf(`Network experienced finality issues. [Learn more](https://%v/charts/network_liveness)`, utils.Config.Frontend.SiteDomain) + } + return "" +} + +func (n *NetworkNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *NetworkNotification) GetLegacyInfo() string { + generalPart := fmt.Sprintf(`Network experienced finality issues. 
Learn more at https://%v/charts/network_liveness`, utils.Config.Frontend.SiteDomain) + return generalPart +} + +func (n *NetworkNotification) GetLegacyTitle() string { + return "Beaconchain Network Issues" +} + +type RocketpoolNotification struct { + types.NotificationBaseImpl + ExtraData string +} + +func (n *RocketpoolNotification) GetEntitiyId() string { + return "" +} + +func (n *RocketpoolNotification) GetInfo(format types.NotificationFormat) string { + return n.GetLegacyInfo() +} + +func (n *RocketpoolNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *RocketpoolNotification) GetLegacyInfo() string { + switch n.EventName { + case types.RocketpoolCommissionThresholdEventName: + return fmt.Sprintf(`The current RPL commission rate of %v has reached your configured threshold.`, n.ExtraData) + case types.RocketpoolNewClaimRoundStartedEventName: + return `A new reward round has started. You can now claim your rewards from the previous round.` + case types.RocketpoolCollateralMaxReachedEventName: + return fmt.Sprintf(`Your RPL collateral has reached your configured threshold at %v%%.`, n.ExtraData) + case types.RocketpoolCollateralMinReachedEventName: + return fmt.Sprintf(`Your RPL collateral has reached your configured threshold at %v%%.`, n.ExtraData) + } + + return "" +} + +func (n *RocketpoolNotification) GetLegacyTitle() string { + switch n.EventName { + case types.RocketpoolCommissionThresholdEventName: + return `Rocketpool Commission` + case types.RocketpoolNewClaimRoundStartedEventName: + return `Rocketpool Claim Available` + case types.RocketpoolCollateralMaxReachedEventName: + return `Rocketpool Max Collateral` + case types.RocketpoolCollateralMinReachedEventName: + return `Rocketpool Min Collateral` + } + return "" +} + +type SyncCommitteeSoonNotification struct { + types.NotificationBaseImpl + ValidatorIndex uint64 + StartEpoch uint64 + EndEpoch uint64 +} + +func (n *SyncCommitteeSoonNotification) GetEntitiyId() string { + return fmt.Sprintf("%d", n.ValidatorIndex) +} + +func (n *SyncCommitteeSoonNotification) GetInfo(format types.NotificationFormat) string { + return getSyncCommitteeSoonInfo(format, map[types.EventFilter]types.Notification{ + types.EventFilter(n.EventFilter): n, + }) +} + +func (n *SyncCommitteeSoonNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *SyncCommitteeSoonNotification) GetLegacyInfo() string { + return getSyncCommitteeSoonLegacyInfo(map[types.EventFilter]types.Notification{ + types.EventFilter(n.EventFilter): n, + }) +} + +func (n *SyncCommitteeSoonNotification) GetLegacyTitle() string { + return `Sync Committee Duty` +} + +func getSyncCommitteeSoonLegacyInfo(ns map[types.EventFilter]types.Notification) string { + validators := []string{} + var startEpoch, endEpoch string + var inTime time.Duration + + i := 0 + for _, n := range ns { + n, ok := n.(*SyncCommitteeSoonNotification) + if !ok { + log.Error(nil, "Sync committee notification not of type syncCommitteeSoonNotification", 0) + return "" + } + + validators = append(validators, fmt.Sprintf("%d", n.ValidatorIndex)) + if i == 0 { + // startEpoch, endEpoch and inTime must be the same for all validators + startEpoch = fmt.Sprintf("%d", n.StartEpoch) + endEpoch = fmt.Sprintf("%d", n.EndEpoch) + + syncStartEpoch, err := strconv.ParseUint(startEpoch, 10, 64) + if err != nil { + inTime = utils.Day + } else { + inTime = time.Until(utils.EpochToTime(syncStartEpoch)) + } + inTime = inTime.Round(time.Second) + } + i++ + } + + if len(validators) > 0 { + 
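+ // build an "A, B, and C" style listing of the affected validator indices for the message below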
validatorsInfo := "" + if len(validators) == 1 { + validatorsInfo = fmt.Sprintf(`Your validator %s has been elected to be part of the next sync committee.`, validators[0]) + } else { + validatorsText := "" + for i, validator := range validators { + if i < len(validators)-1 { + validatorsText += fmt.Sprintf("%s, ", validator) + } else { + validatorsText += fmt.Sprintf("and %s", validator) + } + } + validatorsInfo = fmt.Sprintf(`Your validators %s have been elected to be part of the next sync committee.`, validatorsText) + } + return fmt.Sprintf(`%s The additional duties start at epoch %s, which is in %s and will last for about a day until epoch %s.`, validatorsInfo, startEpoch, inTime, endEpoch) + } + + return "" +} + +type BigFloat big.Float + +func (b *BigFloat) Value() (driver.Value, error) { + if b != nil { + return (*big.Float)(b).String(), nil + } + return nil, nil +} + +func (b *BigFloat) Scan(value interface{}) error { + if value == nil { + return errors.New("cannot cast nil to BigFloat") + } + + switch t := value.(type) { + case float64: + (*big.Float)(b).SetFloat64(t) + case []uint8: + _, ok := (*big.Float)(b).SetString(string(t)) + if !ok { + return fmt.Errorf("failed to scan []uint8 into BigFloat: %v", value) + } + case string: + _, ok := (*big.Float)(b).SetString(t) + if !ok { + return fmt.Errorf("failed to scan string into BigFloat: %v", value) + } + default: + return fmt.Errorf("could not scan type %T into BigFloat", t) + } + + return nil +} + +func (b *BigFloat) bigFloat() *big.Float { + return (*big.Float)(b) +} +func bigFloat(x float64) *big.Float { + return new(big.Float).SetFloat64(x) +} + +type WebhookQueue struct { + NotificationID uint64 `db:"id"` + Url string `db:"url"` + Retries uint64 `db:"retries"` + LastSent time.Time `db:"last_retry"` + Destination sql.NullString `db:"destination"` + Payload []byte `db:"payload"` + LastTry time.Time `db:"last_try"` +} diff --git a/backend/pkg/userservice/appsubscription_oracle.go b/backend/pkg/userservice/appsubscription_oracle.go index 0b078674e..7dbf2e1a7 100644 --- a/backend/pkg/userservice/appsubscription_oracle.go +++ b/backend/pkg/userservice/appsubscription_oracle.go @@ -11,6 +11,8 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" + "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" @@ -37,18 +39,21 @@ func CheckMobileSubscriptions() { receipts, err := db.GetAllAppSubscriptions() if err != nil { + metrics.Errors.WithLabelValues("appsub_verify_db_failed").Inc() log.Error(err, "error retrieving subscription data from db: %v", 0, nil) return } googleClient, err := initGoogle() if googleClient == nil { + metrics.Errors.WithLabelValues("appsub_verify_initgoogle_failed").Inc() log.Error(err, "error initializing google client: %v", 0, nil) return } appleClient, err := initApple() if err != nil { + metrics.Errors.WithLabelValues("appsub_verify_initapple_failed").Inc() log.Error(err, "error initializing apple client: %v", 0, nil) return } @@ -71,11 +76,14 @@ func CheckMobileSubscriptions() { if strings.Contains(err.Error(), "expired") { err = db.SetSubscriptionToExpired(nil, receipt.ID) if err != nil { + metrics.Errors.WithLabelValues("appsub_verify_write_failed").Inc() log.Error(err, "subscription set expired failed", 0, map[string]interface{}{"receiptID": receipt.ID}) }
continue } log.Warnf("subscription verification failed in service for [%v]: %v", receipt.ID, err) + metrics.Errors.WithLabelValues(fmt.Sprintf("appsub_verify_%s_failed", receipt.Store)).Inc() + continue } @@ -86,6 +94,8 @@ func CheckMobileSubscriptions() { updateValidationState(receipt, valid) } + services.ReportStatus("app_subscriptions_check", "Running", nil) + log.InfoWithFields(log.Fields{"subscriptions": len(receipts), "duration": time.Since(start)}, "subscription update completed") time.Sleep(time.Hour * 4) } @@ -233,13 +243,29 @@ func rejectReason(valid bool) string { return "expired" } +// first 3 trillion dollar company and you can't reuse ids +func mapAppleProductID(productID string) string { + mappings := map[string]string{ + "orca.yearly.apple": "orca.yearly", + "orca.apple": "orca", + "dolphin.yearly.apple": "dolphin.yearly", + "dolphin.apple": "dolphin", + "guppy.yearly.apple": "guppy.yearly", + "guppy.apple": "guppy", + } + if mapped, ok := mappings[productID]; ok { + return mapped + } + return productID +} + func verifyApple(apple *api.StoreClient, receipt *types.PremiumData) (*VerifyResponse, error) { response := &VerifyResponse{ Valid: false, ExpirationDate: 0, RejectReason: "", - ProductID: receipt.ProductID, // may be changed by this function to be different than receipt.ProductID - Receipt: receipt.Receipt, // may be changed by this function to be different than receipt.Receipt + ProductID: mapAppleProductID(receipt.ProductID), // may be changed by this function to be different than receipt.ProductID + Receipt: receipt.Receipt, // may be changed by this function to be different than receipt.Receipt } if apple == nil { @@ -300,7 +326,7 @@ func verifyApple(apple *api.StoreClient, receipt *types.PremiumData) (*VerifyRes response.RejectReason = "invalid_product_id" return response, nil } - response.ProductID = productId // update response to reflect the resolved product id + response.ProductID = mapAppleProductID(productId) // update response to reflect the resolved product id expiresDateFloat, ok := claims["expiresDate"].(float64) if !ok { diff --git a/backend/pkg/userservice/stripe_email_updater.go b/backend/pkg/userservice/stripe_email_updater.go index 2f790e20f..f539c8bd6 100644 --- a/backend/pkg/userservice/stripe_email_updater.go +++ b/backend/pkg/userservice/stripe_email_updater.go @@ -10,6 +10,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" ) @@ -50,6 +51,8 @@ func StripeEmailUpdater() { } } + services.ReportStatus("stripe_email_updater", "Running", nil) + time.Sleep(time.Minute) } } diff --git a/frontend/.env-example b/frontend/.env-example index 8e5b8de1b..ffb100613 100644 --- a/frontend/.env-example +++ b/frontend/.env-example @@ -1,15 +1,16 @@ -NUXT_PUBLIC_API_CLIENT: "" -NUXT_PUBLIC_LEGACY_API_CLIENT: "" -NUXT_PRIVATE_API_SERVER: "" -NUXT_PRIVATE_LEGACY_API_SERVER: "" -NUXT_PUBLIC_API_KEY: "" -NUXT_PUBLIC_DOMAIN: "" -NUXT_PUBLIC_STRIPE_BASE_URL: "" -NUXT_PUBLIC_LOG_IP: "" -NUXT_PUBLIC_SHOW_IN_DEVELOPMENT: "" -NUXT_PUBLIC_V1_DOMAIN: "" -NUXT_PUBLIC_LOG_FILE: "" -NUXT_PUBLIC_CHAIN_ID_BY_DEFAULT: "" -NUXT_PUBLIC_MAINTENANCE_TS: "1717700652" -NUXT_PUBLIC_DEPLOYMENT_TYPE: "development" -PRIVATE_SSR_SECRET: "" +NUXT_IS_API_MOCKED= +NUXT_PRIVATE_API_SERVER= +NUXT_PRIVATE_LEGACY_API_SERVER= +NUXT_PUBLIC_API_CLIENT= +NUXT_PUBLIC_API_KEY= +NUXT_PUBLIC_CHAIN_ID_BY_DEFAULT= 
+NUXT_PUBLIC_DEPLOYMENT_TYPE=development +NUXT_PUBLIC_DOMAIN= +NUXT_PUBLIC_LEGACY_API_CLIENT= +NUXT_PUBLIC_LOG_FILE= +NUXT_PUBLIC_LOG_IP= +NUXT_PUBLIC_MAINTENANCE_TS=1717700652 +NUXT_PUBLIC_SHOW_IN_DEVELOPMENT= +NUXT_PUBLIC_STRIPE_BASE_URL= +NUXT_PUBLIC_V1_DOMAIN= +PRIVATE_SSR_SECRET= diff --git a/frontend/.vscode/settings.json b/frontend/.vscode/settings.json index a97dbcf2a..3657d47ec 100644 --- a/frontend/.vscode/settings.json +++ b/frontend/.vscode/settings.json @@ -1,17 +1,44 @@ { "conventionalCommits.scopes": [ + "BcButton", + "BcInputUnit", + "BcLink", + "BcTablePager", + "BcToggle", + "DashboardChartSummaryChartFilter", + "DashboardGroupManagementModal", + "DashboardTableValidators", + "DashboardValidatorManagmentModal", + "NotificationMachinesTable", + "NotificationsClientsTable", + "NotificationsDashboardDialogEntity", + "NotificationsDashboardTable", + "NotificationsManagementModalWebhook", + "NotificationsManagementNetwork", + "NotificationsManagementSubscriptionDialog", + "NotificationsManagmentMachines", + "NotificationsNetworkTable", + "NotificationsOverview", + "NotificationsTableEmpty", + "a11y", "checkout", "ci", + "csrf", "customFetch", - "DashboardChartSummaryChartFilter", - "DashboardGroupManagementModal", + "dialog", "eslint", + "feature-flags", "git", + "git-blame-ignore", "i18n", "mainHeader", + "middleware", + "notifications", + "nuxt.config", "qrCode", - "vscode", - "DashboardValidatorManagmentModal" + "useNotificationsOverviewStore", + "useWindowSize", + "vscode" ], "editor.codeActionsOnSave": { "source.fixAll.eslint": "always" diff --git a/frontend/README.md b/frontend/README.md index 678f9fb89..7f1a5afb3 100644 --- a/frontend/README.md +++ b/frontend/README.md @@ -122,6 +122,17 @@ bun run preview Check out the [deployment documentation](https://nuxt.com/docs/getting-started/deployment) for more information. +## Get mocked api data + +If your `user` was added to the `ADMIN` or `DEV` group by the `api team`, you can get +`mocked data` from the `api` for certain `endpoints` by adding `?is_mocked=true` as a +`query parameter`. + +You can `turn on` mocked data `globally` for all `configured endpoints` +- by setting `NUXT_PUBLIC_IS_API_MOCKED=true` +in your [.env](.env) or +- running `npm run dev:mock:api` (See: [package.json](package.json)) + ## Descision Record We documented our decisions in the [decisions](decisions.md) file.
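Editor's note on the mocked-API README section above: a minimal TypeScript sketch of what the flag does on the request side, assuming `NUXT_PUBLIC_IS_API_MOCKED` surfaces on Nuxt's public runtime config and the custom fetch wrapper appends the parameter per request. The helper below is hypothetical and illustrative only, not part of this PR.

```ts
// Sketch only: appends ?is_mocked=true to an API path when mocking is on.
// `isApiMocked` would come from useRuntimeConfig().public in a Nuxt app
// (assumption, not confirmed by this PR).
export function appendMockedParam(path: string, isApiMocked: boolean): string {
  if (!isApiMocked) {
    return path
  }
  // resolve against a dummy origin so relative API paths parse cleanly
  const url = new URL(path, 'http://localhost')
  url.searchParams.set('is_mocked', 'true')
  return url.pathname + url.search
}

// e.g. appendMockedParam('/api/i/users/me', true)
// -> '/api/i/users/me?is_mocked=true'
```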
diff --git a/frontend/assets/css/_breakpoints.scss b/frontend/assets/css/_breakpoints.scss new file mode 100644 index 000000000..82403ddf1 --- /dev/null +++ b/frontend/assets/css/_breakpoints.scss @@ -0,0 +1,5 @@ +$breakpoint-sm: 640px; +$breakpoint-md: 768px; +$breakpoint-lg: 1024px; +$breakpoint-xl: 1280px; +$breakpoint-2xl: 1536px; \ No newline at end of file diff --git a/frontend/assets/css/colors.scss b/frontend/assets/css/colors.scss index 68d69fecb..a30263b6d 100644 --- a/frontend/assets/css/colors.scss +++ b/frontend/assets/css/colors.scss @@ -27,6 +27,7 @@ --light-blue: #66bce9; --melllow-blue: #7db5ec; --blue: #2e82ae; + --blue-500: #3b82f6; --teal-blue: #116897; --mint-green: #d3e4d4; --flashy-green: #90ed7d; diff --git a/frontend/assets/css/main.scss b/frontend/assets/css/main.scss index e551aa1fd..77fce977f 100644 --- a/frontend/assets/css/main.scss +++ b/frontend/assets/css/main.scss @@ -3,9 +3,17 @@ @import "~/assets/css/fonts.scss"; @import "~/assets/css/utils.scss"; -html { +html, +h1,h2,h3,h4,h5,h6, +ul, +li + { margin: 0; padding: 0; + font: inherit; +} +ul { + padding-inline-start: 1.5rem; } body { @include standard_text; @@ -96,16 +104,3 @@ svg { } } } - -// see https://tailwindcss.com/docs/screen-readers -sr-only { - position: absolute; - width: 1px; - height: 1px; - padding: 0; - margin: -1px; - overflow: hidden; - clip: rect(0, 0, 0, 0); - white-space: nowrap; - border-width: 0; -} diff --git a/frontend/assets/css/prime.scss b/frontend/assets/css/prime.scss index 1e80ebaa3..2816bfd93 100644 --- a/frontend/assets/css/prime.scss +++ b/frontend/assets/css/prime.scss @@ -634,11 +634,11 @@ div.p-accordion { * TODO: remove the .p-overflow-hidden and .p-overlay-mask class when PrimeVue is updated. * This is quick-fix for shifting display issues. 
**/ - .p-overflow-hidden { + div.p-overflow-hidden { overflow: hidden !important; /* Block scroll */ border-right: solid 5px transparent !important; } -.p-overlay-mask { - background: var(--container-background); -} +div.p-dialog-mask.p-overlay-mask { + background: rgba(0, 0, 0, 0.5); +} \ No newline at end of file diff --git a/frontend/components/BcFeatureFlag.vue b/frontend/components/BcFeatureFlag.vue new file mode 100644 index 000000000..5d69eb435 --- /dev/null +++ b/frontend/components/BcFeatureFlag.vue @@ -0,0 +1,14 @@ + + + + + diff --git a/frontend/components/bc/BcAccordion.vue b/frontend/components/bc/BcAccordion.vue new file mode 100644 index 000000000..a6c8dfb4b --- /dev/null +++ b/frontend/components/bc/BcAccordion.vue @@ -0,0 +1,152 @@ + + + + + diff --git a/frontend/components/bc/BcButton.vue b/frontend/components/bc/BcButton.vue index 1e159f2f6..df9c72015 100644 --- a/frontend/components/bc/BcButton.vue +++ b/frontend/components/bc/BcButton.vue @@ -22,6 +22,7 @@ const shouldAppearDisabled = computed( type="button" :disabled="isDisabled" :aria-disabled="isAriaDisabled" + class="bc-button" :class="{ 'bc-button--secondary': !shouldAppearDisabled && variant === 'secondary', 'bc-button--disabled': shouldAppearDisabled, @@ -39,7 +40,16 @@ const shouldAppearDisabled = computed( diff --git a/frontend/components/bc/BcCard.vue b/frontend/components/bc/BcCard.vue new file mode 100644 index 000000000..f2eed9a5a --- /dev/null +++ b/frontend/components/bc/BcCard.vue @@ -0,0 +1,17 @@ + + + + + diff --git a/frontend/components/bc/BcContentFilter.vue b/frontend/components/bc/BcContentFilter.vue index edd6e4038..bf3619eea 100644 --- a/frontend/components/bc/BcContentFilter.vue +++ b/frontend/components/bc/BcContentFilter.vue @@ -1,15 +1,14 @@ diff --git a/frontend/components/bc/BcDropdownToggle.vue b/frontend/components/bc/BcDropdownToggle.vue new file mode 100644 index 000000000..781a9ca03 --- /dev/null +++ b/frontend/components/bc/BcDropdownToggle.vue @@ -0,0 +1,129 @@ + + + + + diff --git a/frontend/components/bc/BcLink.vue b/frontend/components/bc/BcLink.vue index b33af7173..6a27c8630 100644 --- a/frontend/components/bc/BcLink.vue +++ b/frontend/components/bc/BcLink.vue @@ -1,7 +1,16 @@ - + + diff --git a/frontend/components/bc/format/FormatHash.vue b/frontend/components/bc/format/BcFormatHash.vue similarity index 100% rename from frontend/components/bc/format/FormatHash.vue rename to frontend/components/bc/format/BcFormatHash.vue diff --git a/frontend/components/bc/format/FormatPercent.vue b/frontend/components/bc/format/FormatPercent.vue index 4f9b41036..c6dfbc025 100644 --- a/frontend/components/bc/format/FormatPercent.vue +++ b/frontend/components/bc/format/FormatPercent.vue @@ -52,7 +52,8 @@ const data = computed(() => { } label = formatPercent(percent, config) if (props.comparePercent !== undefined) { - if (Math.abs(props.comparePercent - percent) <= 0.5) { + const thresholdToDifferentiateUnderperformerAndOverperformer = 0.25 + if (Math.abs(props.comparePercent - percent) <= thresholdToDifferentiateUnderperformerAndOverperformer) { className = 'text-equal' leadingIcon = faArrowsLeftRight compareResult = 'equal' diff --git a/frontend/components/bc/header/HeaderLogo.vue b/frontend/components/bc/header/HeaderLogo.vue index e2c506bd3..ce022fa17 100644 --- a/frontend/components/bc/header/HeaderLogo.vue +++ b/frontend/components/bc/header/HeaderLogo.vue @@ -5,13 +5,13 @@ defineProps<{ diff --git a/frontend/components/bc/input/BcInputText.vue b/frontend/components/bc/input/BcInputText.vue
index 5e26f7620..bc0fd03e6 100644 --- a/frontend/components/bc/input/BcInputText.vue +++ b/frontend/components/bc/input/BcInputText.vue @@ -4,14 +4,17 @@ import type { BcInputError } from '~/components/bc/input/BcInputError.vue' const idInput = useId() const input = defineModel() -const props = defineProps<{ +const props = withDefaults(defineProps<{ error?: BcInputError, inputWidth?: `${number}px`, label: string, + labelPosition?: 'left' | 'right', placeholder?: string, shouldAutoselect?: boolean, type?: HTMLInputElement['type'], -}>() +}>(), { + labelPosition: 'left', +}) onMounted(() => { if (props.shouldAutoselect) { const input = document.getElementById(idInput) @@ -25,7 +28,12 @@ onMounted(() => { diff --git a/frontend/components/bc/input/BcInputUnit.vue b/frontend/components/bc/input/BcInputUnit.vue new file mode 100644 index 000000000..4f1f21f81 --- /dev/null +++ b/frontend/components/bc/input/BcInputUnit.vue @@ -0,0 +1,37 @@ + + + + + diff --git a/frontend/components/bc/premium/BcPremiumGem.vue b/frontend/components/bc/premium/BcPremiumGem.vue index 45152a6f2..62fd2370c 100644 --- a/frontend/components/bc/premium/BcPremiumGem.vue +++ b/frontend/components/bc/premium/BcPremiumGem.vue @@ -1,21 +1,33 @@ diff --git a/frontend/components/bc/tab/BcTabPanel.vue b/frontend/components/bc/tab/BcTabPanel.vue new file mode 100644 index 000000000..cf88fadea --- /dev/null +++ b/frontend/components/bc/tab/BcTabPanel.vue @@ -0,0 +1,23 @@ + + + + + diff --git a/frontend/components/bc/table/BcTablePopoutEdit.vue b/frontend/components/bc/table/BcTablePopoutEdit.vue index dc9985ab2..365fa71d3 100644 --- a/frontend/components/bc/table/BcTablePopoutEdit.vue +++ b/frontend/components/bc/table/BcTablePopoutEdit.vue @@ -20,8 +20,9 @@ defineProps() diff --git a/frontend/components/bc/table/TablePager.vue b/frontend/components/bc/table/TablePager.vue index 2ca73de68..1f8ac4210 100644 --- a/frontend/components/bc/table/TablePager.vue +++ b/frontend/components/bc/table/TablePager.vue @@ -181,29 +181,27 @@ watch( @change="(event) => setPageSize(event.value)" /> -
-
- - - {{ - $t("table.showing", { - from: data.from, - to: data.to, - total: props.paging?.total_count, - }) - }} - - -
-
- -
+
+ + + {{ + $t("table.showing", { + from: data.from, + to: data.to, + total: props.paging?.total_count, + }) + }} + + +
+
+
@@ -212,30 +210,44 @@ watch( @use "~/assets/css/main.scss"; .bc-pageinator { - position: relative; - height: 78px; - display: flex; - flex-direction: column; - justify-content: center; - align-items: center; + margin-block-start: 20px; + display: grid; + gap: 10px; + justify-items: center; font-weight: var(--standard_text_medium_font_weight); - margin: var(--padding) var(--padding-large); - .very-last { - position: absolute; - display: flex; - flex-direction: row; - flex-wrap: wrap; - width: 100%; - pointer-events: none; + @media screen and (min-width: 768px) { + grid-template-rows: auto 1fr; + grid-template-columns: 1fr 1fr; + + .pager { + grid-column: span 2 + } + .left-info { + margin-inline-start: 16px; + justify-self: start; + } + .right-info{ + justify-self: end; + margin-inline-end: 16px; + } + } + @media screen and (min-width: 1024px) { + grid-template-rows: 1fr ; + grid-template-columns: 1fr auto 1fr; + .pager { + grid-column: 2/3; + grid-row: 1/2 + } .left-info { - margin-right: auto; - pointer-events: all; + grid-column: 1/2; + grid-row: 1/2 } .right-info { - margin-left: auto; - pointer-events: all; + grid-column: 3/4; + grid-row: 1/2 } + } .pager { @@ -299,13 +311,18 @@ watch( } } - @media screen and (max-width: 1399px) { - gap: var(--padding); - height: unset; - .very-last { - @media (max-width: 600px) { - position: relative; - } + .left-info { + display: none; + @media screen and (min-width: 640px) { + display: block; + } + } + .right-info { + font-weight: 400; + font-size: 12px; + @media screen and (min-width: 640px) { + font-weight: inherit; + font-size: inherit; } } } diff --git a/frontend/components/dashboard/DashboardHeader.vue b/frontend/components/dashboard/DashboardHeader.vue index 8db7f0d54..273853394 100644 --- a/frontend/components/dashboard/DashboardHeader.vue +++ b/frontend/components/dashboard/DashboardHeader.vue @@ -11,7 +11,7 @@ const { t: $t } = useTranslation() const { width } = useWindowSize() const route = useRoute() const router = useRouter() -const showInDevelopment = Boolean(useRuntimeConfig().public.showInDevelopment) +const { has } = useFeatureFlag() const { isLoggedIn } = useUserStore() const { dashboards } = useUserDashboardStore() @@ -90,17 +90,18 @@ const items = computed(() => { const cd = db as CookieDashboard return createMenuBarButton('validator', getDashboardName(cd), `${cd.hash !== undefined ? cd.hash : cd.id}`) })) - addToSortedItems($t('dashboard.header.account'), dashboards.value?.validator_dashboards?.slice(0, 1).map((db) => { - const cd = db as CookieDashboard - return createMenuBarButton('account', getDashboardName(cd), `${cd.hash ?? cd.id}`) - })) - const disabledTooltip = !showInDevelopment ? $t('common.coming_soon') : undefined - const onNotificationsPage = dashboardType.value === 'notifications' + if (has('feature-account_dashboards')) { + addToSortedItems($t('dashboard.header.account'), dashboards.value?.validator_dashboards?.slice(0, 1).map((db) => { + const cd = db as CookieDashboard + return createMenuBarButton('account', getDashboardName(cd), `${cd.hash ?? cd.id}`) + })) + } + const disabledTooltip = !has('feature-notifications') ? $t('common.coming_soon') : undefined addToSortedItems($t('notifications.title'), [ { - active: onNotificationsPage, + active: route.name === 'notifications', disabledTooltip, label: $t('notifications.title'), - route: !onNotificationsPage ? '/notifications' : undefined, + route: route.name !== 'notifications' ? 
'/notifications' : undefined, } ]) return buttons diff --git a/frontend/components/dashboard/DashboardValidatorManagementModal.vue b/frontend/components/dashboard/DashboardValidatorManagementModal.vue index 94762a647..206b5141e 100644 --- a/frontend/components/dashboard/DashboardValidatorManagementModal.vue +++ b/frontend/components/dashboard/DashboardValidatorManagementModal.vue @@ -5,7 +5,6 @@ import { import { FontAwesomeIcon } from '@fortawesome/vue-fontawesome' import type { DataTableSortEvent } from 'primevue/datatable' import { warn } from 'vue' -import { uniq } from 'lodash-es' import { BcDialogConfirm, BcPremiumModal, @@ -111,9 +110,8 @@ const onClose = () => { const mapIndexOrPubKey = ( validators?: VDBManageValidatorsTableRow[], ): string[] => { - return uniq( - validators?.map(vali => vali.index?.toString() ?? vali.public_key) ?? [], - ) + return [ ...new Set(validators?.map( + validator => validator.index?.toString() ?? validator.public_key)) ] } const changeGroup = async (body: ValidatorUpdateBody, groupId?: number) => { diff --git a/frontend/components/dashboard/DashboardValidatorOverview.vue b/frontend/components/dashboard/DashboardValidatorOverview.vue index bc90f1d71..5ecebdbc6 100644 --- a/frontend/components/dashboard/DashboardValidatorOverview.vue +++ b/frontend/components/dashboard/DashboardValidatorOverview.vue @@ -113,7 +113,7 @@ const aprInfos = TimeFrames.map(k => {{ validatorsOffline }} () @@ -199,6 +199,9 @@ const selectedSort = computed(() => ? `${sortField.value}:${getSortOrder(sortOrder.value)}` : undefined, ) +const isMobile = computed(() => { + return (width.value ?? 0) <= 800 +})
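Editor's note on the `mapIndexOrPubKey` change in DashboardValidatorManagementModal above: the rewrite drops the lodash `uniq` dependency in favor of a native `Set`, which also preserves first-seen insertion order. A standalone sketch of the same technique, where `Row` is a simplified stand-in for `VDBManageValidatorsTableRow`:

```ts
// Standalone sketch of the Set-based de-duplication used in the PR.
type Row = { index?: number, public_key: string }

function mapIndexOrPubKey(validators?: Row[]): string[] {
  // Set keeps first-seen insertion order, matching lodash's uniq();
  // a validator without an index falls back to its public key
  return [...new Set(validators?.map(v => v.index?.toString() ?? v.public_key))]
}

console.log(mapIndexOrPubKey([
  { index: 1, public_key: '0xaa' },
  { public_key: '0xbb' },
  { index: 1, public_key: '0xcc' }, // duplicate index collapses
])) // -> ['1', '0xbb']
```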