From 5804f7855ee262ff1e02d4a36c8bd80e19e8fae7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Fri, 1 Nov 2024 14:04:51 +0100 Subject: [PATCH 01/15] adjust deployment script to non AWS container registry --- devops/scripts/build-backend.sh | 11 ++++------- devops/scripts/deploy-backend.sh | 3 +-- devops/scripts/vars.sh | 11 ++++++----- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/devops/scripts/build-backend.sh b/devops/scripts/build-backend.sh index 597d371..6297938 100755 --- a/devops/scripts/build-backend.sh +++ b/devops/scripts/build-backend.sh @@ -10,22 +10,19 @@ DATE_UTC=$(date -u) TIMESTAMP_UTC=$(date +%s) COMMIT_HASH=$(git rev-parse --short HEAD || echo -n "local") -echo "Building Backend: ${APP_NAME}" +echo "Building Backend: ${IMAGE_NAME}" ./setup-prod.sh -aws ecr get-login-password --region "${APP_REGION}" | docker login --username AWS --password-stdin "${APP_OWNER}".dkr.ecr."${APP_REGION}".amazonaws.com - DOCKER_BUILDKIT=1 docker build \ -f app/Dockerfile \ --progress plain \ --platform linux/amd64 \ - -t "${APP_NAME}" \ + -t "${IMAGE_NAME}" \ --label build_date_utc="$DATE_UTC" \ --label build_timestamp_utc="$TIMESTAMP_UTC" \ --label git_commit_hash="$COMMIT_HASH" \ . 
-docker tag "${APP_NAME}":latest "${APP_OWNER}".dkr.ecr."${APP_REGION}".amazonaws.com/"${APP_NAME}":latest -docker tag "${APP_NAME}":latest "${APP_OWNER}".dkr.ecr."${APP_REGION}".amazonaws.com/"${APP_NAME}":"${COMMIT_HASH}" +docker tag "${IMAGE_NAME}":latest "${IMAGE_NAME}":"${COMMIT_HASH}" -docker push "${APP_OWNER}".dkr.ecr."${APP_REGION}".amazonaws.com/"${APP_NAME}":"${COMMIT_HASH}" +docker push "${IMAGE_NAME}":"${COMMIT_HASH}" diff --git a/devops/scripts/deploy-backend.sh b/devops/scripts/deploy-backend.sh index ac3a77a..9080113 100755 --- a/devops/scripts/deploy-backend.sh +++ b/devops/scripts/deploy-backend.sh @@ -7,6 +7,5 @@ source "$THIS_DIR"/vars.sh cd "$PROJECT_DIR"/app echo "Deploying Backend: ${APP_NAME}" -docker push "${APP_OWNER}".dkr.ecr."${APP_REGION}".amazonaws.com/"${APP_NAME}":latest +docker push "${IMAGE_NAME}":latest -aws autoscaling start-instance-refresh --region "${APP_REGION}" --auto-scaling-group-name "${APP_NAME}" diff --git a/devops/scripts/vars.sh b/devops/scripts/vars.sh index 70474db..706d58f 100755 --- a/devops/scripts/vars.sh +++ b/devops/scripts/vars.sh @@ -4,9 +4,10 @@ PROJECT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../../ -APP_SUFFIX="-$1" +if [ "$1" == "staging" ]; then + APP_SUFFIX="-staging" +else + APP_SUFFIX="" +fi -APP_OWNER=$(aws sts get-caller-identity --region us-east-1 --query "Account" --output text) -APP_REGION="us-east-1" -APP_NAME="bittensor-prometheus-proxy${APP_SUFFIX}" -CLOUDFRONT_BUCKET="${APP_NAME}-spa${APP_SUFFIX}" \ No newline at end of file +IMAGE_NAME="backenddevelopersltd/bittensor-prometheus-proxy${APP_SUFFIX}" \ No newline at end of file From 4f499846bb09f40d49c1f47df10ee0f4fd46f991 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Fri, 1 Nov 2024 14:06:53 +0100 Subject: [PATCH 02/15] remove redundant infra related files --- README_AWS.md | 137 --------------- README_vultr.md | 56 ------- deploy-to-aws.sh | 5 - deploy.sh | 35 ---- devops/packer/build.sh | 6 - 
devops/packer/docker-optimized.pkr.hcl | 76 --------- devops/tf/core/backend.tf | 16 -- devops/tf/core/main.tf | 21 --- devops/tf/core/terraform.tfvars | 2 - devops/tf/core/vars.tf | 7 - devops/tf/main/envs/common/main.tf | 80 --------- devops/tf/main/envs/common/vars.tf | 58 ------- devops/tf/main/envs/common/versions.tf | 10 -- devops/tf/main/envs/prod/backend.tf | 7 - devops/tf/main/envs/prod/main.tf | 80 --------- devops/tf/main/envs/prod/terraform.tfvars | 31 ---- devops/tf/main/envs/prod/vars.tf | 58 ------- devops/tf/main/envs/prod/versions.tf | 10 -- devops/tf/main/envs/staging/backend.tf | 7 - devops/tf/main/envs/staging/main.tf | 80 --------- devops/tf/main/envs/staging/terraform.tfvars | 31 ---- devops/tf/main/envs/staging/vars.tf | 58 ------- devops/tf/main/envs/staging/versions.tf | 10 -- devops/tf/main/files/authorized_keys | 1 - devops/tf/main/files/cloud-init.yml | 59 ------- devops/tf/main/files/docker-compose.yml | 102 ------------ devops/tf/main/files/env | 35 ---- devops/tf/main/files/envrc | 5 - .../files/nginx/config_helpers/brotli.conf | 44 ----- .../main/files/nginx/config_helpers/gzip.conf | 48 ------ .../monitoring_certs/monitoring-ca.crt.txt | 1 - .../nginx/monitoring_certs/monitoring.crt.txt | 1 - .../nginx/monitoring_certs/monitoring.key.txt | 1 - .../nginx/templates/default.conf.template | 108 ------------ devops/tf/main/modules/backend/alb.tf | 36 ---- devops/tf/main/modules/backend/domain.tf | 46 ------ .../tf/main/modules/backend/ec2-autoscale.tf | 66 -------- devops/tf/main/modules/backend/ec2-keys.tf | 4 - devops/tf/main/modules/backend/ec2-profile.tf | 27 --- .../backend/parameters.docker-compose.tf | 13 -- .../tf/main/modules/backend/parameters.env.tf | 31 ---- .../main/modules/backend/parameters.nginx.tf | 46 ------ .../modules/backend/parameters.ssh-keys.tf | 7 - devops/tf/main/modules/backend/security.tf | 63 ------- devops/tf/main/modules/backend/vars.tf | 22 --- devops/tf/main/modules/database/output.tf | 25 --- 
devops/tf/main/modules/database/rds.tf | 36 ---- devops/tf/main/modules/database/security.tf | 19 --- devops/tf/main/modules/database/vars.tf | 8 - devops/tf/main/modules/networking/network.tf | 12 -- devops/tf/main/modules/networking/output.tf | 15 -- devops/tf/main/modules/networking/vars.tf | 5 - devops/vultr_scripts/vultr-deploy.py | 29 ---- devops/vultr_scripts/vultr-get-instances.py | 29 ---- .../vultr_scripts/vultr-update-cloudinit.py | 30 ---- devops/vultr_tf/core/backend.tf | 10 -- devops/vultr_tf/core/main.tf | 52 ------ devops/vultr_tf/core/vars.tf | 8 - devops/vultr_tf/core/vars_cloud_init.tf | 41 ----- devops/vultr_tf/core/vultr-cloud-init.tftpl | 44 ----- letsencrypt_setup.sh | 22 --- nginx/config_helpers/brotli.conf | 44 ----- nginx/config_helpers/gzip.conf | 48 ------ nginx/monitoring_certs/README.md | 2 - nginx/templates/default.conf.template | 156 ------------------ promtail/config.yml | 27 --- 66 files changed, 2309 deletions(-) delete mode 100644 README_AWS.md delete mode 100644 README_vultr.md delete mode 100755 deploy-to-aws.sh delete mode 100755 deploy.sh delete mode 100755 devops/packer/build.sh delete mode 100644 devops/packer/docker-optimized.pkr.hcl delete mode 100644 devops/tf/core/backend.tf delete mode 100644 devops/tf/core/main.tf delete mode 100644 devops/tf/core/terraform.tfvars delete mode 100644 devops/tf/core/vars.tf delete mode 100644 devops/tf/main/envs/common/main.tf delete mode 100644 devops/tf/main/envs/common/vars.tf delete mode 100644 devops/tf/main/envs/common/versions.tf delete mode 100644 devops/tf/main/envs/prod/backend.tf delete mode 100644 devops/tf/main/envs/prod/main.tf delete mode 100644 devops/tf/main/envs/prod/terraform.tfvars delete mode 100644 devops/tf/main/envs/prod/vars.tf delete mode 100644 devops/tf/main/envs/prod/versions.tf delete mode 100644 devops/tf/main/envs/staging/backend.tf delete mode 100644 devops/tf/main/envs/staging/main.tf delete mode 100644 devops/tf/main/envs/staging/terraform.tfvars 
delete mode 100644 devops/tf/main/envs/staging/vars.tf delete mode 100644 devops/tf/main/envs/staging/versions.tf delete mode 100644 devops/tf/main/files/authorized_keys delete mode 100644 devops/tf/main/files/cloud-init.yml delete mode 100644 devops/tf/main/files/docker-compose.yml delete mode 100644 devops/tf/main/files/env delete mode 100644 devops/tf/main/files/envrc delete mode 100644 devops/tf/main/files/nginx/config_helpers/brotli.conf delete mode 100644 devops/tf/main/files/nginx/config_helpers/gzip.conf delete mode 100644 devops/tf/main/files/nginx/monitoring_certs/monitoring-ca.crt.txt delete mode 100644 devops/tf/main/files/nginx/monitoring_certs/monitoring.crt.txt delete mode 100644 devops/tf/main/files/nginx/monitoring_certs/monitoring.key.txt delete mode 100644 devops/tf/main/files/nginx/templates/default.conf.template delete mode 100644 devops/tf/main/modules/backend/alb.tf delete mode 100644 devops/tf/main/modules/backend/domain.tf delete mode 100644 devops/tf/main/modules/backend/ec2-autoscale.tf delete mode 100644 devops/tf/main/modules/backend/ec2-keys.tf delete mode 100644 devops/tf/main/modules/backend/ec2-profile.tf delete mode 100644 devops/tf/main/modules/backend/parameters.docker-compose.tf delete mode 100644 devops/tf/main/modules/backend/parameters.env.tf delete mode 100644 devops/tf/main/modules/backend/parameters.nginx.tf delete mode 100644 devops/tf/main/modules/backend/parameters.ssh-keys.tf delete mode 100644 devops/tf/main/modules/backend/security.tf delete mode 100644 devops/tf/main/modules/backend/vars.tf delete mode 100644 devops/tf/main/modules/database/output.tf delete mode 100644 devops/tf/main/modules/database/rds.tf delete mode 100644 devops/tf/main/modules/database/security.tf delete mode 100644 devops/tf/main/modules/database/vars.tf delete mode 100644 devops/tf/main/modules/networking/network.tf delete mode 100644 devops/tf/main/modules/networking/output.tf delete mode 100644 devops/tf/main/modules/networking/vars.tf 
delete mode 100644 devops/vultr_scripts/vultr-deploy.py delete mode 100644 devops/vultr_scripts/vultr-get-instances.py delete mode 100644 devops/vultr_scripts/vultr-update-cloudinit.py delete mode 100644 devops/vultr_tf/core/backend.tf delete mode 100644 devops/vultr_tf/core/main.tf delete mode 100644 devops/vultr_tf/core/vars.tf delete mode 100644 devops/vultr_tf/core/vars_cloud_init.tf delete mode 100644 devops/vultr_tf/core/vultr-cloud-init.tftpl delete mode 100755 letsencrypt_setup.sh delete mode 100644 nginx/config_helpers/brotli.conf delete mode 100644 nginx/config_helpers/gzip.conf delete mode 100644 nginx/monitoring_certs/README.md delete mode 100644 nginx/templates/default.conf.template delete mode 100644 promtail/config.yml diff --git a/README_AWS.md b/README_AWS.md deleted file mode 100644 index f81edf0..0000000 --- a/README_AWS.md +++ /dev/null @@ -1,137 +0,0 @@ -# Deploying to AWS - -The deployment is split into two steps: - -Files related to AWS deployment has been generated in `devops/` directory. - -By convention, projects that are meant to be deployed to AWS have a `deploy-to-aws.sh` script in the root dir and a `devops` directory. -The script builds the docker image, uploads it and tells AWS to reload the app (causing a new ec2 machine to be spawned). -In the `devops` directory you will find terraform configuration as well as packer files (for building the AMI). - -If you want to deploy your app to an AWS environment, you need to do following steps: - -- configuring your environment -- create an infra s3 bucket -- deploy `tf/core` (contains stuff common to all environments in given AWS Account) -- deploy chosen `tf/main/envs/` (by default staging and prod are generated) - -## Required software - -*AWS CLI* - -AWS recommends using profiles, when dealing with multiple AWS accounts. -To choose between environments, rather than switching access and secret keys, we just switch our profiles. 
-We can choose our profile name, which make it easier to recognize in which environment we operate. -To configure AWS environment, you need to have AWS CLI installed. -It is recommended to use AWS v2, which can be downloaded from: - - -*Terraform* You will also need terraform version 1.0.x. It is recommended to use `tfenv` to install terraform with correct version. -You can download an install it from - -*direnv* To avoid mistakes when switching environments (or regions), it is recommended to use `direnv` tools, which supports loading environment variables from .envrc file, placed in directory. -You can read about it here: - - -## Configure your environment - -To configure your AWS profile, please run: - -``` -$ aws configure --profile -``` - -And answer following questions: - -``` -AWS Access Key ID: ... -AWS Secret Access Key: ... -Default region name: us-east-1 (just an example) -Default output format [None]: (can be left blank) -``` - -Once, configured, you can switch your profile using `AWS_PROFILE=` env variable or by adding `--profile` option to your aws cli command. - -It's handy to create .envrc file in the project rood directory (where deploy-to-aws.sh is created) with content: - -``` -export AWS_PROFILE= -export AWS_REGION= -``` - -And then accept changes by using command: - -``` -$ direnv allow -``` - -After doing that, anytime you enter the project directory, correct profile will be loaded. - -## Configuring infra - -You only need to do this if you change anything in `devops` directory (or if you mess something up in AWS console and want to revert the changes). - -Create infra bucket - -Before being able to run terraform, we need to create S3 bucket, which will hold the state. -This bucket is used by all environments and needs to be globally unique. 
- -To create bucket, please type: - -``` -aws s3 mb --region us-east-1 s3://bittensor-prometheus-proxy-xeokrb -``` - -TF has a following structure: - -``` -|- devops - |- tf - |- core - |- main - |- envs - | |- staging - | |- prod - |- modules -``` - -You can run terraform from: - -- core -- envs/staging -- envs/prod - -directories. - -Directory *core* contains infrastructure code, which needs to be created BEFORE pushing docker image. -It is responsible for creating docker registries, which you can use, to push docker images to. - -Code places in *main* is the rest of the infrastructure, which is created after pushing docker image. - -Each of the environment (and core) can be applied by executing: - -``` -terraform init -terraform apply -``` - -IMPORTANT! the env variables for the apps (`.env` file) and `docker-compose.yml` are defined in terraform files, if you change any, you need to run `terraform apply` AND refresh the ec2 instance. -The same goes for AMI built by packer. - -## Adding secrets to the projects - -Cloud init is configured to provision EC2 machines spun up as part of this project's infrastructure. -As part of this provisioning, SSM parameters following a specific name convention are read and saved as files in EC2's home directory (RDS access details are managed in another way). -The naming convention is `/application/bittensor-prometheus-proxy/{env}/{path_of_the_file_to_be_created}`, for example `/application/project/staging/.env`. -A few such parameters are managed by terraform in this project (e.g. `.env`, `docker-compose.yml`) and more can be added. -In case you need to add confidential files (like a GCP credentials file) you can simply create appropriate SSM parameters. -These will only be accessible to people that access to AWS or EC2 machines, not to people who have access to this repository. 
-One such parameter, namely `/application/bittensor-prometheus-proxy/{env}/secret.env` is treated specially - if it exists (it doesn't by default) its contents are appended to `.env` during EC2 machine provisioning - this is a convenient way of supplying pieces of confidential information, like external systems' access keys to `.env`. - -## Deploying apps - -The docker containers are built with code you have locally, including any changes. -Building requires docker. -To successfully run `deploy-to-aws.sh` you first need to do `./setup.prod.sh`. -It uses the aws credentials stored as `AWS_PROFILE` variable. -If you don't set this variable, the `default` will be used. diff --git a/README_vultr.md b/README_vultr.md deleted file mode 100644 index d2a1150..0000000 --- a/README_vultr.md +++ /dev/null @@ -1,56 +0,0 @@ -# Deploying to Vultr - - -Files related to Vultr deployment are in `devops/vultr_scripts/` and `devops/vultr_tf`. - -To use Terraform, you need: -- create API key which you can find in Vultr -> Account -> API: -- allow your IP in Access Control section at the same page as above - -- To use ssh keys in Terraform, you need to create them in Vultr -> Account -> SSH Keys: - - -## Required software - - -*Terraform* You will also need terraform version 1.0.x. It is recommended to use `tfenv` to install terraform with correct version. -You can download an install it from - -*direnv* To avoid mistakes when switching environments (or regions), it is recommended to use `direnv` tools, which supports loading environment variables from .envrc file, placed in directory. -You can read about it here: - - -(recommended) *Vultr CLI* via to interact with Vultr instances post-deployment, eg. get their IP addressed, instances ID, update Cloud Init data. - -## Configure your environment - - -To deploy via Terraform, you have to fill all variables for Cloud Init in `vultr-cloud-init.tftpl`. 
-These variables can be sourced from various sources, recommended approach is to use environment variables in combination with `dotenv` - -To use Vultr CLI, you have to have API key, ideally in environment variable again. - -## Configuring infra - -You only need to do this if you change anything in `devops/vultr_tf` directory. - -TODO - currently TF Vultr is not configured to use S3 buckets. - - -``` -terraform init -terraform apply -``` - -## Adding secrets to the projects - -Project uses `.env` file in same directory as `docker-compose.yml` is, so any secrets should be sourced via this file. - -Do not commit secrets into the repository, this `.env` file can be updated via Cloud init executed when a new machines is spawned or reinstalled. The Cloud Init is located in Terraform directory: `vultr-cloud-init.tftpl`. - -After spawning the machines, Cloud Init can be updated via Vultr CLI, see `devops/vultr_scripts/vultr-update-cloudinit.py`. Updating Cloud Data in Terraform would mean destroying & recreating all instances from scratch. - - -## Deploying apps - -Deployment is executed via `post-receive` hook in git repository on each instance. See `devops/vultr_scripts/vultr-deploy.py` diff --git a/deploy-to-aws.sh b/deploy-to-aws.sh deleted file mode 100755 index 7bd35ab..0000000 --- a/deploy-to-aws.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -set -e -# shellcheck disable=2086 -./devops/scripts/build-backend.sh "$1" -./devops/scripts/deploy-backend.sh "$1" diff --git a/deploy.sh b/deploy.sh deleted file mode 100755 index ce18229..0000000 --- a/deploy.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/sh -# Copyright 2024, Reef Technologies (reef.pl), All rights reserved. -set -eux - -if [ ! 
-f ".env" ]; then - echo "\e[31mPlease setup the environment first!\e[0m"; - exit 1; -fi - -DOCKER_BUILDKIT=0 docker compose build - -# Tag the first image from multi-stage app Dockerfile to mark it as not dangling -BASE_IMAGE=$(docker images --quiet --filter="label=builder=true" | head -n1) -docker image tag "${BASE_IMAGE}" project/app-builder - -# collect static files to external storage while old app is still running -# docker compose run --rm app sh -c "python manage.py collectstatic --no-input" - -SERVICES=$(docker compose ps --services 2>/dev/null \ - | grep -v -e 'is not set' -e db -e redis) - -# shellcheck disable=2086 -docker compose stop $SERVICES - -# start the app container only in order to perform migrations -docker compose up -d db # in case it hasn't been launched before -docker compose run --rm app sh -c "python manage.py wait_for_database --timeout 10; python manage.py migrate" - -# start everything -docker compose up -d - -# Clean all dangling images -docker images --quiet --filter=dangling=true \ - | xargs --no-run-if-empty docker rmi \ - || true diff --git a/devops/packer/build.sh b/devops/packer/build.sh deleted file mode 100755 index 4abaa36..0000000 --- a/devops/packer/build.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -# initialize packer script and building image -packer init . 
- -packer build docker-optimized.pkr.hcl \ No newline at end of file diff --git a/devops/packer/docker-optimized.pkr.hcl b/devops/packer/docker-optimized.pkr.hcl deleted file mode 100644 index 021c509..0000000 --- a/devops/packer/docker-optimized.pkr.hcl +++ /dev/null @@ -1,76 +0,0 @@ -packer { - required_plugins { - amazon = { - version = ">= 1.0.0" - source = "github.com/hashicorp/amazon" - } - } -} - -local "ts" { - expression = formatdate("YYYYMMDDhhmm", timestamp()) -} - -source "amazon-ebs" "docker-optimized" { - ami_name = "docker-optimized-${local.ts}" - - source_ami_filter { - filters = { - virtualization-type = "hvm" - name = "*ubuntu-focal-20.04-amd64-minimal-*" - root-device-type = "ebs" - } - - owners = [ - "099720109477" - ] - - most_recent = true - } - - instance_type = "t3.medium" - ssh_username = "ubuntu" - force_deregister = true - encrypt_boot = true - - launch_block_device_mappings { - device_name = "/dev/sda1" - encrypted = true - volume_size = 20 - volume_type = "gp3" - delete_on_termination = true - } -} - -build { - sources = [ - "source.amazon-ebs.docker-optimized" - ] - - provisioner "shell" { - environment_vars = [ - "DEBIAN_FRONTEND=noninteractive" - ] - - inline = [ - "sleep 15", - - "sudo apt-get clean", - "sudo apt-get update", - "sudo apt-get install -y ca-certificates curl gnupg lsb-release unzip jq rng-tools", - - "curl https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -o awscliv2.zip", - "unzip awscliv2.zip", - "sudo ./aws/install", - "rm -rf ./aws ./awscliv2.zip", - - "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg", - "echo \"deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null", - "sudo apt-get update", - "sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin", - 
"sudo gpasswd -a ubuntu docker", - "sudo mkdir -p /etc/docker/", - "sudo service docker restart", - ] - } -} diff --git a/devops/tf/core/backend.tf b/devops/tf/core/backend.tf deleted file mode 100644 index 04cb55c..0000000 --- a/devops/tf/core/backend.tf +++ /dev/null @@ -1,16 +0,0 @@ -terraform { - backend "s3" { - bucket = "bittensor-prometheus-proxy-xeokrb" - key = "core.tfstate" - region = "us-east-1" - } - - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 4.0" - } - } - - required_version = "~> 1.0" -} \ No newline at end of file diff --git a/devops/tf/core/main.tf b/devops/tf/core/main.tf deleted file mode 100644 index 99e24a4..0000000 --- a/devops/tf/core/main.tf +++ /dev/null @@ -1,21 +0,0 @@ -provider "aws" { - region = var.region -} - -resource "aws_ecr_repository" "app" { - name = "${var.name}-prod" - image_tag_mutability = "MUTABLE" - - image_scanning_configuration { - scan_on_push = true - } -} - -resource "aws_ecr_repository" "app_staging" { - name = "${var.name}-staging" - image_tag_mutability = "MUTABLE" - - image_scanning_configuration { - scan_on_push = true - } -} \ No newline at end of file diff --git a/devops/tf/core/terraform.tfvars b/devops/tf/core/terraform.tfvars deleted file mode 100644 index 0a4693a..0000000 --- a/devops/tf/core/terraform.tfvars +++ /dev/null @@ -1,2 +0,0 @@ -region = "us-east-1" -name = "bittensor-prometheus-proxy" \ No newline at end of file diff --git a/devops/tf/core/vars.tf b/devops/tf/core/vars.tf deleted file mode 100644 index 1b56ec7..0000000 --- a/devops/tf/core/vars.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "name" { - type = string -} - -variable "region" { - type = string -} \ No newline at end of file diff --git a/devops/tf/main/envs/common/main.tf b/devops/tf/main/envs/common/main.tf deleted file mode 100644 index ea98a67..0000000 --- a/devops/tf/main/envs/common/main.tf +++ /dev/null @@ -1,80 +0,0 @@ -provider "aws" { - region = var.region -} - -data "aws_caller_identity" "env" {} 
- -data "aws_ami" "base_ami" { - most_recent = true - - filter { - name = "name" - values = [var.base_ami_image] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - owners = [var.base_ami_image_owner] -} - -locals { - ecr_base_url = "${data.aws_caller_identity.env.account_id}.dkr.ecr.${var.region}.amazonaws.com" - ecr_image = "${var.name}-${var.env}:latest" -} - -module "networking" { - source = "../../modules/networking" - - name = var.name - env = var.env - azs = var.azs - vpc_cidr = var.vpc_cidr - subnet_cidrs = var.subnet_cidrs -} - -module "database" { - source = "../../modules/database" - - name = var.name - env = var.env - vpc_id = module.networking.vpc_id - vpc_cidr = module.networking.vpc_cidr_block - azs = module.networking.azs - subnets = module.networking.subnets - instance_type = var.rds_instance_type -} - -module "backend" { - source = "../../modules/backend" - - depends_on = [ - module.database - ] - - base_ami_id = data.aws_ami.base_ami.image_id - - name = var.name - region = var.region - env = var.env - - ecr_base_url = local.ecr_base_url - ecr_image = local.ecr_image - - base_domain_name = var.base_domain_name - domain_name = var.domain_name - ec2_ssh_key = var.ec2_ssh_key - - vpc_id = module.networking.vpc_id - vpc_cidr = module.networking.vpc_cidr_block - - azs = module.networking.azs - subnets = module.networking.subnets - - instance_type = var.instance_type - health_check_type = var.autoscaling_health_check_type - account_id = data.aws_caller_identity.env.account_id - database = module.database -} \ No newline at end of file diff --git a/devops/tf/main/envs/common/vars.tf b/devops/tf/main/envs/common/vars.tf deleted file mode 100644 index 7e5527f..0000000 --- a/devops/tf/main/envs/common/vars.tf +++ /dev/null @@ -1,58 +0,0 @@ -variable "region" { - type = string -} - -variable "name" { - type = string -} - -variable "env" { - type = string -} - -variable "base_ami_image" { - type = string -} - -variable 
"base_ami_image_owner" { - type = string -} - -variable "vpc_cidr" { - type = string -} - -variable "subnet_cidrs" { - type = set(string) -} - -variable "azs" { - type = set(string) -} - -variable "base_domain_name" { - type = string -} - -variable "domain_name" { - type = string -} - -variable "ec2_ssh_key" { - type = string -} - -variable "instance_type" { - description = "EC2 instance type" - type = string -} - -variable "rds_instance_type" { - description = "RDS instance type" - type = string -} - -variable "autoscaling_health_check_type" { - description = "either EC2 or ELB" - type = string -} diff --git a/devops/tf/main/envs/common/versions.tf b/devops/tf/main/envs/common/versions.tf deleted file mode 100644 index 70cbf24..0000000 --- a/devops/tf/main/envs/common/versions.tf +++ /dev/null @@ -1,10 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 4.0" - } - } - - required_version = "~> 1.0" -} \ No newline at end of file diff --git a/devops/tf/main/envs/prod/backend.tf b/devops/tf/main/envs/prod/backend.tf deleted file mode 100644 index 64e78bb..0000000 --- a/devops/tf/main/envs/prod/backend.tf +++ /dev/null @@ -1,7 +0,0 @@ -terraform { - backend "s3" { - bucket = "bittensor-prometheus-proxy-xeokrb" - key = "prod/main.tfstate" - region = "us-east-1" - } -} diff --git a/devops/tf/main/envs/prod/main.tf b/devops/tf/main/envs/prod/main.tf deleted file mode 100644 index ea98a67..0000000 --- a/devops/tf/main/envs/prod/main.tf +++ /dev/null @@ -1,80 +0,0 @@ -provider "aws" { - region = var.region -} - -data "aws_caller_identity" "env" {} - -data "aws_ami" "base_ami" { - most_recent = true - - filter { - name = "name" - values = [var.base_ami_image] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - owners = [var.base_ami_image_owner] -} - -locals { - ecr_base_url = "${data.aws_caller_identity.env.account_id}.dkr.ecr.${var.region}.amazonaws.com" - ecr_image = "${var.name}-${var.env}:latest" -} - 
-module "networking" { - source = "../../modules/networking" - - name = var.name - env = var.env - azs = var.azs - vpc_cidr = var.vpc_cidr - subnet_cidrs = var.subnet_cidrs -} - -module "database" { - source = "../../modules/database" - - name = var.name - env = var.env - vpc_id = module.networking.vpc_id - vpc_cidr = module.networking.vpc_cidr_block - azs = module.networking.azs - subnets = module.networking.subnets - instance_type = var.rds_instance_type -} - -module "backend" { - source = "../../modules/backend" - - depends_on = [ - module.database - ] - - base_ami_id = data.aws_ami.base_ami.image_id - - name = var.name - region = var.region - env = var.env - - ecr_base_url = local.ecr_base_url - ecr_image = local.ecr_image - - base_domain_name = var.base_domain_name - domain_name = var.domain_name - ec2_ssh_key = var.ec2_ssh_key - - vpc_id = module.networking.vpc_id - vpc_cidr = module.networking.vpc_cidr_block - - azs = module.networking.azs - subnets = module.networking.subnets - - instance_type = var.instance_type - health_check_type = var.autoscaling_health_check_type - account_id = data.aws_caller_identity.env.account_id - database = module.database -} \ No newline at end of file diff --git a/devops/tf/main/envs/prod/terraform.tfvars b/devops/tf/main/envs/prod/terraform.tfvars deleted file mode 100644 index b9c557a..0000000 --- a/devops/tf/main/envs/prod/terraform.tfvars +++ /dev/null @@ -1,31 +0,0 @@ -# each of this vars can be overridden by adding ENVIRONMENT variable with name: -# TF_VAR_var_name="value" - -name = "bittensor-prometheus-proxy" -region = "us-east-1" -env = "prod" - -# VPC and subnet CIDR settings, change them if you need to pair -# multiple CIDRs (i.e. 
with different component) -vpc_cidr = "10.2.0.0/16" -subnet_cidrs = ["10.2.1.0/24", "10.2.2.0/24"] -azs = ["us-east-1c", "us-east-1d"] - -# By default, we have an ubuntu image -base_ami_image = "*ubuntu-focal-20.04-amd64-minimal-*" -base_ami_image_owner = "099720109477" - -# domain setting -base_domain_name = "fake-domain.com" -domain_name = "api.fake-domain.com" - -# default ssh key -ec2_ssh_key = "" - -instance_type = "t3.medium" -rds_instance_type = "db.t3.small" - -# defines if we use EC2-only healthcheck or ELB healthcheck -# EC2 healthcheck reacts only on internal EC2 checks (i.e. if machine cannot be reached) -# recommended for staging = EC2, for prod = ELB -autoscaling_health_check_type = "ELB" diff --git a/devops/tf/main/envs/prod/vars.tf b/devops/tf/main/envs/prod/vars.tf deleted file mode 100644 index 7e5527f..0000000 --- a/devops/tf/main/envs/prod/vars.tf +++ /dev/null @@ -1,58 +0,0 @@ -variable "region" { - type = string -} - -variable "name" { - type = string -} - -variable "env" { - type = string -} - -variable "base_ami_image" { - type = string -} - -variable "base_ami_image_owner" { - type = string -} - -variable "vpc_cidr" { - type = string -} - -variable "subnet_cidrs" { - type = set(string) -} - -variable "azs" { - type = set(string) -} - -variable "base_domain_name" { - type = string -} - -variable "domain_name" { - type = string -} - -variable "ec2_ssh_key" { - type = string -} - -variable "instance_type" { - description = "EC2 instance type" - type = string -} - -variable "rds_instance_type" { - description = "RDS instance type" - type = string -} - -variable "autoscaling_health_check_type" { - description = "either EC2 or ELB" - type = string -} diff --git a/devops/tf/main/envs/prod/versions.tf b/devops/tf/main/envs/prod/versions.tf deleted file mode 100644 index 70cbf24..0000000 --- a/devops/tf/main/envs/prod/versions.tf +++ /dev/null @@ -1,10 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 
4.0" - } - } - - required_version = "~> 1.0" -} \ No newline at end of file diff --git a/devops/tf/main/envs/staging/backend.tf b/devops/tf/main/envs/staging/backend.tf deleted file mode 100644 index cb15dec..0000000 --- a/devops/tf/main/envs/staging/backend.tf +++ /dev/null @@ -1,7 +0,0 @@ -terraform { - backend "s3" { - bucket = "bittensor-prometheus-proxy-xeokrb" - key = "staging/main.tfstate" - region = "us-east-1" - } -} diff --git a/devops/tf/main/envs/staging/main.tf b/devops/tf/main/envs/staging/main.tf deleted file mode 100644 index ea98a67..0000000 --- a/devops/tf/main/envs/staging/main.tf +++ /dev/null @@ -1,80 +0,0 @@ -provider "aws" { - region = var.region -} - -data "aws_caller_identity" "env" {} - -data "aws_ami" "base_ami" { - most_recent = true - - filter { - name = "name" - values = [var.base_ami_image] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - owners = [var.base_ami_image_owner] -} - -locals { - ecr_base_url = "${data.aws_caller_identity.env.account_id}.dkr.ecr.${var.region}.amazonaws.com" - ecr_image = "${var.name}-${var.env}:latest" -} - -module "networking" { - source = "../../modules/networking" - - name = var.name - env = var.env - azs = var.azs - vpc_cidr = var.vpc_cidr - subnet_cidrs = var.subnet_cidrs -} - -module "database" { - source = "../../modules/database" - - name = var.name - env = var.env - vpc_id = module.networking.vpc_id - vpc_cidr = module.networking.vpc_cidr_block - azs = module.networking.azs - subnets = module.networking.subnets - instance_type = var.rds_instance_type -} - -module "backend" { - source = "../../modules/backend" - - depends_on = [ - module.database - ] - - base_ami_id = data.aws_ami.base_ami.image_id - - name = var.name - region = var.region - env = var.env - - ecr_base_url = local.ecr_base_url - ecr_image = local.ecr_image - - base_domain_name = var.base_domain_name - domain_name = var.domain_name - ec2_ssh_key = var.ec2_ssh_key - - vpc_id = module.networking.vpc_id - 
vpc_cidr = module.networking.vpc_cidr_block - - azs = module.networking.azs - subnets = module.networking.subnets - - instance_type = var.instance_type - health_check_type = var.autoscaling_health_check_type - account_id = data.aws_caller_identity.env.account_id - database = module.database -} \ No newline at end of file diff --git a/devops/tf/main/envs/staging/terraform.tfvars b/devops/tf/main/envs/staging/terraform.tfvars deleted file mode 100644 index 5ae5ed3..0000000 --- a/devops/tf/main/envs/staging/terraform.tfvars +++ /dev/null @@ -1,31 +0,0 @@ -# each of this vars can be overridden by adding ENVIRONMENT variable with name: -# TF_VAR_var_name="value" - -name = "bittensor-prometheus-proxy" -region = "us-east-1" -env = "staging" - -# VPC and subnet CIDR settings, change them if you need to pair -# multiple CIDRs (i.e. with different component) -vpc_cidr = "10.20.0.0/16" -subnet_cidrs = ["10.20.1.0/24", "10.20.2.0/24"] -azs = ["us-east-1a", "us-east-1b"] - -# By default, we have an ubuntu image -base_ami_image = "*ubuntu-focal-20.04-amd64-minimal-*" -base_ami_image_owner = "099720109477" - -# domain setting -base_domain_name = "fake-domain.com" -domain_name = "staging.api.fake-domain.com" - -# default ssh key -ec2_ssh_key = "" - -instance_type = "t3.medium" -rds_instance_type = "db.t3.small" - -# defines if we use EC2-only healthcheck or ELB healthcheck -# EC2 healthcheck reacts only on internal EC2 checks (i.e. 
if machine cannot be reached) -# recommended for staging = EC2, for prod = ELB -autoscaling_health_check_type = "EC2" diff --git a/devops/tf/main/envs/staging/vars.tf b/devops/tf/main/envs/staging/vars.tf deleted file mode 100644 index 7e5527f..0000000 --- a/devops/tf/main/envs/staging/vars.tf +++ /dev/null @@ -1,58 +0,0 @@ -variable "region" { - type = string -} - -variable "name" { - type = string -} - -variable "env" { - type = string -} - -variable "base_ami_image" { - type = string -} - -variable "base_ami_image_owner" { - type = string -} - -variable "vpc_cidr" { - type = string -} - -variable "subnet_cidrs" { - type = set(string) -} - -variable "azs" { - type = set(string) -} - -variable "base_domain_name" { - type = string -} - -variable "domain_name" { - type = string -} - -variable "ec2_ssh_key" { - type = string -} - -variable "instance_type" { - description = "EC2 instance type" - type = string -} - -variable "rds_instance_type" { - description = "RDS instance type" - type = string -} - -variable "autoscaling_health_check_type" { - description = "either EC2 or ELB" - type = string -} diff --git a/devops/tf/main/envs/staging/versions.tf b/devops/tf/main/envs/staging/versions.tf deleted file mode 100644 index 70cbf24..0000000 --- a/devops/tf/main/envs/staging/versions.tf +++ /dev/null @@ -1,10 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 4.0" - } - } - - required_version = "~> 1.0" -} \ No newline at end of file diff --git a/devops/tf/main/files/authorized_keys b/devops/tf/main/files/authorized_keys deleted file mode 100644 index 737d852..0000000 --- a/devops/tf/main/files/authorized_keys +++ /dev/null @@ -1 +0,0 @@ -${ec2_ssh_key} \ No newline at end of file diff --git a/devops/tf/main/files/cloud-init.yml b/devops/tf/main/files/cloud-init.yml deleted file mode 100644 index 652d51a..0000000 --- a/devops/tf/main/files/cloud-init.yml +++ /dev/null @@ -1,59 +0,0 @@ -#cloud-config -groups: - - docker - 
-system_info: - default_user: - groups: [docker] - -write_files: - - path: /home/ubuntu/installer.sh - permissions: '0755' - content: | - apt-get clean && apt-get update && apt-get install -y ca-certificates curl gnupg lsb-release unzip jq rng-tools - - curl https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -o awscliv2.zip - unzip awscliv2.zip - ./aws/install - rm -rf ./aws ./awscliv2.zip - - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg - echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - apt-get update - apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin - gpasswd -a ubuntu docker - mkdir -p /etc/docker/ - service docker restart - - - path: /home/ubuntu/cloud-init.sh - permissions: '0755' - content: | - #!/bin/bash - - export APP_NAME=${name} - export APP_ENV=${env} - - aws ssm describe-parameters | jq -r '.Parameters[].Name' | grep "\/application\/$APP_NAME\/$APP_ENV" | sed "s/\/application.*$APP_ENV\///" | while read -r FILE; do - mkdir -p $(dirname "$FILE"); - aws ssm get-parameter --name "/application/$APP_NAME/$APP_ENV/$FILE" --output text --with-decrypt --query 'Parameter.Value' | sed "s/###//g" > "$FILE"; - done - - source .envrc - - export INSTANCE_ID_SUBST=`wget http://169.254.169.254/latest/meta-data/instance-id -O- --timeout=5 --tries=1` - [ -z "$INSTANCE_ID_SUBST" ] && export INSTANCE_ID_SUBST='{{.FullID}}' - echo "INSTANCE_ID_SUBST=$INSTANCE_ID_SUBST" >> .env - - [ -f secret.env ] && cat secret.env >> .env - - - aws ecr get-login-password --region ${region} | docker login --username AWS --password-stdin "$AWS_ECR_BASE_URL" - docker compose up -d - -runcmd: - - chown -R ubuntu:ubuntu /home/ubuntu - - cd /home/ubuntu/ - - - "[ -f ./installer.sh ] && ./installer.sh" - - - sudo -u 
ubuntu ./cloud-init.sh \ No newline at end of file diff --git a/devops/tf/main/files/docker-compose.yml b/devops/tf/main/files/docker-compose.yml deleted file mode 100644 index 3b73b7a..0000000 --- a/devops/tf/main/files/docker-compose.yml +++ /dev/null @@ -1,102 +0,0 @@ -version: '3.7' - -services: - app: - image: ${ecr_base_url}/${ecr_image} - init: true - restart: always - env_file: ./.env - - environment: - # Add this variable to all containers that should dump Prometheus metrics. Each container besides this one - # should use a different subdirectory of /prometheus-multiproc-dir, e.g. - # - PROMETHEUS_MULTIPROC_DIR=/prometheus-multiproc-dir/other-container - # Don't forget to also mount the prometheus-metrics volume in other containers too. - - PROMETHEUS_MULTIPROC_DIR=/prometheus-multiproc-dir - - volumes: - - backend-static:/root/src/static - - ./media:/root/src/media - - # Add this mount to each container that should dump Prometheus metrics. - - ./prometheus-metrics:/prometheus-multiproc-dir - - logging: - driver: awslogs - options: - awslogs-region: ${region} - awslogs-group: /aws/ec2/${name}-${env} - tag: '$${INSTANCE_ID_SUBST}-app' - awslogs-create-group: "true" - - - node-exporter: - image: ghcr.io/reef-technologies/node-exporter-aws-ec2:latest - container_name: node-exporter - restart: unless-stopped - network_mode: host - pid: host - volumes: - - /:/host:ro,rslave - command: - - '--path.rootfs=/host' - - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)' - logging: &exporter_logging - driver: journald - options: - tag: '{###{.Name}###}' - - cadvisor: - image: gcr.io/cadvisor/cadvisor:v0.40.0 - container_name: cadvisor - privileged: true - devices: - - /dev/kmsg:/dev/kmsg - volumes: - - /:/rootfs:ro - - /var/run:/var/run:ro - - /sys:/sys:ro - - /var/lib/docker:/var/lib/docker:ro - - /cgroup:/cgroup:ro - restart: unless-stopped - logging: - <<: *exporter_logging - - - nginx: - image: 'ghcr.io/reef-technologies/nginx-rt:v1.2.2' 
- restart: unless-stopped - healthcheck: - test: wget -q --spider http://0.0.0.0:8000/admin/login || exit 1 - depends_on: - - app - - - cadvisor - - node-exporter - - command: nginx -g 'daemon off;' - ports: - - - 10443:10443 - - - 8000:8000 - volumes: - - ./nginx/templates:/etc/nginx/templates - - ./nginx/config_helpers:/etc/nginx/config_helpers - - backend-static:/srv/static:ro - - ./media:/srv/media:ro - - ./nginx/monitoring_certs:/etc/monitoring_certs - logging: - driver: awslogs - options: - awslogs-region: ${region} - awslogs-group: /aws/ec2/${name}-${env} - tag: '$${INSTANCE_ID_SUBST}-nginx' - awslogs-create-group: "true" - - extra_hosts: - - "host.docker.internal:host-gateway" - - -volumes: - backend-static: diff --git a/devops/tf/main/files/env b/devops/tf/main/files/env deleted file mode 100644 index 0c3acc4..0000000 --- a/devops/tf/main/files/env +++ /dev/null @@ -1,35 +0,0 @@ -ENV=${env} -NGINX_HOST=localhost -DEBUG=0 -SECRET_KEY=${secret_key} -POSTGRES_DB=${database_name} -POSTGRES_USER=${database_user} -POSTGRES_PASSWORD=${database_password} -DATABASE_URL=${database_connection_string} - -SENTRY_DSN= -HTTPS_REDIRECT=n -HTTPS_PROXY_HEADER=X_SCHEME -CSP_ENABLED=n -CSP_REPORT_ONLY=n -CSP_REPORT_URL= -CSP_DEFAULT_SRC="'none'" -CSP_SCRIPT_SRC="'self'" -CSP_STYLE_SRC="'self'" -CSP_FONT_SRC="'self'" -CSP_IMG_SRC="'self'" -CSP_MEDIA_SRC="'self'" -CSP_OBJECT_SRC="'self'" -CSP_FRAME_SRC="'self'" -CSP_CONNECT_SRC="'self'" -CSP_CHILD_SRC="'self'" -CSP_MANIFEST_SRC="'self'" -CSP_WORKER_SRC="'self'" -CSP_BLOCK_ALL_MIXED_CONTENT=y -CSP_EXCLUDE_URL_PREFIXES= -BACKUP_B2_BUCKET= -BACKUP_B2_KEY_ID= -BACKUP_B2_KEY_SECRET= -BACKUP_LOCAL_ROTATE_KEEP_LAST= -DATABASE_POOL_URL= -CHANNELS_BACKEND_URL=redis://redis:6379/1 \ No newline at end of file diff --git a/devops/tf/main/files/envrc b/devops/tf/main/files/envrc deleted file mode 100644 index 8b2a386..0000000 --- a/devops/tf/main/files/envrc +++ /dev/null @@ -1,5 +0,0 @@ -export APP_NAME=${name} -export APP_ENV=${env} 
-export AWS_ACCOUNT_ID=${account_id} -export AWS_ECR_BASE_URL=${ecr_base_url} -export AWS_ECR_TAG=${ecr_image} diff --git a/devops/tf/main/files/nginx/config_helpers/brotli.conf b/devops/tf/main/files/nginx/config_helpers/brotli.conf deleted file mode 100644 index 1e4cb51..0000000 --- a/devops/tf/main/files/nginx/config_helpers/brotli.conf +++ /dev/null @@ -1,44 +0,0 @@ -brotli off; -brotli_static off; - -brotli_comp_level 6; -brotli_types - # text/html is always in brotli_types - text/richtext - text/plain - text/css - text/x-script - text/x-component - text/x-java-source - text/x-markdown - application/javascript - application/x-javascript - text/javascript - text/js - image/x-icon - application/x-perl - application/x-httpd-cgi - text/xml - application/xml - application/xml+rss - application/json - multipart/bag - multipart/mixed - application/xhtml+xml - font/ttf - font/otf - font/x-woff - image/svg+xml - application/vnd.ms-fontobject - application/ttf - application/x-ttf - application/otf - application/x-otf - application/truetype - application/opentype - application/x-opentype - application/font-woff - application/eot - application/font - application/font-sfnt - application/wasm; diff --git a/devops/tf/main/files/nginx/config_helpers/gzip.conf b/devops/tf/main/files/nginx/config_helpers/gzip.conf deleted file mode 100644 index 6ba8194..0000000 --- a/devops/tf/main/files/nginx/config_helpers/gzip.conf +++ /dev/null @@ -1,48 +0,0 @@ -gzip off; -gzip_static off; -gzip_proxied off; - -gzip_vary on; -gzip_comp_level 6; -gzip_buffers 16 8k; -gzip_http_version 1.1; -gzip_types - # text/html is always in gzip_types - text/richtext - text/plain - text/css - text/x-script - text/x-component - text/x-java-source - text/x-markdown - application/javascript - application/x-javascript - text/javascript - text/js - image/x-icon - application/x-perl - application/x-httpd-cgi - text/xml - application/xml - application/xml+rss - application/json - multipart/bag - multipart/mixed 
- application/xhtml+xml - font/ttf - font/otf - font/x-woff - image/svg+xml - application/vnd.ms-fontobject - application/ttf - application/x-ttf - application/otf - application/x-otf - application/truetype - application/opentype - application/x-opentype - application/font-woff - application/eot - application/font - application/font-sfnt - application/wasm; diff --git a/devops/tf/main/files/nginx/monitoring_certs/monitoring-ca.crt.txt b/devops/tf/main/files/nginx/monitoring_certs/monitoring-ca.crt.txt deleted file mode 100644 index 70dc2a2..0000000 --- a/devops/tf/main/files/nginx/monitoring_certs/monitoring-ca.crt.txt +++ /dev/null @@ -1 +0,0 @@ -"replace-me" \ No newline at end of file diff --git a/devops/tf/main/files/nginx/monitoring_certs/monitoring.crt.txt b/devops/tf/main/files/nginx/monitoring_certs/monitoring.crt.txt deleted file mode 100644 index 70dc2a2..0000000 --- a/devops/tf/main/files/nginx/monitoring_certs/monitoring.crt.txt +++ /dev/null @@ -1 +0,0 @@ -"replace-me" \ No newline at end of file diff --git a/devops/tf/main/files/nginx/monitoring_certs/monitoring.key.txt b/devops/tf/main/files/nginx/monitoring_certs/monitoring.key.txt deleted file mode 100644 index 70dc2a2..0000000 --- a/devops/tf/main/files/nginx/monitoring_certs/monitoring.key.txt +++ /dev/null @@ -1 +0,0 @@ -"replace-me" \ No newline at end of file diff --git a/devops/tf/main/files/nginx/templates/default.conf.template b/devops/tf/main/files/nginx/templates/default.conf.template deleted file mode 100644 index 36bd072..0000000 --- a/devops/tf/main/files/nginx/templates/default.conf.template +++ /dev/null @@ -1,108 +0,0 @@ -server { - listen 8000 default_server; - server_name _; - server_name_in_redirect off; - - include /etc/nginx/config_helpers/brotli.conf; - include /etc/nginx/config_helpers/gzip.conf; - - access_log /dev/stdout; - error_log /dev/stderr info; - - client_max_body_size 100M; - - location /static/ { - root /srv/; - } - - location /media/ { - root /srv/; - } - - - 
location /metrics { - return 404; - } - - location /business-metrics { - return 404; - } - - - location / { - - proxy_pass_header Server; - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_pass_header X-Forwarded-Proto; - - proxy_pass http://app:8000/; - } -} - -upstream node_exporter { - server host.docker.internal:9100; -} - -server { - server_name monitoring; - listen 10443 ssl http2; - - include /etc/nginx/config_helpers/brotli.conf; - include /etc/nginx/config_helpers/gzip.conf; - - ssl_certificate /etc/monitoring_certs/monitoring.crt; - ssl_certificate_key /etc/monitoring_certs/monitoring.key; - ssl_trusted_certificate /etc/monitoring_certs/monitoring-ca.crt; - - ssl_client_certificate /etc/monitoring_certs/monitoring-ca.crt; - ssl_verify_client on; - - access_log /dev/stdout; - error_log /dev/stderr info; - - location /node-exporter-metrics/ { - proxy_pass_header Server; - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X_SCHEME $scheme; - proxy_pass http://node_exporter/metrics; - } - - location /cadvisor-metrics/ { - proxy_pass_header Server; - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X_SCHEME $scheme; - proxy_pass http://cadvisor:8080/metrics; - } - - location /nginx-metrics/ { - vhost_traffic_status_display; - vhost_traffic_status_display_format prometheus; - } - - location /application-metrics/ { - proxy_pass_header Server; - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X_SCHEME $scheme; - proxy_pass http://app:8000/metrics; - } - - location /business-metrics/ { - proxy_pass_header Server; - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X_SCHEME $scheme; - proxy_pass http://app:8000/business-metrics; - } - -} 
- - diff --git a/devops/tf/main/modules/backend/alb.tf b/devops/tf/main/modules/backend/alb.tf deleted file mode 100644 index 6c85398..0000000 --- a/devops/tf/main/modules/backend/alb.tf +++ /dev/null @@ -1,36 +0,0 @@ -resource "aws_lb" "self" { - name = "${var.name}-${var.env}" - internal = false - load_balancer_type = "application" - subnets = var.subnets - security_groups = [aws_security_group.public.id] - enable_deletion_protection = false -} - -resource "aws_lb_target_group" "self" { - name = "${var.name}-${var.env}" - port = 8000 - protocol = "HTTP" - vpc_id = var.vpc_id - target_type = "instance" - - health_check { - enabled = true - port = 8000 - path = "/admin/login" - matcher = "200-302" - } -} - -resource "aws_lb_listener" "self" { - load_balancer_arn = aws_lb.self.arn - port = "443" - protocol = "HTTPS" - ssl_policy = "ELBSecurityPolicy-2016-08" - certificate_arn = aws_acm_certificate.self.arn - - default_action { - type = "forward" - target_group_arn = aws_lb_target_group.self.arn - } -} diff --git a/devops/tf/main/modules/backend/domain.tf b/devops/tf/main/modules/backend/domain.tf deleted file mode 100644 index dbd0cde..0000000 --- a/devops/tf/main/modules/backend/domain.tf +++ /dev/null @@ -1,46 +0,0 @@ -data "aws_route53_zone" "self" { - name = var.base_domain_name -} - -resource "aws_route53_record" "a" { - zone_id = data.aws_route53_zone.self.zone_id - name = var.domain_name - type = "A" - - alias { - name = aws_lb.self.dns_name - zone_id = aws_lb.self.zone_id - evaluate_target_health = true - } -} - -resource "aws_acm_certificate" "self" { - domain_name = var.domain_name - validation_method = "DNS" - - tags = { - Project = var.name - Env = var.env - } - - lifecycle { - create_before_destroy = true - } -} - -resource "aws_route53_record" "cert-validation" { - for_each = { - for dvo in aws_acm_certificate.self.domain_validation_options: dvo.domain_name => { - name = dvo.resource_record_name - record = dvo.resource_record_value - type = 
dvo.resource_record_type - } - } - - allow_overwrite = true - name = each.value.name - records = [each.value.record] - ttl = 60 - type = each.value.type - zone_id = data.aws_route53_zone.self.zone_id -} \ No newline at end of file diff --git a/devops/tf/main/modules/backend/ec2-autoscale.tf b/devops/tf/main/modules/backend/ec2-autoscale.tf deleted file mode 100644 index a7784f0..0000000 --- a/devops/tf/main/modules/backend/ec2-autoscale.tf +++ /dev/null @@ -1,66 +0,0 @@ -locals { - name_env = "${var.name}-${var.env}" - cloudinit = templatefile("../../files/cloud-init.yml", { - name = var.name - env = var.env - region = var.region - }) -} - -resource "aws_launch_template" "self" { - name = local.name_env - image_id = var.base_ami_id - instance_type = var.instance_type - - iam_instance_profile { - name = aws_iam_instance_profile.self.name - } - - disable_api_termination = false - key_name = aws_key_pair.self.key_name - - user_data = base64encode(local.cloudinit) - - block_device_mappings { - device_name = "/dev/sda1" - - ebs { - delete_on_termination = true - encrypted = true - volume_size = 20 - } - } - - credit_specification { - cpu_credits = "standard" - } - - vpc_security_group_ids = [ - aws_security_group.internal.id - ] -} - -resource "aws_autoscaling_group" "self" { - name = local.name_env - desired_capacity = 1 - max_size = 1 - min_size = 1 - vpc_zone_identifier = [var.subnets[0]] - - launch_template { - id = aws_launch_template.self.id - version = "$Latest" - } - - tag { - key = "Name" - propagate_at_launch = true - value = local.name_env - } - - target_group_arns = [ - aws_lb_target_group.self.arn - ] - - health_check_type = var.health_check_type -} diff --git a/devops/tf/main/modules/backend/ec2-keys.tf b/devops/tf/main/modules/backend/ec2-keys.tf deleted file mode 100644 index 9a496fe..0000000 --- a/devops/tf/main/modules/backend/ec2-keys.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "aws_key_pair" "self" { - key_name = "${var.name}-${var.env}-key" - 
public_key = var.ec2_ssh_key -} \ No newline at end of file diff --git a/devops/tf/main/modules/backend/ec2-profile.tf b/devops/tf/main/modules/backend/ec2-profile.tf deleted file mode 100644 index 71bf1fd..0000000 --- a/devops/tf/main/modules/backend/ec2-profile.tf +++ /dev/null @@ -1,27 +0,0 @@ -resource "aws_iam_role" "self" { - name = "${var.name}-${var.env}-ec2-role" - - assume_role_policy = jsonencode({ - Version = "2012-10-17", - Statement = [ - { - Effect = "Allow", - Principal = { - Service: "ec2.amazonaws.com" - }, - Action = "sts:AssumeRole" - } - ] - }) - - managed_policy_arns = [ - "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess", - "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", - "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess" - ] -} - -resource "aws_iam_instance_profile" "self" { - name = "${var.name}-${var.env}-ec2-profile" - role = aws_iam_role.self.name -} diff --git a/devops/tf/main/modules/backend/parameters.docker-compose.tf b/devops/tf/main/modules/backend/parameters.docker-compose.tf deleted file mode 100644 index eb3bac9..0000000 --- a/devops/tf/main/modules/backend/parameters.docker-compose.tf +++ /dev/null @@ -1,13 +0,0 @@ -data "aws_partition" "self" {} - -resource "aws_ssm_parameter" "compose" { - name = "/application/${var.name}/${var.env}/docker-compose.yml" - type = "SecureString" - value = templatefile("../../files/docker-compose.yml", { - name = var.name - env = var.env - region = var.region - ecr_base_url = var.ecr_base_url - ecr_image = var.ecr_image - }) -} \ No newline at end of file diff --git a/devops/tf/main/modules/backend/parameters.env.tf b/devops/tf/main/modules/backend/parameters.env.tf deleted file mode 100644 index f865a6b..0000000 --- a/devops/tf/main/modules/backend/parameters.env.tf +++ /dev/null @@ -1,31 +0,0 @@ -resource "random_uuid" "random_uuid" {} - -resource "aws_ssm_parameter" "envrc" { - name = "/application/${var.name}/${var.env}/.envrc" - type = "SecureString" - value = 
templatefile("../../files/envrc", { - name = var.name - env = var.env - region = var.region - account_id = var.account_id - ecr_base_url = var.ecr_base_url - ecr_image = var.ecr_image - }) -} - - -resource "aws_ssm_parameter" "env" { - name = "/application/${var.name}/${var.env}/.env" - type = "SecureString" - value = templatefile("../../files/env", { - name = var.name - env = var.env - region = var.region - secret_key = random_uuid.random_uuid.result - - database_name = var.database.name - database_user = var.database.user - database_password = var.database.password - database_connection_string = var.database.connection_string - }) -} \ No newline at end of file diff --git a/devops/tf/main/modules/backend/parameters.nginx.tf b/devops/tf/main/modules/backend/parameters.nginx.tf deleted file mode 100644 index ab0f2f4..0000000 --- a/devops/tf/main/modules/backend/parameters.nginx.tf +++ /dev/null @@ -1,46 +0,0 @@ -locals { - cert_dir = "../../files/nginx/monitoring_certs" - cert_files = fileset(local.cert_dir, "*.txt") - - certs = length(local.cert_files) > 0 ? [for cert_file in local.cert_files : { - name: replace(cert_file, ".txt", "") - content: "${local.cert_dir}/${cert_file}" - }] : [] - - helper_dir = "../../files/nginx/config_helpers" - helper_files = fileset(local.helper_dir, "*") - - helpers = length(local.helper_files) > 0 ? [for helper_file in local.helper_files : { - name: helper_file, - content: "${local.helper_dir}/${helper_file}" - }] : [] - - template_dir = "../../files/nginx/templates" - template_files = fileset(local.template_dir, "*") - - templates = length(local.template_files) > 0 ? 
[for template_file in local.template_files : { - name: template_file, - content: "${local.template_dir}/${template_file}" - }] : [] -} - -resource "aws_ssm_parameter" "certs" { - count = length(local.certs) - name = "/application/${var.name}/${var.env}/nginx/monitoring_certs/${local.certs[count.index].name}" - type = "SecureString" - value = file(local.certs[count.index].content) -} - -resource "aws_ssm_parameter" "helpers" { - count = length(local.helpers) - name = "/application/${var.name}/${var.env}/nginx/config_helpers/${local.helpers[count.index].name}" - type = "SecureString" - value = file(local.helpers[count.index].content) -} - -resource "aws_ssm_parameter" "templates" { - count = length(local.templates) - name = "/application/${var.name}/${var.env}/nginx/templates/${local.templates[count.index].name}" - type = "SecureString" - value = file(local.templates[count.index].content) -} \ No newline at end of file diff --git a/devops/tf/main/modules/backend/parameters.ssh-keys.tf b/devops/tf/main/modules/backend/parameters.ssh-keys.tf deleted file mode 100644 index fe84407..0000000 --- a/devops/tf/main/modules/backend/parameters.ssh-keys.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_ssm_parameter" "ssh-keys" { - name = "/application/${var.name}/${var.env}/.ssh/authorized_keys" - type = "SecureString" - value = templatefile("../../files/authorized_keys", { - ec2_ssh_key = var.ec2_ssh_key - }) -} diff --git a/devops/tf/main/modules/backend/security.tf b/devops/tf/main/modules/backend/security.tf deleted file mode 100644 index 6440b4a..0000000 --- a/devops/tf/main/modules/backend/security.tf +++ /dev/null @@ -1,63 +0,0 @@ -resource "aws_security_group" "public" { - name = "${var.name}-${var.env}-public-sg" - vpc_id = var.vpc_id - - ingress { - description = "allow traffic between load-balancer and EC2 instances within VPC" - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - description = "allow traffic between 
load-balancer and EC2 instances within VPC" - from_port = 443 - to_port = 443 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_security_group" "internal" { - name = "${var.name}-internal-sg" - vpc_id = var.vpc_id - - ingress { - description = "allow traffic to ssh from internet" - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["51.254.203.61/32"] - } - - ingress { - description = "allow monitoring" - from_port = 10443 - to_port = 10443 - protocol = "tcp" - cidr_blocks = ["138.68.147.48/32", "95.179.202.73/32"] - } - - ingress { - description = "allow traffic between load-balancer and EC2 instances within VPC" - from_port = 8000 - to_port = 8000 - protocol = "tcp" - cidr_blocks = [var.vpc_cidr] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} diff --git a/devops/tf/main/modules/backend/vars.tf b/devops/tf/main/modules/backend/vars.tf deleted file mode 100644 index 108c852..0000000 --- a/devops/tf/main/modules/backend/vars.tf +++ /dev/null @@ -1,22 +0,0 @@ -variable "name" {} -variable "env" {} -variable "region" {} - -variable "vpc_id" {} -variable "vpc_cidr" {} -variable "subnets" {} -variable "azs" {} - -variable "base_ami_id" {} -variable "base_domain_name" {} - -variable "domain_name" {} -variable "ec2_ssh_key" {} - -variable "ecr_base_url" {} -variable "ecr_image" {} - -variable "instance_type" {} -variable "health_check_type" {} -variable "account_id" {} -variable "database" {} \ No newline at end of file diff --git a/devops/tf/main/modules/database/output.tf b/devops/tf/main/modules/database/output.tf deleted file mode 100644 index 96f5801..0000000 --- a/devops/tf/main/modules/database/output.tf +++ /dev/null @@ -1,25 +0,0 @@ -output "connection_string" { - value = 
"postgres://${aws_db_instance.self.username}:${aws_db_instance.self.password}@${aws_db_instance.self.endpoint}/${aws_db_instance.self.db_name}" - sensitive = true -} - -output "user" { - value = aws_db_instance.self.username -} - -output "password" { - value = aws_db_instance.self.password - sensitive = true -} - -output "endpoint" { - value = aws_db_instance.self.endpoint -} - -output "port" { - value = aws_db_instance.self.port -} - -output "name" { - value = aws_db_instance.self.db_name -} \ No newline at end of file diff --git a/devops/tf/main/modules/database/rds.tf b/devops/tf/main/modules/database/rds.tf deleted file mode 100644 index 5504b93..0000000 --- a/devops/tf/main/modules/database/rds.tf +++ /dev/null @@ -1,36 +0,0 @@ -resource "random_string" "random" { - length = 20 - special = true - override_special = "$." -} - -resource "aws_db_subnet_group" "self" { - name = "${var.name}-${var.env}" - subnet_ids = var.subnets - - tags = { - Project = var.name - Env = var.env - Name = "DB subnet group" - } -} - -resource "aws_db_instance" "self" { - identifier = "${var.name}-${var.env}-db" - allocated_storage = 5 - max_allocated_storage = 20 - storage_encrypted = true - engine = "postgres" - instance_class = var.instance_type - username = "master" - db_name = "backend" - password = random_string.random.result - skip_final_snapshot = true - availability_zone = var.azs[0] - db_subnet_group_name = aws_db_subnet_group.self.name - vpc_security_group_ids = [aws_security_group.db.id] - - tags = { - Project = var.name - } -} diff --git a/devops/tf/main/modules/database/security.tf b/devops/tf/main/modules/database/security.tf deleted file mode 100644 index cf34c26..0000000 --- a/devops/tf/main/modules/database/security.tf +++ /dev/null @@ -1,19 +0,0 @@ -resource "aws_security_group" "db" { - name = "${var.name}-db-sg" - vpc_id = var.vpc_id - - ingress { - description = "allow traffic to postgres port from within VPC" - from_port = 5432 - to_port = 5432 - protocol = 
"tcp" - cidr_blocks = [var.vpc_cidr] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} \ No newline at end of file diff --git a/devops/tf/main/modules/database/vars.tf b/devops/tf/main/modules/database/vars.tf deleted file mode 100644 index fd54160..0000000 --- a/devops/tf/main/modules/database/vars.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "name" {} -variable "env" {} - -variable "vpc_id" {} -variable "vpc_cidr" {} -variable "subnets" {} -variable "azs" {} -variable "instance_type" {} diff --git a/devops/tf/main/modules/networking/network.tf b/devops/tf/main/modules/networking/network.tf deleted file mode 100644 index ded801b..0000000 --- a/devops/tf/main/modules/networking/network.tf +++ /dev/null @@ -1,12 +0,0 @@ -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "3.19.0" - - name = "${var.name}-${var.env}-vpc" - cidr = var.vpc_cidr - - azs = var.azs - public_subnets = var.subnet_cidrs - enable_nat_gateway = false - enable_vpn_gateway = false -} diff --git a/devops/tf/main/modules/networking/output.tf b/devops/tf/main/modules/networking/output.tf deleted file mode 100644 index 717f7b0..0000000 --- a/devops/tf/main/modules/networking/output.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "vpc_id" { - value = module.vpc.vpc_id -} - -output "vpc_cidr_block" { - value = module.vpc.vpc_cidr_block -} - -output "subnets" { - value = module.vpc.public_subnets -} - -output "azs" { - value = module.vpc.azs -} diff --git a/devops/tf/main/modules/networking/vars.tf b/devops/tf/main/modules/networking/vars.tf deleted file mode 100644 index 7f5c180..0000000 --- a/devops/tf/main/modules/networking/vars.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "name" {} -variable "env" {} -variable "azs" {} -variable "vpc_cidr" {} -variable "subnet_cidrs" {} \ No newline at end of file diff --git a/devops/vultr_scripts/vultr-deploy.py b/devops/vultr_scripts/vultr-deploy.py deleted file mode 100644 index ebe1aef..0000000 --- 
a/devops/vultr_scripts/vultr-deploy.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python -# deploy to list of IPs from `instances_ip.txt` (see `vultr-get-instances.py`) - -import subprocess -from pathlib import Path - -pwd = Path(__file__).parent - - -with open(pwd / "instances_ip.txt") as f: - ips = f.readlines() - -errs = [] -for ip in ips: - print("deploying to", ip) - try: - res = subprocess.Popen( - ["git", "push", f"root@{ip.strip()}:~/repos/project-central.git"], - env={ - "GIT_SSH_COMMAND": "ssh -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" - }, - ).communicate() - except subprocess.CalledProcessError: - errs.append(ip) - else: - print("res", res) - -for err_ip in errs: - print("error deploying to", err_ip) diff --git a/devops/vultr_scripts/vultr-get-instances.py b/devops/vultr_scripts/vultr-get-instances.py deleted file mode 100644 index 06bf3bf..0000000 --- a/devops/vultr_scripts/vultr-get-instances.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python -# get list of all instances in Vultr account -# save their IDs and IPs into files which will be used by \ -# `vultr-deploy.py` and `vultr-update-cloudinit.py` - -import subprocess -from pathlib import Path - -pwd = Path(__file__).parent - -instance_id = pwd / "instances_id.txt" -instance_ip = pwd / "instances_ip.txt" - -res = subprocess.check_output(["vultr-cli", "instance", "list", "ipv4"]).decode("utf-8").split("\n") - -ids = [] -ips = [] -for line in res[1:]: # skip header - line_items = line.split("\t") - if len(line_items) != 13: - continue - ids.append(line_items[0].strip()) - ips.append(line_items[1].strip()) - -with open(instance_ip, "w") as f: - f.write("\n".join(ips)) - -with open(instance_id, "w") as f: - f.write("\n".join(ids)) diff --git a/devops/vultr_scripts/vultr-update-cloudinit.py b/devops/vultr_scripts/vultr-update-cloudinit.py deleted file mode 100644 index 8977e84..0000000 --- a/devops/vultr_scripts/vultr-update-cloudinit.py +++ /dev/null @@ 
-1,30 +0,0 @@ -#!/usr/bin/env python -# update cloud-init data -# this should be used only to UPDATE the data, initialization should be done via Terraform -# see vultr_tf/core/vultr-cloud-init.tftpl -import subprocess -from pathlib import Path - -pwd = Path(__file__).parent - -# cloud-init script -# use `vultr-cli instance user-data get ` to get existing data -user_data = pwd / "userdata.txt" -assert user_data.exists() - -with open(pwd / "instances_id.txt") as f: - for instance_id in f.readlines(): - print("instance id", instance_id) - # res = subprocess.check_output(['vultr-cli', 'instance', 'user-data', 'get', instance_id.strip()]) - res = subprocess.check_output( - [ - "vultr-cli", - "instance", - "user-data", - "set", - instance_id.strip(), - "-d", - str(user_data), - ] - ) - print("res", res, type(res)) diff --git a/devops/vultr_tf/core/backend.tf b/devops/vultr_tf/core/backend.tf deleted file mode 100644 index 1785925..0000000 --- a/devops/vultr_tf/core/backend.tf +++ /dev/null @@ -1,10 +0,0 @@ -terraform { - - required_providers { - vultr = { - source = "vultr/vultr" - version = "~> 2.15.1" - } - } - -} \ No newline at end of file diff --git a/devops/vultr_tf/core/main.tf b/devops/vultr_tf/core/main.tf deleted file mode 100644 index 4ac1ca5..0000000 --- a/devops/vultr_tf/core/main.tf +++ /dev/null @@ -1,52 +0,0 @@ -provider "vultr" { - api_key = var.vultr_api_key -} - -resource "vultr_instance" "worker" { - count = 1 - hostname = "instance-project-${count.index + 1}" - region = var.region - plan = "vc2-1c-1gb" // via `vultr-cli plans list` - os_id = 1743 // ubuntu 22-04, via `vultr-cli os list` - ssh_key_ids = [ - // uuid-4 of ssh keys added in Vultr - ] - enable_ipv6 = true - activation_email = false - label = "instance-project" - backups = "disabled" - - user_data = templatefile("vultr-cloud-init.tftpl", { - DEPLOY_SSH_KEY = var.DEPLOY_SSH_KEY - SECRET_KEY = var.DOTENV_SECRET_KEY - POSTGRES_HOST = var.DOTENV_POSTGRES_HOST - POSTGRES_USER = 
var.DOTENV_POSTGRES_USER - POSTGRES_PASSWORD = var.DOTENV_POSTGRES_PASSWORD - DATABASE_POOL_URL = var.DOTENV_DATABASE_POOL_URL - DATABASE_URL = var.DOTENV_DATABASE_URL - SENTRY_DSN = var.DOTENV_SENTRY_DSN - }) -} - -resource "vultr_load_balancer" "loadbalancer" { - region = var.region - - forwarding_rules { - frontend_protocol = "https" - frontend_port = 443 - backend_protocol = "https" - backend_port = 443 - } - - health_check { - path = "/admin/" - port = "443" - protocol = "https" - response_timeout = 5 - unhealthy_threshold = 2 - check_interval = 15 - healthy_threshold = 4 - } - - attached_instances = [for instance in vultr_instance.worker : instance.id] -} diff --git a/devops/vultr_tf/core/vars.tf b/devops/vultr_tf/core/vars.tf deleted file mode 100644 index 03055da..0000000 --- a/devops/vultr_tf/core/vars.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "region" { - type = string -} - -variable "vultr_api_key" { - type = string - sensitive = true -} diff --git a/devops/vultr_tf/core/vars_cloud_init.tf b/devops/vultr_tf/core/vars_cloud_init.tf deleted file mode 100644 index 53075ae..0000000 --- a/devops/vultr_tf/core/vars_cloud_init.tf +++ /dev/null @@ -1,41 +0,0 @@ -variable "DEPLOY_SSH_KEY" { - // private ssh key for cloning github repo - type = string - sensitive = true -} - -// variables for .env file -variable "DOTENV_SECRET_KEY" { - type = string - sensitive = true -} - -variable "DOTENV_POSTGRES_HOST" { - type = string - sensitive = true -} - -variable "DOTENV_POSTGRES_USER" { - type = string - sensitive = true -} - -variable "DOTENV_POSTGRES_PASSWORD" { - type = string - sensitive = true -} - -variable "DOTENV_DATABASE_POOL_URL" { - type = string - sensitive = true -} - -variable "DOTENV_DATABASE_URL" { - type = string - sensitive = true -} - -variable "DOTENV_SENTRY_DSN" { - type = string - sensitive = true -} diff --git a/devops/vultr_tf/core/vultr-cloud-init.tftpl b/devops/vultr_tf/core/vultr-cloud-init.tftpl deleted file mode 100644 index 
a996ffa..0000000 --- a/devops/vultr_tf/core/vultr-cloud-init.tftpl +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -# shell variables have to have doubled dollar sign, otherwise Terraform will try to interpolate them -# the only variable with single dollar is `DEPLOY_SSH_KEY` in the block below -echo "starting custom cloud-init" - -# add deploy ssh key to clone repo -DEPLOY_KEY_FILE="/root/.ssh/id_ed25519" -echo "${DEPLOY_SSH_KEY}" > $${DEPLOY_KEY_FILE} -chmod 600 $${DEPLOY_KEY_FILE} - -DEPLOY_DIR="/root/domains/bittensor-prometheus-proxy/" -REPO_DIR="/root/repos/bittensor-prometheus-proxy.git" -REPO_ORIGIN="git@github.com:reef-technologies/project.git" - -mkdir -p /root/repos/ -mkdir -p $${DEPLOY_DIR} -mkdir -p /root/volumes/project-mount/ - -# repo init script for Vultr server -ssh-keyscan github.com >> /root/.ssh/known_hosts -apt install -y git -GIT_SSH_COMMAND="ssh -i $${DEPLOY_KEY_FILE}" git clone --depth=1 --bare --no-checkout $${REPO_ORIGIN} $${REPO_DIR} - -# 1st time deploy and setup -git --work-tree=$${DEPLOY_DIR} --git-dir=$${REPO_DIR} checkout -f main -cp $${DEPLOY_DIR}/bin/post-receive $${REPO_DIR}/hooks/post-receive - -$${DEPLOY_DIR}/bin/prepare-os.sh -$${DEPLOY_DIR}/setup-prod.sh - -# add env variables to .env file -cat <<EOF >> $${DEPLOY_DIR}/.env -POSTGRES_HOST=${POSTGRES_HOST} -POSTGRES_USER=${POSTGRES_USER} -POSTGRES_PASSWORD=${POSTGRES_PASSWORD} -DATABASE_POOL_URL=${DATABASE_POOL_URL} -DATABASE_URL=${DATABASE_URL} -SENTRY_DSN=${SENTRY_DSN} -SECRET_KEY=${SECRET_KEY} - -EOF - -cd $${DEPLOY_DIR} && docker compose up --build --detach -echo "finishing custom cloud-init" diff --git a/letsencrypt_setup.sh b/letsencrypt_setup.sh deleted file mode 100755 index 161f602..0000000 --- a/letsencrypt_setup.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -set -eux -RELPATH="$(dirname "$0")" -ABSPATH="$(realpath "$RELPATH")" - -cd "$ABSPATH" - -source ./.env -mkdir -p "$ABSPATH/letsencrypt/etc/dhparams" - -docker run -it --rm \ -v 
"$ABSPATH/letsencrypt/etc:/etc/letsencrypt" \ - alpine/openssl \ - dhparam -out /etc/letsencrypt/dhparams/dhparam.pem 2048 - -docker run --entrypoint certbot -it --rm \ - -v "$ABSPATH/letsencrypt/etc:/etc/letsencrypt" \ - -p 80:80\ - ghcr.io/reef-technologies/nginx-rt:v1.2.2 \ - certonly \ - --standalone --preferred-challenges http\ - -d "$NGINX_HOST" -d "www.$NGINX_HOST" diff --git a/nginx/config_helpers/brotli.conf b/nginx/config_helpers/brotli.conf deleted file mode 100644 index 1e4cb51..0000000 --- a/nginx/config_helpers/brotli.conf +++ /dev/null @@ -1,44 +0,0 @@ -brotli off; -brotli_static off; - -brotli_comp_level 6; -brotli_types - # text/html is always in brotli_types - text/richtext - text/plain - text/css - text/x-script - text/x-component - text/x-java-source - text/x-markdown - application/javascript - application/x-javascript - text/javascript - text/js - image/x-icon - application/x-perl - application/x-httpd-cgi - text/xml - application/xml - application/xml+rss - application/json - multipart/bag - multipart/mixed - application/xhtml+xml - font/ttf - font/otf - font/x-woff - image/svg+xml - application/vnd.ms-fontobject - application/ttf - application/x-ttf - application/otf - application/x-otf - application/truetype - application/opentype - application/x-opentype - application/font-woff - application/eot - application/font - application/font-sfnt - application/wasm; diff --git a/nginx/config_helpers/gzip.conf b/nginx/config_helpers/gzip.conf deleted file mode 100644 index 6ba8194..0000000 --- a/nginx/config_helpers/gzip.conf +++ /dev/null @@ -1,48 +0,0 @@ -gzip off; -gzip_static off; -gzip_proxied off; - -gzip_vary on; -gzip_comp_level 6; -gzip_buffers 16 8k; -gzip_http_version 1.1; -gzip_types - # text/html is always in gzip_types - text/richtext - text/plain - text/css - text/x-script - text/x-component - text/x-java-source - text/x-markdown - application/javascript - application/x-javascript - text/javascript - text/js - image/x-icon - 
application/x-perl - application/x-httpd-cgi - text/xml - application/xml - application/xml+rss - application/json - multipart/bag - multipart/mixed - application/xhtml+xml - font/ttf - font/otf - font/x-woff - image/svg+xml - application/vnd.ms-fontobject - application/ttf - application/x-ttf - application/otf - application/x-otf - application/truetype - application/opentype - application/x-opentype - application/font-woff - application/eot - application/font - application/font-sfnt - application/wasm; diff --git a/nginx/monitoring_certs/README.md b/nginx/monitoring_certs/README.md deleted file mode 100644 index a3c4a4d..0000000 --- a/nginx/monitoring_certs/README.md +++ /dev/null @@ -1,2 +0,0 @@ -Go to [promehtues-grafana-monitoring](https://github.com/reef-technologies/prometheus-grafana-monitoring) and generate a cert-key pair for this project (see prometheus-grafana-monitoring's README to find out how to do that). -Copy the generated cert-key pair along with `ca.crt` and place there, named `cert.crt`, `cert.key` and `ca.crt`, respectively. diff --git a/nginx/templates/default.conf.template b/nginx/templates/default.conf.template deleted file mode 100644 index 800ead0..0000000 --- a/nginx/templates/default.conf.template +++ /dev/null @@ -1,156 +0,0 @@ -# -# SSL config below is inspired by websites: -# - https://syslink.pl/cipherlist/ -# - https://ssl-config.mozilla.org/ -# Generated for Intermediate configuration, nginx 1.20.1 or later -# - -server { - listen 80 default_server; - server_name _; - server_name_in_redirect off; - - return 444; -} - -server { - listen 80; - server_name www.${NGINX_HOST} ${NGINX_HOST}; - return 301 https://${NGINX_HOST}$request_uri; -} - -server { - listen 443 ssl default_server; - server_name _; - server_name_in_redirect off; - - # Load the Diffie-Hellman parameter. 
- ssl_dhparam /etc/letsencrypt/dhparams/dhparam.pem; - - ssl_certificate /etc/letsencrypt/live/${NGINX_HOST}/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/${NGINX_HOST}/privkey.pem; - ssl_trusted_certificate /etc/letsencrypt/live/${NGINX_HOST}/chain.pem; - - return 444; -} - - - -server { - listen 443 ssl http2; - server_name ${NGINX_HOST} www.${NGINX_HOST}; - - add_header Strict-Transport-Security "max-age=31536000" always; - add_header X-Content-Type-Options nosniff; - add_header X-XSS-Protection "1; mode=block"; - add_header X-Frame-Options DENY; - - # Load the Diffie-Hellman parameter. - ssl_dhparam /etc/letsencrypt/dhparams/dhparam.pem; - - ssl_certificate /etc/letsencrypt/live/${NGINX_HOST}/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/${NGINX_HOST}/privkey.pem; - ssl_trusted_certificate /etc/letsencrypt/live/${NGINX_HOST}/chain.pem; - - include /etc/nginx/config_helpers/brotli.conf; - include /etc/nginx/config_helpers/gzip.conf; - - access_log /dev/stdout; - error_log /dev/stderr info; - - client_max_body_size 100M; - - location /static/ { - root /srv/; - } - - location /media/ { - root /srv/; - } - - - location /metrics { - return 404; - } - - location /business-metrics { - return 404; - } - - - location / { - - proxy_pass_header Server; - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-Proto $scheme; - - proxy_pass http://app:8000/; - } -} - -upstream node_exporter { - server host.docker.internal:9100; -} - -server { - server_name monitoring; - listen 10443 ssl http2; - - include /etc/nginx/config_helpers/brotli.conf; - include /etc/nginx/config_helpers/gzip.conf; - - ssl_certificate /etc/monitoring_certs/cert.crt; - ssl_certificate_key /etc/monitoring_certs/cert.key; - ssl_trusted_certificate /etc/monitoring_certs/ca.crt; - - ssl_client_certificate /etc/monitoring_certs/ca.crt; - ssl_verify_client on; - - access_log /dev/stdout; - error_log 
/dev/stderr info; - - location /node-exporter-metrics/ { - proxy_pass_header Server; - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X_SCHEME $scheme; - proxy_pass http://node_exporter/metrics; - } - - location /cadvisor-metrics/ { - proxy_pass_header Server; - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X_SCHEME $scheme; - proxy_pass http://cadvisor:8080/metrics; - } - - location /nginx-metrics/ { - vhost_traffic_status_display; - vhost_traffic_status_display_format prometheus; - } - - location /application-metrics/ { - proxy_pass_header Server; - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X_SCHEME $scheme; - proxy_pass http://app:8000/metrics; - } - - location /business-metrics { - proxy_pass_header Server; - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X_SCHEME $scheme; - proxy_pass http://app:8000/business-metrics; - } -} - - diff --git a/promtail/config.yml b/promtail/config.yml deleted file mode 100644 index 85605e4..0000000 --- a/promtail/config.yml +++ /dev/null @@ -1,27 +0,0 @@ -server: - http_listen_port: 9080 - grpc_listen_port: 0 - -positions: - filename: /tmp/positions.yaml - -clients: - - url: "${LOKI_URL}/api/prom/push" - basic_auth: - username: "${LOKI_USER}" - password: "${LOKI_PASSWORD}" - external_labels: - client: "${LOKI_CLIENT}" - client_server_group: "${LOKI_CLIENT_SERVER_GROUP}" - -scrape_configs: - - job_name: containerlogs - docker_sd_configs: - - host: unix:///var/run/docker.sock - refresh_interval: ${LOKI_REFRESH_INTERVAL} - relabel_configs: - - source_labels: ["__meta_docker_container_name"] - regex: "/(.*)" - target_label: "container" - - source_labels: ["__meta_docker_container_log_stream"] - target_label: "logstream" From 
e1080adb5414cb77a3ac6d1f063dfe5bbc17529f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Fri, 1 Nov 2024 14:10:49 +0100 Subject: [PATCH 03/15] remove redundant README entries --- README.md | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/README.md b/README.md index 2c6815a..961bdae 100644 --- a/README.md +++ b/README.md @@ -158,39 +158,6 @@ with some_calculation_time.labels('blabla').time(): -# Cloud deployment - -## AWS - -
-Initiate the infrastructure with Terraform: -TODO - -To push a new version of the application to AWS, just push to a branch named `deploy-$(ENVIRONMENT_NAME)`. -Typical values for `$(ENVIRONMENT_NAME)` are `prod` and `staging`. -For this to work, GitHub actions needs to be provided with credentials for an account that has the following policies enabled: - -- AutoScalingFullAccess -- AmazonEC2ContainerRegistryFullAccess -- AmazonS3FullAccess - -See `.github/workflows/cd.yml` to find out the secret names. - -For more details see [README_AWS.md](README_AWS.md) -
- -## Vultr - -
-Initiate the infrastructure with Terraform and cloud-init: - -- see Terraform template in `/devops/vultr_tf/core/` -- see scripts for interacting with Vultr API in `/devops/vultr_scripts/` - - note these scripts need `vultr-cli` installed - -For more details see [README_vultr.md](README_vultr.md). -
- # Backups
From f60d5f2392a88068b689ee92b1ab1229bb9836c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Sat, 2 Nov 2024 12:19:09 +0100 Subject: [PATCH 04/15] the proxy is working! --- .gitignore | 2 + .../commands/debug_add_validator.py | 18 ++ app/src/project/core/metrics.py | 16 ++ .../project/core/migrations/0001_initial.py | 23 +++ app/src/project/core/migrations/__init__.py | 0 app/src/project/core/models.py | 9 + .../core/prometheus_protobuf/README.md | 9 + .../core/prometheus_protobuf/__init__.py | 0 .../core/prometheus_protobuf/gogo_pb2.py | 36 ++++ .../core/prometheus_protobuf/remote_pb2.py | 55 ++++++ .../core/prometheus_protobuf/types_pb2.py | 88 ++++++++++ app/src/project/core/tasks.py | 38 ++++- app/src/project/core/views.py | 158 ++++++++++++++++++ app/src/project/settings.py | 65 ++++++- app/src/project/urls.py | 3 + envs/dev/.env.template | 7 + envs/dev/central-prometheus.yml | 5 + envs/dev/docker-compose.yml | 39 +++++ envs/dev/on-site-prometheus.yml | 14 ++ envs/prod/.env.template | 7 + pyproject.toml | 6 + setup-dev.sh | 2 + 22 files changed, 589 insertions(+), 11 deletions(-) create mode 100644 app/src/project/core/management/commands/debug_add_validator.py create mode 100644 app/src/project/core/migrations/0001_initial.py create mode 100644 app/src/project/core/migrations/__init__.py create mode 100644 app/src/project/core/prometheus_protobuf/README.md create mode 100644 app/src/project/core/prometheus_protobuf/__init__.py create mode 100644 app/src/project/core/prometheus_protobuf/gogo_pb2.py create mode 100644 app/src/project/core/prometheus_protobuf/remote_pb2.py create mode 100644 app/src/project/core/prometheus_protobuf/types_pb2.py create mode 100644 envs/dev/central-prometheus.yml create mode 100644 envs/dev/on-site-prometheus.yml diff --git a/.gitignore b/.gitignore index 9c56f64..35ab79e 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,5 @@ media/ .terraform/ .nox/ __pycache__ +./central-prometheus.yml 
+./on-site-prometheus.yml \ No newline at end of file diff --git a/app/src/project/core/management/commands/debug_add_validator.py b/app/src/project/core/management/commands/debug_add_validator.py new file mode 100644 index 0000000..d92f468 --- /dev/null +++ b/app/src/project/core/management/commands/debug_add_validator.py @@ -0,0 +1,18 @@ +from django.core.management import BaseCommand + +from project.core.models import Validator + + +class Command(BaseCommand): + """For local development, run this command to whitelist a hotkey without the need to register it in any subnet.""" + def add_arguments(self, parser): + parser.add_argument( + "validator_public_key", type=str, help="public key of the validator to be inserted" + ) + + def handle(self, *args, **options): + Validator.objects.create( + public_key=options["validator_public_key"], + active=True, + debug=True, + ) diff --git a/app/src/project/core/metrics.py b/app/src/project/core/metrics.py index 4619580..7fe7caf 100644 --- a/app/src/project/core/metrics.py +++ b/app/src/project/core/metrics.py @@ -21,6 +21,22 @@ def collect(self): ENV_VAR_NAME = "PROMETHEUS_MULTIPROC_DIR" +metrics_counter = prometheus_client.Counter( + 'incoming', + 'How many metrics per hotkey are ingested', + namespace='django', + unit='metrics', + labelnames=['hotkey'], +) + +series_counter = prometheus_client.Counter( + 'incoming', + 'How many series per hotkey are ingested', + namespace='django', + unit='series', + labelnames=['hotkey'], +) + def metrics_view(request): """Exports metrics as a Django view""" diff --git a/app/src/project/core/migrations/0001_initial.py b/app/src/project/core/migrations/0001_initial.py new file mode 100644 index 0000000..b8867b7 --- /dev/null +++ b/app/src/project/core/migrations/0001_initial.py @@ -0,0 +1,23 @@ +# Generated by Django 4.2.16 on 2024-11-02 10:58 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = 
[ + migrations.CreateModel( + name='Validator', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('public_key', models.TextField(unique=True)), + ('active', models.BooleanField()), + ('debug', models.BooleanField(default=False)), + ], + ), + ] diff --git a/app/src/project/core/migrations/__init__.py b/app/src/project/core/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/src/project/core/models.py b/app/src/project/core/models.py index 2f0a416..49892d1 100644 --- a/app/src/project/core/models.py +++ b/app/src/project/core/models.py @@ -1 +1,10 @@ from django.db import models # noqa + + +class Validator(models.Model): + public_key = models.TextField(unique=True) + active = models.BooleanField() + debug = models.BooleanField(default=False) + + def __str__(self): + return f"hotkey: {self.public_key}" diff --git a/app/src/project/core/prometheus_protobuf/README.md b/app/src/project/core/prometheus_protobuf/README.md new file mode 100644 index 0000000..847b136 --- /dev/null +++ b/app/src/project/core/prometheus_protobuf/README.md @@ -0,0 +1,9 @@ +These files were created by: + +1. cloning prometheus (git@github.com:prometheus/prometheus.git) and protobuf (git clone https://github.com/gogo/protobuf.git) +2. Installing go and a bunch of stuff I'm not sure is necessary +3. Running `make proto` in prometheus's root dir until the error was `--gogofast_out: protoc-gen-gogofast: Plugin failed with status code 1.` - not sure if that was necessary +4. running `protoc -I=. -I=../../protobuf --python_out=. ../../protobuf/gogoproto/gogo.proto types.proto remote.proto` in `prometheus/prompb` +5. ... +6. 
profit + diff --git a/app/src/project/core/prometheus_protobuf/__init__.py b/app/src/project/core/prometheus_protobuf/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/src/project/core/prometheus_protobuf/gogo_pb2.py b/app/src/project/core/prometheus_protobuf/gogo_pb2.py new file mode 100644 index 0000000..ab65ed5 --- /dev/null +++ b/app/src/project/core/prometheus_protobuf/gogo_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: gogoproto/gogo.proto +# Protobuf Python Version: 5.28.3 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 28, + 3, + '', + 'gogoproto/gogo.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14gogoproto/gogo.proto\x12\tgogoproto\x1a google/protobuf/descriptor.proto:;\n\x13goproto_enum_prefix\x12\x1c.google.protobuf.EnumOptions\x18\xb1\xe4\x03 \x01(\x08:=\n\x15goproto_enum_stringer\x12\x1c.google.protobuf.EnumOptions\x18\xc5\xe4\x03 \x01(\x08:5\n\renum_stringer\x12\x1c.google.protobuf.EnumOptions\x18\xc6\xe4\x03 \x01(\x08:7\n\x0f\x65num_customname\x12\x1c.google.protobuf.EnumOptions\x18\xc7\xe4\x03 \x01(\t:0\n\x08\x65numdecl\x12\x1c.google.protobuf.EnumOptions\x18\xc8\xe4\x03 \x01(\x08:A\n\x14\x65numvalue_customname\x12!.google.protobuf.EnumValueOptions\x18\xd1\x83\x04 
\x01(\t:;\n\x13goproto_getters_all\x12\x1c.google.protobuf.FileOptions\x18\x99\xec\x03 \x01(\x08:?\n\x17goproto_enum_prefix_all\x12\x1c.google.protobuf.FileOptions\x18\x9a\xec\x03 \x01(\x08:<\n\x14goproto_stringer_all\x12\x1c.google.protobuf.FileOptions\x18\x9b\xec\x03 \x01(\x08:9\n\x11verbose_equal_all\x12\x1c.google.protobuf.FileOptions\x18\x9c\xec\x03 \x01(\x08:0\n\x08\x66\x61\x63\x65_all\x12\x1c.google.protobuf.FileOptions\x18\x9d\xec\x03 \x01(\x08:4\n\x0cgostring_all\x12\x1c.google.protobuf.FileOptions\x18\x9e\xec\x03 \x01(\x08:4\n\x0cpopulate_all\x12\x1c.google.protobuf.FileOptions\x18\x9f\xec\x03 \x01(\x08:4\n\x0cstringer_all\x12\x1c.google.protobuf.FileOptions\x18\xa0\xec\x03 \x01(\x08:3\n\x0bonlyone_all\x12\x1c.google.protobuf.FileOptions\x18\xa1\xec\x03 \x01(\x08:1\n\tequal_all\x12\x1c.google.protobuf.FileOptions\x18\xa5\xec\x03 \x01(\x08:7\n\x0f\x64\x65scription_all\x12\x1c.google.protobuf.FileOptions\x18\xa6\xec\x03 \x01(\x08:3\n\x0btestgen_all\x12\x1c.google.protobuf.FileOptions\x18\xa7\xec\x03 \x01(\x08:4\n\x0c\x62\x65nchgen_all\x12\x1c.google.protobuf.FileOptions\x18\xa8\xec\x03 \x01(\x08:5\n\rmarshaler_all\x12\x1c.google.protobuf.FileOptions\x18\xa9\xec\x03 \x01(\x08:7\n\x0funmarshaler_all\x12\x1c.google.protobuf.FileOptions\x18\xaa\xec\x03 \x01(\x08:<\n\x14stable_marshaler_all\x12\x1c.google.protobuf.FileOptions\x18\xab\xec\x03 \x01(\x08:1\n\tsizer_all\x12\x1c.google.protobuf.FileOptions\x18\xac\xec\x03 \x01(\x08:A\n\x19goproto_enum_stringer_all\x12\x1c.google.protobuf.FileOptions\x18\xad\xec\x03 \x01(\x08:9\n\x11\x65num_stringer_all\x12\x1c.google.protobuf.FileOptions\x18\xae\xec\x03 \x01(\x08:<\n\x14unsafe_marshaler_all\x12\x1c.google.protobuf.FileOptions\x18\xaf\xec\x03 \x01(\x08:>\n\x16unsafe_unmarshaler_all\x12\x1c.google.protobuf.FileOptions\x18\xb0\xec\x03 \x01(\x08:B\n\x1agoproto_extensions_map_all\x12\x1c.google.protobuf.FileOptions\x18\xb1\xec\x03 
\x01(\x08:@\n\x18goproto_unrecognized_all\x12\x1c.google.protobuf.FileOptions\x18\xb2\xec\x03 \x01(\x08:8\n\x10gogoproto_import\x12\x1c.google.protobuf.FileOptions\x18\xb3\xec\x03 \x01(\x08:6\n\x0eprotosizer_all\x12\x1c.google.protobuf.FileOptions\x18\xb4\xec\x03 \x01(\x08:3\n\x0b\x63ompare_all\x12\x1c.google.protobuf.FileOptions\x18\xb5\xec\x03 \x01(\x08:4\n\x0ctypedecl_all\x12\x1c.google.protobuf.FileOptions\x18\xb6\xec\x03 \x01(\x08:4\n\x0c\x65numdecl_all\x12\x1c.google.protobuf.FileOptions\x18\xb7\xec\x03 \x01(\x08:<\n\x14goproto_registration\x12\x1c.google.protobuf.FileOptions\x18\xb8\xec\x03 \x01(\x08:7\n\x0fmessagename_all\x12\x1c.google.protobuf.FileOptions\x18\xb9\xec\x03 \x01(\x08:=\n\x15goproto_sizecache_all\x12\x1c.google.protobuf.FileOptions\x18\xba\xec\x03 \x01(\x08:;\n\x13goproto_unkeyed_all\x12\x1c.google.protobuf.FileOptions\x18\xbb\xec\x03 \x01(\x08::\n\x0fgoproto_getters\x12\x1f.google.protobuf.MessageOptions\x18\x81\xf4\x03 \x01(\x08:;\n\x10goproto_stringer\x12\x1f.google.protobuf.MessageOptions\x18\x83\xf4\x03 \x01(\x08:8\n\rverbose_equal\x12\x1f.google.protobuf.MessageOptions\x18\x84\xf4\x03 \x01(\x08:/\n\x04\x66\x61\x63\x65\x12\x1f.google.protobuf.MessageOptions\x18\x85\xf4\x03 \x01(\x08:3\n\x08gostring\x12\x1f.google.protobuf.MessageOptions\x18\x86\xf4\x03 \x01(\x08:3\n\x08populate\x12\x1f.google.protobuf.MessageOptions\x18\x87\xf4\x03 \x01(\x08:3\n\x08stringer\x12\x1f.google.protobuf.MessageOptions\x18\xc0\x8b\x04 \x01(\x08:2\n\x07onlyone\x12\x1f.google.protobuf.MessageOptions\x18\x89\xf4\x03 \x01(\x08:0\n\x05\x65qual\x12\x1f.google.protobuf.MessageOptions\x18\x8d\xf4\x03 \x01(\x08:6\n\x0b\x64\x65scription\x12\x1f.google.protobuf.MessageOptions\x18\x8e\xf4\x03 \x01(\x08:2\n\x07testgen\x12\x1f.google.protobuf.MessageOptions\x18\x8f\xf4\x03 \x01(\x08:3\n\x08\x62\x65nchgen\x12\x1f.google.protobuf.MessageOptions\x18\x90\xf4\x03 \x01(\x08:4\n\tmarshaler\x12\x1f.google.protobuf.MessageOptions\x18\x91\xf4\x03 
\x01(\x08:6\n\x0bunmarshaler\x12\x1f.google.protobuf.MessageOptions\x18\x92\xf4\x03 \x01(\x08:;\n\x10stable_marshaler\x12\x1f.google.protobuf.MessageOptions\x18\x93\xf4\x03 \x01(\x08:0\n\x05sizer\x12\x1f.google.protobuf.MessageOptions\x18\x94\xf4\x03 \x01(\x08:;\n\x10unsafe_marshaler\x12\x1f.google.protobuf.MessageOptions\x18\x97\xf4\x03 \x01(\x08:=\n\x12unsafe_unmarshaler\x12\x1f.google.protobuf.MessageOptions\x18\x98\xf4\x03 \x01(\x08:A\n\x16goproto_extensions_map\x12\x1f.google.protobuf.MessageOptions\x18\x99\xf4\x03 \x01(\x08:?\n\x14goproto_unrecognized\x12\x1f.google.protobuf.MessageOptions\x18\x9a\xf4\x03 \x01(\x08:5\n\nprotosizer\x12\x1f.google.protobuf.MessageOptions\x18\x9c\xf4\x03 \x01(\x08:2\n\x07\x63ompare\x12\x1f.google.protobuf.MessageOptions\x18\x9d\xf4\x03 \x01(\x08:3\n\x08typedecl\x12\x1f.google.protobuf.MessageOptions\x18\x9e\xf4\x03 \x01(\x08:6\n\x0bmessagename\x12\x1f.google.protobuf.MessageOptions\x18\xa1\xf4\x03 \x01(\x08:<\n\x11goproto_sizecache\x12\x1f.google.protobuf.MessageOptions\x18\xa2\xf4\x03 \x01(\x08::\n\x0fgoproto_unkeyed\x12\x1f.google.protobuf.MessageOptions\x18\xa3\xf4\x03 \x01(\x08:1\n\x08nullable\x12\x1d.google.protobuf.FieldOptions\x18\xe9\xfb\x03 \x01(\x08:.\n\x05\x65mbed\x12\x1d.google.protobuf.FieldOptions\x18\xea\xfb\x03 \x01(\x08:3\n\ncustomtype\x12\x1d.google.protobuf.FieldOptions\x18\xeb\xfb\x03 \x01(\t:3\n\ncustomname\x12\x1d.google.protobuf.FieldOptions\x18\xec\xfb\x03 \x01(\t:0\n\x07jsontag\x12\x1d.google.protobuf.FieldOptions\x18\xed\xfb\x03 \x01(\t:1\n\x08moretags\x12\x1d.google.protobuf.FieldOptions\x18\xee\xfb\x03 \x01(\t:1\n\x08\x63\x61sttype\x12\x1d.google.protobuf.FieldOptions\x18\xef\xfb\x03 \x01(\t:0\n\x07\x63\x61stkey\x12\x1d.google.protobuf.FieldOptions\x18\xf0\xfb\x03 \x01(\t:2\n\tcastvalue\x12\x1d.google.protobuf.FieldOptions\x18\xf1\xfb\x03 \x01(\t:0\n\x07stdtime\x12\x1d.google.protobuf.FieldOptions\x18\xf2\xfb\x03 \x01(\x08:4\n\x0bstdduration\x12\x1d.google.protobuf.FieldOptions\x18\xf3\xfb\x03 
\x01(\x08:3\n\nwktpointer\x12\x1d.google.protobuf.FieldOptions\x18\xf4\xfb\x03 \x01(\x08\x42\x45\n\x13\x63om.google.protobufB\nGoGoProtosZ\"github.com/gogo/protobuf/gogoproto') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'gogoproto.gogo_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\023com.google.protobufB\nGoGoProtosZ\"github.com/gogo/protobuf/gogoproto' +# @@protoc_insertion_point(module_scope) diff --git a/app/src/project/core/prometheus_protobuf/remote_pb2.py b/app/src/project/core/prometheus_protobuf/remote_pb2.py new file mode 100644 index 0000000..7edb1c7 --- /dev/null +++ b/app/src/project/core/prometheus_protobuf/remote_pb2.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: remote.proto +# Protobuf Python Version: 5.28.3 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 28, + 3, + '', + 'remote.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import types_pb2 as types__pb2 +from . 
import gogo_pb2 as gogoproto_dot_gogo__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cremote.proto\x12\nprometheus\x1a\x0btypes.proto\x1a\x14gogoproto/gogo.proto\"z\n\x0cWriteRequest\x12\x30\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeriesB\x04\xc8\xde\x1f\x00\x12\x32\n\x08metadata\x18\x03 \x03(\x0b\x32\x1a.prometheus.MetricMetadataB\x04\xc8\xde\x1f\x00J\x04\x08\x02\x10\x03\"\xae\x01\n\x0bReadRequest\x12\"\n\x07queries\x18\x01 \x03(\x0b\x32\x11.prometheus.Query\x12\x45\n\x17\x61\x63\x63\x65pted_response_types\x18\x02 \x03(\x0e\x32$.prometheus.ReadRequest.ResponseType\"4\n\x0cResponseType\x12\x0b\n\x07SAMPLES\x10\x00\x12\x17\n\x13STREAMED_XOR_CHUNKS\x10\x01\"8\n\x0cReadResponse\x12(\n\x07results\x18\x01 \x03(\x0b\x32\x17.prometheus.QueryResult\"\x8f\x01\n\x05Query\x12\x1a\n\x12start_timestamp_ms\x18\x01 \x01(\x03\x12\x18\n\x10\x65nd_timestamp_ms\x18\x02 \x01(\x03\x12*\n\x08matchers\x18\x03 \x03(\x0b\x32\x18.prometheus.LabelMatcher\x12$\n\x05hints\x18\x04 \x01(\x0b\x32\x15.prometheus.ReadHints\"9\n\x0bQueryResult\x12*\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeries\"]\n\x13\x43hunkedReadResponse\x12\x31\n\x0e\x63hunked_series\x18\x01 \x03(\x0b\x32\x19.prometheus.ChunkedSeries\x12\x13\n\x0bquery_index\x18\x02 \x01(\x03\x42\x08Z\x06prompbb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'remote_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'Z\006prompb' + _globals['_WRITEREQUEST'].fields_by_name['timeseries']._loaded_options = None + _globals['_WRITEREQUEST'].fields_by_name['timeseries']._serialized_options = b'\310\336\037\000' + _globals['_WRITEREQUEST'].fields_by_name['metadata']._loaded_options = None + _globals['_WRITEREQUEST'].fields_by_name['metadata']._serialized_options = b'\310\336\037\000' + 
_globals['_WRITEREQUEST']._serialized_start=63 + _globals['_WRITEREQUEST']._serialized_end=185 + _globals['_READREQUEST']._serialized_start=188 + _globals['_READREQUEST']._serialized_end=362 + _globals['_READREQUEST_RESPONSETYPE']._serialized_start=310 + _globals['_READREQUEST_RESPONSETYPE']._serialized_end=362 + _globals['_READRESPONSE']._serialized_start=364 + _globals['_READRESPONSE']._serialized_end=420 + _globals['_QUERY']._serialized_start=423 + _globals['_QUERY']._serialized_end=566 + _globals['_QUERYRESULT']._serialized_start=568 + _globals['_QUERYRESULT']._serialized_end=625 + _globals['_CHUNKEDREADRESPONSE']._serialized_start=627 + _globals['_CHUNKEDREADRESPONSE']._serialized_end=720 +# @@protoc_insertion_point(module_scope) diff --git a/app/src/project/core/prometheus_protobuf/types_pb2.py b/app/src/project/core/prometheus_protobuf/types_pb2.py new file mode 100644 index 0000000..a359baf --- /dev/null +++ b/app/src/project/core/prometheus_protobuf/types_pb2.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: types.proto +# Protobuf Python Version: 5.28.3 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 28, + 3, + '', + 'types.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import gogo_pb2 as gogoproto_dot_gogo__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0btypes.proto\x12\nprometheus\x1a\x14gogoproto/gogo.proto\"\xf8\x01\n\x0eMetricMetadata\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.prometheus.MetricMetadata.MetricType\x12\x1a\n\x12metric_family_name\x18\x02 \x01(\t\x12\x0c\n\x04help\x18\x04 \x01(\t\x12\x0c\n\x04unit\x18\x05 \x01(\t\"y\n\nMetricType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07\x43OUNTER\x10\x01\x12\t\n\x05GAUGE\x10\x02\x12\r\n\tHISTOGRAM\x10\x03\x12\x12\n\x0eGAUGEHISTOGRAM\x10\x04\x12\x0b\n\x07SUMMARY\x10\x05\x12\x08\n\x04INFO\x10\x06\x12\x0c\n\x08STATESET\x10\x07\"*\n\x06Sample\x12\r\n\x05value\x18\x01 \x01(\x01\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\"U\n\x08\x45xemplar\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12\r\n\x05value\x18\x02 \x01(\x01\x12\x11\n\ttimestamp\x18\x03 \x01(\x03\"\x87\x04\n\tHistogram\x12\x13\n\tcount_int\x18\x01 \x01(\x04H\x00\x12\x15\n\x0b\x63ount_float\x18\x02 \x01(\x01H\x00\x12\x0b\n\x03sum\x18\x03 \x01(\x01\x12\x0e\n\x06schema\x18\x04 \x01(\x11\x12\x16\n\x0ezero_threshold\x18\x05 \x01(\x01\x12\x18\n\x0ezero_count_int\x18\x06 \x01(\x04H\x01\x12\x1a\n\x10zero_count_float\x18\x07 \x01(\x01H\x01\x12\x34\n\x0enegative_spans\x18\x08 \x03(\x0b\x32\x16.prometheus.BucketSpanB\x04\xc8\xde\x1f\x00\x12\x17\n\x0fnegative_deltas\x18\t \x03(\x12\x12\x17\n\x0fnegative_counts\x18\n \x03(\x01\x12\x34\n\x0epositive_spans\x18\x0b \x03(\x0b\x32\x16.prometheus.BucketSpanB\x04\xc8\xde\x1f\x00\x12\x17\n\x0fpositive_deltas\x18\x0c \x03(\x12\x12\x17\n\x0fpositive_counts\x18\r \x03(\x01\x12\x33\n\nreset_hint\x18\x0e \x01(\x0e\x32\x1f.prometheus.Histogram.ResetHint\x12\x11\n\ttimestamp\x18\x0f \x01(\x03\"4\n\tResetHint\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03YES\x10\x01\x12\x06\n\x02NO\x10\x02\x12\t\n\x05GAUGE\x10\x03\x42\x07\n\x05\x63ountB\x0c\n\nzero_count\",\n\nBucketSpan\x12\x0e\n\x06offset\x18\x01 \x01(\x11\x12\x0e\n\x06length\x18\x02 
\x01(\r\"\xc0\x01\n\nTimeSeries\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12)\n\x07samples\x18\x02 \x03(\x0b\x32\x12.prometheus.SampleB\x04\xc8\xde\x1f\x00\x12-\n\texemplars\x18\x03 \x03(\x0b\x32\x14.prometheus.ExemplarB\x04\xc8\xde\x1f\x00\x12/\n\nhistograms\x18\x04 \x03(\x0b\x32\x15.prometheus.HistogramB\x04\xc8\xde\x1f\x00\"$\n\x05Label\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"1\n\x06Labels\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\"\x82\x01\n\x0cLabelMatcher\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.prometheus.LabelMatcher.Type\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"(\n\x04Type\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03NEQ\x10\x01\x12\x06\n\x02RE\x10\x02\x12\x07\n\x03NRE\x10\x03\"|\n\tReadHints\x12\x0f\n\x07step_ms\x18\x01 \x01(\x03\x12\x0c\n\x04\x66unc\x18\x02 \x01(\t\x12\x10\n\x08start_ms\x18\x03 \x01(\x03\x12\x0e\n\x06\x65nd_ms\x18\x04 \x01(\x03\x12\x10\n\x08grouping\x18\x05 \x03(\t\x12\n\n\x02\x62y\x18\x06 \x01(\x08\x12\x10\n\x08range_ms\x18\x07 \x01(\x03\"\xaf\x01\n\x05\x43hunk\x12\x13\n\x0bmin_time_ms\x18\x01 \x01(\x03\x12\x13\n\x0bmax_time_ms\x18\x02 \x01(\x03\x12(\n\x04type\x18\x03 \x01(\x0e\x32\x1a.prometheus.Chunk.Encoding\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"D\n\x08\x45ncoding\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03XOR\x10\x01\x12\r\n\tHISTOGRAM\x10\x02\x12\x13\n\x0f\x46LOAT_HISTOGRAM\x10\x03\"a\n\rChunkedSeries\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12\'\n\x06\x63hunks\x18\x02 \x03(\x0b\x32\x11.prometheus.ChunkB\x04\xc8\xde\x1f\x00\x42\x08Z\x06prompbb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'types_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = 
b'Z\006prompb' + _globals['_EXEMPLAR'].fields_by_name['labels']._loaded_options = None + _globals['_EXEMPLAR'].fields_by_name['labels']._serialized_options = b'\310\336\037\000' + _globals['_HISTOGRAM'].fields_by_name['negative_spans']._loaded_options = None + _globals['_HISTOGRAM'].fields_by_name['negative_spans']._serialized_options = b'\310\336\037\000' + _globals['_HISTOGRAM'].fields_by_name['positive_spans']._loaded_options = None + _globals['_HISTOGRAM'].fields_by_name['positive_spans']._serialized_options = b'\310\336\037\000' + _globals['_TIMESERIES'].fields_by_name['labels']._loaded_options = None + _globals['_TIMESERIES'].fields_by_name['labels']._serialized_options = b'\310\336\037\000' + _globals['_TIMESERIES'].fields_by_name['samples']._loaded_options = None + _globals['_TIMESERIES'].fields_by_name['samples']._serialized_options = b'\310\336\037\000' + _globals['_TIMESERIES'].fields_by_name['exemplars']._loaded_options = None + _globals['_TIMESERIES'].fields_by_name['exemplars']._serialized_options = b'\310\336\037\000' + _globals['_TIMESERIES'].fields_by_name['histograms']._loaded_options = None + _globals['_TIMESERIES'].fields_by_name['histograms']._serialized_options = b'\310\336\037\000' + _globals['_LABELS'].fields_by_name['labels']._loaded_options = None + _globals['_LABELS'].fields_by_name['labels']._serialized_options = b'\310\336\037\000' + _globals['_CHUNKEDSERIES'].fields_by_name['labels']._loaded_options = None + _globals['_CHUNKEDSERIES'].fields_by_name['labels']._serialized_options = b'\310\336\037\000' + _globals['_CHUNKEDSERIES'].fields_by_name['chunks']._loaded_options = None + _globals['_CHUNKEDSERIES'].fields_by_name['chunks']._serialized_options = b'\310\336\037\000' + _globals['_METRICMETADATA']._serialized_start=50 + _globals['_METRICMETADATA']._serialized_end=298 + _globals['_METRICMETADATA_METRICTYPE']._serialized_start=177 + _globals['_METRICMETADATA_METRICTYPE']._serialized_end=298 + _globals['_SAMPLE']._serialized_start=300 + 
_globals['_SAMPLE']._serialized_end=342 + _globals['_EXEMPLAR']._serialized_start=344 + _globals['_EXEMPLAR']._serialized_end=429 + _globals['_HISTOGRAM']._serialized_start=432 + _globals['_HISTOGRAM']._serialized_end=951 + _globals['_HISTOGRAM_RESETHINT']._serialized_start=876 + _globals['_HISTOGRAM_RESETHINT']._serialized_end=928 + _globals['_BUCKETSPAN']._serialized_start=953 + _globals['_BUCKETSPAN']._serialized_end=997 + _globals['_TIMESERIES']._serialized_start=1000 + _globals['_TIMESERIES']._serialized_end=1192 + _globals['_LABEL']._serialized_start=1194 + _globals['_LABEL']._serialized_end=1230 + _globals['_LABELS']._serialized_start=1232 + _globals['_LABELS']._serialized_end=1281 + _globals['_LABELMATCHER']._serialized_start=1284 + _globals['_LABELMATCHER']._serialized_end=1414 + _globals['_LABELMATCHER_TYPE']._serialized_start=1374 + _globals['_LABELMATCHER_TYPE']._serialized_end=1414 + _globals['_READHINTS']._serialized_start=1416 + _globals['_READHINTS']._serialized_end=1540 + _globals['_CHUNK']._serialized_start=1543 + _globals['_CHUNK']._serialized_end=1718 + _globals['_CHUNK_ENCODING']._serialized_start=1650 + _globals['_CHUNK_ENCODING']._serialized_end=1718 + _globals['_CHUNKEDSERIES']._serialized_start=1720 + _globals['_CHUNKEDSERIES']._serialized_end=1817 +# @@protoc_insertion_point(module_scope) diff --git a/app/src/project/core/tasks.py b/app/src/project/core/tasks.py index 8fc3818..a6c7e6a 100644 --- a/app/src/project/core/tasks.py +++ b/app/src/project/core/tasks.py @@ -1,8 +1,11 @@ import structlog from celery import Task from celery.utils.log import get_task_logger +from django.conf import settings +from compute_horde.utils import get_validators from project.celery import app +from project.core.models import Validator logger = structlog.wrap_logger(get_task_logger(__name__)) @@ -24,7 +27,34 @@ def send_to_dead_letter_queue(task: Task, exc, task_id, args, kwargs, einfo): task.apply_async(args=args, kwargs=kwargs, queue="dead_letter") 
-@app.task(on_failure=send_to_dead_letter_queue) -def demo_task(x, y): - logger.info("adding two numbers", x=x, y=y) - return x + y +@app.task +def fetch_validators(): + debug_validator_keys = set( + Validator.objects.filter(debug=True, active=True).values_list("public_key", flat=True) + ) + + validators = get_validators( + netuid=settings.BITTENSOR_NETUID, network=settings.BITTENSOR_NETWORK + ) + validator_keys = {v.hotkey for v in validators} | debug_validator_keys + + to_activate = [] + to_deactivate = [] + to_create = [] + for validator in Validator.objects.all(): + if validator.public_key in validator_keys: + to_activate.append(validator) + validator.active = True + validator_keys.remove(validator.public_key) + else: + validator.active = False + to_deactivate.append(validator) + for key in validator_keys: + to_create.append(Validator(public_key=key, active=True)) + + Validator.objects.bulk_create(to_create) + Validator.objects.bulk_update(to_activate + to_deactivate, ["active"]) + logger.info( + f"Fetched validators. 
Activated: {len(to_activate)}, deactivated: {len(to_deactivate)}, " + f"created: {len(to_create)}" + ) diff --git a/app/src/project/core/views.py b/app/src/project/core/views.py index e69de29..672b632 100644 --- a/app/src/project/core/views.py +++ b/app/src/project/core/views.py @@ -0,0 +1,158 @@ +from urllib.parse import urljoin + +from django.conf import settings +from django.http import HttpResponse +from django.views.decorators.http import require_POST +from django.views.decorators.csrf import csrf_exempt +from .prometheus_protobuf import remote_pb2 + +import bittensor +import requests +import structlog +import snappy +from requests.adapters import HTTPAdapter +from requests.packages.urllib3.util.retry import Retry + +from project.core.models import Validator +from .metrics import series_counter, metrics_counter + +logger = structlog.getLogger(__name__) +retries = Retry( + total=3, + connect=3, + read=3, + redirect=3, + backoff_factor=0.1, + status_forcelist=(), + raise_on_status=False, +) + +TIMEOUT = 15 + +session = requests.Session() +session.mount('http://', HTTPAdapter(max_retries=retries)) +session.mount('https://', HTTPAdapter(max_retries=retries)) + + +@csrf_exempt +@require_POST +def prometheus_outbound_proxy(request): + if not settings.CENTRAL_PROMETHEUS_PROXY_URL: + msg = "CENTRAL_PROMETHEUS_PROXY_URL is not configured" + logger.error(msg) + return HttpResponse(status=500, content=msg.encode()) + data = request.body + + prometheus_remote_url = urljoin(settings.CENTRAL_PROMETHEUS_PROXY_URL, 'prometheus_inbound_proxy') + + try: + response = session.post( + prometheus_remote_url, + data=data, + headers={ + 'Bittensor-Signature': settings.BITTENSOR_WALLET().hotkey.sign(data).hex(), + 'Bittensor-Hotkey': settings.BITTENSOR_WALLET().hotkey.ss58_address, + **request.headers, + }, + timeout=TIMEOUT, + ) + except requests.exceptions.RequestException as e: + return HttpResponse(status=500, content=type(e).__name__) + + return HttpResponse( + 
headers=response.headers, + content=response.content, + ) + + +@csrf_exempt +@require_POST +def prometheus_inbound_proxy(request): + if not settings.UPSTREAM_PROMETHEUS_URL: + msg = "UPSTREAM_PROMETHEUS_URL is not configured" + logger.error(msg) + return HttpResponse(status=500, content=msg.encode()) + + data = request.body + ss58_address = request.headers.get('Bittensor-Hotkey') + signature = request.headers.get('Bittensor-Signature') + if not ss58_address or not signature: + msg = "Missing required headers." + logger.debug(msg) + return HttpResponse(status=400, content=msg) + + if ss58_address not in Validator.objects.filter(active=True).values_list('public_key', flat=True): + msg = "Validator not active." + logger.debug(msg) + return HttpResponse(status=403, content=msg) + + sender_keypair = bittensor.Keypair(ss58_address) + if not sender_keypair.verify(data, "0x" + signature): + msg = "Bad signature." + logger.debug(msg) + return HttpResponse(status=400, content=msg) + + try: + decompressed_data = snappy.uncompress(data) + except Exception as e: + msg = f"Failed to decompress data: {str(e)}" + logger.debug(msg) + return HttpResponse(msg.encode(), status=400) + + try: + write_request = remote_pb2.WriteRequest() + write_request.ParseFromString(decompressed_data) + except Exception as e: + msg = f"Failed to decode metrics: {str(e)}" + logger.debug(msg) + return HttpResponse(msg, status=400) + + # Now you can access the TimeSeries data in the write_request + series_count = 0 + metrics = set() + for ts in write_request.timeseries: + name = "" + error = None + hotkey = None + + for label in ts.labels: + + if label.name == 'hotkey': + hotkey = label.value + if label.value != ss58_address: + msg = f"Received invalid hotkey. 
Expected {ss58_address} got {label.value}" + error = HttpResponse(status=403, content=msg.encode()) + if label.name == '__name__': + name = label.value + if not hotkey: + msg = f"Received no hotkey" + error = HttpResponse(status=403, content=msg.encode()) + + if error is not None: + error.content = f"Metric: {name}. ".encode() + error.content + logger.info(error.content.decode()) + return error + series_count += 1 + metrics.add(name) + series_counter.labels(ss58_address).inc(series_count) + metrics_counter.labels(ss58_address).inc(len(metrics)) + + logger.debug("%s sent %s metrics and %s series", ss58_address, len(metrics), series_count) + + prometheus_remote_url = urljoin(settings.UPSTREAM_PROMETHEUS_URL, 'api/v1/write') + + # Forward the received data with the hash in the headers + try: + response = session.post( + prometheus_remote_url, + data=data, + headers=request.headers, + timeout=TIMEOUT, + ) + except requests.exceptions.RequestException as e: + return HttpResponse(status=500, content=type(e).__name__) + + return HttpResponse( + headers=response.headers, + content=response.content, + ) diff --git a/app/src/project/settings.py b/app/src/project/settings.py index 32611a5..65099c1 100644 --- a/app/src/project/settings.py +++ b/app/src/project/settings.py @@ -4,9 +4,11 @@ import inspect import logging +import pathlib from datetime import timedelta from functools import wraps +import bittensor import environ import structlog @@ -82,7 +84,9 @@ def wrapped(*args, **kwargs): "django_structlog", "constance", "project.core", + "cacheops", ] + PROMETHEUS_EXPORT_MIGRATIONS = env.bool("PROMETHEUS_EXPORT_MIGRATIONS", default=True) PROMETHEUS_LATENCY_BUCKETS = ( 0.008, @@ -237,6 +241,20 @@ def wrapped(*args, **kwargs): } +CACHEOPS_REDIS = { + 'host': REDIS_HOST, + 'port': REDIS_PORT, + 'db': 1, + 'socket_timeout': 3, +} + +CACHEOPS = { + 'project.core.Validator': {'ops': 'all', 'timeout': 60*15}, +} + +CACHEOPS_DEGRADE_ON_FAILURE = True + + CELERY_BROKER_URL = 
env("CELERY_BROKER_URL", default="") CELERY_RESULT_BACKEND = env("CELERY_BROKER_URL", default="") # store results in Redis CELERY_RESULT_EXPIRES = int(timedelta(days=1).total_seconds()) # time until task result deletion @@ -244,14 +262,13 @@ def wrapped(*args, **kwargs): CELERY_MESSAGE_COMPRESSION = "gzip" # result compression CELERY_SEND_EVENTS = True # needed for worker monitoring CELERY_BEAT_SCHEDULE = { # type: ignore - # 'task_name': { - # 'task': "project.core.tasks.demo_task", - # 'args': [2, 2], - # 'kwargs': {}, - # 'schedule': crontab(minute=0, hour=0), - # 'options': {"time_limit": 300}, - # }, + "fetch_validators": { + "task": "compute_horde_miner.miner.tasks.fetch_validators", + "schedule": 60, + "options": {}, + }, } + CELERY_TASK_CREATE_MISSING_QUEUES = False CELERY_TASK_QUEUES = (Queue("celery"), Queue("worker"), Queue("dead_letter")) CELERY_TASK_DEFAULT_EXCHANGE = "celery" @@ -325,6 +342,40 @@ def wrapped(*args, **kwargs): }, } +CENTRAL_PROMETHEUS_PROXY_URL = env.str("CENTRAL_PROMETHEUS_PROXY_URL", default="") +UPSTREAM_PROMETHEUS_URL = env.str("UPSTREAM_PROMETHEUS_URL", default="") +if not UPSTREAM_PROMETHEUS_URL and not CENTRAL_PROMETHEUS_PROXY_URL: + raise RuntimeError("Either UPSTREAM_PROMETHEUS_URL or CENTRAL_PROMETHEUS_PROXY_URL must be set") + +BITTENSOR_NETUID = env.int("BITTENSOR_NETUID") +BITTENSOR_NETWORK = env.str("BITTENSOR_NETWORK") + +BITTENSOR_WALLET_DIRECTORY = env.path( + "BITTENSOR_WALLET_DIRECTORY", + default=pathlib.Path("~").expanduser() / ".bittensor" / "wallets", +) +BITTENSOR_WALLET_NAME = env.str("BITTENSOR_WALLET_NAME") +BITTENSOR_WALLET_HOTKEY_NAME = env.str("BITTENSOR_WALLET_HOTKEY_NAME") + +_wallet = None + + +def BITTENSOR_WALLET() -> bittensor.wallet: + global _wallet + if _wallet: + return _wallet + + if not BITTENSOR_WALLET_NAME or not BITTENSOR_WALLET_HOTKEY_NAME: + raise RuntimeError("Wallet not configured") + wallet = bittensor.wallet( + name=BITTENSOR_WALLET_NAME, + hotkey=BITTENSOR_WALLET_HOTKEY_NAME, + 
path=str(BITTENSOR_WALLET_DIRECTORY), + ) + wallet.hotkey_file.get_keypair() # this raises errors if the keys are inaccessible + _wallet = wallet + return wallet + def configure_structlog(): structlog.configure( diff --git a/app/src/project/urls.py b/app/src/project/urls.py index c05c52f..3ce3e81 100644 --- a/app/src/project/urls.py +++ b/app/src/project/urls.py @@ -4,12 +4,15 @@ from .core.business_metrics import metrics_manager from .core.metrics import metrics_view +from .core.views import prometheus_inbound_proxy, prometheus_outbound_proxy urlpatterns = [ path("admin/", site.urls), path("metrics", metrics_view, name="prometheus-django-metrics"), path("business-metrics", metrics_manager.view, name="prometheus-business-metrics"), path("healthcheck/", include("health_check.urls")), + path("prometheus_inbound_proxy", prometheus_inbound_proxy), + path("prometheus_outbound_proxy", prometheus_outbound_proxy), path("", include("django.contrib.auth.urls")), ] diff --git a/envs/dev/.env.template b/envs/dev/.env.template index 4463369..7a8d433 100644 --- a/envs/dev/.env.template +++ b/envs/dev/.env.template @@ -3,6 +3,13 @@ DEBUG=on DEBUG_TOOLBAR=on SECRET_KEY=12345 +BITTENSOR_NETUID=12 +BITTENSOR_NETWORK=finney +BITTENSOR_WALLET_NAME=default +BITTENSOR_WALLET_HOTKEY_NAME=default +CENTRAL_PROMETHEUS_PROXY_URL=http://localhost:8000 +UPSTREAM_PROMETHEUS_URL=http://localhost:29090 + POSTGRES_DB=project POSTGRES_HOST=localhost POSTGRES_PORT=8432 diff --git a/envs/dev/central-prometheus.yml b/envs/dev/central-prometheus.yml new file mode 100644 index 0000000..126b2c9 --- /dev/null +++ b/envs/dev/central-prometheus.yml @@ -0,0 +1,5 @@ +global: + scrape_interval: 5s + evaluation_interval: 5s + +scrape_configs: diff --git a/envs/dev/docker-compose.yml b/envs/dev/docker-compose.yml index 262eca9..470e1fd 100644 --- a/envs/dev/docker-compose.yml +++ b/envs/dev/docker-compose.yml @@ -23,3 +23,42 @@ services: - ./db/data:/var/lib/postgresql/data ports: - ${POSTGRES_PORT}:5432 + + 
central-prometheus: + image: prom/prometheus:v2.46.0 + command: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug + - --web.enable-remote-write-receiver + volumes: + - ./central-prometheus.yml:/etc/prometheus/prometheus.yml + restart: unless-stopped + ports: + - 19090:9090 + + on-site-prometheus: + image: prom/prometheus:v2.46.0 + command: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug + volumes: + - ./on-site-prometheus.yml:/etc/prometheus/prometheus.yml + restart: unless-stopped + depends_on: + - node-exporter + - central-prometheus + ports: + - 29090:9090 + extra_hosts: + - "host.docker.internal:host-gateway" + + node-exporter: + image: prom/node-exporter:latest + network_mode: host + pid: host + volumes: + - /:/host:ro,rslave + command: + - '--path.rootfs=/host' + - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc|run|boot|var/.+)($$|/)' + diff --git a/envs/dev/on-site-prometheus.yml b/envs/dev/on-site-prometheus.yml new file mode 100644 index 0000000..da4436a --- /dev/null +++ b/envs/dev/on-site-prometheus.yml @@ -0,0 +1,14 @@ +global: + scrape_interval: 5s + evaluation_interval: 5s + +scrape_configs: + - job_name: "node" + scrape_interval: 5s + static_configs: + - targets: ['host.docker.internal:9100'] + labels: + hotkey: 'replace-me' + +remote_write: + - url: "http://host.docker.internal:8000/prometheus_outbound_proxy" \ No newline at end of file diff --git a/envs/prod/.env.template b/envs/prod/.env.template index e876ef1..53715ff 100644 --- a/envs/prod/.env.template +++ b/envs/prod/.env.template @@ -3,6 +3,13 @@ DEBUG=off DEBUG_TOOLBAR=off SECRET_KEY= +BITTENSOR_NETUID=12 +BITTENSOR_NETWORK=finney +BITTENSOR_WALLET_NAME=default +BITTENSOR_WALLET_HOTKEY_NAME=default +CENTRAL_PROMETHEUS_PROXY_URL=http://localhost:8000 +UPSTREAM_PROMETHEUS_URL=http://localhost:29090 + POSTGRES_DB=project POSTGRES_HOST=db POSTGRES_PORT=5432 diff --git a/pyproject.toml b/pyproject.toml index aedf4ac..3572971 
100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,12 @@ dependencies = [ "django-prometheus==2.3.1", "django-business-metrics>=1.0.1,<2", "django-health-check>=3.18.1,<4", + "requests>=2.32.3,<3", + "bittensor==8.2.0", + "compute_horde", + "django-cacheops>=7.1,<8", + "python-snappy==0.7.3", + "protobuf>=5.28.3,<6", ] [build-system] diff --git a/setup-dev.sh b/setup-dev.sh index 492e735..abb9214 100755 --- a/setup-dev.sh +++ b/setup-dev.sh @@ -29,6 +29,8 @@ fi # Set symlinks ln -sf "${ENV_DIR}/.env" .env ln -sf "${ENV_DIR}/docker-compose.yml" docker-compose.yml +cp "${ENV_DIR}/central-prometheus.yml" central-prometheus.yml +cp "${ENV_DIR}/on-site-prometheus.yml" on-site-prometheus.yml # shellcheck disable=SC2164 cd "${PROJECT_DIR}/app/" From b7c6d86ccd13ebdcd73ca6eeb50de2dd82482551 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Sat, 2 Nov 2024 13:33:39 +0100 Subject: [PATCH 05/15] bittensor_prometheus working --- app/bittensor_prometheus/Dockerfile | 36 +++++++++++++++++++ app/bittensor_prometheus/README.md | 32 +++++++++++++++++ app/bittensor_prometheus/entrypoint.sh | 3 ++ .../read_wallet_and_substitute_config.py | 35 ++++++++++++++++++ 4 files changed, 106 insertions(+) create mode 100644 app/bittensor_prometheus/Dockerfile create mode 100644 app/bittensor_prometheus/README.md create mode 100644 app/bittensor_prometheus/entrypoint.sh create mode 100644 app/bittensor_prometheus/read_wallet_and_substitute_config.py diff --git a/app/bittensor_prometheus/Dockerfile b/app/bittensor_prometheus/Dockerfile new file mode 100644 index 0000000..e1a8b7e --- /dev/null +++ b/app/bittensor_prometheus/Dockerfile @@ -0,0 +1,36 @@ +# Use Python base image from DockerHub +FROM python:3.11 + +RUN pip install bittensor==8.2.0 + +WORKDIR /app + +RUN apt-get update && \ + apt-get install -y wget curl && \ + wget https://github.com/prometheus/prometheus/releases/download/v2.55.0/prometheus-2.55.0.linux-amd64.tar.gz && \ + tar xvzf prometheus-*.tar.gz 
&& \ + mkdir /etc/prometheus && \ + mv prometheus-2.55.0.linux-amd64/prometheus /bin/ && \ + mv prometheus-2.55.0.linux-amd64/promtool /bin/ && \ + mv prometheus-2.55.0.linux-amd64/prometheus.yml /etc/prometheus/ && \ + rm -rf prometheus-*.tar.gz prometheus-2.55.0.linux-amd64 && \ + chown -R nobody:nogroup /etc/prometheus && \ + chown -R nobody:nogroup /etc/prometheus + +RUN chown nobody: /etc/prometheus + +COPY read_wallet_and_substitute_config.py /app/ +RUN chown -R nobody: /app/ + +COPY entrypoint.sh / +RUN chown nobody: /entrypoint.sh + +RUN mkdir /nonexistent +RUN chown nobody: /nonexistent + +RUN mkdir /wallets +RUN chown nobody: /wallets + +#USER nobody + +ENTRYPOINT ["/entrypoint.sh"] \ No newline at end of file diff --git a/app/bittensor_prometheus/README.md b/app/bittensor_prometheus/README.md new file mode 100644 index 0000000..0bfb102 --- /dev/null +++ b/app/bittensor_prometheus/README.md @@ -0,0 +1,32 @@ +Here lie the tools to build an image that runs prometheus but before starting reads the hotkey of a configured ( +via env vars) bittensor wallet and allows for including that hotkey in prometheus' config. + + +To run it, you need to provide a template of the prometheus config (only the hotkey part is meant to substituted when +materializing this template), mount your wallet and specify it using env vars. 
for example: + +config: + +```yaml +global: + scrape_interval: 5s + evaluation_interval: 5s + +scrape_configs: + - job_name: "node" + scrape_interval: 5s + static_configs: + - targets: ['host.docker.internal:9100'] + labels: + hotkey: '{hotkey}' # the 'template engine' in use is python's str.format() + +``` + +running: + +docker run \ + -v config.yml:/etc/prometheus/prometheus.yml.template \ + -v /home/user/.bittensor/wallets/:/wallets/ \ + -e BITTENSOR_WALLET_NAME=validator \ + -e BITTENSOR_WALLET_HOTKEY_NAME=default \ + backenddevelopersltd/bittensor_prometheus diff --git a/app/bittensor_prometheus/entrypoint.sh b/app/bittensor_prometheus/entrypoint.sh new file mode 100644 index 0000000..cec4e87 --- /dev/null +++ b/app/bittensor_prometheus/entrypoint.sh @@ -0,0 +1,3 @@ +#!/bin/sh -e +python /app/read_wallet_and_substitute_config.py +exec /bin/prometheus --config.file=/etc/prometheus/prometheus.yml "$@" \ No newline at end of file diff --git a/app/bittensor_prometheus/read_wallet_and_substitute_config.py b/app/bittensor_prometheus/read_wallet_and_substitute_config.py new file mode 100644 index 0000000..0ee5579 --- /dev/null +++ b/app/bittensor_prometheus/read_wallet_and_substitute_config.py @@ -0,0 +1,35 @@ +import os +import pathlib + +import bittensor + + +BITTENSOR_WALLET_NAME = os.environ.get("BITTENSOR_WALLET_NAME") +BITTENSOR_WALLET_HOTKEY_NAME = os.environ.get("BITTENSOR_WALLET_HOTKEY_NAME") + +if not BITTENSOR_WALLET_NAME or not BITTENSOR_WALLET_HOTKEY_NAME: + raise RuntimeError("You must set BITTENSOR_WALLET_NAME and BITTENSOR_WALLET_HOTKEY_NAME env vars") + + +def get_wallet() -> bittensor.wallet: + wallet = bittensor.wallet( + name=BITTENSOR_WALLET_NAME, + hotkey=BITTENSOR_WALLET_HOTKEY_NAME, + path="/wallets", + ) + wallet.hotkey_file.get_keypair() # this raises errors if the keys are inaccessible + return wallet + + +def read_and_substitute_config(hotkey: str): + tmpl = pathlib.Path("/etc/prometheus/prometheus.yml.template").read_text() + 
pathlib.Path("/etc/prometheus/prometheus.yml").write_text(tmpl.format(hotkey=hotkey)) + + +def main(): + wallet = get_wallet() + read_and_substitute_config(wallet.hotkey.ss58_address) + + +if __name__ == "__main__": + main() From 527327a2f3d5805ec0ce0d1fd0073618256857af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Sat, 2 Nov 2024 14:36:17 +0100 Subject: [PATCH 06/15] require less env vars, allow running without db and redis, use bittensor_prometheus --- app/src/project/core/metrics.py | 2 +- app/src/project/core/views.py | 3 + app/src/project/settings.py | 130 ++++++++++++++++++-------------- envs/dev/.env.template | 28 ------- envs/dev/docker-compose.yml | 9 ++- envs/dev/on-site-prometheus.yml | 2 +- envs/prod/.env.template | 26 ------- 7 files changed, 83 insertions(+), 117 deletions(-) diff --git a/app/src/project/core/metrics.py b/app/src/project/core/metrics.py index 7fe7caf..bc243d2 100644 --- a/app/src/project/core/metrics.py +++ b/app/src/project/core/metrics.py @@ -52,7 +52,7 @@ def metrics_view(request): num_tasks_in_queue = {} -for queue in settings.CELERY_TASK_QUEUES: +for queue in getattr(settings, 'CELERY_TASK_QUEUES', []): gauge = prometheus_client.Gauge( f"celery_{queue.name}_queue_len", f"How many tasks are there in '{queue.name}' queue", diff --git a/app/src/project/core/views.py b/app/src/project/core/views.py index 672b632..5e5d748 100644 --- a/app/src/project/core/views.py +++ b/app/src/project/core/views.py @@ -57,9 +57,12 @@ def prometheus_outbound_proxy(request): timeout=TIMEOUT, ) except requests.exceptions.RequestException as e: + logger.info(f"Sending to central prometheus proxy failed: {e}") return HttpResponse(status=500, content=type(e).__name__) + logger.debug(f"Central prometheus proxy replied with {response.status_code}, {response.content[:200]}") return HttpResponse( + status=response.status_code, headers=response.headers, content=response.content, ) diff --git a/app/src/project/settings.py 
b/app/src/project/settings.py index 65099c1..f783f1b 100644 --- a/app/src/project/settings.py +++ b/app/src/project/settings.py @@ -54,7 +54,7 @@ def wrapped(*args, **kwargs): SECRET_KEY = env("SECRET_KEY") # SECURITY WARNING: don't run with debug turned on in production! -DEBUG = env("DEBUG") +DEBUG = env("DEBUG", default=False) ALLOWED_HOSTS = ["*"] @@ -140,7 +140,7 @@ def wrapped(*args, **kwargs): SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") # Content Security Policy -if CSP_ENABLED := env.bool("CSP_ENABLED"): +if CSP_ENABLED := env.bool("CSP_ENABLED", default=False): MIDDLEWARE.append("csp.middleware.CSPMiddleware") CSP_REPORT_ONLY = env.bool("CSP_REPORT_ONLY", default=True) @@ -184,12 +184,12 @@ def wrapped(*args, **kwargs): WSGI_APPLICATION = "project.wsgi.application" DATABASES = {} -if env("DATABASE_POOL_URL"): # DB transaction-based connection pool, such as one provided PgBouncer +if env("DATABASE_POOL_URL", default=None): # DB transaction-based connection pool, such as one provided PgBouncer DATABASES["default"] = { **env.db_url("DATABASE_POOL_URL"), "DISABLE_SERVER_SIDE_CURSORS": True, # prevents random cursor errors with transaction-based connection pool } -elif env("DATABASE_URL"): +elif env("DATABASE_URL", default=None): DATABASES["default"] = env.db_url("DATABASE_URL") DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField" @@ -231,9 +231,14 @@ def wrapped(*args, **kwargs): CSRF_COOKIE_SECURE = True else: SECURE_SSL_REDIRECT = False -REDIS_HOST = env("REDIS_HOST") -REDIS_PORT = env.int("REDIS_PORT") -REDIS_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}" + +REDIS_HOST = env("REDIS_HOST", default=None) +REDIS_PORT = env.int("REDIS_PORT", default=None) +if (REDIS_HOST is None) != (REDIS_PORT is None): + raise RuntimeError("Either set both redis host and port or none") + +if REDIS_HOST: + REDIS_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}" CONSTANCE_BACKEND = "constance.backends.database.DatabaseBackend" CONSTANCE_CONFIG = { @@ -241,59 +246,52 @@ 
def wrapped(*args, **kwargs): } -CACHEOPS_REDIS = { - 'host': REDIS_HOST, - 'port': REDIS_PORT, - 'db': 1, - 'socket_timeout': 3, -} - -CACHEOPS = { - 'project.core.Validator': {'ops': 'all', 'timeout': 60*15}, -} - -CACHEOPS_DEGRADE_ON_FAILURE = True +if REDIS_HOST: + CACHEOPS_REDIS = { + 'host': REDIS_HOST, + 'port': REDIS_PORT, + 'db': 1, + 'socket_timeout': 3, + } + CACHEOPS = { + 'project.core.Validator': {'ops': 'all', 'timeout': 60*15}, + } -CELERY_BROKER_URL = env("CELERY_BROKER_URL", default="") -CELERY_RESULT_BACKEND = env("CELERY_BROKER_URL", default="") # store results in Redis -CELERY_RESULT_EXPIRES = int(timedelta(days=1).total_seconds()) # time until task result deletion -CELERY_COMPRESSION = "gzip" # task compression -CELERY_MESSAGE_COMPRESSION = "gzip" # result compression -CELERY_SEND_EVENTS = True # needed for worker monitoring -CELERY_BEAT_SCHEDULE = { # type: ignore - "fetch_validators": { - "task": "compute_horde_miner.miner.tasks.fetch_validators", - "schedule": 60, - "options": {}, - }, -} + CACHEOPS_DEGRADE_ON_FAILURE = True + + +if REDIS_HOST: + CELERY_BROKER_URL = env("CELERY_BROKER_URL", default="") + CELERY_RESULT_BACKEND = env("CELERY_BROKER_URL", default="") # store results in Redis + CELERY_RESULT_EXPIRES = int(timedelta(days=1).total_seconds()) # time until task result deletion + CELERY_COMPRESSION = "gzip" # task compression + CELERY_MESSAGE_COMPRESSION = "gzip" # result compression + CELERY_SEND_EVENTS = True # needed for worker monitoring + CELERY_BEAT_SCHEDULE = { # type: ignore + "fetch_validators": { + "task": "compute_horde_miner.miner.tasks.fetch_validators", + "schedule": 60, + "options": {}, + }, + } -CELERY_TASK_CREATE_MISSING_QUEUES = False -CELERY_TASK_QUEUES = (Queue("celery"), Queue("worker"), Queue("dead_letter")) -CELERY_TASK_DEFAULT_EXCHANGE = "celery" -CELERY_TASK_DEFAULT_ROUTING_KEY = "celery" -CELERY_TASK_ANNOTATIONS = {"*": {"acks_late": True, "reject_on_worker_lost": True}} -CELERY_TASK_ROUTES = {"*": 
{"queue": "celery"}} -CELERY_TASK_TIME_LIMIT = int(timedelta(minutes=5).total_seconds()) -CELERY_TASK_ALWAYS_EAGER = env.bool("CELERY_TASK_ALWAYS_EAGER", default=False) -CELERY_ACCEPT_CONTENT = ["json"] -CELERY_TASK_SERIALIZER = "json" -CELERY_RESULT_SERIALIZER = "json" -CELERY_WORKER_PREFETCH_MULTIPLIER = env.int("CELERY_WORKER_PREFETCH_MULTIPLIER", default=1) -CELERY_BROKER_POOL_LIMIT = env.int("CELERY_BROKER_POOL_LIMIT", default=50) + CELERY_TASK_CREATE_MISSING_QUEUES = False + CELERY_TASK_QUEUES = (Queue("celery"), Queue("worker"), Queue("dead_letter")) + CELERY_TASK_DEFAULT_EXCHANGE = "celery" + CELERY_TASK_DEFAULT_ROUTING_KEY = "celery" + CELERY_TASK_ANNOTATIONS = {"*": {"acks_late": True, "reject_on_worker_lost": True}} + CELERY_TASK_ROUTES = {"*": {"queue": "celery"}} + CELERY_TASK_TIME_LIMIT = int(timedelta(minutes=5).total_seconds()) + CELERY_TASK_ALWAYS_EAGER = env.bool("CELERY_TASK_ALWAYS_EAGER", default=False) + CELERY_ACCEPT_CONTENT = ["json"] + CELERY_TASK_SERIALIZER = "json" + CELERY_RESULT_SERIALIZER = "json" + CELERY_WORKER_PREFETCH_MULTIPLIER = env.int("CELERY_WORKER_PREFETCH_MULTIPLIER", default=1) + CELERY_BROKER_POOL_LIMIT = env.int("CELERY_BROKER_POOL_LIMIT", default=50) DJANGO_STRUCTLOG_CELERY_ENABLED = True -EMAIL_BACKEND = env("EMAIL_BACKEND") -EMAIL_FILE_PATH = env("EMAIL_FILE_PATH") -EMAIL_HOST = env("EMAIL_HOST") -EMAIL_PORT = env.int("EMAIL_PORT") -EMAIL_HOST_USER = env("EMAIL_HOST_USER") -EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD") -EMAIL_USE_TLS = env.bool("EMAIL_USE_TLS") -DEFAULT_FROM_EMAIL = env("DEFAULT_FROM_EMAIL") - LOGGING = { "version": 1, "disable_existing_loggers": False, @@ -347,15 +345,31 @@ def wrapped(*args, **kwargs): if not UPSTREAM_PROMETHEUS_URL and not CENTRAL_PROMETHEUS_PROXY_URL: raise RuntimeError("Either UPSTREAM_PROMETHEUS_URL or CENTRAL_PROMETHEUS_PROXY_URL must be set") -BITTENSOR_NETUID = env.int("BITTENSOR_NETUID") -BITTENSOR_NETWORK = env.str("BITTENSOR_NETWORK") +BITTENSOR_NETUID = 
env.int("BITTENSOR_NETUID", default=None) +BITTENSOR_NETWORK = env.str("BITTENSOR_NETWORK", default=None) + +if UPSTREAM_PROMETHEUS_URL: + if BITTENSOR_NETUID is None or BITTENSOR_NETWORK is None: + raise RuntimeError("Both BITTENSOR_NETUID and BITTENSOR_NETWORK must be set when " + "UPSTREAM_PROMETHEUS_URL is defined") + if not DATABASES: + raise RuntimeError("Either DATABASE_POOL_URL or DATABASE_URL must be set when " + "UPSTREAM_PROMETHEUS_URL is defined") + if not REDIS_HOST: + raise RuntimeError("REDIS_HOST must be set when UPSTREAM_PROMETHEUS_URL is defined") + BITTENSOR_WALLET_DIRECTORY = env.path( "BITTENSOR_WALLET_DIRECTORY", default=pathlib.Path("~").expanduser() / ".bittensor" / "wallets", ) -BITTENSOR_WALLET_NAME = env.str("BITTENSOR_WALLET_NAME") -BITTENSOR_WALLET_HOTKEY_NAME = env.str("BITTENSOR_WALLET_HOTKEY_NAME") +BITTENSOR_WALLET_NAME = env.str("BITTENSOR_WALLET_NAME", default=None) +BITTENSOR_WALLET_HOTKEY_NAME = env.str("BITTENSOR_WALLET_HOTKEY_NAME", default=None) + +if CENTRAL_PROMETHEUS_PROXY_URL: + if BITTENSOR_WALLET_NAME is None or BITTENSOR_WALLET_HOTKEY_NAME is None: + raise RuntimeError("Both BITTENSOR_WALLET_NAME and BITTENSOR_WALLET_HOTKEY_NAME must be set when " + "CENTRAL_PROMETHEUS_PROXY_URL is defined") _wallet = None diff --git a/envs/dev/.env.template b/envs/dev/.env.template index 7a8d433..711f9e5 100644 --- a/envs/dev/.env.template +++ b/envs/dev/.env.template @@ -41,36 +41,8 @@ CELERY_FLOWER_PASSWORD=12345 - -EMAIL_BACKEND=django.core.mail.backends.filebased.EmailBackend -EMAIL_FILE_PATH=/tmp/email -EMAIL_HOST=smtp.sendgrid.net -EMAIL_PORT=587 -EMAIL_USE_TLS=1 -EMAIL_HOST_USER=apikey -EMAIL_HOST_PASSWORD= -DEFAULT_FROM_EMAIL= - SENTRY_DSN= -CSP_ENABLED=n -CSP_REPORT_ONLY=y -CSP_REPORT_URL="" -CSP_DEFAULT_SRC="'none'" -CSP_SCRIPT_SRC="'self'" -CSP_STYLE_SRC="'self'" -CSP_FONT_SRC="'self'" -CSP_IMG_SRC="'self'" -CSP_MEDIA_SRC="'self'" -CSP_OBJECT_SRC="'self'" -CSP_FRAME_SRC="'self'" -CSP_CONNECT_SRC="'self'" 
-CSP_CHILD_SRC="'self'" -CSP_MANIFEST_SRC="'self'" -CSP_WORKER_SRC="'self'" -CSP_BLOCK_ALL_MIXED_CONTENT=y -CSP_EXCLUDE_URL_PREFIXES= - BACKUP_B2_BUCKET= BACKUP_B2_KEY_ID= BACKUP_B2_KEY_SECRET= diff --git a/envs/dev/docker-compose.yml b/envs/dev/docker-compose.yml index 470e1fd..f1813f7 100644 --- a/envs/dev/docker-compose.yml +++ b/envs/dev/docker-compose.yml @@ -37,12 +37,15 @@ services: - 19090:9090 on-site-prometheus: - image: prom/prometheus:v2.46.0 + image: backenddevelopersltd/bittensor_prometheus command: - - --config.file=/etc/prometheus/prometheus.yml - --log.level=debug + environment: + - BITTENSOR_WALLET_NAME=${BITTENSOR_WALLET_NAME} + - BITTENSOR_WALLET_HOTKEY_NAME=${BITTENSOR_WALLET_HOTKEY_NAME} volumes: - - ./on-site-prometheus.yml:/etc/prometheus/prometheus.yml + - ./on-site-prometheus.yml:/etc/prometheus/prometheus.yml.template + - ~/.bittensor/wallets:/wallets restart: unless-stopped depends_on: - node-exporter diff --git a/envs/dev/on-site-prometheus.yml b/envs/dev/on-site-prometheus.yml index da4436a..cb1e5c7 100644 --- a/envs/dev/on-site-prometheus.yml +++ b/envs/dev/on-site-prometheus.yml @@ -8,7 +8,7 @@ scrape_configs: static_configs: - targets: ['host.docker.internal:9100'] labels: - hotkey: 'replace-me' + hotkey: '{hotkey}' remote_write: - url: "http://host.docker.internal:8000/prometheus_outbound_proxy" \ No newline at end of file diff --git a/envs/prod/.env.template b/envs/prod/.env.template index 53715ff..8ecd481 100644 --- a/envs/prod/.env.template +++ b/envs/prod/.env.template @@ -51,34 +51,8 @@ LOKI_CLIENT= LOKI_CLIENT_SERVER_GROUP= -EMAIL_BACKEND=django.core.mail.backends.smtp.EmailBackend -EMAIL_FILE_PATH=/tmp/email -EMAIL_HOST=smtp.sendgrid.net -EMAIL_PORT=587 -EMAIL_USE_TLS=1 -EMAIL_HOST_USER=apikey -EMAIL_HOST_PASSWORD= -DEFAULT_FROM_EMAIL= - SENTRY_DSN= -CSP_ENABLED=n -CSP_REPORT_ONLY=y -CSP_REPORT_URL="" -CSP_DEFAULT_SRC="'none'" -CSP_SCRIPT_SRC="'self'" -CSP_STYLE_SRC="'self'" -CSP_FONT_SRC="'self'" -CSP_IMG_SRC="'self'" 
-CSP_MEDIA_SRC="'self'" -CSP_OBJECT_SRC="'self'" -CSP_FRAME_SRC="'self'" -CSP_CONNECT_SRC="'self'" -CSP_CHILD_SRC="'self'" -CSP_MANIFEST_SRC="'self'" -CSP_WORKER_SRC="'self'" -CSP_BLOCK_ALL_MIXED_CONTENT=y -CSP_EXCLUDE_URL_PREFIXES= BACKUP_B2_BUCKET= BACKUP_B2_KEY_ID= From c5b1fe7f274fe5dab505c5915c10a5dc04687790 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Sat, 2 Nov 2024 16:30:22 +0100 Subject: [PATCH 07/15] don't wait for database in entrypoint if there is no database --- app/envs/prod/entrypoint.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/app/envs/prod/entrypoint.sh b/app/envs/prod/entrypoint.sh index 0fdcac6..7633c70 100755 --- a/app/envs/prod/entrypoint.sh +++ b/app/envs/prod/entrypoint.sh @@ -3,8 +3,11 @@ # We assume that WORKDIR is defined in Dockerfile ./prometheus-cleanup.sh -PROMETHEUS_EXPORT_MIGRATIONS=0 ./manage.py wait_for_database --timeout 10 -# this seems to be the only place to put this for AWS deployments to pick it up -PROMETHEUS_EXPORT_MIGRATIONS=0 ./manage.py migrate -gunicorn -c gunicorn.conf.py +if [ ! -z "$DATABASE_POOL_URL" ] || [ ! 
-z "$DATABASE_URL" ]; then + PROMETHEUS_EXPORT_MIGRATIONS=0 ./manage.py wait_for_database --timeout 10 + # this seems to be the only place to put this for AWS deployments to pick it up + PROMETHEUS_EXPORT_MIGRATIONS=0 ./manage.py migrate +fi + +exec gunicorn -c gunicorn.conf.py From 1812329634506634fa4d9ac0dcc78987f21ef484 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Sat, 2 Nov 2024 23:29:14 +0100 Subject: [PATCH 08/15] unfortunately go back to days without pdm --- app/envs/prod/Dockerfile | 21 +++++++++++++-------- pyproject.toml | 2 +- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/app/envs/prod/Dockerfile b/app/envs/prod/Dockerfile index de535d2..648aaea 100644 --- a/app/envs/prod/Dockerfile +++ b/app/envs/prod/Dockerfile @@ -6,23 +6,27 @@ LABEL builder=true WORKDIR /root/src/ -RUN pip3 install --no-cache-dir 'pdm>=2.12,<3' +#RUN pip3 install --no-cache-dir 'pdm>=2.12,<3' -COPY pyproject.toml pdm.lock ./ -RUN pdm lock --check +#COPY pyproject.toml pdm.lock ./ +COPY pyproject.toml ./ +#RUN pdm lock --check RUN apt-get update && apt-get install -y git -RUN pdm config python.use_venv False && pdm sync --prod --group :all -RUN mkdir -p /opt/ && mv __pypackages__/3.11/ /opt/pypackages/ +#RUN pdm config python.use_venv False && pdm sync --prod --group :all +RUN pip3 install tomlkit +RUN python -c "import tomlkit, pathlib, subprocess; subprocess.check_call(['pip', 'install', '--user', '--no-cache-dir', *tomlkit.loads(pathlib.Path('pyproject.toml').read_text())['project']['dependencies']])" + +#RUN mkdir -p /opt/ && mv __pypackages__/3.11/ /opt/pypackages/ ENV PATH=/opt/pypackages/bin:$PATH ENV PYTHONPATH=/opt/pypackages/lib:$PYTHONPATH COPY ./app/src/ ./app/envs/prod/entrypoint.sh ./app/envs/prod/gunicorn.conf.py ./app/envs/prod/celery-entrypoint.sh ./app/envs/prod/prometheus-cleanup.sh /root/src/ RUN python3 -m compileall -b -f -q /root/ -RUN ENV=prod ENV_FILL_MISSING_VALUES=1 SECRET_KEY=dummy python3 manage.py collectstatic 
--no-input --clear +RUN CENTRAL_PROMETHEUS_PROXY_URL=localhost BITTENSOR_WALLET_NAME=a BITTENSOR_WALLET_HOTKEY_NAME=b ENV=prod ENV_FILL_MISSING_VALUES=1 SECRET_KEY=dummy python3 manage.py collectstatic --no-input --clear FROM $BASE_IMAGE AS secondary-image @@ -35,10 +39,11 @@ RUN apt-get update \ WORKDIR /root/src/ ENV PYTHONUNBUFFERED=1 ENV PATH=/opt/pypackages/bin:$PATH +ENV PATH=/root/.local/bin:$PATH ENV PYTHONPATH=/opt/pypackages/lib:$PYTHONPATH -COPY --from=base-image /root/src/ /root/src/ -COPY --from=base-image /opt/pypackages/ /opt/pypackages/ +COPY --from=base-image /root/ /root/ + diff --git a/pyproject.toml b/pyproject.toml index 3572971..a9c8419 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,7 +26,7 @@ dependencies = [ "django-business-metrics>=1.0.1,<2", "django-health-check>=3.18.1,<4", "requests>=2.32.3,<3", - "bittensor==8.2.0", + "bittensor<8.0.0,>=7.3.1", "compute_horde", "django-cacheops>=7.1,<8", "python-snappy==0.7.3", From 0b4e86aafe0dd32bdbb01d4fe5bc293531d5a09c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Sat, 2 Nov 2024 23:29:45 +0100 Subject: [PATCH 09/15] final touches to make sure everything runs smoothly in production --- app/src/project/core/views.py | 7 +++++-- app/src/project/settings.py | 2 +- app/src/project/urls.py | 4 ++-- envs/dev/on-site-prometheus.yml | 2 +- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/app/src/project/core/views.py b/app/src/project/core/views.py index 5e5d748..167efd6 100644 --- a/app/src/project/core/views.py +++ b/app/src/project/core/views.py @@ -43,7 +43,7 @@ def prometheus_outbound_proxy(request): return HttpResponse(status=500, content=msg.encode()) data = request.body - prometheus_remote_url = urljoin(settings.CENTRAL_PROMETHEUS_PROXY_URL, 'prometheus_inbound_proxy') + prometheus_remote_url = urljoin(settings.CENTRAL_PROMETHEUS_PROXY_URL, 'prometheus_inbound_proxy/') try: response = session.post( @@ -63,7 +63,10 @@ def 
prometheus_outbound_proxy(request): logger.debug(f"Central prometheus proxy replied with {response.status_code}, {response.content[:200]}") return HttpResponse( status=response.status_code, - headers=response.headers, + headers={k: v for k, v in response.headers.items() if k.lower() not in [ + 'connection', 'keep-alive', 'public', + 'proxy-authenticate', 'transfer-encoding', 'upgrade' + ]}, content=response.content, ) diff --git a/app/src/project/settings.py b/app/src/project/settings.py index f783f1b..3a456f0 100644 --- a/app/src/project/settings.py +++ b/app/src/project/settings.py @@ -270,7 +270,7 @@ def wrapped(*args, **kwargs): CELERY_SEND_EVENTS = True # needed for worker monitoring CELERY_BEAT_SCHEDULE = { # type: ignore "fetch_validators": { - "task": "compute_horde_miner.miner.tasks.fetch_validators", + "task": "project.core.tasks.fetch_validators", "schedule": 60, "options": {}, }, diff --git a/app/src/project/urls.py b/app/src/project/urls.py index 3ce3e81..51d24e9 100644 --- a/app/src/project/urls.py +++ b/app/src/project/urls.py @@ -11,8 +11,8 @@ path("metrics", metrics_view, name="prometheus-django-metrics"), path("business-metrics", metrics_manager.view, name="prometheus-business-metrics"), path("healthcheck/", include("health_check.urls")), - path("prometheus_inbound_proxy", prometheus_inbound_proxy), - path("prometheus_outbound_proxy", prometheus_outbound_proxy), + path("prometheus_inbound_proxy/", prometheus_inbound_proxy), + path("prometheus_outbound_proxy/", prometheus_outbound_proxy), path("", include("django.contrib.auth.urls")), ] diff --git a/envs/dev/on-site-prometheus.yml b/envs/dev/on-site-prometheus.yml index cb1e5c7..9cd0c97 100644 --- a/envs/dev/on-site-prometheus.yml +++ b/envs/dev/on-site-prometheus.yml @@ -11,4 +11,4 @@ scrape_configs: hotkey: '{hotkey}' remote_write: - - url: "http://host.docker.internal:8000/prometheus_outbound_proxy" \ No newline at end of file + - url: 
"http://host.docker.internal:8000/prometheus_outbound_proxy/" \ No newline at end of file From 3bda646955d25b13de8bc86ff3f36e39e76bf0a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Thu, 7 Nov 2024 10:17:45 +0100 Subject: [PATCH 10/15] more docs --- README.md | 22 +++++++++++++++++++--- docs/build_plantuml.sh | 1 + docs/diagram.plantuml | 16 ++++++++++++++++ docs/diagram.svg | 1 + 4 files changed, 37 insertions(+), 3 deletions(-) create mode 100644 docs/build_plantuml.sh create mode 100644 docs/diagram.plantuml create mode 100644 docs/diagram.svg diff --git a/README.md b/README.md index 961bdae..8df708f 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,14 @@ # bittensor-prometheus-proxy -Proxy that allows for pushing prometheus metrics signed with bittensor wallets +Proxy that allows for +1. pushing prometheus metrics signed with bittensor wallets. Operating in this manner does not require a db or redis. +2. verifying incoming signed metrics. Operating in this manner does not require a wallet. Verification is two-fold: + 1. the full payload is signed, both the signature and the hotkey are included in the headers - that is verified + 2. the metrics data blob is unpacked and each metric is checked for the "hotkey" label - it has to be the same as + the value in the header + +![Diagram](./docs/diagram.svg) - - - # Base requirements @@ -15,11 +22,20 @@ Proxy that allows for pushing prometheus metrics signed with bittensor wallets ```sh ./setup-dev.sh -docker compose up -d +docker compose up -d # this will also start node_Exporter and two prometheus instances cd app/src pdm run manage.py wait_for_database --timeout 10 pdm run manage.py migrate -pdm run manage.py runserver +pdm run manage.py runserver 0.0.0.0:8000 +``` + +this setup requires a working bittensor wallet (for the on-site prometheus to read the hotkey and so that the proxy +can sign requests). 
Requests will be sent from on-site prometheus to proxy then to the same proxy (different view +though) and to the central prometheus. Starting celery and celery beat is not, however, required for local development, +because instead of having a periodic task populate the validator list, one can add records to it manually using + +```bash +python manage.py debug_add_validator ``` # Setup production environment (git deployment) diff --git a/docs/build_plantuml.sh b/docs/build_plantuml.sh new file mode 100644 index 0000000..faa6632 --- /dev/null +++ b/docs/build_plantuml.sh @@ -0,0 +1 @@ +plantuml -tsvg ./diagram.plantuml \ No newline at end of file diff --git a/docs/diagram.plantuml b/docs/diagram.plantuml new file mode 100644 index 0000000..040b4d7 --- /dev/null +++ b/docs/diagram.plantuml @@ -0,0 +1,16 @@ +@startuml +participant "On-site Exporters" as E +participant "On-site Prometheus" as P +participant "On-site Proxy" as OP +participant "Central Proxy" as CP +participant "Central DB and Redis" as DB +participant "Central Prometheus" as CProm + +P -> E: Scrape metrics +P -> OP: Push metrics data blob, include hotkey as label +OP -> OP: Sign the blob +OP -> CP: Push metrics data blob +CP -> DB: Keep a list of registered validators, cached +CP -> CP: Verify signature, verify "hotkey" label in each metric +CP -> CProm: Push metrics data blob +@enduml \ No newline at end of file diff --git a/docs/diagram.svg b/docs/diagram.svg new file mode 100644 index 0000000..1fde4b7 --- /dev/null +++ b/docs/diagram.svg @@ -0,0 +1 @@ +On-site ExportersOn-site ExportersOn-site PrometheusOn-site PrometheusOn-site ProxyOn-site ProxyCentral ProxyCentral ProxyCentral DB and RedisCentral DB and RedisCentral PrometheusCentral PrometheusScrape metricsPush metrics data blob, include hotkey as labelSign the blobPush metrics data blobKeep a list of registered validators, cachedVerify signature, verify "hotkey" label in each metricPush metrics data blob \ No newline at end of file From 
415d65c562a156ecec4ae54e597e3ce3a8d4898d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Fri, 8 Nov 2024 14:47:06 +0100 Subject: [PATCH 11/15] Update app/bittensor_prometheus/entrypoint.sh Co-authored-by: emnoor-reef <137923473+emnoor-reef@users.noreply.github.com> --- app/bittensor_prometheus/entrypoint.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/app/bittensor_prometheus/entrypoint.sh b/app/bittensor_prometheus/entrypoint.sh index cec4e87..5ee0e2d 100644 --- a/app/bittensor_prometheus/entrypoint.sh +++ b/app/bittensor_prometheus/entrypoint.sh @@ -1,3 +1,4 @@ -#!/bin/sh -e +#!/bin/sh +set -e python /app/read_wallet_and_substitute_config.py exec /bin/prometheus --config.file=/etc/prometheus/prometheus.yml "$@" \ No newline at end of file From 393c31d85df49bf6c37d060ad7806dc2da3bed1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Fri, 8 Nov 2024 14:54:35 +0100 Subject: [PATCH 12/15] move env vars check to main() --- .../read_wallet_and_substitute_config.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/app/bittensor_prometheus/read_wallet_and_substitute_config.py b/app/bittensor_prometheus/read_wallet_and_substitute_config.py index 0ee5579..31a1ed1 100644 --- a/app/bittensor_prometheus/read_wallet_and_substitute_config.py +++ b/app/bittensor_prometheus/read_wallet_and_substitute_config.py @@ -7,9 +7,6 @@ BITTENSOR_WALLET_NAME = os.environ.get("BITTENSOR_WALLET_NAME") BITTENSOR_WALLET_HOTKEY_NAME = os.environ.get("BITTENSOR_WALLET_HOTKEY_NAME") -if not BITTENSOR_WALLET_NAME or not BITTENSOR_WALLET_HOTKEY_NAME: - raise RuntimeError("You must set BITTENSOR_WALLET_NAME and BITTENSOR_WALLET_HOTKEY_NAME env vars") - def get_wallet() -> bittensor.wallet: wallet = bittensor.wallet( @@ -27,6 +24,8 @@ def read_and_substitute_config(hotkey: str): def main(): + if not BITTENSOR_WALLET_NAME or not BITTENSOR_WALLET_HOTKEY_NAME: + raise RuntimeError("You must set BITTENSOR_WALLET_NAME 
and BITTENSOR_WALLET_HOTKEY_NAME env vars") wallet = get_wallet() read_and_substitute_config(wallet.hotkey.ss58_address) From 7bfd40c104d86aba47dc67da56479d40d1bae4de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Sat, 7 Dec 2024 12:52:16 +0100 Subject: [PATCH 13/15] add prometheus and protobuf as submodules --- .gitmodules | 6 ++++++ app/src/project/core/prometheus_protobuf/README.md | 2 +- prometheus | 1 + protobuf | 1 + 4 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 .gitmodules create mode 160000 prometheus create mode 160000 protobuf diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..8dd4be9 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "prometheus"] + path = prometheus + url = git@github.com:prometheus/prometheus.git +[submodule "protobuf"] + path = protobuf + url = https://github.com/gogo/protobuf.git diff --git a/app/src/project/core/prometheus_protobuf/README.md b/app/src/project/core/prometheus_protobuf/README.md index 847b136..5f8a36f 100644 --- a/app/src/project/core/prometheus_protobuf/README.md +++ b/app/src/project/core/prometheus_protobuf/README.md @@ -1,6 +1,6 @@ These files were created by: -1. cloning prometheus (git@github.com:prometheus/prometheus.git) and protobuf (git clone https://github.com/gogo/protobuf.git) +1. checking out submodules of this repository (prometheus and protobuf) 2. Installing go and a bunch of stuff I'm not sure is necessary 3. Running `make proto` in prometheus's root dir until the error was `--gogofast_out: protoc-gen-gogofast: Plugin failed with status code 1.` - not sure if that was necessary 4. running `protoc -I=. -I=../../protobuf --python_out=. 
../../protobuf/gogoproto/gogo.proto types.proto remote.proto` in `prometheus/prompb` diff --git a/prometheus b/prometheus new file mode 160000 index 0000000..af2a1cb --- /dev/null +++ b/prometheus @@ -0,0 +1 @@ +Subproject commit af2a1cb10c89de496cd4309ac532624b34112d74 diff --git a/protobuf b/protobuf new file mode 160000 index 0000000..f67b897 --- /dev/null +++ b/protobuf @@ -0,0 +1 @@ +Subproject commit f67b8970b736e53dbd7d0a27146c8f1ac52f74e5 From 9f821a48ffff986ae7fb0640aef0c22bcfa45345 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Sat, 7 Dec 2024 14:09:27 +0100 Subject: [PATCH 14/15] use @functools.cache for BITTENSOR_WALLET --- app/src/project/settings.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/app/src/project/settings.py b/app/src/project/settings.py index 3a456f0..124c2f2 100644 --- a/app/src/project/settings.py +++ b/app/src/project/settings.py @@ -1,7 +1,7 @@ """ Django settings for project project. """ - +import functools import inspect import logging import pathlib @@ -371,14 +371,9 @@ def wrapped(*args, **kwargs): raise RuntimeError("Both BITTENSOR_WALLET_NAME and BITTENSOR_WALLET_HOTKEY_NAME must be set when " "CENTRAL_PROMETHEUS_PROXY_URL is defined") -_wallet = None - +@functools.cache def BITTENSOR_WALLET() -> bittensor.wallet: - global _wallet - if _wallet: - return _wallet - if not BITTENSOR_WALLET_NAME or not BITTENSOR_WALLET_HOTKEY_NAME: raise RuntimeError("Wallet not configured") wallet = bittensor.wallet( From bd984b1c95829d03046826ae8fecf692a2f02b9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Nowacki?= Date: Sat, 4 Jan 2025 10:40:55 +0100 Subject: [PATCH 15/15] Update app/src/project/settings.py Co-authored-by: emnoor-reef <137923473+emnoor-reef@users.noreply.github.com> --- app/src/project/settings.py | 1 - 1 file changed, 1 deletion(-) diff --git a/app/src/project/settings.py b/app/src/project/settings.py index 124c2f2..34f1708 100644 --- a/app/src/project/settings.py 
+++ b/app/src/project/settings.py @@ -382,7 +382,6 @@ def BITTENSOR_WALLET() -> bittensor.wallet: path=str(BITTENSOR_WALLET_DIRECTORY), ) wallet.hotkey_file.get_keypair() # this raises errors if the keys are inaccessible - _wallet = wallet return wallet