diff --git a/.github/workflows/main-ci-only.yml b/.github/workflows/main-ci-only.yml new file mode 100644 index 000000000..d4783e1f2 --- /dev/null +++ b/.github/workflows/main-ci-only.yml @@ -0,0 +1,24 @@ +name: CI-Only Workflow + +# Trigger for specified criteria (specifically for PRs against stable branches) +on: + workflow_dispatch: + pull_request: + types: + - opened + - reopened + - synchronize + branches: + - master + - int + - develop + - Naksha_maintenance + +permissions: + checks: write # for junit reporting + pull-requests: write # for jacoco PR comments + +jobs: + ### Job to Build and Publish artifacts + Build-and-Publish: + uses: ./.github/workflows/reusable-build-and-publish.yml diff --git a/.github/workflows/main-dev-deploy.yml b/.github/workflows/main-dev-deploy.yml new file mode 100644 index 000000000..d72da1a13 --- /dev/null +++ b/.github/workflows/main-dev-deploy.yml @@ -0,0 +1,62 @@ +name: Dev Workflow + +# Trigger Dev deployment pipeline for commit on specific branch(es) +on: + push: + branches: + - develop + - Naksha_maintenance + - MCPODS-6260_dev_deployment + +permissions: + checks: write # for junit reporting + pull-requests: write # for jacoco PR comments + + +# let the run-name get picked up dynamically from most recent commit +#run-name: '[${{ github.event_name }}] event on ${{ github.ref_type }}/PR# [${{ github.ref_name }}]' + +jobs: + ### Job to Build and Publish artifacts + # Output + # - pipeline-artifact-name = Name of the pipeline artifact to be used in release step (e.g. codedeploy-artifact) + # - app-version = Application version to be used in release step as S3 bucket directory (e.g. 1.1.1-SNAPSHOT, 1.2.0) + # - codedeploy-artifact-version = CodeDeploy artifact version to be used in uniquely naming deployment bundle (e.g. 
20230528-144100_d63fd762704ef242d9827662b872b305744f753e) + Build-and-Publish: + uses: ./.github/workflows/reusable-build-and-publish.yml + + + + ### Job to Release CodeDeploy artifact to S3 bucket + # Output + # - s3-artifact-path = s3 bucket artifact path to be used for codedeploy (e.g. 1.1.1/deployment.tar.gz) + S3-CodeDeploy-Release: + needs: Build-and-Publish + uses: ./.github/workflows/reusable-s3-codedeploy-release.yml + with: + aws-region: ${{ vars.AWS_REGION }} + pipeline-artifact-name: ${{ needs.Build-and-Publish.outputs.pipeline-artifact-name }} + s3-bucket-name: ${{ vars.RELEASE_S3_BUCKET_NAME }} + s3-bucket-dir-path: ${{ needs.Build-and-Publish.outputs.app-version }} + s3-artifact-version: ${{ needs.Build-and-Publish.outputs.codedeploy-artifact-version }} + secrets: + aws-key: ${{ secrets.AWS_KEY }} + aws-secret: ${{ secrets.AWS_SECRET }} + + + + ### Job to Deploy CodeDeploy artifact to Dev environment + Dev-Deploy: + needs: S3-CodeDeploy-Release + uses: ./.github/workflows/reusable-codedeploy-deployment.yml + with: + aws-region: ${{ vars.AWS_REGION }} + codedeploy-app-name: ${{ vars.CODEDEPLOY_APP_NAME }} + codedeploy-group-name: ${{ vars.CODEDEPLOY_DEV_GROUP_NAME }} + deployment-description: 'Deployment triggered by ${{ github.triggering_actor }} from Github repo [${{ github.repository }}], ${{ github.ref_type }} [${{ github.ref_name }}], commit sha [${{ github.sha }}]' + s3-bucket-name: ${{ vars.RELEASE_S3_BUCKET_NAME }} + s3-artifact-path: ${{ needs.S3-CodeDeploy-Release.outputs.s3-artifact-path }} + s3-artifact-type: tgz + secrets: + aws-key: ${{ secrets.AWS_KEY }} + aws-secret: ${{ secrets.AWS_SECRET }} diff --git a/.github/workflows/main-e2e-deploy.yml b/.github/workflows/main-e2e-deploy.yml new file mode 100644 index 000000000..a4f5d2e56 --- /dev/null +++ b/.github/workflows/main-e2e-deploy.yml @@ -0,0 +1,56 @@ +name: E2E Workflow + +# Trigger E2E Deployment for commit on int branch (i.e. 
integration) +on: + push: + branches: + - int + +permissions: + checks: write # for junit reporting + pull-requests: write # for jacoco PR comments + +jobs: + ### Job to Build and Publish artifacts + # Output + # - pipeline-artifact-name = Name of the pipeline artifact to be used in release step (e.g. codedeploy-artifact) + # - app-version = Application version to be used in release step as S3 bucket directory (e.g. 1.1.1-SNAPSHOT, 1.2.0) + # - codedeploy-artifact-version = CodeDeploy artifact version to be used in uniquely naming deployment bundle (e.g. 20230528-144100_d63fd762704ef242d9827662b872b305744f753e) + Build-and-Publish: + uses: ./.github/workflows/reusable-build-and-publish.yml + + + + ### Job to Release CodeDeploy artifact to S3 bucket + # Output + # - s3-artifact-path = s3 bucket artifact path to be used for codedeploy (e.g. 1.1.1/deployment.tar.gz) + S3-CodeDeploy-Release: + needs: Build-and-Publish + uses: ./.github/workflows/reusable-s3-codedeploy-release.yml + with: + aws-region: ${{ vars.AWS_REGION }} + pipeline-artifact-name: ${{ needs.Build-and-Publish.outputs.pipeline-artifact-name }} + s3-bucket-name: ${{ vars.RELEASE_S3_BUCKET_NAME }} + s3-bucket-dir-path: ${{ needs.Build-and-Publish.outputs.app-version }} + s3-artifact-version: ${{ needs.Build-and-Publish.outputs.codedeploy-artifact-version }} + secrets: + aws-key: ${{ secrets.AWS_KEY }} + aws-secret: ${{ secrets.AWS_SECRET }} + + + + ### Job to Deploy CodeDeploy artifact to E2E environment + E2E-Deploy: + needs: S3-CodeDeploy-Release + uses: ./.github/workflows/reusable-codedeploy-deployment.yml + with: + aws-region: ${{ vars.AWS_REGION }} + codedeploy-app-name: ${{ vars.CODEDEPLOY_APP_NAME }} + codedeploy-group-name: ${{ vars.CODEDEPLOY_E2E_GROUP_NAME }} + deployment-description: 'Deployment triggered by ${{ github.triggering_actor }} from Github repo [${{ github.repository }}], ${{ github.ref_type }} [${{ github.ref_name }}], commit sha [${{ github.sha }}]' + s3-bucket-name: ${{ 
vars.RELEASE_S3_BUCKET_NAME }} + s3-artifact-path: ${{ needs.S3-CodeDeploy-Release.outputs.s3-artifact-path }} + s3-artifact-type: tgz + secrets: + aws-key: ${{ secrets.AWS_KEY }} + aws-secret: ${{ secrets.AWS_SECRET }} diff --git a/.github/workflows/main-prd-deploy.yml b/.github/workflows/main-prd-deploy.yml new file mode 100644 index 000000000..d916eb5bc --- /dev/null +++ b/.github/workflows/main-prd-deploy.yml @@ -0,0 +1,57 @@ +name: Prod Workflow + +# Trigger Prod deployment pipeline for push of a tag Naksha_* +on: + push: + tags: + - Naksha_* + +permissions: + checks: write # for junit reporting + pull-requests: write # for jacoco PR comments + +jobs: + ### Job to Build and Publish artifacts + # Output + # - pipeline-artifact-name = Name of the pipeline artifact to be used in release step (e.g. codedeploy-artifact) + # - app-version = Application version to be used in release step as S3 bucket directory (e.g. 1.1.1-SNAPSHOT, 1.2.0) + # - codedeploy-artifact-version = CodeDeploy artifact version to be used in uniquely naming deployment bundle (e.g. 20230528-144100_d63fd762704ef242d9827662b872b305744f753e) + Build-and-Publish: + uses: ./.github/workflows/reusable-build-and-publish.yml + + + + ### Job to Release CodeDeploy artifact to S3 bucket + # Output + # - s3-artifact-path = s3 bucket artifact path to be used for codedeploy (e.g. 
1.1.1/deployment.tar.gz) + S3-CodeDeploy-Release: + needs: Build-and-Publish + uses: ./.github/workflows/reusable-s3-codedeploy-release.yml + with: + aws-region: ${{ vars.AWS_REGION }} + pipeline-artifact-name: ${{ needs.Build-and-Publish.outputs.pipeline-artifact-name }} + s3-bucket-name: ${{ vars.RELEASE_S3_BUCKET_NAME }} + s3-bucket-dir-path: ${{ needs.Build-and-Publish.outputs.app-version }} + s3-artifact-version: ${{ needs.Build-and-Publish.outputs.codedeploy-artifact-version }} + secrets: + aws-key: ${{ secrets.AWS_KEY }} + aws-secret: ${{ secrets.AWS_SECRET }} + + + + ### Job to Deploy CodeDeploy artifact to Production environment + Prd-Deploy: + needs: S3-CodeDeploy-Release + uses: ./.github/workflows/reusable-codedeploy-deployment.yml + with: + aws-region: ${{ vars.PRD_AWS_REGION }} + codedeploy-app-name: ${{ vars.CODEDEPLOY_APP_NAME }} + codedeploy-group-name: ${{ vars.CODEDEPLOY_PRD_GROUP_NAME }} + deployment-description: 'Deployment triggered by ${{ github.triggering_actor }} from Github repo [${{ github.repository }}], ${{ github.ref_type }} [${{ github.ref_name }}], commit sha [${{ github.sha }}]' + # we use Prod access point (eu-west-1) to fetch deployment artifacts from E2E S3 bucket (us-east-1) + s3-bucket-name: ${{ vars.PRD_RELEASE_S3_BUCKET_NAME }} + s3-artifact-path: ${{ needs.S3-CodeDeploy-Release.outputs.s3-artifact-path }} + s3-artifact-type: tgz + secrets: + aws-key: ${{ secrets.PRD_AWS_KEY }} + aws-secret: ${{ secrets.PRD_AWS_SECRET }} diff --git a/.github/workflows/reusable-build-and-publish.yml b/.github/workflows/reusable-build-and-publish.yml new file mode 100644 index 000000000..5f3d88a54 --- /dev/null +++ b/.github/workflows/reusable-build-and-publish.yml @@ -0,0 +1,112 @@ +on: + workflow_call: + outputs: + pipeline-artifact-name: + description: 'Name of the uploaded artifact which can be downloaded using actions/download-artifact within the same pipeline (e.g. 
codedeploy-artifact)' + value: ${{ jobs.main.outputs.pipeline-artifact-name }} + app-version: + description: 'Application version identified using maven (e.g. 1.1.1-SNAPSHOT, 1.2.0)' + value: ${{ jobs.main.outputs.app-version }} + codedeploy-artifact-version: + description: 'Unique version which should be used in next step(s) for CodeDeploy artifact (e.g. 20230528-144100_d63fd762704ef242d9827662b872b305744f753e)' + value: ${{ jobs.main.outputs.codedeploy-artifact-version }} + + +env: + MIN_COVERAGE_OVERALL: 0 + MIN_COVERAGE_CHANGED_FILES: 0 + SERVICE_JAR_DIR: ${{ github.workspace }}/build/libs + CODEDEPLOY_DIR: ${{ github.workspace }}/deployment/codedeploy + GITHUB_CODEDEPLOY_ARTIFACT_NAME: codedeploy-artifact + +jobs: + main: + runs-on: ubuntu-latest + outputs: + pipeline-artifact-name: ${{ steps.save-artifact-name.outputs.name }} + app-version: ${{ steps.save-app-version.outputs.version }} + codedeploy-artifact-version: ${{ steps.save-artifact-version.outputs.version }} + services: + postgres: + image: postgis/postgis # Postgres with PostGIS extension + env: + POSTGRES_PASSWORD: password + POSTGRES_USER: postgres + POSTGRES_DB: postgres + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 5432:5432 + steps: + - run: echo "[${{ github.triggering_actor }}] triggered [${{ github.event_name }}] event on ${{ github.ref_type }}/PR# [${{ github.ref_name }}]" + - run: echo "🎉 This job is running on a ${{ runner.os }} server hosted by GitHub!" 
+      - name: Check out repository code
+        uses: actions/checkout@v4
+      - name: Setup Java
+        uses: actions/setup-java@v3
+        with:
+          distribution: 'temurin'
+          java-version: '17'
+          java-package: 'jdk'
+          cache: 'gradle'
+      - name: Setup Gradle
+        uses: gradle/gradle-build-action@v2
+        with:
+          gradle-version: 8.2
+      ### Build, Test, Coverage Verification
+      - name: Build fat jar, Run Unit tests, Verify code coverage
+        run: gradle shadowJar jacocoTestReport jacocoTestCoverageVerification
+      - name: Publish Test Report
+        uses: mikepenz/action-junit-report@v4
+        if: success() || failure() # always run even if the previous step fails
+        with:
+          report_paths: '**/build/test-results/test/TEST-*.xml'
+      - name: Publish code coverage report as PR comment
+        id: jacoco
+        uses: madrapps/jacoco-report@v1.6.1
+        with:
+          paths: '**/build/reports/jacoco/test/jacocoTestReport.xml'
+          token: ${{ secrets.GITHUB_TOKEN }}
+          min-coverage-overall: ${{ env.MIN_COVERAGE_OVERALL }} # $VAR syntax is not expanded in `with:` values; must use the env context
+          min-coverage-changed-files: ${{ env.MIN_COVERAGE_CHANGED_FILES }} # same: use ${{ env.* }}, not shell-style $VAR
+          title: Code Coverage
+      - name: Fail when coverage of changed files is too low
+        run: |
+          CHANGED_FILES_FAILED=$(echo '${{ steps.jacoco.outputs.coverage-changed-files }} < ${{ env.MIN_COVERAGE_CHANGED_FILES }}' | bc)
+          [[ $CHANGED_FILES_FAILED -ne 0 ]] && echo 'Changed files coverage ${{ steps.jacoco.outputs.coverage-changed-files }}% is smaller than required ${{ env.MIN_COVERAGE_CHANGED_FILES }}%'
+          [[ $CHANGED_FILES_FAILED -ne 0 ]] && exit 1 || exit 0
+      - name: List generated artifacts
+        run: |
+          ls -l $SERVICE_JAR_DIR/*
+      ### TODO : Publish to central repository
+      - name: Prepare CodeDeploy artifact content
+        run: |
+          cp -p $SERVICE_JAR_DIR/naksha-*-all.jar $CODEDEPLOY_DIR/contents/naksha-hub/
+      - name: List CodeDeploy artifact content
+        run: |
+          ls -lR $CODEDEPLOY_DIR
+      - name: Save pipeline artifact name
+        id: save-artifact-name
+        run: echo "name=${{ env.GITHUB_CODEDEPLOY_ARTIFACT_NAME }}" >> "$GITHUB_OUTPUT"
+      - name: Save CodeDeploy artifact content
+        uses:
actions/upload-artifact@v3 + with: + name: ${{ env.GITHUB_CODEDEPLOY_ARTIFACT_NAME }} + path: ${{ env.CODEDEPLOY_DIR }} + if-no-files-found: error + - name: Identify and save Application version + id: save-app-version + run: | + APP_VERSION=`gradle -q printAppVersion` + echo $APP_VERSION + echo "version=$APP_VERSION" >> "$GITHUB_OUTPUT" + - name: Identify and save CodeDeploy artifact version + id: save-artifact-version + run: | + ARTIFACT_VERSION=`date +"%Y%m%d-%H%M%S"`_${{ github.sha }} + echo $ARTIFACT_VERSION + echo "version=$ARTIFACT_VERSION" >> "$GITHUB_OUTPUT" + - run: echo "🍏 This job's status is ${{ job.status }}." diff --git a/.github/workflows/reusable-codedeploy-deployment.yml b/.github/workflows/reusable-codedeploy-deployment.yml new file mode 100644 index 000000000..5804211d0 --- /dev/null +++ b/.github/workflows/reusable-codedeploy-deployment.yml @@ -0,0 +1,86 @@ +on: + workflow_call: + inputs: + aws-region: + type: string + description: 'AWS region the S3 bucket is associated with' + required: true + codedeploy-app-name: + type: string + description: 'CodeDeploy Application name, to be used for deployment' + required: true + codedeploy-group-name: + type: string + description: 'CodeDeploy DeploymentGroup name, to be used for deployment' + required: true + deployment-description: + type: string + description: 'CodeDeploy deployment description' + required: true + s3-bucket-name: + type: string + description: 'AWS S3 bucket name, which contains the artifact bundle to be deployed' + required: true + s3-artifact-path: + type: string + description: 'AWS S3 artifact path, which is to be used for deployment (e.g. 1.1.1/deployment.tar.gz)' + required: true + s3-artifact-type: + type: string + description: 'Type of the artifact to be deployed (e.g. 
tar, zip, tgz)'
+        required: true
+    secrets:
+      aws-key:
+        description: 'AWS Access key of a user, who is allowed to upload artifact to S3 bucket'
+        required: true
+      aws-secret:
+        description: 'AWS Access secret of a user, who is allowed to upload artifact to S3 bucket'
+        required: true
+
+jobs:
+  main:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4 # aligned with the checkout version used by the other new workflows in this change
+      - name: Print deployment parameters
+        run: |
+          echo "aws-region = ${{ inputs.aws-region }}"
+          echo "codedeploy-app-name = ${{ inputs.codedeploy-app-name }}"
+          echo "codedeploy-group-name = ${{ inputs.codedeploy-group-name }}"
+          echo "deployment-description = ${{ inputs.deployment-description }}"
+          echo "s3-bucket-name = ${{ inputs.s3-bucket-name }}"
+          echo "s3-artifact-path = ${{ inputs.s3-artifact-path }}"
+          echo "s3-artifact-type = ${{ inputs.s3-artifact-type }}"
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v2
+        with:
+          aws-access-key-id: ${{ secrets.aws-key }}
+          aws-secret-access-key: ${{ secrets.aws-secret }}
+          aws-region: ${{ inputs.aws-region }}
+      - name: Submit AWS-CodeDeploy Deployment
+        id: create-deployment
+        run: |
+          aws deploy create-deployment \
+            --application-name ${{ inputs.codedeploy-app-name }} \
+            --deployment-group-name ${{ inputs.codedeploy-group-name }} \
+            --s3-location bucket=${{ inputs.s3-bucket-name }},key=${{ inputs.s3-artifact-path }},bundleType=${{ inputs.s3-artifact-type }} \
+            --description "${{ inputs.deployment-description }}" \
+            > deployment_command.log
+          cat deployment_command.log
+          DEPLOYMENT_ID=`cat deployment_command.log | jq -r '.deploymentId'`
+          echo "id=$DEPLOYMENT_ID" >> "$GITHUB_OUTPUT"
+      - name: Print Deployment status link
+        run: |
+          echo "Deployment status link : https://$AWS_REGION.console.aws.amazon.com/codesuite/codedeploy/deployments/$DEPLOYMENT_ID?region=$AWS_REGION"
+        env:
+          DEPLOYMENT_ID: ${{ steps.create-deployment.outputs.id }}
+          AWS_REGION: ${{ inputs.aws-region }}
+      - name:
Monitor Deployment status + run: | + echo "Deployment Id = $DEPLOYMENT_ID, Timeout seconds = $TIMEOUT_SEC" + bash ./.github/workflows/supporting-scripts/MonitorDeploymentStatus.sh $DEPLOYMENT_ID $TIMEOUT_SEC + env: + DEPLOYMENT_ID: ${{ steps.create-deployment.outputs.id }} + TIMEOUT_SEC: 600 + - run: echo "🍏 This job's status is ${{ job.status }}." diff --git a/.github/workflows/reusable-s3-codedeploy-release.yml b/.github/workflows/reusable-s3-codedeploy-release.yml new file mode 100644 index 000000000..eb6bde68e --- /dev/null +++ b/.github/workflows/reusable-s3-codedeploy-release.yml @@ -0,0 +1,93 @@ +on: + workflow_call: + inputs: + aws-region: + type: string + description: 'AWS region the S3 bucket is associated with' + required: true + pipeline-artifact-name: + type: string + description: 'Github pipeline artifact name, which can be downloaded using actions/download-artifact' + required: true + s3-bucket-name: + type: string + description: 'AWS S3 bucket name, where artifact is to be released to' + required: true + s3-bucket-dir-path: + type: string + description: 'AWS S3 bucket directory path, where artifact is to be uploaded into' + required: true + s3-artifact-version: + type: string + description: 'A version (or suffix) to be used in artifact name while uploading it to AWS S3 bucket' + required: true + secrets: + aws-key: + description: 'AWS Access key of a user, who is allowed to upload artifact to S3 bucket' + required: true + aws-secret: + description: 'AWS Access secret of a user, who is allowed to upload artifact to S3 bucket' + required: true + outputs: + s3-artifact-path: + description: 'AWS S3 artifact path, which was used for release (e.g. 1.1.1/deployment.tar.gz)' + value: ${{ jobs.main.outputs.s3-artifact-path }} + +jobs: + main: + runs-on: ubuntu-latest + outputs: + # S3 bucket artifact path (e.g. 
1.1.1/deployment.tar.gz) + s3-artifact-path: ${{ steps.s3-artifact-path.outputs.path }} + steps: + # create directory to download artifacts + - run: mkdir download + - name: Fetch CodeDeploy artifacts + uses: actions/download-artifact@v3 + with: + name: ${{ inputs.pipeline-artifact-name }} + path: download + - name: List CodeDeploy artifacts + run: | + ls -lR download/ + - name: Preparing S3 artifact name + id: s3-artifact-name + run: | + BUNDLE_NAME=deployment_${{ inputs.s3-artifact-version }}.tar.gz + echo "name=$BUNDLE_NAME" >> "$GITHUB_OUTPUT" + - name: Preparing S3 artifact bundle + run: | + cd download + echo $BUNDLE_NAME + tar -cvzf ../$BUNDLE_NAME . + env: + BUNDLE_NAME: ${{ steps.s3-artifact-name.outputs.name }} + - name: List artifact bundle content + run: | + tar -tvf $BUNDLE_NAME + env: + BUNDLE_NAME: ${{ steps.s3-artifact-name.outputs.name }} + - name: Preparing S3 artifact path + id: s3-artifact-path + run: | + S3_ARTIFACT_PATH="${{ inputs.s3-bucket-dir-path }}/$BUNDLE_NAME" + echo "S3 Artifact path is $S3_ARTIFACT_PATH" + echo "path=$S3_ARTIFACT_PATH" >> "$GITHUB_OUTPUT" + env: + BUNDLE_NAME: ${{ steps.s3-artifact-name.outputs.name }} + # Upload artifact to S3 bucket + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + aws-access-key-id: ${{ secrets.aws-key }} + aws-secret-access-key: ${{ secrets.aws-secret }} + aws-region: ${{ inputs.aws-region }} + - name: Upload artifact to S3 bucket s3://${{ inputs.s3-bucket-name }} + run: | + FULL_S3_PATH=s3://${{ inputs.s3-bucket-name }}/$S3_ARTIFACT_PATH + echo "Full S3 artifact path is [$FULL_S3_PATH]" + aws s3 cp $BUNDLE_NAME $FULL_S3_PATH + env: + BUNDLE_NAME: ${{ steps.s3-artifact-name.outputs.name }} + S3_ARTIFACT_PATH: ${{ steps.s3-artifact-path.outputs.path }} + - run: echo "🍏 This job's status is ${{ job.status }}." 
diff --git a/.github/workflows/supporting-scripts/MonitorDeploymentStatus.sh b/.github/workflows/supporting-scripts/MonitorDeploymentStatus.sh
new file mode 100644
index 000000000..fc3ac37bf
--- /dev/null
+++ b/.github/workflows/supporting-scripts/MonitorDeploymentStatus.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+## Argument 1 = CodeDeploy deploymentId, which is to be monitored for success
+## Argument 2 = Max Timeout in seconds, for which this script should monitor the status before failing the deployment
+
+# Check if required parameters are provided
+if [[ "$1" == "" ]] || [[ "$2" == "" ]]; then
+  echo "Error:: Missing mandatory parameters - deploymentId and maxTimeOut"
+  exit 1 # exit status must be 0-255; "exit -1" is non-portable
+fi
+
+# set internal parameters
+DEPLOYMENT_ID=$1
+MAX_TIMEOUT_SEC=$2
+SLEEP_INTERVAL_SEC=5
+
+# start script from here
+echo "Monitoring status of deployment Id [$DEPLOYMENT_ID], with max timeout of [$MAX_TIMEOUT_SEC] seconds"
+
+START_TIME=`date +%s`
+
+# Wait for final status of deployment
+while [ 1 ];
+do
+  # Fetch deployment details
+  DEPLOYMENT_RESPONSE_JSON=`aws deploy get-deployment --deployment-id $DEPLOYMENT_ID`
+
+  # Extract deployment details from JSON response
+  STATUS=`echo $DEPLOYMENT_RESPONSE_JSON | jq -r '.deploymentInfo.status'`
+  PENDING_CNT=`echo $DEPLOYMENT_RESPONSE_JSON | jq '.deploymentInfo.deploymentOverview.Pending'`
+  IN_PROGRESS_CNT=`echo $DEPLOYMENT_RESPONSE_JSON | jq '.deploymentInfo.deploymentOverview.InProgress'`
+  SUCCESS_CNT=`echo $DEPLOYMENT_RESPONSE_JSON | jq '.deploymentInfo.deploymentOverview.Succeeded'`
+  FAIL_CNT=`echo $DEPLOYMENT_RESPONSE_JSON | jq '.deploymentInfo.deploymentOverview.Failed'`
+  SKIP_CNT=`echo $DEPLOYMENT_RESPONSE_JSON | jq '.deploymentInfo.deploymentOverview.Skipped'`
+
+  # exit if failed or stopped
+  if [[ "$STATUS" == "Failed" ]] || [[ "$STATUS" == "Stopped" ]];
+  then
+    echo "Error:: Deployment failed/stopped. Overall status is: "
+    echo $DEPLOYMENT_RESPONSE_JSON | jq
+    exit 1
+  fi
+
+  # Even if Status is successful, we still need to check count of successful EC2 instances
+  if [[ "$STATUS" == "Succeeded" ]];
+  then
+    let "TOTAL_PENDING_COUNT=$PENDING_CNT + $IN_PROGRESS_CNT"
+    let "TOTAL_UNSUCCESSFUL_COUNT=$FAIL_CNT + $SKIP_CNT"
+
+    if [[ $TOTAL_PENDING_COUNT -gt 0 ]];
+    then
+      # One or more EC2 instance(s) yet to be updated. Fall through to the sleep below and keep waiting.
+      : # ":" is the shell no-op builtin; "null" is not a valid command
+    elif [[ $TOTAL_UNSUCCESSFUL_COUNT -gt 0 ]];
+    then
+      echo "Error:: Deployment failed. Overall status is: "
+      echo $DEPLOYMENT_RESPONSE_JSON | jq
+      exit 1
+    else
+      # success may only be declared once nothing is pending/in-progress and nothing failed/skipped
+      echo "Deployment successfully completed for $SUCCESS_CNT instances!"
+      exit 0
+    fi
+  fi
+
+  # Exit if we have exceeded max wait time
+  CRT_TIME=`date +%s`
+  let "WAIT_TIME=$CRT_TIME - $START_TIME"
+
+  # Print current status
+  echo "Current status is [ status : $STATUS, pending : $PENDING_CNT, in_progress : $IN_PROGRESS_CNT, success : $SUCCESS_CNT, failed : $FAIL_CNT, skipped : $SKIP_CNT ]. Waited [$WAIT_TIME] seconds ..."
+
+  if [[ $WAIT_TIME -gt $MAX_TIMEOUT_SEC ]];
+  then
+    echo "Error:: Exceeded timeout [$MAX_TIMEOUT_SEC] sec, while waiting for deployment [$DEPLOYMENT_ID] to complete."
+ exit 1 + fi + + # Sleep and try checking status again + sleep $SLEEP_INTERVAL_SEC +done + +exit 1 \ No newline at end of file diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml deleted file mode 100644 index eafb7e87a..000000000 --- a/.github/workflows/test.yaml +++ /dev/null @@ -1,68 +0,0 @@ -name: Run Naksha tests - -on: - pull_request: - types: - - opened # run whenever PR is opened - - synchronize # run whenever PR is updated - - reopened # run whenever PR is reopoened - workflow_dispatch: # manual run - - -permissions: - checks: write # for junit reporting - pull-requests: write # for jacoco PR comments - -env: - MIN_COVERAGE_OVERALL: 25 - MIN_COVERAGE_CHANGED_FILES: 65 - -jobs: - test: - runs-on: ubuntu-latest - services: - postgres: - image: postgis/postgis # Postgres with PostGIS extension - env: - POSTGRES_PASSWORD: password - POSTGRES_USER: postgres - POSTGRES_DB: postgres - options: >- - --health-cmd pg_isready - --health-interval 10s - --health-timeout 5s - --health-retries 5 - ports: - - 5432:5432 - steps: - - uses: actions/checkout@v4 - - name: Setup Java - uses: actions/setup-java@v3 - with: - distribution: 'temurin' - java-version: '17' - - name: Setup Gradle - uses: gradle/gradle-build-action@v2 - with: - gradle-version: 8.2 - - name: Execute tests & verify overall coverage - run: gradle test jacocoTestReport jacocoTestCoverageVerification - - name: Publish Test Report - uses: mikepenz/action-junit-report@v4 - if: success() || failure() # always run even if the previous step fails - with: - report_paths: '**/build/test-results/test/TEST-*.xml' - - name: Publish code coverage report as PR comment - id: jacoco - uses: madrapps/jacoco-report@v1.6.1 - with: - paths: '**/build/reports/jacoco/test/jacocoTestReport.xml' - token: ${{ secrets.GITHUB_TOKEN }} - min-coverage-overall: $MIN_COVERAGE_OVERALL - min-coverage-changed-files: $MIN_COVERAGE_CHANGED_FILES - title: Code Coverage - - name: Fail when coverage of changed files is too 
low - run: | - CHANGED_FILES_FAILED=$(echo '${{ steps.jacoco.outputs.coverage-changed-files }} < ${{ env.MIN_COVERAGE_CHANGED_FILES }}' | bc) - [[ $CHANGED_FILES_FAILED -ne 0 ]] && echo 'Changed files coverage ${{ steps.jacoco.outputs.coverage-changed-files }}% is smaller than required ${{ env.MIN_COVERAGE_CHANGED_FILES }}%' - [[ $CHANGED_FILES_FAILED -ne 0 ]] && exit 1 || exit 0 \ No newline at end of file diff --git a/.github/workflows/uploadAssets.yaml b/.github/workflows/uploadAssets.yaml deleted file mode 100644 index 461ff1061..000000000 --- a/.github/workflows/uploadAssets.yaml +++ /dev/null @@ -1,69 +0,0 @@ -on: - release: - types: [created] - workflow_dispatch: - inputs: - releaseTag: - description: The tag name of the release for which to build & upload the assets - required: true - default: '' - -name: Upload Release Assets - -jobs: - build: - name: Upload Release Assets - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - if: ${{ github.event.inputs.releaseTag == '' }} - - uses: actions/checkout@v2 - if: ${{ github.event.inputs.releaseTag != '' }} - with: - ref: ${{ github.event.inputs.releaseTag }} - - name: Cache local Maven repository - uses: actions/cache@v2 - with: - path: ~/.m2/repository - key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ runner.os }}-maven- - - name: Setup Java - uses: actions/setup-java@v1 - with: - java-version: '8' - - name: Build & package project - run: | - mvn install -Dmaven.test.skip=true - mvn -f xyz-hub-service package shade:shade - - uses: pdamianik/release-tag-to-upload-url-action@v1.0.1 - id: get-upload-url - if: ${{ github.event.inputs.releaseTag != '' }} - with: - tag: ${{ github.event.inputs.releaseTag }} - token: ${{ secrets.GITHUB_TOKEN }} - - id: set-upload-url - run: | - if [ -z "${{ github.event.inputs.releaseTag }}" ]; then - echo "::set-output name=uploadUrl::${{ github.event.release.upload_url }}" - else - echo "::set-output name=uploadUrl::${{ 
steps.get-upload-url.outputs.uploadUrl }}" - fi - - name: Upload XYZ Hub Service Asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.set-upload-url.outputs.uploadUrl }} - asset_path: xyz-hub-service/target/xyz-hub-service.jar - asset_name: xyz-hub-service.jar - asset_content_type: application/java-archive - - name: Upload XYZ PSQL Connector Asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.set-upload-url.outputs.uploadUrl }} - asset_path: xyz-psql-connector/target/xyz-psql-connector.jar - asset_name: xyz-psql-connector.jar - asset_content_type: application/java-archive \ No newline at end of file diff --git a/.gitlab/gitlab-ci-dh.yml b/.gitlab/gitlab-ci-dh.yml deleted file mode 100644 index a2589d85e..000000000 --- a/.gitlab/gitlab-ci-dh.yml +++ /dev/null @@ -1,3 +0,0 @@ -include: - - project: xyz/data-hub/ci - file: datahub-handler.yml \ No newline at end of file diff --git a/.gitlab/gitlab-ci-iml.yml b/.gitlab/gitlab-ci-iml.yml deleted file mode 100644 index 1a525f1b4..000000000 --- a/.gitlab/gitlab-ci-iml.yml +++ /dev/null @@ -1,3 +0,0 @@ -include: - - project: xyz/data-hub/ci - file: interactiveapi-handler.yml \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5d107451d..11002ff03 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,8 +1,8 @@ # Introduction -The team behind the [XYZ Hub](https://github.com/heremaps/xyz-hub) gratefully accepts contributions via +The team behind the [Naksha](https://github.com/xeus2001/xyz-hub) gratefully accepts contributions via [pull requests](https://help.github.com/articles/about-pull-requests/) filed against the -[GitHub project](https://github.com/heremaps/xyz-hub/pulls). +[GitHub project](https://github.com/xeus2001/xyz-hub/pulls). 
# Signing each Commit @@ -27,42 +27,20 @@ Git has the `-s` flag that can sign a commit for you, see example below: `$ git commit -s -m 'README.md: Fix minor spelling mistake'` -# Before making a pull-request - -Eventually, before making a pull request, start the XYZ-Hub, the HTTP-connector and then execute the -integration tests. Within Linux, you can open a shell in the project root and do: - -```bash -mvn clean install -export FS_WEB_ROOT=$(pwd)/static -mkdir $FS_WEB_ROOT 2>/dev/null -java -jar xyz-hub-service/target/xyz-hub-service.jar >static/xyz-hub.out.txt 2>&1 & -java -cp xyz-hub-service/target/xyz-hub-service.jar com.here.xyz.hub.HttpConnector >static/xyz-http-connector.out.txt 2>&1 & -mvn verify -DskipTests=false -kill -9 $(ps a | grep xyz-hub-service.jar | grep -E "[/]xyz-hub-service.jar" | grep -Eo "[0-9]+ pts" | grep -Eo "[0-9]+" | xargs) 2>/dev/null -``` - -**Note**: You need to have [Redis installed](https://redis.io/docs/getting-started/). +# Branching strategy -# Run and test in IntelliJ IDEA +As per the [Branching strategy](docs/BRANCHING.md), contributors are expected to make changes into their own `feature_*` branches, created out of `develop` branch. -Open as project from existing code (`File->New->Project from Existing Sources...`) using `Maven`. Make copies of the configuration files: +# Local execution -```bash -mkdir .vertx -cp xyz-hub-service/src/main/resources/config.json .vertx/ -cp xyz-hub-service/src/main/resources/connector-config.json .vertx/ -``` - -Adjust the settings to your needs. +From Local machine: +* Validate service builds and starts - as per [Getting Started](README.md#getting-started) section. +* Validate your changes and normal functioning of service as per [Usage](README.md#usage) instructions. 
-Create run profiles for the [XYZ-Hub Service](here-naksha-app-service/src/main/java/com/here/xyz/hub/Core.java) and the [HTTP PSQL-Connector](here-naksha-app-service/src/main/java/com/here/xyz/hub/HttpConnector.java), setting the environment variables `XYZ_CONFIG_PATH=$PROJECT_DIR$/.vertx/;FS_WEB_ROOT=$PROJECT_DIR$/static/`. When running the tests in the IDE, ensure that the service and connector are running and that you provide the same environment variable to the tests. +# Before making a pull-request -If you do not run Redis, change the settings in the `config.json`: +Eventually, before making a pull request, ensure following: -```json -{ - "XYZ_HUB_REDIS_URI": "null", - "DEFAULT_MESSAGE_BROKER": "Noop" -} -``` +1. If any **new secret** parameter is introduced, then mention in pull-request as an indication for the repo-owner to add the secret in respective cloud environments. +2. If any **new config** (not a secret) parameter is introduced in [default-config.json](here-naksha-lib-hub/src/main/resources/config/default-config.json), +ensure the same is added into **deployment** [cloud-config.json](deployment/codedeploy/contents/naksha-hub/.config/cloud-config.json) as well. diff --git a/README.md b/README.md index 4ba34a47d..ac33955e8 100644 --- a/README.md +++ b/README.md @@ -42,39 +42,26 @@ Naksha uses [GeoJSON](https://tools.ietf.org/html/rfc79460) as the main geospati * Postgres 10+ with PostGIS 2.5+ # Getting started + Clone and install the project using: ```bash -git clone https://github.com/heremaps/xyz-hub.git +git clone https://github.com/xeus2001/xyz-hub.git cd xyz-hub -mvn clean install -``` - -### With docker - -The service and all dependencies could be started locally using Docker compose. 
-```bash -docker-compose up -d +gradle clean build ``` -Alternatively, you can start freshly from the sources by using this command after cloning the project: -```bash -mvn clean install -Pdocker -``` - -*Hint: Postgres with PostGIS will be automatically started if you use 'docker-compose up -d' to start the service.* +### Run App -### Without docker - -The service could also be started directly as a fat jar. In this case Postgres and the other optional dependencies need to be started separately. +The service could also be started directly from a fat jar. In this case Postgres and the other optional dependencies need to be started separately. To build the fat jar, at the root project directory, run one of the following: ```bash - #Using machine installed gradle (through apt, brew,... package managers) - gradle shadowJar - #Using gradle wrapper - ./gradlew shadowJar +# Using machine installed gradle (through apt, brew,... package managers) +gradle shadowJar +# Using gradle wrapper +./gradlew shadowJar ``` The jar can be found under `build/libs/`. @@ -95,22 +82,41 @@ java -jar build/libs/naksha-2.0.6-all.jar mock-config Then use a web browser to connect to `localhost:8080`, an OK message should be displayed if the service is up and running. +### OpenAPI specification + +Once the application is up, the OpenAPI specification is accessible at `http(s)://{host}:{port}/hub/swagger/index.html`, by default at [http://localhost:8080/hub/swagger/index.html](http://localhost:8080/hub/swagger/index.html) + ### Configuration -The service persists out of modules with a bootstrap code to start the service. All configuration is done in the [config.json](here-naksha-app-service/src/main/resources/config.json). +The service consists of modules with a bootstrap code to start the service. Service provides default configuration in [default-config.json](here-naksha-lib-hub/src/main/resources/config/default-config.json). 
-The bootstrap code could be used to run only the `hub-verticle` or only the `connector-verticle` or it can be used to run both as a single monolith. In a microservice deployment you run one cluster with only `hub-verticle` deployment and another cluster with only `connector-verticle` deployment. It is as well possible to mix this, so running a monolith deployment that optionally can use connector configurations to use foreign connectors for individual spaces. +The custom (external) configuration file can be supplied by modifying an environment variable or by creating the `default-config.json` file in the corresponding configuration folder. +The exact configuration folder is platform dependent, but generally follows the [XDG user configuration directory](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html) standard, so on Linux by default `~/.config/naksha/v{x.x.x}/`. For Windows the files will reside in the [CSIDL_PROFILE](https://learn.microsoft.com/en-us/windows/win32/shell/csidl?redirectedfrom=MSDN) folder, by default `C:\Users\{username}\.config\naksha\v{x.x.x}\`. +Here `{x.x.x}` is the Naksha application version (for example, if version is `2.0.7`, then path will be `...\.config\naksha\v2.0.7`) -**Warning**: The `connector-verticle` does not perform security checks, so open it to external access will bypass all security restrictions! +Next to this, an explicit location can be specified via the environment variable `NAKSHA_CONFIG_PATH`, this path will not be extended by the `naksha/v{x.x.x}` folder, so you can directly specify where to keep the config files. This is important when you want to start multiple versions of the service: `NAKSHA_CONFIG_PATH=~/.config/naksha/ java -jar naksha.jar {arguments}`. -The location of the configuration file could be modified using environment variables or by creating the `config.json` file in the corresponding configuration folder. 
The exact configuration folder is platform dependent, but generally follows the [XGD user configuration directory](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html), standard, so on Linux being by default `~/.config/xyz-hub/`. For Windows the files will reside in the [CSIDL_PROFILE](https://learn.microsoft.com/en-us/windows/win32/shell/csidl?redirectedfrom=MSDN) folder, by default `C:\Users\{username}\.config\xyz-hub`. This path could be changed via environment variable `XDG_CONFIG_HOME`, which will result in the location `$XDG_CONFIG_HOME/xyz-hub/`. Next to this, an explicit location can be specified via the environment variable `XYZ_CONFIG_PATH`, this path will not be extended by the `xyz-hub` folder, so you can directly specify where to keep the config files. This is important when you want to start multiple versions of the service: `XYZ_CONFIG_PATH=~/.config/xyz-hub/a/ java -jar xyz-hub-service.jar`. +In the custom config file, the name of the individual properties can be set as per source code here [NakshaHubConfig](here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/NakshaHubConfig.java). +All properties annotated with `@JsonProperty` can be set in custom config file. -The individual environment variable names can be found in the source code of the configuration files being [CoreConfig](xyz-models/src/main/java/com/here/xyz/config/CoreConfig.java), [HubConfig](xyz-models/src/main/java/com/here/xyz/config/HubConfig.java) and [ConnectorConfig](xyz-models/src/main/java/com/here/xyz/config/ConnectorConfig.java). All properties annotated with `@JsonProperty` can always be set as well as environment variable, prefixed with `XYZ_` unless they are always starting with that prefix, for example `XYZ_HUB_REMOTE_SERVICE_URLS`. If the environment variable name is different you will find an additional annotation `@EnvName`. 
If the name within the configuration file is different, then either `@JsonProperty` or `@JsonName` annotations can be found. +Config file is loaded using `{config-id}` supplied as CLI argument, as per following precedence on file location (first match wins): +1. using env variable `NAKSHA_CONFIG_PATH` (full path will be `$NAKSHA_CONFIG_PATH/{config-id}.json`) +2. as per user's home directory `user.home` (full path will be `{user-home}/.config/naksha/v{x.x.x}/{config-id}.json` ) +3. as per config previously loaded in Naksha Admin Storage (PostgreSQL database) +4. default config loaded from jar (`here-naksha-lib-hub/src/main/resources/config/default-config.json`) ```bash -mkdir ~/.config/xyz-hub -cp xyz-hub-service/src/main/resources/config.json ~/.config/xyz-hub/ -cp xyz-hub-service/src/main/resources/config-db.json ~/.config/xyz-hub/ +# Example of env variable NAKSHA_CONFIG_PATH + +# First, copy default config to custom location +export NAKSHA_CONFIG_PATH=/my-location/naksha +cp here-naksha-lib-hub/src/main/resources/config/default-config.json $NAKSHA_CONFIG_PATH/ + +# Modify config as per need +vi $NAKSHA_CONFIG_PATH/default-config.json + +# Start application using above config +java -jar naksha.jar default-config ``` # Usage @@ -184,19 +190,6 @@ The service will respond with the inserted geo features: } ``` -### OpenAPI specification - -The OpenAPI specification files are accessible under the following URIs: -* Full: [http://{host}:{port}/hub/static/openapi/full.yaml](http://localhost:8080/hub/static/openapi/full.yaml) -* Stable: [http://{host}:{port}/hub/static/openapi/stable.yaml](http://localhost:8080/hub/static/openapi/stable.yaml) -* Experimental: [http://{host}:{port}/hub/static/openapi/experimental.yaml](http://localhost:8080/hub/static/openapi/experimental.yaml) -* Contract: [http://{host}:{port}/hub/static/openapi/contract.yaml](http://localhost:8080/hub/static/openapi/contract.yaml) -* Connector: 
[http://{host}:{port}/psql/static/openapi/openapi-http-connector.yaml](http://localhost:8080/psql/static/openapi/openapi-http-connector.yaml) - -#### Swagger UI - -Currently to access the Swagger UI from a browser, use this URI [http://{host}:{port}/hub/static/index.html](http://localhost:8080/hub/static/index.html) - # Testing locally To run tests locally run Gradle `test` task: diff --git a/build.gradle.kts b/build.gradle.kts index 24890e3f6..f0de803de 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -85,7 +85,7 @@ val slf4j_console = "org.slf4j:slf4j-simple:2.0.6"; val log4j_core = "org.apache.logging.log4j:log4j-core:2.20.0" val log4j_api = "org.apache.logging.log4j:log4j-api:2.20.0" val log4j_jcl = "org.apache.logging.log4j:log4j-jcl:2.20.0" -val log4j_slf4j = "org.apache.logging.log4j:log4j-slf4j-impl:2.20.0" +val log4j_slf4j = "org.apache.logging.log4j:log4j-slf4j2-impl:2.20.0" val postgres = "org.postgresql:postgresql:42.5.4" val zaxxer_hikari = "com.zaxxer:HikariCP:5.1.0" @@ -109,6 +109,8 @@ val flipkart_zjsonpatch = "com.flipkart.zjsonpatch:zjsonpatch:0.4.13" val json_assert = "org.skyscreamer:jsonassert:1.5.1" val resillience4j_retry = "io.github.resilience4j:resilience4j-retry:2.0.0" +val otel = "io.opentelemetry:opentelemetry-api:1.28.0" + val mavenUrl = rootProject.properties["mavenUrl"] as String val mavenUser = rootProject.properties["mavenUser"] as String val mavenPassword = rootProject.properties["mavenPassword"] as String @@ -238,12 +240,6 @@ subprojects { targetCompatibility = JavaVersion.VERSION_17 } - testing { - dependencies { - implementation(slf4j_console) - } - } - // Fix transitive dependencies. 
dependencies { @@ -412,6 +408,10 @@ project(":here-naksha-handler-psql") { } } +configurations.implementation { + exclude(module = "commons-logging") +} + project(":here-naksha-lib-handlers") { description = "Naksha Handlers library" dependencies { @@ -451,9 +451,13 @@ project(":here-naksha-lib-handlers") { implementation(project(":here-naksha-lib-core")) implementation(project(":here-naksha-lib-psql")) //implementation(project(":here-naksha-lib-extension")) - implementation(project(":here-naksha-handler-psql")) + //implementation(project(":here-naksha-handler-psql")) implementation(project(":here-naksha-lib-hub")) + implementation(log4j_slf4j) + implementation(log4j_api) + implementation(log4j_core) + implementation(otel) implementation(commons_lang3) implementation(vividsolutions_jts_core) implementation(postgres) @@ -520,6 +524,10 @@ rootProject.tasks.shadowJar { } } +// print app version +rootProject.tasks.register("printAppVersion") { + println(rootProject.version) +} fun Project.setOverallCoverage(minOverallCoverage: Double) { ext.set(minOverallCoverageKey, minOverallCoverage) diff --git a/deployment/codedeploy/appspec.yml b/deployment/codedeploy/appspec.yml new file mode 100644 index 000000000..c15d66b37 --- /dev/null +++ b/deployment/codedeploy/appspec.yml @@ -0,0 +1,131 @@ +# This is an appspec.yml template file for use with an EC2/On-Premises deployment in CodeDeploy. +# The lines in this template starting with the hashtag symbol are +# instructional comments and can be safely left in the file or +# ignored. +# For help completing this file, see the "AppSpec File Reference" in the +# "CodeDeploy User Guide" at +# https://docs.aws.amazon.com/codedeploy/latest/userguide/app-spec-ref.html +version: 0.0 +# Specify "os: linux" if this revision targets Amazon Linux, +# Red Hat Enterprise Linux (RHEL), or Ubuntu Server +# instances. +# Specify "os: windows" if this revision targets Windows Server instances. 
+# (You cannot specify both "os: linux" and "os: windows".) +os: linux +# os: windows +# During the Install deployment lifecycle event (which occurs between the +# BeforeInstall and AfterInstall events), copy the specified files +# in "source" starting from the root of the revision's file bundle +# to "destination" on the Amazon EC2 instance. +# Specify multiple "source" and "destination" pairs if you want to copy +# from multiple sources or to multiple destinations. +# If you are not copying any files to the Amazon EC2 instance, then remove the +# "files" section altogether. A blank or incomplete "files" section +# may cause associated deployments to fail. +file_exists_behavior: OVERWRITE +files: + - source: /contents + destination: /home/admin + - source: /contents/naksha-hub/xyz-hub.service + destination: /etc/systemd/system +# For deployments to Amazon Linux, Ubuntu Server, or RHEL instances, +# you can specify a "permissions" +# section here that describes special permissions to apply to the files +# in the "files" section as they are being copied over to +# the Amazon EC2 instance. +# For more information, see the documentation. +# If you are deploying to Windows Server instances, +# then remove the +# "permissions" section altogether. A blank or incomplete "permissions" +# section may cause associated deployments to fail. 
+permissions: + # change owner for naksha-hub directory + - object: /home/admin + pattern: "naksha-hub" + owner: admin + group: admin + type: + - directory + # change owner for all subdirectories inside naksha-hub + - object: /home/admin/naksha-hub + owner: admin + group: admin + type: + - directory + # change owner for all files in naksha-hub directory (except shell scripts) + - object: /home/admin/naksha-hub + pattern: "**" + except: ["set-app-params.sh","set-auth-keys.sh","set-instance-params.sh","start-app.sh"] + owner: admin + group: admin + type: + - file + # change owner+permission for all shell scripts in naksha-hub directory + - object: /home/admin/naksha-hub + pattern: "*.sh" + owner: admin + group: admin + mode: 755 + type: + - file + # change permission for file xyz-hub.service + - object: /etc/systemd/system + pattern: "xyz-hub.service" + owner: admin + group: admin + type: + - file +# If you are not running any commands on the Amazon EC2 instance, then remove +# the "hooks" section altogether. A blank or incomplete "hooks" section +# may cause associated deployments to fail. +hooks: + # For each deployment lifecycle event, specify multiple "location" entries + # if you want to run multiple scripts during that event. + # You can specify "timeout" as the number of seconds to wait until failing the deployment + # if the specified scripts do not run within the specified time limit for the + # specified event. For example, 900 seconds is 15 minutes. If not specified, + # the default is 1800 seconds (30 minutes). + # Note that the maximum amount of time that all scripts must finish executing + # for each individual deployment lifecycle event is 3600 seconds (1 hour). + # Otherwise, the deployment will stop and CodeDeploy will consider the deployment + # to have failed to the Amazon EC2 instance. 
Make sure that the total number of seconds + # that are specified in "timeout" for all scripts in each individual deployment + # lifecycle event does not exceed a combined 3600 seconds (1 hour). + # For deployments to Amazon Linux, Ubuntu Server, or RHEL instances, + # you can specify "runas" in an event to + # run as the specified user. For more information, see the documentation. + # If you are deploying to Windows Server instances, + # remove "runas" altogether. + # If you do not want to run any commands during a particular deployment + # lifecycle event, remove that event declaration altogether. Blank or + # incomplete event declarations may cause associated deployments to fail. + # During the ApplicationStop deployment lifecycle event, run the commands + # in the script specified in "location" starting from the root of the + # revision's file bundle. + ApplicationStop: + - location: scripts/ApplicationStop.sh + timeout: 30 + runas: admin + + # During the BeforeInstall deployment lifecycle event, run the commands + # in the script specified in "location". + #BeforeInstall: + + # During the AfterInstall deployment lifecycle event, run the commands + # in the script specified in "location". + #AfterInstall: + + # During the ApplicationStart deployment lifecycle event, run the commands + # in the script specified in "location". + ApplicationStart: + - location: scripts/ApplicationStart.sh + timeout: 30 + runas: admin + + # During the ValidateService deployment lifecycle event, run the commands + # in the script specified in "location". 
+ # NOTE : This section is disabled for Naksha, as ELB takes care of checking application health before opening the traffic + #ValidateService: + #- location: scripts/ApplicationHealth.sh + #timeout: 120 + #runas: admin diff --git a/deployment/codedeploy/contents/naksha-hub/.config/auth/dummyJwt.json b/deployment/codedeploy/contents/naksha-hub/.config/auth/dummyJwt.json new file mode 100644 index 000000000..04073fd7b --- /dev/null +++ b/deployment/codedeploy/contents/naksha-hub/.config/auth/dummyJwt.json @@ -0,0 +1,42 @@ +{ + "urm": { + "xyz-hub": { + "readFeatures": [ + {} + ], + "createFeatures": [ + {} + ], + "updateFeatures": [ + {} + ], + "deleteFeatures": [ + {} + ], + "manageSpaces": [ + {} + ], + "adminSpaces": [ + {} + ], + "managePackages": [ + {} + ], + "accessConnectors": [ + {} + ], + "manageConnectors": [ + {} + ], + "useCapabilities": [ + {} + ], + "useAdminCapabilities": [ + {} + ] + } + }, + "aid": "ANONYMOUS", + "iat": 1577833200, + "exp": 2000000000 +} \ No newline at end of file diff --git a/deployment/codedeploy/contents/naksha-hub/.config/cloud-config.json b/deployment/codedeploy/contents/naksha-hub/.config/cloud-config.json new file mode 100644 index 000000000..d5e0d13c9 --- /dev/null +++ b/deployment/codedeploy/contents/naksha-hub/.config/cloud-config.json @@ -0,0 +1,6 @@ +{ + "id": "cloud-config", + "type": "Config", + "httpPort": 7080, + "hubClassName": "com.here.naksha.lib.hub.mock.NakshaHubMock" +} \ No newline at end of file diff --git a/deployment/codedeploy/contents/naksha-hub/set-app-params.sh b/deployment/codedeploy/contents/naksha-hub/set-app-params.sh new file mode 100644 index 000000000..1ba626686 --- /dev/null +++ b/deployment/codedeploy/contents/naksha-hub/set-app-params.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +### Set env params based on App secrets fetched from AWS Secrets Manager + +SECRET_ID="$EC2_ENV/heremap/service/naksha" +SECRET_RESPONSE_JSON=`aws secretsmanager get-secret-value --region $EC2_REGION --secret-id $SECRET_ID` + 
+# Validate that we really got the successful response +# This should match the original secret-id +RESPONSE_SECRET_ID=`echo $SECRET_RESPONSE_JSON | jq -r '.Name'` +if [[ "$SECRET_ID" != "$RESPONSE_SECRET_ID" ]]; then + echo "ERROR :: Couldn't obtain Secrets for [$SECRET_ID]" + exit 1 +fi + +# To convert JSON response secrets into environment variables +while read secret_key_value; +do + #echo "export $secret_key_value" + eval "export $secret_key_value" +done < <(echo $SECRET_RESPONSE_JSON | jq -r '.SecretString | fromjson | to_entries | map(@sh "\(.key)=\(.value)") | .[]') diff --git a/deployment/codedeploy/contents/naksha-hub/set-auth-keys.sh b/deployment/codedeploy/contents/naksha-hub/set-auth-keys.sh new file mode 100644 index 000000000..8521dc618 --- /dev/null +++ b/deployment/codedeploy/contents/naksha-hub/set-auth-keys.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +### Create Auth key files based on App secrets fetched from AWS Secrets Manager + +# Note : -en flag converts '\n' to a new line, while writing into a file) +# Set private key file +echo -en $JWT_PVT_KEY > ${XYZ_CONFIG_PATH}auth/jwt.key +# Set public key file +echo -en $JWT_PUB_KEY > ${XYZ_CONFIG_PATH}auth/jwt.pub diff --git a/deployment/codedeploy/contents/naksha-hub/set-instance-params.sh b/deployment/codedeploy/contents/naksha-hub/set-instance-params.sh new file mode 100644 index 000000000..349b4a614 --- /dev/null +++ b/deployment/codedeploy/contents/naksha-hub/set-instance-params.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +### Set instance parameters based on EC2 instance tags + +### Get Current Instance Id and region +export EC2_INSTANCE_ID=`curl -s http://169.254.169.254/latest/meta-data/instance-id` +export EC2_REGION=`curl -s http://169.254.169.254/latest/meta-data/placement/region` + +if [[ "$EC2_INSTANCE_ID" == "" ]] || [[ "$EC2_REGION" == "" ]]; then + echo "ERROR :: Couldn't obtain EC2 instance metadata information" + exit 1 +fi + + +### Retrieve necessary tags for current instance Id 
+TAGS_RESPONSE_JSON=`aws ec2 describe-tags --filters "Name=resource-id,Values=$EC2_INSTANCE_ID" --region=$EC2_REGION` +# read value for "Environment" tag (i.e. dev, e2e, prd) +export EC2_ENV=`echo $TAGS_RESPONSE_JSON | jq -r '.Tags | .[] | select(.Key == "Environment") | .Value'` +export EC2_INSTANCE_NAME=`echo $TAGS_RESPONSE_JSON | jq -r '.Tags | .[] | select(.Key == "Name") | .Value'` + +if [[ "$EC2_ENV" == "" ]] || [[ "$EC2_INSTANCE_NAME" == "" ]]; then + echo "ERROR :: Couldn't find one or more of mandatory EC2 tags : [Environment, Name]" + exit 1 +fi + +# Convert "dev" to upper case "DEV" : +# echo $environment | tr '[:lower:]' '[:upper:]' +export EC2_ENV_UPPER=`echo $EC2_ENV | tr '[:lower:]' '[:upper:]'` diff --git a/deployment/codedeploy/contents/naksha-hub/start-app.sh b/deployment/codedeploy/contents/naksha-hub/start-app.sh new file mode 100755 index 000000000..4ee3c1e8c --- /dev/null +++ b/deployment/codedeploy/contents/naksha-hub/start-app.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Set static configuration +export NAKSHA_CONFIG_PATH=$(pwd)/.config/ +export CONFIG_ID=cloud-config +export LOG4J_CONFIGURATION_FILE=log4j2-rollingfile.xml + +# Set instance specific parameters +. ./set-instance-params.sh + +# Set application specific parameters +. ./set-app-params.sh + +# Set Auth keys for application +. ./set-auth-keys.sh + +if [ ! 
-d logs ]; then + mkdir logs +fi + +# Set local parameters +export OTEL_RESOURCE_ATTRIBUTES=service.name=${EC2_INSTANCE_NAME},service.namespace=Naksha-${EC2_ENV_UPPER} + +# Print basic parameters (avoid printing secrets) +echo "NAKSHA_CONFIG_PATH : $NAKSHA_CONFIG_PATH" +echo "CONFIG_ID : $CONFIG_ID" +echo "LOG4J_CONFIGURATION_FILE : $LOG4J_CONFIGURATION_FILE" +echo "OTEL_RESOURCE_ATTRIBUTES : $OTEL_RESOURCE_ATTRIBUTES" +echo "EC2_INSTANCE_NAME : $EC2_INSTANCE_NAME" +echo "EC2_ENV : $EC2_ENV" +echo "-Xms : $JVM_XMS" +echo "-Xmx : $JVM_XMX" + +# Start service +java -javaagent:/home/admin/aws-opentelemetry/aws-opentelemetry-agent.jar \ + -server -Xms${JVM_XMS} -Xmx${JVM_XMX} -Xss1024k \ + -XX:+UnlockDiagnosticVMOptions \ + -XX:+UseZGC \ + -XX:+UseNUMA \ + -XX:+UseTLAB -XX:+AlwaysPreTouch \ + -XX:+ExplicitGCInvokesConcurrent \ + --add-opens java.base/jdk.internal.misc=ALL-UNNAMED \ + --add-opens java.base/java.lang=ALL-UNNAMED \ + -Xlog:gc*:file=logs/naksha_gc.log \ + -XX:ErrorFile=logs/naksha_hs_err_pid%p.log \ + -XX:LogFile=logs/naksha_hotspot.log \ + -jar naksha*.jar ${CONFIG_ID} ${NAKSHA_ADMIN_DB_URL} 1> logs/naksha_stdout.txt 2> logs/naksha_stderr.txt diff --git a/deployment/codedeploy/contents/naksha-hub/xyz-hub.service b/deployment/codedeploy/contents/naksha-hub/xyz-hub.service new file mode 100644 index 000000000..93088c7f8 --- /dev/null +++ b/deployment/codedeploy/contents/naksha-hub/xyz-hub.service @@ -0,0 +1,26 @@ +# +# see: https://www.digitalocean.com/community/tutorials/understanding-systemd-units-and-unit-files +# +[Unit] +Description=Naksha-Hub +# After=network.target +# After=systemd-user-sessions.service +# After=network-online.target + +[Service] +User=admin +Type=simple +PIDFile=/run/xyz-hub.pid +WorkingDirectory=/home/admin/naksha-hub +ExecStart=/home/admin/naksha-hub/start-app.sh +# ExecReload=/home/transang/startup.sh reload +# ExecStop=/home/transang/startup.sh stop +# TimeoutSec=30 +Restart=always +#on-failure +RestartSec=5s +# 
StartLimitInterval=350 +# StartLimitBurst=10 + +[Install] +WantedBy=multi-user.target diff --git a/deployment/codedeploy/scripts/ApplicationHealth.sh b/deployment/codedeploy/scripts/ApplicationHealth.sh new file mode 100644 index 000000000..4a6f35461 --- /dev/null +++ b/deployment/codedeploy/scripts/ApplicationHealth.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +#### NOTE : +#### This file is not used at present, as during deployment, ELB itself takes care of checking application health before opening up the traffic + +#### Wait for application to be healthy + +# set internal parameters +APP_PORT=7080 +MAX_TIMEOUT_SEC=120 +SLEEP_INTERVAL_SEC=2 + +# run in a loop waiting for success +START_TIME=`date +%s` +WAIT_TIME=0 + +while [ 1 ]; +do + # Fetch application health status + STATUS=`curl -s localhost:$APP_PORT | jq -r '.status'` + + if [[ "$STATUS" == "OK" ]]; + then + echo "Application is up and running! ... after [$WAIT_TIME] seconds" + exit 0 + fi + + # Exit if we have exceeded max wait time + CRT_TIME=`date +%s` + let "WAIT_TIME=$CRT_TIME - $START_TIME" + + if [[ $WAIT_TIME -gt $MAX_TIMEOUT_SEC ]]; + then + echo "Error:: Exceeded timeout [$MAX_TIMEOUT_SEC] sec, while waiting for application to be healthy." + exit 1 + fi + + echo "Waited [$WAIT_TIME] seconds..." + # Sleep and try checking status again + sleep $SLEEP_INTERVAL_SEC +done + +exit 1 diff --git a/deployment/codedeploy/scripts/ApplicationStart.sh b/deployment/codedeploy/scripts/ApplicationStart.sh new file mode 100644 index 000000000..35fda7a9d --- /dev/null +++ b/deployment/codedeploy/scripts/ApplicationStart.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# Start application +echo "Starting Naksha service..." +sudo systemctl start xyz-hub +echo "Start triggered!" 
diff --git a/deployment/codedeploy/scripts/ApplicationStop.sh b/deployment/codedeploy/scripts/ApplicationStop.sh new file mode 100644 index 000000000..4f8e5e2f9 --- /dev/null +++ b/deployment/codedeploy/scripts/ApplicationStop.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Stop Naksha service (only if it was configured) +CODE=0 +sudo systemctl list-unit-files --all | grep -Fq "xyz-hub.service" || CODE=1 +if [[ $CODE -eq 0 ]]; then + echo ">> Stopping service [xyz-hub]..." + sudo systemctl stop xyz-hub +else + echo ">> Service [xyz-hub] not found. Nothing to stop" +fi diff --git a/docs/BRANCHING.md b/docs/BRANCHING.md new file mode 100644 index 000000000..556fec8f7 --- /dev/null +++ b/docs/BRANCHING.md @@ -0,0 +1,18 @@ +# Branching Strategy & Deployment Triggers + +[img_strategy]: diagrams/branches_and_deployment.png + +The diagram below provides a view of: +* How various branches can be used for enhancements/maintenance/fixes to the repository +* How deployment to various environments gets triggered automatically from various branches + +**In General**: + +| Branch(es) | Description | +|-----------|------------------------------------------------------------------------------------------------------------------| +| `develop` | (under development) should represent most up-to-date (may not be 100% stable) version in **DEV** environment | +| `int` | (integration) should represent most stable version in **E2E** environment | +| `master` | (main) should represent most stable version in **Prod** environment | +| `hotfix_xxx` | (hot fixes) are branches that can be forked from any of the above branches to make and merge a quick fix | + +![Branching_Deployment_strategy][img_strategy] diff --git a/docs/diagrams/branches_and_deployment.drawio b/docs/diagrams/branches_and_deployment.drawio new file mode 100644 index 000000000..d0fa309c7 --- /dev/null +++ b/docs/diagrams/branches_and_deployment.drawio @@ -0,0 +1 @@ 
+7V1bc6M4Fv41eYwLxNWP7STdma2e2myS2pnelynFyDbTGFwgJ878+hUgYSSBLV8EpONOVZclQMI653znKvnKulluvqVwtfg9CVB0BYxgc2XdXgHgOwb5P+94LzscF5Qd8zQMyi5z2/EU/oNoJ31uvg4DlHE34iSJcLjiO6dJHKMp5vpgmiZv/G2zJOJnXcE5kjqepjCSe/8IA7ygvaY73l64R+F8Qaf2gVdeWEJ2M/0m2QIGyVuty7q7sm7SJMHlp+XmBkX52rF1KZ/72nK1erEUxVjlga8/7m82/7u9z+7/MB6XePLoPD9eu345zCuM1vQbT1IYTxdhPCfdTziFGM3f6TfA72xZsrdwGcGYtCZpso4DlE9iklb1LY288RPh6YI2ZkmMKX1NK7+I0+RntahkOSbTJIrgKgtfilmKZ8IoukmiJC1mtWZO/kf65ykMQvK9a9deir9q3NoVt/hHrsgrRhfxFaUYbWpddAW/oWSJcEq+vkGvepSYlJst2nzbsobP+hY1rrB92gkpO86rkbcUIx8o0ZoJeI/c52cPPH53Z/+K/uONb+H6r2tTop9EqwVeRpQ6+RcNCXN/icJ5TPpeEoyTJbmA4uBLLi2kbxrBLAunHGVF+uVjRfAFRRM4/Tkv7mMLHicFV/BUCNAMriMsUd0sp/4aRuwN0SbEf9Y+/8hnHzm0dbuhL1M03mkjgNmiYsC88QAxRmlc9ADDLqaFKWbfkL5iuVAokABgyx87ZSZL1ukU7aLNbl4yRobl2uVYKYogDl/5N2liFjraQxKSd6yGMi2eLz3Aj1C+Kn2ojhFsHHZjMptlqOUeBunm7rnISs8RluYiiw/fa7et8hsyif+rZTpeJIAkEjME8TpFf202G0k6ePh6W4QYPa1gQdc3otFIX01+DkW3GgqNXc+CbiPuCSiHzMBBXsWgEjgps2gr5wGegq6MYuMGEAOOJgyzLhg2XAyzusIwl2dKv0MIM82hYZgtiUSAXlGUrHrDL3fqo5eZCn4FEPmzaYf4ZRo9A5hzAbDhApjTE4CZoEsEc4aGYK4kE+xV+kCvwBu/GIYKes1myJ12il52z+jlXdBruOjldYZeY5NnyzEYCRynFcDGQwMwOTK2hBnhid4wzAcvlqvkQRL/0Q/sLjHM7xnDgC2RJZfKJ9pMUrxI5kkMo7tt72S6Tl8ruvFItX3ge0KM7vKWvxHG75REcI0TnrLHwA6hQfr+Z71Reypvbh8rWtvnAgGJthBoNPGOUfxrBE/tGEYj5aVA78K6lnCZMrCdxEFjSdzNkTEyr7MYrrJFIpsuZMxwlaH9Ig+zVZmZmIWbnMEO8KDavbBCdT4kWYjDJNdSU0Isgk1b/fxduAHnbDyBVGtLt0vqXB90SN5bpWhq6AGa0ENbEN04uwlExfKj2D/aQcBWBIFxV/ZOpbIqM1yvvSNPNzB7ByhEMS46VUUc9XsFLBO+V6W2RG+7UammQnLyeC3aljU5JPOiTclZrAyApVj613Hgs+s4BR9fu+SCE23hDvSg0a0eNIamBy0FQbnoQRWR7SViZvuqIrY7MXBdyJjDMWsFosfKXLugnIt7TYVk+tl0roKf2qHOte3hOZZyHveidAcRWFcOSnVW/2CZvMlo2pY+NTy2bWGyoalhzx2M1t3y+I/alYO07g6d64GtSn5AaUgWLo/MSXLN2k1R3cbcGy+8EkTfeuNJS0qUE9q2/GjxbpQahnZ5NR1FgXWNq0aB7cjlVQminEv9VqQ4JN39cQPHji1kKO3+9btc5XDR73X9Dj66G3BGBQ8MQcH7Wj3tsTzdwFS8KVeemCNzxPbL6HBYWGL8kOS6dsSsa9K2HSmnw6drt7Jfb/Ap11h8MvjsBRstTxUbO6s+sjw+0qK7+kiIQgIwNGwEsmWxSDCBtV43sbz4ju2olVH6U9RpGWXV7q0E6VJHOdxwj9WZlWcKhY3A6rSOElhC1Kh3JGMv1Gcgp/dUyCBrcpRjKm12wKklx6b
g0FvGgYLCIgOeMJAryVyLGJxtv5Yc93l4bOT7wlkQvJZ9nsIyDIJCLFKUhf/A7f54KrVkcGdy5dzuUrf0gAT68FWlLuvstEN+d+bJgGdx6892pR/LH++ND+hLmllao3bqtlK7vXWwD1qZE3uZq/RW9ZlptmRq9+12Wpeo3RBKYVxF7dOh7Sa6hbIe0eqFuoOz3ZoidMbIPCM6XiJ0tQywEATpP0JnffoI3VA9WpYM2w+fnQXxJPgEWuHTFV1febreAZRRSQDQoaU4WuX2XMjaDYCaLu+HDABAWV3N5wVQ7Uiomum1e6up1gyElu/sm65/IBxAFHCwRdQ9b9BVNyVaPLFTCyOY2G1PTjlYXJgoWHySWy6x0B0IlCt+WwKBF9YfAOtbqiHwNiv6VE0x5i0WcMTuGyZFjqB05A2tunlfjqvBKEuuABnOWMJ4DfMhloi8RL7k5IJRnWqW39J0QBAxETHPyzz/UBarG8G0Sz2q3mRxy9ZVE8eK9bnaLFsgWhQNJ882ZbvFjMr57Fr5vIRPZtcOIYaqGkK1d3OXzu2EeoscHd/eN13/pq8cRL2YBEPdWq9cG9cmUgMohGP74ay9ZrVug0COHhcFvjoO1dGxNyJCM6wc60pLrdtuZmgzDmxDwNz+N0YwW/SCeR8A89ie4dP3Spim453FjHBMu42lD4VBqcroDCZC20uLx0pbu40X6RBX7n7yoXyDnqtjHDmkcXv3X9LxW5xhGBPWEuW6ucSEASVF1YxgexjPnwvZ9bcd34vLBR7TnkcKWGbRt4CrfJrp+qWw+EtZz6UxCFOiEkpoJsyRyw+Xd7B5MBB9t3OcyqYN4sGYlwfLkf0/02lA+Ops7PNDvEK988FK/CPRxBKqDq2GQzDtBpLY2iiikKz/tSniCxQZ900RhSjJr00R4VRhu+Go/k4p4skJeYkieWhp1bCO+zPrreuoUn17jk19A9AL7PeAONP/9hOaCyewy96ibK8ll90j2RUOHtIPdX2sOR3GEySxb6BTOHrol6aHz9Oj4VcWuqWHwh6DX5oeY54eDYqqW3oonPlcGgJd63Rb2E0zAJ3uyot1B+4+n05XiCqfID8tJxf1SHYFY1k7ZvWy5sPU6ey3kz8tPQam090h2Lx90mNoOn0sLf/+FNPlmMCdxwS6N/7d5GsjI+46JlD4EcghZK/KPMbZM/a2+Hsm3tGpKsc1Rnz633IFWTlTqsoWigUtmopqratlUZ3m+/lU1bkST75sdf47Ju2bZLkMG2oH9O+u3yNolayobXc5eku+t+9XUkzb4Ol14oZ8ttefD+Zem67LD6EvCenLpmjBC89wfsH8Blb0waT054/B/OrhAzGfRRd49m8t2twWPehXEqplXSXOtYrWtTEaW855pMkWfzD2iJOCGDw7XkeKw3UEl2iP4vCAs+v+odQ4sLMxP4yqqU5v1qxq/H2b5rSpGqECiNh54864QfbrvhAVsISYQB8wbtEqSt6XiNULD3O/gopDsBeYu9m3azuOAF+goZbCZNscO9ng4Mv5k285EhiTFMbTBRoq6dVIqsAb3ZAeSIrLlgjvNdD9iMpV0kyTBNehgizg4vckyHH87v8= \ No newline at end of file diff --git a/docs/diagrams/branches_and_deployment.png b/docs/diagrams/branches_and_deployment.png new file mode 100644 index 000000000..6ab0064a7 Binary files /dev/null and b/docs/diagrams/branches_and_deployment.png differ diff --git a/gradle.properties b/gradle.properties index 16494347c..559aa7801 100644 --- a/gradle.properties +++ b/gradle.properties @@ -7,4 
+7,4 @@ mavenPassword=YourPassword # When updating the version, please as well consider: # - here-naksha-lib-core/INaksha (static property: latest) # - here-naksha-lib-psql/resources/naksha_plpgsql.sql (method: naksha_version) -version=2.0.6 +version=2.0.7 diff --git a/here-naksha-app-service/src/main/java/com/here/naksha/app/service/NakshaApp.java b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/NakshaApp.java index 44f41e558..9e05b8827 100644 --- a/here-naksha-app-service/src/main/java/com/here/naksha/app/service/NakshaApp.java +++ b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/NakshaApp.java @@ -23,8 +23,10 @@ import com.here.naksha.app.service.http.NakshaHttpVerticle; import com.here.naksha.app.service.http.auth.NakshaAuthProvider; +import com.here.naksha.app.service.metrics.OTelMetrics; import com.here.naksha.lib.core.INaksha; import com.here.naksha.lib.core.NakshaAdminCollection; +import com.here.naksha.lib.core.NakshaVersion; import com.here.naksha.lib.core.util.IoHelp; import com.here.naksha.lib.core.util.IoHelp.LoadedBytes; import com.here.naksha.lib.hub.NakshaHubConfig; @@ -117,6 +119,7 @@ private static void printUsage() { * @return The created Naksha-App instance. */ public static @NotNull NakshaApp newInstance(@NotNull String... args) { + log.info("Naksha App v{}", NakshaVersion.latest); final String cfgId; final String url; @@ -190,7 +193,7 @@ public NakshaApp(@NotNull PsqlConfig adminDbConfig, @NotNull String configId, @N try { config = ConfigUtil.readConfigFile(configId, adminDbConfig.appName); } catch (Exception ex) { - log.warn("Error reading supplied custom config, will continue with default. ", ex); + log.warn("No external config available, will attempt using default. 
Error was [{}]", ex.getMessage()); } // Instantiate NakshaHub instance this.hub = NakshaHubFactory.getInstance(adminDbConfig, config, configId); @@ -357,6 +360,9 @@ public void run() { final Thread appThread = this; + // initialize OTel metrics collector + OTelMetrics.init(); + // Add verticles final int processors = Runtime.getRuntime().availableProcessors(); verticles = new NakshaHttpVerticle[processors]; diff --git a/here-naksha-app-service/src/main/java/com/here/naksha/app/service/http/NakshaHttpVerticle.java b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/http/NakshaHttpVerticle.java index 1f8bae1d4..18f80543d 100644 --- a/here-naksha-app-service/src/main/java/com/here/naksha/app/service/http/NakshaHttpVerticle.java +++ b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/http/NakshaHttpVerticle.java @@ -50,6 +50,8 @@ import com.here.naksha.app.service.http.apis.StorageApi; import com.here.naksha.app.service.http.apis.WriteFeatureApi; import com.here.naksha.app.service.http.auth.NakshaJwtAuthHandler; +import com.here.naksha.app.service.util.logging.AccessLog; +import com.here.naksha.app.service.util.logging.AccessLogUtil; import com.here.naksha.lib.core.AbstractTask; import com.here.naksha.lib.core.INaksha; import com.here.naksha.lib.core.NakshaContext; @@ -64,6 +66,7 @@ import com.here.naksha.lib.core.storage.ModifyFeaturesResp; import com.here.naksha.lib.core.util.IoHelp; import com.here.naksha.lib.core.util.MIMEType; +import com.here.naksha.lib.core.util.StreamInfo; import com.here.naksha.lib.hub.NakshaHubConfig; import io.netty.handler.codec.http.HttpResponseStatus; import io.vertx.core.Handler; @@ -93,7 +96,6 @@ import java.util.List; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import org.apache.commons.lang3.RandomStringUtils; import org.jetbrains.annotations.NotNull; @@ -154,7 +156,7 @@ 
public NakshaHttpVerticle(@NotNull INaksha naksha, int index, @NotNull NakshaApp @Override public void start(final @NotNull Promise startPromise) { - RouterBuilder.create(vertx, "static/openapi.yaml").onComplete(ar -> { + RouterBuilder.create(vertx, "swagger/openapi.yaml").onComplete(ar -> { try { if (ar.failed()) { throw ar.cause(); @@ -203,6 +205,8 @@ public void start(final @NotNull Promise startPromise) { // Static resources route. router.route("/hub/static/*").handler(this::onResourceRequest); + // Swagger doc route. + router.route("/hub/swagger/*").handler(this::onResourceRequest); // Optional: Web server. if (staticHandler != null) { @@ -219,6 +223,10 @@ public void start(final @NotNull Promise startPromise) { // If any error happened that was not handled otherwise. router.route().failureHandler(this::failureHandler); + // starts at the 2nd route, since the first one is automatically added from openapi's + // RouterBuilder.createRouter + router.route().order(1).handler(this::onNewRequest); + // Add the HTTP server. // When several HTTP servers listen on the same port, vert.x orchestrates the request handling using a // round-robin strategy. @@ -277,7 +285,7 @@ private void onResourceRequest(@NotNull RoutingContext routingContext) { sendRawResponse(routingContext, OK, contentType(path), cachedBuffer); return; } - if (!path.startsWith("/hub/static/")) { + if (!path.startsWith("/hub/static/") && !path.startsWith("/hub/swagger/")) { routingContext.next(); return; } @@ -338,7 +346,12 @@ private void notFoundHandler(final @NotNull RoutingContext routingContext) { * * @param routingContext The routing context. 
*/ - private void onHeadersEnd(final @NotNull RoutingContext routingContext) {} + private void onHeadersEnd(final @NotNull RoutingContext routingContext) { + final StreamInfo streamInfo = AccessLogUtil.getStreamInfo(routingContext); + if (streamInfo != null) { + routingContext.response().putHeader(STREAM_INFO, streamInfo.toColonSeparatedString()); + } + } /** * An end handler for the response. This will be called when the response is disposed to allow consistent cleanup of the response. @@ -349,11 +362,10 @@ private void onResponseEnd(final @NotNull RoutingContext routingContext) { log.info("The request was cancelled. No response has been sent."); onRequestCancelled(routingContext); } - // TODO: We need to rewrite the LogUtil, because we now (with SLF4J) have structured logs: - // log.atInfo().setMessage("foo").addKeyValue("reqInfo", reqInfo)... - // routingContextLogger(routingContext).info("{}", LogUtil.responseToLogEntry(routingContext)); - // LogUtil.addResponseInfo(routingContext).end(); - // LogUtil.writeAccessLog(routingContext); + final AccessLog accessLog = AccessLogUtil.addResponseInfo(routingContext); + if (accessLog == null) return; + accessLog.end(); + AccessLogUtil.writeAccessLog(routingContext); } private static final HttpResponseStatus CLIENT_CLOSED_REQUEST = @@ -398,10 +410,8 @@ private void maxRequestSizeHandler(@NotNull RoutingContext routingContext) { * @param routingContext The routing context. */ private void onNewRequest(final @NotNull RoutingContext routingContext) { - routingContext.response().putHeader(STREAM_ID, streamId(routingContext)); - routingContext.response().putHeader(STRICT_TRANSPORT_SECURITY, "max-age=" + TimeUnit.MINUTES.toSeconds(1)); - // TODO: Add request information, first read in onResponseEnd. - // LogUtil.addRequestInfo(routingContext); + // Add request information, first read in onResponseEnd. 
+ AccessLogUtil.addRequestInfo(routingContext); routingContext.response().endHandler(v -> onResponseEnd(routingContext)); routingContext.addHeadersEndHandler(v -> onHeadersEnd(routingContext)); routingContext.next(); @@ -716,6 +726,8 @@ public void sendRawResponse( public @NotNull NakshaContext createNakshaContext(final @NotNull RoutingContext routingContext) { final NakshaContext ctx = new NakshaContext(streamId(routingContext)); ctx.setAppId(hubConfig.appId); + // add streamInfo object to NakshaContext, which will be populated later during pipeline execution + ctx.attachStreamInfo(AccessLogUtil.getStreamInfo(routingContext)); // TODO : Author to be set based on JWT token. // ctx.setAuthor(); return ctx; diff --git a/here-naksha-app-service/src/main/java/com/here/naksha/app/service/http/tasks/AbstractApiTask.java b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/http/tasks/AbstractApiTask.java index 275fe5c4b..795820b43 100644 --- a/here-naksha-app-service/src/main/java/com/here/naksha/app/service/http/tasks/AbstractApiTask.java +++ b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/http/tasks/AbstractApiTask.java @@ -141,7 +141,7 @@ protected AbstractApiTask( return verticle.sendXyzResponse( routingContext, HttpResponseType.FEATURE_COLLECTION, - new XyzFeatureCollection().withInsertedFeatures(features)); + new XyzFeatureCollection().withFeatures(features)); } catch (NoCursor | NoSuchElementException emptyException) { logger.info("No data found in ResultCursor, returning empty collection"); return verticle.sendXyzResponse( diff --git a/here-naksha-app-service/src/main/java/com/here/naksha/app/service/metrics/OTelMetrics.java b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/metrics/OTelMetrics.java new file mode 100644 index 000000000..862e763cd --- /dev/null +++ b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/metrics/OTelMetrics.java @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2017-2023 HERE 
Europe B.V. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + * License-Filename: LICENSE + */ +package com.here.naksha.app.service.metrics; + +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.metrics.Meter; +import java.math.BigDecimal; +import java.math.RoundingMode; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class OTelMetrics { + + private static final Logger logger = LoggerFactory.getLogger(OTelMetrics.class); + private static final Meter meter = GlobalOpenTelemetry.meterBuilder("io.opentelemetry.metrics.memory") + .setInstrumentationVersion("1.28.0") // as per otel.version in pom.xml + .build(); + + public static void init() { + // This will keep collecting memory utilization in background + meter.gaugeBuilder("mem_used_pct") + .setDescription("Heap-Memory used percentage") + .setUnit("percent") + .buildWithCallback((r) -> { + Runtime rt = Runtime.getRuntime(); + long max = rt.maxMemory(); + long total = rt.totalMemory(); + long free = rt.freeMemory(); + long used = total - free; + double usedPct = ((double) used / max) * 100.00; + BigDecimal bd = new BigDecimal(usedPct).setScale(2, RoundingMode.HALF_EVEN); + r.record(bd.doubleValue()); + }); + } +} diff --git a/here-naksha-app-service/src/main/java/com/here/naksha/app/service/util/logging/AccessLog.java b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/util/logging/AccessLog.java new 
file mode 100644 index 000000000..ed3fc1926 --- /dev/null +++ b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/util/logging/AccessLog.java @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2017-2023 HERE Europe B.V. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + * License-Filename: LICENSE + */ +package com.here.naksha.app.service.util.logging; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import com.here.naksha.lib.core.util.StreamInfo; +import com.here.naksha.lib.core.util.json.JsonSerializable; +import java.time.Instant; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; + +@JsonPropertyOrder({"time"}) +@JsonInclude(Include.ALWAYS) +public class AccessLog extends AccessLogExtended implements JsonSerializable { + public ClientInfo clientInfo; + public RequestInfo reqInfo; + public ResponseInfo respInfo; + public StreamInfo streamInfo; + + public AccessLog() { + super(); + clientInfo = new ClientInfo(); + reqInfo = new RequestInfo(); + respInfo = new ResponseInfo(); + streamInfo = new StreamInfo(); + } + + public void end() { + super.end(); + } + + @JsonInclude(Include.ALWAYS) + public static class RequestInfo extends RequestInfoExtended { + public String method; + public String uri; + public String contentType; + public String accept; + public long 
size; + public String referer; + public String origin; + } + + @JsonInclude(Include.ALWAYS) + public static class ResponseInfo { + public long statusCode; + public String statusMsg; + public long size; + public String contentType; + } + + @JsonInclude(Include.ALWAYS) + public static class ClientInfo { + public String remoteAddress; + public String ip; + public String userAgent; + public String realm; + public String userId; + public String appId; + } +} + +@JsonInclude(Include.ALWAYS) +class AccessLogExtended { + private static DateTimeFormatter dtFormatter = + DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss,SSS").withZone(ZoneId.of("UTC")); + private long start; + public String t = "STREAM"; + public String src; + public String streamId; + public long unixtime; + public String time; + public long ns; + public long ms; + + public AccessLogExtended() { + start = System.nanoTime(); + } + + public void end() { + final Instant now = Instant.now(); + long end = System.nanoTime(); + + unixtime = now.toEpochMilli(); + time = dtFormatter.format(now); + ns = end - start; + ms = ns / 1000 / 1000; + } +} + +@JsonInclude(Include.ALWAYS) +class RequestInfoExtended { + public String contentType; + public String accept; +} diff --git a/here-naksha-app-service/src/main/java/com/here/naksha/app/service/util/logging/AccessLogUtil.java b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/util/logging/AccessLogUtil.java new file mode 100644 index 000000000..b4ea12b79 --- /dev/null +++ b/here-naksha-app-service/src/main/java/com/here/naksha/app/service/util/logging/AccessLogUtil.java @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2017-2023 HERE Europe B.V. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + * License-Filename: LICENSE + */ +package com.here.naksha.app.service.util.logging; + +import static io.vertx.core.http.HttpHeaders.*; +import static io.vertx.core.http.HttpMethod.*; + +import com.here.naksha.app.service.http.auth.JWTPayload; +import com.here.naksha.lib.core.util.StreamInfo; +import io.vertx.core.MultiMap; +import io.vertx.core.http.HttpMethod; +import io.vertx.core.json.jackson.DatabindCodec; +import io.vertx.ext.web.RoutingContext; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.regex.Pattern; +import org.apache.commons.lang3.StringUtils; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.slf4j.LoggerFactory; + +public class AccessLogUtil { + + private static final org.slf4j.Logger logger = LoggerFactory.getLogger(AccessLogUtil.class); + private static final String REALM = "rlm"; + + private static final String JWT = "jwt"; + public static final String STREAM_INFO_CTX_KEY = "streamInfo"; + private static final String ACCESS_LOG = "accessLog"; + public static final String X_FORWARDED_FOR = "X-Forwarded-For"; + private static List skipLoggingHeaders = Collections.singletonList(X_FORWARDED_FOR); + + private static final String IPV4_REGEX = "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(\\.(?!$)|$)){4}$"; + private static final String IPV6_STD_REGEX = "^(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$"; + private static final String IPV6_HEX_COMPRESSED_REGEX = + 
"^((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)$"; + + private static final Pattern IPV4_PATTERN = Pattern.compile(IPV4_REGEX); + private static final Pattern IPV6_STD_PATTERN = Pattern.compile(IPV6_STD_REGEX); + private static final Pattern IPV6_HEX_COMPRESSED_PATTERN = Pattern.compile(IPV6_HEX_COMPRESSED_REGEX); + + private static String getIp(RoutingContext context) { + String ips = context.request().getHeader(X_FORWARDED_FOR); + if (!StringUtils.isEmpty(ips)) { + String ip = ips.split(", ")[0]; + + if (IPV4_PATTERN.matcher(ip).matches() + || IPV6_STD_PATTERN.matcher(ip).matches() + || IPV6_HEX_COMPRESSED_PATTERN.matcher(ip).matches()) { + return ip; + } + } + + return context.request().connection().remoteAddress().host(); + } + + private static void appendHeaders(MultiMap headers, StringBuilder buf) { + for (Map.Entry header : headers) { + if (!skipLoggingHeaders.contains(header.getKey())) { + buf.append(header.getKey()); + buf.append(" : "); + buf.append(header.getValue()); + buf.append('\n'); + } + } + } + + private static @NotNull AccessLog getOrCreateAccessLog(final @NotNull RoutingContext context) { + AccessLog accessLog = getAccessLog(context); + if (accessLog == null) { + accessLog = new AccessLog(); + context.put(ACCESS_LOG, accessLog); + } + return accessLog; + } + + public static @Nullable AccessLog getAccessLog(final @Nullable RoutingContext context) { + return (context == null) ? 
null : context.get(ACCESS_LOG); + } + + public static @Nullable JWTPayload getOrCreateJWT(final @Nullable RoutingContext context) { + if (context == null) { + return null; + } + JWTPayload payload = context.get(JWT); + if (payload == null && context.user() != null) { + payload = DatabindCodec.mapper().convertValue(context.user().principal(), JWTPayload.class); + context.put(JWT, payload); + } + + return payload; + } + + public static @Nullable StreamInfo getStreamInfo(final @Nullable RoutingContext context) { + return (context == null) ? null : context.get(STREAM_INFO_CTX_KEY); + } + + /** + * Add the basic request information into the AccessLog. + * + * @param context the routing context. + */ + public static void addRequestInfo(final @Nullable RoutingContext context) { + if (context == null) return; + final AccessLog accessLog = getOrCreateAccessLog(context); + final HttpMethod method = context.request().method(); + accessLog.reqInfo.method = context.request().method().name(); + // Remove access_token part from uri for security concerns + final String uri = context.request().uri(); + final int endPos = uri.indexOf("?"); + accessLog.reqInfo.uri = (endPos > 0) ? uri.substring(0, endPos) : uri; + accessLog.reqInfo.referer = context.request().getHeader(REFERER); + accessLog.reqInfo.origin = context.request().getHeader(ORIGIN); + if (POST.equals(method) || PUT.equals(method) || PATCH.equals(method)) { + accessLog.reqInfo.size = context.body() == null ? 
0 : context.body().length(); + } + accessLog.clientInfo.ip = getIp(context); + accessLog.clientInfo.remoteAddress = + context.request().connection().remoteAddress().toString(); + accessLog.clientInfo.userAgent = context.request().getHeader(USER_AGENT); + accessLog.clientInfo.realm = context.request().getHeader(REALM); + accessLog.reqInfo.contentType = context.request().getHeader(CONTENT_TYPE); + accessLog.reqInfo.accept = context.request().getHeader(ACCEPT); + + context.put(STREAM_INFO_CTX_KEY, accessLog.streamInfo); + } + + /** + * Add the response information into the AccessLog object. + * As the authentication is done after the request has been received, this method will as well add + * the clientInfo to the request information. So, even while the clientInfo is part of the request + * information, for technical reasons it's added together with the response information, + * because the JWT token is processed after the {@link #addRequestInfo(RoutingContext)} was invoked + * and therefore this method does not have the necessary information. 
+ * + * @param context the routing context + */ + public static @Nullable AccessLog addResponseInfo(final @Nullable RoutingContext context) { + if (context == null) return null; + final AccessLog accessLog = getAccessLog(context); + if (accessLog == null) return null; + accessLog.respInfo.statusCode = context.response().getStatusCode(); + accessLog.respInfo.statusMsg = context.response().getStatusMessage(); + accessLog.respInfo.size = context.response().bytesWritten(); + accessLog.respInfo.contentType = context.response().headers().get(CONTENT_TYPE); + + final JWTPayload tokenPayload = getOrCreateJWT(context); + if (tokenPayload != null) { + accessLog.clientInfo.userId = tokenPayload.aid; + accessLog.clientInfo.appId = tokenPayload.cid; + } + return accessLog; + } + + public static void writeAccessLog(final @Nullable RoutingContext context) { + if (context == null) return; + final AccessLog accessLog = getAccessLog(context); + if (accessLog == null) return; + + logger.info(accessLog.serialize()); + + // Log relevant details for generating API metrics + final AccessLog.RequestInfo req = accessLog.reqInfo; + final AccessLog.ResponseInfo res = accessLog.respInfo; + final StreamInfo si = accessLog.streamInfo; + logger.info( + "[REST API stats => eventType,spaceId,storageId,method,uri,status,timeTakenMs,resSize] - RESTAPIStats {} {} {} {} {} {} {}", + (si == null || si.getSpaceId() == null || si.getSpaceId().isEmpty()) ? "-" : si.getSpaceId(), + (si == null || si.getStorageId() == null || si.getStorageId().isEmpty()) ? 
"-" : si.getStorageId(), + req.method, + req.uri, + res.statusCode, + accessLog.ms, + res.size); + } +} diff --git a/here-naksha-app-service/src/main/resources/log4j2-rollingfile.xml b/here-naksha-app-service/src/main/resources/log4j2-rollingfile.xml new file mode 100644 index 000000000..e92cff42b --- /dev/null +++ b/here-naksha-app-service/src/main/resources/log4j2-rollingfile.xml @@ -0,0 +1,24 @@ + + + + %d{yyyy-MM-dd HH:mm:ss.SSS XX} [%-5p] %marker [%t] - %c{-3} (%M:%L) %X - %.-4096msg %enc{%ex}{JSON}%n + + + + + + + + + + + + + + + + + + + + diff --git a/here-naksha-app-service/src/main/resources/log4j2.component.properties b/here-naksha-app-service/src/main/resources/log4j2.component.properties deleted file mode 100644 index 85b48361f..000000000 --- a/here-naksha-app-service/src/main/resources/log4j2.component.properties +++ /dev/null @@ -1,24 +0,0 @@ -# -# Copyright (C) 2017-2019 HERE Europe B.V. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 -# License-Filename: LICENSE -# - -log4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector -log4j2.asyncLoggerRingBufferSize=65536 -log4j2.asyncQueueFullPolicy=Discard -log4j2.discardThreshold=INFO -log4j2.clock=CachedClock diff --git a/here-naksha-app-service/src/main/resources/log4j2.xml b/here-naksha-app-service/src/main/resources/log4j2.xml new file mode 100644 index 000000000..252670d31 --- /dev/null +++ b/here-naksha-app-service/src/main/resources/log4j2.xml @@ -0,0 +1,29 @@ + + + + + %d{yyyy-MM-dd HH:mm:ss.SSS XX} [%-5p] %marker [%t] - %c{-3} (%M:%L) %X - %.-4096msg %enc{%ex}{JSON}%n + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/here-naksha-app-service/src/main/resources/static/favicon-16x16.png b/here-naksha-app-service/src/main/resources/swagger/favicon-16x16.png similarity index 100% rename from here-naksha-app-service/src/main/resources/static/favicon-16x16.png rename to here-naksha-app-service/src/main/resources/swagger/favicon-16x16.png diff --git a/here-naksha-app-service/src/main/resources/static/favicon-32x32.png b/here-naksha-app-service/src/main/resources/swagger/favicon-32x32.png similarity index 100% rename from here-naksha-app-service/src/main/resources/static/favicon-32x32.png rename to here-naksha-app-service/src/main/resources/swagger/favicon-32x32.png diff --git a/here-naksha-app-service/src/main/resources/static/index.css b/here-naksha-app-service/src/main/resources/swagger/index.css similarity index 100% rename from here-naksha-app-service/src/main/resources/static/index.css rename to here-naksha-app-service/src/main/resources/swagger/index.css diff --git a/here-naksha-app-service/src/main/resources/static/index.html b/here-naksha-app-service/src/main/resources/swagger/index.html similarity index 100% rename from here-naksha-app-service/src/main/resources/static/index.html rename to here-naksha-app-service/src/main/resources/swagger/index.html diff 
--git a/here-naksha-app-service/src/main/resources/static/oauth2-redirect.html b/here-naksha-app-service/src/main/resources/swagger/oauth2-redirect.html similarity index 100% rename from here-naksha-app-service/src/main/resources/static/oauth2-redirect.html rename to here-naksha-app-service/src/main/resources/swagger/oauth2-redirect.html diff --git a/here-naksha-app-service/src/main/resources/static/openapi.yaml b/here-naksha-app-service/src/main/resources/swagger/openapi.yaml similarity index 99% rename from here-naksha-app-service/src/main/resources/static/openapi.yaml rename to here-naksha-app-service/src/main/resources/swagger/openapi.yaml index 748728554..6eb4c47b8 100644 --- a/here-naksha-app-service/src/main/resources/static/openapi.yaml +++ b/here-naksha-app-service/src/main/resources/swagger/openapi.yaml @@ -3,10 +3,10 @@ openapi: "3.0.2" servers: - url: "http://localhost:8080/" description: "Local" - - url: "https://naksha.ext.mapcreator.here.com/" - description: "PRD" - - url: "https://naksha-e2e.ext.mapcreator.here.com/" - description: "E2E" + #- url: "https://naksha.ext.mapcreator.here.com/" + # description: "PRD" + #- url: "https://naksha-e2e.ext.mapcreator.here.com/" + # description: "E2E" - url: "https://naksha-dev.ext.mapcreator.here.com/" description: "DEV" @@ -168,23 +168,19 @@ paths: '513': $ref: '#/components/responses/ErrorResponse513' '/hub/handlers': - post: + get: tags: - Manage Event Handlers - summary: Create event handler. - description: Create a new event handler. - operationId: postHandler - requestBody: - $ref: '#/components/requestBodies/HandlerRequest' + summary: List event handlers + description: Lists the event handlers, which the current authenticated user has access to. 
+ operationId: getHandlers responses: '200': - $ref: '#/components/responses/HandlerResponse' + $ref: '#/components/responses/HandlersResponse' '401': $ref: '#/components/responses/ErrorResponse401' '403': $ref: '#/components/responses/ErrorResponse403' - '409': - $ref: '#/components/responses/ErrorResponse409' '429': $ref: '#/components/responses/ErrorResponse429' '500': @@ -195,19 +191,23 @@ paths: $ref: '#/components/responses/ErrorResponse504' '513': $ref: '#/components/responses/ErrorResponse513' - get: + post: tags: - Manage Event Handlers - summary: List event handlers - description: Lists the event handlers, which the current authenticated user has access to. - operationId: getHandlers + summary: Create event handler. + description: Create a new event handler. + operationId: postHandler + requestBody: + $ref: '#/components/requestBodies/HandlerRequest' responses: '200': - $ref: '#/components/responses/HandlersResponse' + $ref: '#/components/responses/HandlerResponse' '401': $ref: '#/components/responses/ErrorResponse401' '403': $ref: '#/components/responses/ErrorResponse403' + '409': + $ref: '#/components/responses/ErrorResponse409' '429': $ref: '#/components/responses/ErrorResponse429' '500': @@ -286,23 +286,19 @@ paths: '513': $ref: '#/components/responses/ErrorResponse513' '/hub/spaces': - post: + get: tags: - Manage Spaces - summary: Create new Space - description: Create new Space - operationId: postSpace - requestBody: - $ref: '#/components/requestBodies/SpaceRequest' + summary: List spaces + description: Lists the spaces, which the current authenticated user has access to. 
+ operationId: getSpaces responses: '200': - $ref: '#/components/responses/SpaceResponse' + $ref: '#/components/responses/SpacesResponse' '401': $ref: '#/components/responses/ErrorResponse401' '403': $ref: '#/components/responses/ErrorResponse403' - '409': - $ref: '#/components/responses/ErrorResponse409' '429': $ref: '#/components/responses/ErrorResponse429' '500': @@ -313,19 +309,23 @@ paths: $ref: '#/components/responses/ErrorResponse504' '513': $ref: '#/components/responses/ErrorResponse513' - get: + post: tags: - Manage Spaces - summary: List spaces - description: Lists the spaces, which the current authenticated user has access to. - operationId: getSpaces + summary: Create new Space + description: Create new Space + operationId: postSpace + requestBody: + $ref: '#/components/requestBodies/SpaceRequest' responses: '200': - $ref: '#/components/responses/SpacesResponse' + $ref: '#/components/responses/SpaceResponse' '401': $ref: '#/components/responses/ErrorResponse401' '403': $ref: '#/components/responses/ErrorResponse403' + '409': + $ref: '#/components/responses/ErrorResponse409' '429': $ref: '#/components/responses/ErrorResponse429' '500': @@ -337,18 +337,16 @@ paths: '513': $ref: '#/components/responses/ErrorResponse513' '/hub/spaces/{spaceId}': - put: + get: tags: - Manage Spaces - summary: Update a Space with given ID - description: Update already existing Space - operationId: putSpace - requestBody: - $ref: '#/components/requestBodies/SpaceRequest' + summary: Get the space with the specific ID. + description: Return the space detail corresponding to the given ID if the current authenticated user has access to. 
+ operationId: getSpaceById parameters: - name: spaceId in: path - description: ID of Space to update + description: ID of space to fetch required: true schema: type: string @@ -359,8 +357,8 @@ paths: $ref: '#/components/responses/ErrorResponse401' '403': $ref: '#/components/responses/ErrorResponse403' - '409': - $ref: '#/components/responses/ErrorResponse409' + '404': + $ref: '#/components/responses/ErrorResponse404' '429': $ref: '#/components/responses/ErrorResponse429' '500': @@ -371,16 +369,18 @@ paths: $ref: '#/components/responses/ErrorResponse504' '513': $ref: '#/components/responses/ErrorResponse513' - get: + put: tags: - Manage Spaces - summary: Get the space with the specific ID. - description: Return the space detail corresponding to the given ID if the current authenticated user has access to. - operationId: getSpaceById + summary: Update a Space with given ID + description: Update already existing Space + operationId: putSpace + requestBody: + $ref: '#/components/requestBodies/SpaceRequest' parameters: - name: spaceId in: path - description: ID of space to fetch + description: ID of Space to update required: true schema: type: string @@ -391,8 +391,8 @@ paths: $ref: '#/components/responses/ErrorResponse401' '403': $ref: '#/components/responses/ErrorResponse403' - '404': - $ref: '#/components/responses/ErrorResponse404' + '409': + $ref: '#/components/responses/ErrorResponse409' '429': $ref: '#/components/responses/ErrorResponse429' '500': diff --git a/here-naksha-app-service/src/main/resources/static/swagger-initializer.js b/here-naksha-app-service/src/main/resources/swagger/swagger-initializer.js similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-initializer.js rename to here-naksha-app-service/src/main/resources/swagger/swagger-initializer.js diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui-bundle.js b/here-naksha-app-service/src/main/resources/swagger/swagger-ui-bundle.js similarity 
index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui-bundle.js rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui-bundle.js diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui-bundle.js.map b/here-naksha-app-service/src/main/resources/swagger/swagger-ui-bundle.js.map similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui-bundle.js.map rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui-bundle.js.map diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui-es-bundle-core.js b/here-naksha-app-service/src/main/resources/swagger/swagger-ui-es-bundle-core.js similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui-es-bundle-core.js rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui-es-bundle-core.js diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui-es-bundle-core.js.map b/here-naksha-app-service/src/main/resources/swagger/swagger-ui-es-bundle-core.js.map similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui-es-bundle-core.js.map rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui-es-bundle-core.js.map diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui-es-bundle.js b/here-naksha-app-service/src/main/resources/swagger/swagger-ui-es-bundle.js similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui-es-bundle.js rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui-es-bundle.js diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui-es-bundle.js.map b/here-naksha-app-service/src/main/resources/swagger/swagger-ui-es-bundle.js.map similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui-es-bundle.js.map rename to 
here-naksha-app-service/src/main/resources/swagger/swagger-ui-es-bundle.js.map diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui-standalone-preset.js b/here-naksha-app-service/src/main/resources/swagger/swagger-ui-standalone-preset.js similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui-standalone-preset.js rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui-standalone-preset.js diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui-standalone-preset.js.map b/here-naksha-app-service/src/main/resources/swagger/swagger-ui-standalone-preset.js.map similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui-standalone-preset.js.map rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui-standalone-preset.js.map diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui.css b/here-naksha-app-service/src/main/resources/swagger/swagger-ui.css similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui.css rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui.css diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui.css.map b/here-naksha-app-service/src/main/resources/swagger/swagger-ui.css.map similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui.css.map rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui.css.map diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui.js b/here-naksha-app-service/src/main/resources/swagger/swagger-ui.js similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui.js rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui.js diff --git a/here-naksha-app-service/src/main/resources/static/swagger-ui.js.map b/here-naksha-app-service/src/main/resources/swagger/swagger-ui.js.map 
similarity index 100% rename from here-naksha-app-service/src/main/resources/static/swagger-ui.js.map rename to here-naksha-app-service/src/main/resources/swagger/swagger-ui.js.map diff --git a/here-naksha-app-service/src/main/resources/unused_backup/log4j2-rollingfile-plain.json b/here-naksha-app-service/src/main/resources/unused_backup/log4j2-rollingfile-plain.json new file mode 100644 index 000000000..a1a8e0345 --- /dev/null +++ b/here-naksha-app-service/src/main/resources/unused_backup/log4j2-rollingfile-plain.json @@ -0,0 +1,34 @@ +{ + "configuration": { + "status": "info", + "name": "Default Log Config", + "packages": "com.here.xyz", + "appenders": { + "RollingFile": { + "name": "RollingFile", + "fileName": "log/xyz_stdout.txt", + "filePattern": "log/xyz_stdout.txt.%i", + "PatternLayout": { + "pattern": "%d %-5p %c %marker %.-4096msg %enc{%ex}{JSON}%n%xEx{none}" + }, + "Policies": { + "SizeBasedTriggeringPolicy": { + "size": "200 MB" + } + }, + "DefaultRolloverStrategy": { + "fileIndex": "min", + "max": "50" + } + } + }, + "loggers": { + "root": { + "level": "info", + "AppenderRef": { + "ref": "RollingFile" + } + } + } + } +} diff --git a/here-naksha-app-service/src/main/resources/log4j2-console-plain.json b/here-naksha-app-service/src/main/resources/unused_backup/log4j2.json similarity index 74% rename from here-naksha-app-service/src/main/resources/log4j2-console-plain.json rename to here-naksha-app-service/src/main/resources/unused_backup/log4j2.json index ef25782a7..0e97ce18e 100644 --- a/here-naksha-app-service/src/main/resources/log4j2-console-plain.json +++ b/here-naksha-app-service/src/main/resources/unused_backup/log4j2.json @@ -2,12 +2,12 @@ "configuration": { "status": "info", "name": "Default Log Config", - "packages": "com.here.xyz", + "packages": "com.here.naksha", "appenders": { "Console": { "name": "STDOUT", "PatternLayout": { - "pattern": "%d %-5p %c{1} %.-4096msg %enc{%ex}{JSON}%n%xEx{none}" + "pattern": "%d %-5p %c %marker %.-4096msg 
%enc{%ex}{JSON}%n%xEx{none}" } } }, diff --git a/here-naksha-handler-psql/src/main/resources/log4j2.properties b/here-naksha-handler-psql/src/main/resources/log4j2.properties deleted file mode 100644 index 5cb518257..000000000 --- a/here-naksha-handler-psql/src/main/resources/log4j2.properties +++ /dev/null @@ -1,29 +0,0 @@ -# -# Copyright (C) 2017-2020 HERE Europe B.V. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# License-Filename: LICENSE -# - -packages=com.amazonaws.services.lambda.runtime.log4j2 - -#Define the LAMBDA appender -appender.LAMBDA.type = Lambda -appender.LAMBDA.name = LAMBDA -appender.LAMBDA.layout.type = PatternLayout -appender.LAMBDA.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} <%X{AWSRequestId}> %-5p %c:%.-4096m%n - -rootLogger.level = ${env:LOG_LEVEL:-info} -rootLogger.appenderRef.stdout.ref = LAMBDA diff --git a/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/AbstractTask.java b/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/AbstractTask.java index 3802abf1f..6e94a6c3b 100644 --- a/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/AbstractTask.java +++ b/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/AbstractTask.java @@ -219,7 +219,7 @@ public void attachToCurrentThread() { this.thread = thread; this.oldName = threadName; this.oldUncaughtExceptionHandler = threadUncaughtExceptionHandler; - thread.setName(context.getStreamId()); + // 
thread.setName(context.getStreamId()); thread.setUncaughtExceptionHandler(this); MDC.put("streamId", context.getStreamId()); } @@ -238,7 +238,7 @@ public void detachFromCurrentThread() { throw new IllegalStateException("Can't unbind from foreign thread"); } assert oldName != null; - thread.setName(oldName); + // thread.setName(oldName); thread.setUncaughtExceptionHandler(oldUncaughtExceptionHandler); this.thread = null; this.oldName = null; diff --git a/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/NakshaContext.java b/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/NakshaContext.java index b10225d71..f90e9327c 100644 --- a/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/NakshaContext.java +++ b/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/NakshaContext.java @@ -22,6 +22,7 @@ import com.here.naksha.lib.core.exceptions.Unauthorized; import com.here.naksha.lib.core.util.NanoTime; +import com.here.naksha.lib.core.util.StreamInfo; import java.util.concurrent.ConcurrentHashMap; import org.apache.commons.lang3.RandomStringUtils; import org.jetbrains.annotations.ApiStatus.AvailableSince; @@ -280,6 +281,28 @@ public long startNanos() { return this; } + /** + * Sets the given streamInfo object in the {@link #attachments()}, if it is not null. + * + * @param streamInfo the streamInfo object to be added + */ + @AvailableSince(NakshaVersion.v2_0_7) + public void attachStreamInfo(final @Nullable StreamInfo streamInfo) { + if (streamInfo == null) return; + attachments().put(StreamInfo.class.getSimpleName(), streamInfo); + } + + /** + * Returns streamInfo object from {@link #attachments()}. + * + * @return the StreamInfo object if available, otherwise null + */ + @AvailableSince(NakshaVersion.v2_0_7) + public @Nullable StreamInfo getStreamInfo() { + final Object o = attachments().get(StreamInfo.class.getSimpleName()); + return (o == null) ? null : (StreamInfo) o; + } + /** * The attachments of this context. 
*/ diff --git a/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/util/IoHelp.java b/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/util/IoHelp.java index b83fce095..2b7b9789e 100644 --- a/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/util/IoHelp.java +++ b/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/util/IoHelp.java @@ -273,7 +273,7 @@ public record LoadedConfig(@NotNull String path, CONFIG config) {} if (path == null) { continue; } - filePath = Paths.get(path).toAbsolutePath(); + filePath = Paths.get(path, filename).toAbsolutePath(); final File file = filePath.toFile(); if (file.exists() && file.isFile() && file.canRead()) { return new LoadedBytes(filePath.toString(), Files.readAllBytes(filePath)); diff --git a/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/util/StreamInfo.java b/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/util/StreamInfo.java new file mode 100644 index 000000000..5cdf38277 --- /dev/null +++ b/here-naksha-lib-core/src/main/java/com/here/naksha/lib/core/util/StreamInfo.java @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2017-2023 HERE Europe B.V. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * License-Filename: LICENSE + */ +package com.here.naksha.lib.core.util; + +import java.util.Objects; +import org.jetbrains.annotations.NotNull; + +public class StreamInfo { + private String spaceId; + private String storageId; + + public void setSpaceId(final String spaceId) { + this.spaceId = spaceId; + } + + public void setStorageId(final String storageId) { + this.storageId = storageId; + } + + public void setSpaceIdIfMissing(final String spaceId) { + if (this.spaceId == null) this.spaceId = spaceId; + } + + public void setStorageIdIfMissing(final String storageId) { + if (this.storageId == null) this.storageId = storageId; + } + + public String getSpaceId() { + return this.spaceId; + } + + public String getStorageId() { + return this.storageId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + StreamInfo that = (StreamInfo) o; + return Objects.equals(spaceId, that.spaceId) && Objects.equals(storageId, that.storageId); + } + + @Override + public int hashCode() { + return Objects.hash(spaceId, storageId); + } + + public @NotNull String toColonSeparatedString() { + return "spaceId=" + ((spaceId == null || spaceId.isEmpty()) ? "-" : spaceId) + ";storageId=" + + ((storageId == null || storageId.isEmpty()) ? 
"-" : storageId); + } +} diff --git a/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/AbstractEventHandler.java b/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/AbstractEventHandler.java index aaee7c596..dd617160d 100644 --- a/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/AbstractEventHandler.java +++ b/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/AbstractEventHandler.java @@ -21,12 +21,15 @@ import com.here.naksha.lib.core.IEvent; import com.here.naksha.lib.core.IEventHandler; import com.here.naksha.lib.core.INaksha; +import com.here.naksha.lib.core.NakshaContext; import com.here.naksha.lib.core.models.XyzError; import com.here.naksha.lib.core.models.naksha.Plugin; import com.here.naksha.lib.core.models.storage.ErrorResult; import com.here.naksha.lib.core.models.storage.Result; import com.here.naksha.lib.core.models.storage.SuccessResult; +import com.here.naksha.lib.core.util.StreamInfo; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; public abstract class AbstractEventHandler implements IEventHandler { @@ -53,4 +56,9 @@ public AbstractEventHandler(final @NotNull INaksha hub) { } return new SuccessResult(); } + + protected void addStorageIdToStreamInfo(final @Nullable String storageId, final @NotNull NakshaContext context) { + final StreamInfo streamInfo = context.getStreamInfo(); + if (streamInfo != null) streamInfo.setStorageIdIfMissing(storageId); + } } diff --git a/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/DefaultStorageHandler.java b/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/DefaultStorageHandler.java index 2d1510a04..816564c12 100644 --- a/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/DefaultStorageHandler.java +++ b/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/DefaultStorageHandler.java @@ -83,6 +83,7 @@ public DefaultStorageHandler( 
logger.error("No storageId configured"); return new ErrorResult(XyzError.NOT_FOUND, "No storageId configured for handler."); } + addStorageIdToStreamInfo(storageId, ctx); // Obtain IStorage implementation using NakshaHub final IStorage storageImpl = nakshaHub().getStorageById(storageId); diff --git a/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForEventHandlers.java b/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForEventHandlers.java index 957376651..5db53382b 100644 --- a/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForEventHandlers.java +++ b/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForEventHandlers.java @@ -27,6 +27,7 @@ import com.here.naksha.lib.core.models.storage.*; import com.here.naksha.lib.core.storage.IReadSession; import com.here.naksha.lib.core.storage.IWriteSession; +import com.here.naksha.lib.psql.PsqlStorage; import org.jetbrains.annotations.NotNull; public class IntHandlerForEventHandlers extends AbstractEventHandler { @@ -46,6 +47,7 @@ public IntHandlerForEventHandlers(final @NotNull INaksha hub) { final NakshaContext ctx = NakshaContext.currentContext(); final Request request = event.getRequest(); // process request using Naksha Admin Storage instance + addStorageIdToStreamInfo(PsqlStorage.ADMIN_STORAGE_ID, ctx); if (request instanceof ReadRequest rr) { try (final IReadSession reader = nakshaHub().getAdminStorage().newReadSession(ctx, false)) { return reader.execute(rr); diff --git a/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForSpaces.java b/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForSpaces.java index e6623ec6c..ed14d3734 100644 --- a/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForSpaces.java +++ b/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForSpaces.java @@ -24,6 +24,7 @@ 
import com.here.naksha.lib.core.models.storage.*; import com.here.naksha.lib.core.storage.IReadSession; import com.here.naksha.lib.core.storage.IWriteSession; +import com.here.naksha.lib.psql.PsqlStorage; import org.jetbrains.annotations.NotNull; public class IntHandlerForSpaces extends AbstractEventHandler { @@ -43,6 +44,7 @@ public IntHandlerForSpaces(final @NotNull INaksha hub) { final NakshaContext ctx = NakshaContext.currentContext(); final Request request = event.getRequest(); // process request using Naksha Admin Storage instance + addStorageIdToStreamInfo(PsqlStorage.ADMIN_STORAGE_ID, ctx); if (request instanceof ReadRequest rr) { try (final IReadSession reader = nakshaHub().getAdminStorage().newReadSession(ctx, false)) { return reader.execute(rr); diff --git a/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForStorages.java b/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForStorages.java index 98269727f..5c3509ec0 100644 --- a/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForStorages.java +++ b/here-naksha-lib-handlers/src/main/java/com/here/naksha/lib/handlers/IntHandlerForStorages.java @@ -25,6 +25,7 @@ import com.here.naksha.lib.core.models.storage.*; import com.here.naksha.lib.core.storage.IReadSession; import com.here.naksha.lib.core.storage.IWriteSession; +import com.here.naksha.lib.psql.PsqlStorage; import org.jetbrains.annotations.NotNull; public class IntHandlerForStorages extends AbstractEventHandler { @@ -44,6 +45,7 @@ public IntHandlerForStorages(final @NotNull INaksha hub) { final NakshaContext ctx = NakshaContext.currentContext(); final Request request = event.getRequest(); // process request using Naksha Admin Storage instance + addStorageIdToStreamInfo(PsqlStorage.ADMIN_STORAGE_ID, ctx); if (request instanceof ReadRequest rr) { try (final IReadSession reader = nakshaHub().getAdminStorage().newReadSession(ctx, false)) { return reader.execute(rr); 
diff --git a/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/NakshaHub.java b/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/NakshaHub.java index 85014a1d3..7e4473775 100644 --- a/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/NakshaHub.java +++ b/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/NakshaHub.java @@ -83,7 +83,7 @@ public NakshaHub( final @Nullable NakshaHubConfig customCfg, final @Nullable String configId) { // create storage instance upfront - this.psqlStorage = new PsqlStorage(config, "naksha-admin-db"); + this.psqlStorage = new PsqlStorage(config, PsqlStorage.ADMIN_STORAGE_ID); this.adminStorageInstance = new NHAdminStorage(this.psqlStorage); this.spaceStorageInstance = new NHSpaceStorage(this, new NakshaEventPipelineFactory(this)); // setup backend storage DB and Hub config diff --git a/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/storages/NHSpaceStorageReader.java b/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/storages/NHSpaceStorageReader.java index 5afa11d07..960b796f0 100644 --- a/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/storages/NHSpaceStorageReader.java +++ b/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/storages/NHSpaceStorageReader.java @@ -30,6 +30,7 @@ import com.here.naksha.lib.core.models.naksha.Space; import com.here.naksha.lib.core.models.storage.*; import com.here.naksha.lib.core.storage.IReadSession; +import com.here.naksha.lib.core.util.StreamInfo; import com.here.naksha.lib.handlers.AuthorizationEventHandler; import com.here.naksha.lib.hub.EventPipelineFactory; import java.util.ArrayList; @@ -190,7 +191,9 @@ public void setLockTimeout(long timeout, @NotNull TimeUnit timeUnit) {} if (rf.getCollections().size() > 1) { throw new UnsupportedOperationException("Reading from multiple spaces not supported!"); } - if (virtualSpaces.containsKey(rf.getCollections().get(0))) { + final String spaceId = rf.getCollections().get(0); + 
addSpaceIdToStreamInfo(spaceId); + if (virtualSpaces.containsKey(spaceId)) { // Request is to read from Naksha Admin space return executeReadFeaturesFromAdminSpaces(rf); } else { @@ -301,4 +304,9 @@ public void setLockTimeout(long timeout, @NotNull TimeUnit timeUnit) {} @Override @ApiStatus.AvailableSince(NakshaVersion.v2_0_7) public void close() {} + + protected void addSpaceIdToStreamInfo(final @Nullable String spaceId) { + final StreamInfo streamInfo = context.getStreamInfo(); + if (streamInfo != null) streamInfo.setSpaceIdIfMissing(spaceId); + } } diff --git a/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/storages/NHSpaceStorageWriter.java b/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/storages/NHSpaceStorageWriter.java index 70cb93eca..621646e81 100644 --- a/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/storages/NHSpaceStorageWriter.java +++ b/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/storages/NHSpaceStorageWriter.java @@ -72,7 +72,9 @@ public NHSpaceStorageWriter( } private @NotNull Result executeWriteFeatures(final @NotNull WriteFeatures wf) { - if (virtualSpaces.containsKey(wf.getCollectionId())) { + final String spaceId = wf.getCollectionId(); + addSpaceIdToStreamInfo(spaceId); + if (virtualSpaces.containsKey(spaceId)) { // Request is to write to Naksha Admin space return executeWriteFeaturesToAdminSpaces(wf); } else { diff --git a/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/util/ConfigUtil.java b/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/util/ConfigUtil.java index a64b981b3..e72f913a4 100644 --- a/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/util/ConfigUtil.java +++ b/here-naksha-lib-hub/src/main/java/com/here/naksha/lib/hub/util/ConfigUtil.java @@ -30,12 +30,18 @@ public class ConfigUtil { private static final Logger logger = LoggerFactory.getLogger(ConfigUtil.class); + public static final String DEF_CFG_PATH_ENV = "NAKSHA_CONFIG_PATH"; public static 
NakshaHubConfig readConfigFile(final @NotNull String configId, final @NotNull String appName) throws IOException { NakshaHubConfig cfg = null; try (final Json json = Json.get()) { - final IoHelp.LoadedBytes loaded = IoHelp.readBytesFromHomeOrResource(configId + ".json", false, appName); + // use the path provided in NAKSHA_CONFIG_PATH (if it is set) + final String envVal = System.getenv(DEF_CFG_PATH_ENV); + final String path = envVal == null || envVal.isEmpty() || "null".equalsIgnoreCase(envVal) ? null : envVal; + // attempt loading config from file + final IoHelp.LoadedBytes loaded = + IoHelp.readBytesFromHomeOrResource(configId + ".json", false, appName, path); cfg = json.reader(ViewDeserialize.Storage.class) .forType(NakshaHubConfig.class) .readValue(loaded.bytes()); diff --git a/here-naksha-lib-psql/src/main/java/com/here/naksha/lib/psql/PsqlStorage.java b/here-naksha-lib-psql/src/main/java/com/here/naksha/lib/psql/PsqlStorage.java index 13245aa1e..9e2a15456 100644 --- a/here-naksha-lib-psql/src/main/java/com/here/naksha/lib/psql/PsqlStorage.java +++ b/here-naksha-lib-psql/src/main/java/com/here/naksha/lib/psql/PsqlStorage.java @@ -47,6 +47,8 @@ @SuppressWarnings({"unused", "SqlResolve"}) public final class PsqlStorage implements IStorage { + public static final String ADMIN_STORAGE_ID = "naksha-admin"; + private static final Logger log = LoggerFactory.getLogger(PsqlStorage.class); private static @NotNull PsqlDataSource dataSourceFromStorage(final @NotNull Storage storage) {