diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8d774885215..71e1cf1a28e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,14 +1,17 @@ * @centreon/owners-cpp +gorgone/ @centreon/owners-perl +perl-libs/ @centreon/owners-perl + +.version* @centreon/owners-pipelines .github/** @centreon/owners-pipelines -packaging/** @centreon/owners-pipelines +**/packaging/** @centreon/owners-pipelines selinux/** @centreon/owners-pipelines tests/** @centreon/owners-robot-e2e +.github/scripts/*robot* @centreon/owners-robot-e2e -gorgone/ @centreon/owners-perl gorgone/docs/ @centreon/owners-doc +gorgone/tests/robot/tests @centreon/owners-robot-e2e -gorgone/tests/robot/config/ @centreon/owners-perl -*.pm @centreon/owners-perl -*.pl @centreon/owners-perl +*.ps1 @centreon/owners-cpp diff --git a/.github/actions/create-jira-ticket/action.yml b/.github/actions/create-jira-ticket/action.yml new file mode 100644 index 00000000000..f05a1e81e1e --- /dev/null +++ b/.github/actions/create-jira-ticket/action.yml @@ -0,0 +1,142 @@ +name: Workflow incident tracking +description: Create Jira ticket on incident + +inputs: + jira_base_url: + required: true + description: jira base url + jira_user_email: + required: true + description: jira user email + jira_api_token: + required: true + description: jira api token + module_name: + required: true + description: module name + ticket_labels: + required: true + description: ticket labels, usually Pipeline + Nightly/Veracode + x + default: 'Pipeline' + +runs: + using: "composite" + steps: + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Get ticket elements from context + id: get_context + run: | + # Safely set/unset IFS in order to parse the table of labels properly + [ -n "${IFS+set}" ] && saved_IFS=$IFS + IFS=', ' read -a ticket_labels <<< $(echo "${{ inputs.ticket_labels }}" | tr -d "[],'") + unset IFS + [ -n "${saved_IFS+set}" ] && { IFS=$saved_IFS; unset saved_IFS; } + + # Change context elements (summary, parent epic, etc.) that is checked depending on the ticket labels in the input + if [[ "${ticket_labels[@]}" =~ "Veracode" ]]; then + parent_epic_id=83818 + parent_epic_key="AT-268" + ticket_summary="PR-${{ github.event.pull_request.number }} incident on ${{ inputs.module_name }}" + + JSON_TEMPLATE_FILE="./.github/actions/create-jira-ticket/veracode-ticket-template.json" + sed -i \ + -e 's|@PULL_REQUEST_NUMBER@|${{ github.event.pull_request.number }}|g' \ + -e 's|@PULL_REQUEST_URL@|${{ github.event.pull_request.html_url }}|g' $JSON_TEMPLATE_FILE + elif [[ "${ticket_labels[@]}" =~ "Nightly" ]]; then + parent_epic_id=206242 + parent_epic_key="MON-151547" + ticket_summary="$(date '+%Y-%m-%d') ${{ inputs.module_name }}-${{ github.ref_name }} nightly build failure" + + JSON_TEMPLATE_FILE="./.github/actions/create-jira-ticket/nightly-ticket-template.json" + sed -i \ + -e 's|@MODULE_NAME@|${{ inputs.module_name }}|g' \ + -e "s|@DATE@|$(date '+%Y-%m-%d')|g" $JSON_TEMPLATE_FILE + else + echo "::error::Cannot find a valid labelling option for the ticket." 
+ exit 1 + fi + + sed -i \ + -e 's|@GITHUB_BRANCH@|${{ github.base_ref || github.ref_name }}|g' \ + -e 's|@GITHUB_SERVER_URL@|${{ github.server_url }}|g' \ + -e 's|@GITHUB_REPOSITORY@|${{ github.repository }}|g' \ + -e 's|@GITHUB_RUN_ID@|${{ github.run_id }}|g' \ + -e 's|@GITHUB_RUN_ATTEMPT@|${{ github.run_attempt }}|g' $JSON_TEMPLATE_FILE + + echo "parent_epic_id=$parent_epic_id" >> $GITHUB_OUTPUT + echo "parent_epic_key=$parent_epic_key" >> $GITHUB_OUTPUT + echo "ticket_summary=$ticket_summary" >> $GITHUB_OUTPUT + echo "json_template_file=$JSON_TEMPLATE_FILE" >> $GITHUB_OUTPUT + + cat $JSON_TEMPLATE_FILE + cat $GITHUB_OUTPUT + shell: bash + env: + GH_TOKEN: ${{ github.token }} + + - name: Check if the ticket already exists + id: check_ticket + run: | + # Checking if an incident ticket already exists + response=$(curl \ + --write-out "%{http_code}" \ + --request POST \ + --url "${{ inputs.jira_base_url }}/rest/api/3/search" \ + --user "${{ inputs.jira_user_email }}:${{ inputs.jira_api_token }}" \ + --header "Accept:application/json" \ + --header "Content-Type:application/json" \ + --data '{ + "fields": ["summary"], + "jql": "project = MON AND parentEpic = ${{ steps.get_context.outputs.parent_epic_key }} AND issueType = Technical AND summary ~ \"${{ steps.get_context.outputs.ticket_summary }}\" AND component = \"${{ inputs.module_name }}\" AND resolution = unresolved ORDER BY key ASC", + "maxResults": 1 + }' + ) + echo "[DEBUG] $response" + if [[ $(echo "$response" | tr -d '\n' | tail -c 3) -ne 200 ]]; then + echo "::error:: Jira API request was not completed properly." + fi + check_if_ticket_exists=$(echo "$response" | head -c -4 | jq .issues[0].key) + if [[ "$check_if_ticket_exists" != "null" ]]; then + echo "abort_ticket_creation=true" >> $GITHUB_ENV + echo "::error::ticket found as $check_if_ticket_exists aborting ticket creation" + fi + shell: bash + + - name: Create Jira Issue + if: ${{ env.abort_ticket_creation != 'true' }} + run: | + # Creating a new incident ticket on Jira + DATA=$( cat <<-EOF + { + "fields": { + "summary": "${{ steps.get_context.outputs.ticket_summary }}", + "project": {"key": "MON"}, + "issuetype": {"id": "10209"}, + "parent": {"id": "${{ steps.get_context.outputs.parent_epic_id }}", "key": "${{ steps.get_context.outputs.parent_epic_key }}"}, + "labels": ${{ inputs.ticket_labels }}, + "components":[{"name": "${{ inputs.module_name }}"}], + "customfield_10902": {"id": "10524", "value": "DevSecOps"}, + "customfield_10005": 1.0, + "description": $(cat ${{ steps.get_context.outputs.json_template_file }}) + } + } + EOF + ) + + response=$(curl \ + --request POST \ + --url "${{ inputs.jira_base_url }}/rest/api/3/issue" \ + --user "${{ inputs.jira_user_email }}:${{ inputs.jira_api_token }}" \ + --header 'Accept: application/json' \ + --header 'Content-Type: application/json' \ + --data "$DATA") + echo $response + if [ $? 
-ne 0 ]; then + echo "::error::Failed to create ticket: $response" + exit 1 + fi + + ticket_key=$(echo "$response" | jq -r .key) + echo "::notice::Created ticket: $ticket_key" + shell: bash diff --git a/.github/actions/create-jira-ticket/nightly-ticket-template.json b/.github/actions/create-jira-ticket/nightly-ticket-template.json new file mode 100644 index 00000000000..aed95eed370 --- /dev/null +++ b/.github/actions/create-jira-ticket/nightly-ticket-template.json @@ -0,0 +1,32 @@ +{ + "version": 1, + "type": "doc", + "content": [ + { + "type": "paragraph", + "content": [ + { + "type": "text", + "text": "This incident ticket relates to the @MODULE_NAME@ nightly on the @GITHUB_BRANCH@ branch which failed on @DATE@." + } + ] + }, + { + "type": "paragraph", + "content": [ + { + "type": "text", + "text": "Link to the failed nightly", + "marks": [ + { + "type": "link", + "attrs": { + "href": "@GITHUB_SERVER_URL@/@GITHUB_REPOSITORY@/actions/runs/@GITHUB_RUN_ID@/attempts/@GITHUB_RUN_ATTEMPT@" + } + } + ] + } + ] + } + ] +} diff --git a/.github/actions/deb-delivery/action.yml b/.github/actions/deb-delivery/action.yml index 46b6c5ec189..1c6a3850ba0 100644 --- a/.github/actions/deb-delivery/action.yml +++ b/.github/actions/deb-delivery/action.yml @@ -22,7 +22,7 @@ inputs: release_type: description: "Type of release (hotfix, release)" required: true - release_cloud: + is_cloud: description: "Release context (cloud or not cloud)" required: true @@ -49,12 +49,12 @@ runs: echo "[DEBUG] - Version: ${{ inputs.version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" - echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - is_cloud: ${{ inputs.is_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z "${{ inputs.is_cloud }}" ]]; then echo "Some mandatory inputs are empty, please check the logs." 
exit 1 fi diff --git a/.github/actions/delivery/action.yml b/.github/actions/delivery/action.yml index 8cbca5c8073..663b1f35549 100644 --- a/.github/actions/delivery/action.yml +++ b/.github/actions/delivery/action.yml @@ -22,7 +22,7 @@ inputs: release_type: description: "Type of release (hotfix, release)" required: true - release_cloud: + is_cloud: description: "Release context (cloud or not cloud)" required: true @@ -63,12 +63,12 @@ runs: echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" - echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - is_cloud: ${{ inputs.is_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z "${{ inputs.is_cloud }}" ]]; then echo "Some mandatory inputs are empty, please check the logs." exit 1 fi @@ -87,32 +87,32 @@ runs: mv "$FILE" "$ARCH" done - # Build upload target path based on release_cloud and release_type values + # Build upload target path based on is_cloud and release_type values # if cloud + hotfix or cloud + release, deliver to internal testing- # if cloud + develop, delivery to internal unstable # if non-cloud, delivery to onprem testing or unstable # CLOUD + HOTFIX + REPO STANDARD INTERNAL OR CLOUD + RELEASE + REPO STANDARD INTERNAL - if [[ ${{ inputs.release_cloud }} -eq 1 && ( ${{ inputs.release_type }} == "hotfix" || ${{ inputs.release_type }} == "release" ) ]]; then + if [[ "${{ inputs.is_cloud }}" == "true" && ( "${{ inputs.release_type }}" == "hotfix" || "${{ inputs.release_type }}" == "release" ) ]]; then echo "[DEBUG] : Release cloud + ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # CLOUD + NOT HOTFIX OR CLOUD + NOT RELEASE + REPO STANDARD INTERNAL - elif [[ ${{ inputs.release_cloud }} -eq 1 && ( ${{ inputs.release_type }} != "hotfix" && ${{ inputs.release_type }} != "release" ) ]]; then + elif [[ "${{ inputs.is_cloud }}" == "true" && ( "${{ inputs.release_type }}" != "hotfix" && "${{ inputs.release_type }}" != "release" ) ]]; then echo "[DEBUG] : Release cloud + NOT ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # NON-CLOUD + (HOTFIX OR RELEASE) + REPO STANDARD - elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then + elif [[ "${{ inputs.is_cloud }}" == "false" ]]; then echo "[DEBUG] : NOT Release cloud + ${{ inputs.release_type }}, using rpm-standard." 
ROOT_REPO_PATHS="rpm-standard" UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" # NOT VALID, DO NOT DELIVER else - echo "::error:: Invalid combination of release_type [${{ inputs.release_type }}] and release_cloud [${{ inputs.release_cloud }}]" + echo "::error:: Invalid combination of release_type [${{ inputs.release_type }}] and is_cloud [${{ inputs.is_cloud }}]" exit 1 fi @@ -141,12 +141,12 @@ runs: echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" - echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - is_cloud: ${{ inputs.is_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z "${{ inputs.is_cloud }}" ]]; then echo "Some mandatory inputs are empty, please check the logs." exit 1 fi diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml index c753cc054d5..e53d698f431 100644 --- a/.github/actions/package/action.yml +++ b/.github/actions/package/action.yml @@ -40,7 +40,6 @@ inputs: stability: description: "branch stability (stable, testing, unstable, canary)" required: true - runs: using: composite @@ -76,7 +75,6 @@ runs: MAJOR_LEFT=$( echo $MAJOR_VERSION | cut -d "." -f1 ) MAJOR_RIGHT=$( echo $MAJOR_VERSION | cut -d "-" -f1 | cut -d "." 
-f2 ) - BUMP_MAJOR_RIGHT=$(( MAJOR_RIGHT_PART + 1 )) if [ "$MAJOR_RIGHT" = "04" ]; then BUMP_MAJOR_LEFT="$MAJOR_LEFT" BUMP_MAJOR_RIGHT="10" @@ -111,11 +109,10 @@ runs: path: ./*.${{ inputs.package_extension }} key: ${{ inputs.cache_key }} - # Update if condition to true to get packages as artifacts - - if: ${{ false }} + - if: ${{ contains(github.event.pull_request.labels.*.name, 'upload-artifacts') }} name: Upload package artifacts - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: - name: ${{ inputs.arch != '' && format('packages-{0}-{1}', inputs.distrib, inputs.arch) || format('packages-{0}', inputs.distrib) }} + name: ${{ inputs.arch != '' && format('packages-{0}-{1}', inputs.distrib, inputs.arch) || format('packages-{0}', inputs.distrib) }}-${{ inputs.stability }} path: ./*.${{ inputs.package_extension}} retention-days: 1 diff --git a/.github/actions/promote-to-stable/action.yml b/.github/actions/promote-to-stable/action.yml index e450b3c4b90..b2def23754f 100644 --- a/.github/actions/promote-to-stable/action.yml +++ b/.github/actions/promote-to-stable/action.yml @@ -22,7 +22,7 @@ inputs: release_type: description: "Type of release (hotfix, release)" required: true - release_cloud: + is_cloud: description: "Release context (cloud or not cloud)" required: true @@ -48,7 +48,7 @@ runs: # DEBUG echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" - echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - is_cloud: ${{ inputs.is_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" # Cloud specific promote @@ -62,15 +62,15 @@ runs: # Search for testing packages candidate for promote for ARCH in "noarch" "x86_64"; do - # Build search path based on release_cloud and release_type values + # Build search path based on is_cloud and release_type values # if cloud, search in testing- path # if non-cloud, search in the testing usual path - if [[ ${{ inputs.release_cloud }} -eq 1 && ${{ inputs.release_type }} == "hotfix" ]] || [[ ${{ inputs.release_cloud }} -eq 1 && ${{ inputs.release_type }} == "release" ]]; then + if [[ "${{ inputs.is_cloud }}" == "true" && "${{ inputs.release_type }}" == "hotfix" ]] || [[ "${{ inputs.is_cloud }}" == "true" && "${{ inputs.release_type }}" == "release" ]]; then SEARCH_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/testing-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}" - elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then + elif [[ "${{ inputs.is_cloud }}" == "false" ]]; then SEARCH_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/testing/$ARCH/${{ inputs.module_name }}" else - echo "Invalid combination of release_type and release_cloud" + echo "Invalid combination of release_type and is_cloud" fi echo "[DEBUG] - Get path of $ARCH testing artifacts to promote to stable." 
@@ -115,7 +115,7 @@ runs: shell: bash - name: Promote DEB packages to stable - if: ${{ contains(fromJSON('["bullseye", "bookworm"]'), inputs.distrib) }} + if: ${{ inputs.is_cloud == 'false' && contains(fromJSON('["bullseye", "bookworm"]'), inputs.distrib) }} run: | set -eux diff --git a/.github/actions/rpm-delivery/action.yml b/.github/actions/rpm-delivery/action.yml index 3174c753300..b1fbc79e2d7 100644 --- a/.github/actions/rpm-delivery/action.yml +++ b/.github/actions/rpm-delivery/action.yml @@ -22,7 +22,7 @@ inputs: release_type: description: "Type of release (hotfix, release)" required: true - release_cloud: + is_cloud: description: "Release context (cloud or not cloud)" required: true @@ -61,12 +61,12 @@ runs: echo "[DEBUG] - Version: ${{ inputs.version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" - echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - is_cloud: ${{ inputs.is_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z "${{ inputs.is_cloud }}" ]]; then echo "Some mandatory inputs are empty, please check the logs." exit 1 fi @@ -85,32 +85,32 @@ runs: mv "$FILE" "$ARCH" done - # Build upload target path based on release_cloud and release_type values + # Build upload target path based on is_cloud and release_type values # if cloud + hotfix or cloud + release, deliver to internal testing- # if cloud + develop, delivery to internal unstable # if non-cloud, delivery to onprem testing or unstable # CLOUD + HOTFIX + REPO STANDARD INTERNAL OR CLOUD + RELEASE + REPO STANDARD INTERNAL - if [[ ${{ inputs.release_cloud }} -eq 1 ]] && ([[ ${{ inputs.release_type }} == "hotfix" ]] || [[ ${{ inputs.release_type }} == "release" ]]); then + if [[ "${{ inputs.is_cloud }}" == "true" ]] && ([[ "${{ inputs.release_type }}" == "hotfix" ]] || [[ "${{ inputs.release_type }}" == "release" ]]); then echo "[DEBUG] : Release cloud + ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # CLOUD + NOT HOTFIX OR CLOUD + NOT RELEASE + REPO STANDARD INTERNAL - elif [[ ${{ inputs.release_cloud }} -eq 1 ]] && ([[ ${{ inputs.release_type }} != "hotfix" ]] || [[ ${{ inputs.release_type }} != "release" ]]); then + elif [[ "${{ inputs.is_cloud }}" == "true" ]] && ([[ "${{ inputs.release_type }}" != "hotfix" ]] || [[ "${{ inputs.release_type }}" != "release" ]]); then echo "[DEBUG] : Release cloud + NOT ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # NON-CLOUD + (HOTFIX OR RELEASE) + REPO STANDARD - elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then + elif [[ "${{ inputs.is_cloud }}" == "false" ]]; then echo "[DEBUG] : NOT Release cloud + ${{ inputs.release_type }}, using rpm-standard." 
ROOT_REPO_PATHS="rpm-standard" UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" # ANYTHING ELSE else - echo "::error:: Invalid combination of release_type [${{ inputs.release_type }}] and release_cloud [${{ inputs.release_cloud }}]" + echo "::error:: Invalid combination of release_type [${{ inputs.release_type }}] and is_cloud [${{ inputs.is_cloud }}]" exit 1 fi diff --git a/.github/docker/Dockerfile.centreon-collect-alma8 b/.github/docker/Dockerfile.centreon-collect-alma8 index a5592ac59ef..6da1feea67f 100644 --- a/.github/docker/Dockerfile.centreon-collect-alma8 +++ b/.github/docker/Dockerfile.centreon-collect-alma8 @@ -54,6 +54,8 @@ dnf install -y cmake \ libssh2-devel \ libcurl-devel \ zlib-devel \ + perl-HTTP-Daemon-SSL.noarch \ + perl-JSON \ sudo dnf update libarchive diff --git a/.github/docker/Dockerfile.centreon-collect-alma9 b/.github/docker/Dockerfile.centreon-collect-alma9 index d2ae761de8d..a00ca951489 100644 --- a/.github/docker/Dockerfile.centreon-collect-alma9 +++ b/.github/docker/Dockerfile.centreon-collect-alma9 @@ -35,6 +35,7 @@ dnf --best install -y cmake \ lua-devel \ make \ perl-ExtUtils-Embed.noarch \ + perl-JSON \ python3 \ python3-pip \ perl-Thread-Queue \ @@ -49,6 +50,7 @@ dnf --best install -y cmake \ libssh2-devel \ libcurl-devel \ zlib-devel \ + perl-HTTP-Daemon-SSL.noarch \ sudo git clone --depth 1 -b 2024.01.12 https://github.com/Microsoft/vcpkg.git diff --git a/.github/docker/Dockerfile.centreon-collect-alma9-test b/.github/docker/Dockerfile.centreon-collect-alma9-test index 7e0bde0c59d..e3755b0056e 100644 --- a/.github/docker/Dockerfile.centreon-collect-alma9-test +++ b/.github/docker/Dockerfile.centreon-collect-alma9-test @@ -27,6 +27,7 @@ dnf --best install -y \ libgcrypt \ lua \ perl-ExtUtils-Embed \ + perl-JSON \ python3 \ python3-devel \ python3-pip \ @@ -36,6 +37,7 @@ dnf --best install -y \ procps-ng \ zstd \ psmisc \ + perl-HTTP-Daemon-SSL.noarch \ sudo dnf clean all diff --git a/.github/docker/Dockerfile.centreon-collect-debian-bookworm b/.github/docker/Dockerfile.centreon-collect-debian-bookworm index d395c7cba52..965c3888be5 100644 --- a/.github/docker/Dockerfile.centreon-collect-debian-bookworm +++ b/.github/docker/Dockerfile.centreon-collect-debian-bookworm @@ -11,38 +11,41 @@ RUN bash -e < + $oProcessInfo = New-Object System.Diagnostics.ProcessStartInfo + $oProcessInfo.FileName = $sProcess + $oProcessInfo.RedirectStandardError = $true + $oProcessInfo.RedirectStandardOutput = $true + $oProcessInfo.UseShellExecute = $false + $oProcessInfo.Arguments = $sArgs + $oProcess = New-Object System.Diagnostics.Process + $oProcess.StartInfo = $oProcessInfo + $oProcess.Start() | Out-Null + $oProcess.WaitForExit() | Out-Null + $sSTDOUT = $oProcess.StandardOutput.ReadToEnd() + $sSTDERR = $oProcess.StandardError.ReadToEnd() + $pOutPut.Value = "Commandline: $sProcess $sArgs`r`n" + $pOutPut.Value += "STDOUT: " + $sSTDOUT + "`r`n" + $pOutPut.Value += "STDERR: " + $sSTDERR + "`r`n" + return $oProcess.ExitCode +} -function test_args_to_registry { -<# +function test_args_to_registry ([string] $exe_path, [string[]] $exe_args, $expected_registry_values) { + <# .SYNOPSIS start a program and check values in registry @@ -33,108 +69,183 @@ function test_args_to_registry { .PARAMETER expected_registry_values hash_table as @{'host'='host_1';'endpoint'='127.0.0.1'} #> - param ( - [string] $exe_path, - [string[]] $exe_args, - $expected_registry_values - ) - Write-Host "arguments: $exe_args" + Write-Host 
"execute $exe_path arguments: $exe_args" - $process_info= Start-Process -PassThru $exe_path $exe_args - Wait-Process -Id $process_info.Id - if ($process_info.ExitCode -ne 0) { + $process_output = @{} + $exit_code = f_start_process $exe_path $exe_args ([ref]$process_output) + + if ($exit_code -ne 0) { Write-Host "fail to execute $exe_path with arguments $exe_args" Write-Host "exit status = " $process_info.ExitCode exit 1 } - foreach ($value_name in $expected_registry_values.Keys) { - $expected_value = $($expected_registry_values[$value_name]) - $real_value = (Get-ItemProperty -Path HKLM:\Software\Centreon\CentreonMonitoringAgent -Name $value_name).$value_name - if ($expected_value -ne $real_value) { - Write-Host "unexpected value for $value_name, expected: $expected_value, read: $real_value" - exit 1 + for (($i = 0); $i -lt 10; $i++) { + Start-Sleep -Seconds 1 + try { + Get-ItemProperty -Path HKLM:\Software\Centreon\CentreonMonitoringAgent + break + } + catch { + continue + } + } + + for (($i = 0); $i -lt 10; $i++) { + Start-Sleep -Seconds 1 + $read_success = 1 + foreach ($value_name in $expected_registry_values.Keys) { + $expected_value = $($expected_registry_values[$value_name]) + try { + $real_value = (Get-ItemProperty -Path HKLM:\Software\Centreon\CentreonMonitoringAgent -Name $value_name).$value_name + if ($expected_value -ne $real_value) { + Write-Host "unexpected value for $value_name, expected: $expected_value, read: $real_value" + $read_success = 0 + break + } + } + catch { + $read_success = 0 + break + } + } + if ($read_success -eq 1) { + break } } + + if ($read_success -ne 1) { + Write-Host "fail to read expected registry values in 10s installer output: $process_output" + exit 1 + } } -Write-Host "############################ all install uninstall ############################" +function test_all_silent_install_uninstall([string]$plugins_flag) { + <# + .SYNOPSIS + test all silent install uninstall -$args = '/S','--install_cma', '--install_plugins', '--hostname', "my_host_name_1", "--endpoint","127.0.0.1:4317" -$expected = @{ 'endpoint'='127.0.0.1:4317';'host'='my_host_name_1';'log_type'='EventLog'; 'log_level' = 'error'; 'encryption' = 0;'reversed_grpc_streaming'= 0 } -test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected + .DESCRIPTION + test all silent install uninstall -if (!(Get-ItemProperty -Path HKLM:\Software\Centreon\CentreonMonitoringAgent)) { - Write-Host "no registry entry created" - exit 1 -} + .PARAMETER plugins_flag + Can be --install_plugins or --install_embedded_plugins -Get-Process | Select-Object -Property ProcessName | Select-String centagent + #> -$info = Get-Process | Select-Object -Property ProcessName | Select-String centagent -#$info = Get-Process centagent 2>$null -if (!$info) { - Write-Host "centagent.exe not started" - exit 1 -} -if (![System.Io.File]::Exists("C:\Program Files\Centreon\Plugins\centreon_plugins.exe")) { - Write-Host "centreon_plugins.exe not installed" - exit 1 -} + Write-Host "############################ all install uninstall with flag: $plugins_flag ############################" -$process_info= Start-Process -PassThru "C:\Program Files\Centreon\CentreonMonitoringAgent\uninstall.exe" "/S", "--uninstall_cma","--uninstall_plugins" -Wait-Process -Id $process_info.Id -if ($process_info.ExitCode -ne 0) { - Write-Host "bad uninstaller exit code" - exit 1 -} + $exe_args = '/S', '--install_cma', $plugins_flag, '--hostname', "my_host_name_1", "--endpoint", "127.0.0.1:4317" + $expected = @{ 'endpoint' = 
'127.0.0.1:4317'; 'host' = 'my_host_name_1'; 'log_type' = 'event-log'; 'log_level' = 'error'; 'encryption' = 0; 'reversed_grpc_streaming' = 0 } + test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $exe_args $expected -Start-Sleep -Seconds 5 + if (!(Get-ItemProperty -Path HKLM:\Software\Centreon\CentreonMonitoringAgent)) { + Write-Host "no registry entry created" + exit 1 + } -Get-Process | Select-Object -Property ProcessName | Select-String centagent + Get-Process | Select-Object -Property ProcessName | Select-String centagent -$info = Get-Process | Select-Object -Property ProcessName | Select-String centagent -#$info = Get-Process centagent 2>$null -if ($info) { - Write-Host "centagent.exe running" - exit 1 -} + $info = Get-Process | Select-Object -Property ProcessName | Select-String centagent -if ([System.Io.File]::Exists("C:\Program Files\Centreon\Plugins\centreon_plugins.exe")) { - Write-Host "centreon_plugins.exe not removed" - exit 1 -} + #$info = Get-Process centagent 2>$null + if (!$info) { + Write-Host "centagent.exe not started" + exit 1 + } -Write-Host "The followind command will output errors, don't take it into account" -#the only mean I have found to test key erasure under CI -#Test-Path doesn't work -$key_found = true -try { - Get-ChildItem -Path HKLM:\Software\Centreon\CentreonMonitoringAgent -} -catch { - $key_found = false -} + if (![System.Io.File]::Exists("C:\Program Files\Centreon\Plugins\centreon_plugins.exe")) { + Write-Host "centreon_plugins.exe not installed" + exit 1 + } -if ($key_found) { - Write-Host "registry entry not removed" - exit 1 + $process_info = Start-Process -PassThru "C:\Program Files\Centreon\CentreonMonitoringAgent\uninstall.exe" "/S", "--uninstall_cma", "--uninstall_plugins" + Wait-Process -Id $process_info.Id + if ($process_info.ExitCode -ne 0) { + Write-Host "bad uninstaller exit code" + exit 1 + } + + + for (($i = 0); $i -lt 10; $i++) { + Start-Sleep -Seconds 1 + $info = Get-Process | Select-Object -Property ProcessName | Select-String centagent + if (! 
$info) { + break + } + } + + if ($info) { + Write-Host "centagent.exe running" + exit 1 + } + + if ([System.Io.File]::Exists("C:\Program Files\Centreon\Plugins\centreon_plugins.exe")) { + Write-Host "centreon_plugins.exe not removed" + exit 1 + } + + Write-Host "The following command will output errors, don't take them into account" + #the only means I have found to test key erasure under CI + #Test-Path doesn't work + $key_found = $true + try { + Get-ChildItem -Path HKLM:\Software\Centreon\CentreonMonitoringAgent + } + catch { + $key_found = $false + } + + if ($key_found) { + Write-Host "registry entry not removed" + exit 1 + } + + if ($info) { + Write-Host "centagent.exe running" + exit 1 + } + + if ([System.Io.File]::Exists("C:\Program Files\Centreon\Plugins\centreon_plugins.exe")) { + Write-Host "centreon_plugins.exe not removed" + exit 1 + } + + Write-Host "The following command will output errors, don't take them into account" + #the only means I have found to test key erasure under CI + #Test-Path doesn't work + $key_found = $true + try { + Get-ChildItem -Path HKLM:\Software\Centreon\CentreonMonitoringAgent + } + catch { + $key_found = $false + } + + if ($key_found) { + Write-Host "registry entry not removed" + exit 1 + } + + Start-Sleep -Seconds 10 } +test_all_silent_install_uninstall("--install_plugins") +test_all_silent_install_uninstall("--install_embedded_plugins") Write-Host "############################ installer test ############################" -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--help" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--help" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 2) { Write-Host "bad --help exit code" exit 1 } -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--version" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--version" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 2) { Write-Host "bad --version exit code" @@ -142,70 +253,70 @@ if ($process_info.ExitCode -ne 2) { } #missing mandatory parameters -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 1) { Write-Host "bad no parameter exit code " $process_info.ExitCode exit 1 } -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma", "--hostname", "toto" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 1) { Write-Host "bad no endpoint exit code " $process_info.ExitCode exit 1 } -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","turlututu" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma", "--hostname", "toto", "--endpoint", "turlututu" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 1) { Write-Host "bad wrong endpoint exit code " $process_info.ExitCode exit 1 } -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S",
"--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--log_type","file" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma", "--hostname", "toto", "--endpoint", "127.0.0.1:4317", "--log_type", "file" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 1) { Write-Host "bad no log file path " $process_info.ExitCode exit 1 } -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--log_type","file","--log_file","C:" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma", "--hostname", "toto", "--endpoint", "127.0.0.1:4317", "--log_type", "file", "--log_file", "C:" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 1) { Write-Host "bad log file path " $process_info.ExitCode exit 1 } -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--log_level","dsfsfd" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma", "--hostname", "toto", "--endpoint", "127.0.0.1:4317", "--log_level", "dsfsfd" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 1) { Write-Host "bad log level " $process_info.ExitCode exit 1 } -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--reverse","--log_type","file","--log_file","C:\Users\Public\cma.log","--encryption" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma", "--hostname", "toto", "--endpoint", "127.0.0.1:4317", "--reverse", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--encryption" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 1) { Write-Host "reverse mode, encryption and no private_key " $process_info.ExitCode exit 1 } -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--reverse","--log_type","file","--log_file","C:\Users\Public\cma.log","--encryption","--private_key","C:" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma", "--hostname", "toto", "--endpoint", "127.0.0.1:4317", "--reverse", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--encryption", "--private_key", "C:" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 1) { Write-Host "reverse mode, encryption and bad private_key path" $process_info.ExitCode exit 1 } -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--reverse","--log_type","file","--log_file","C:\Users\Public\cma.log","--encryption","--private_key","C:\Users\Public\private_key.key" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma", "--hostname", "toto", "--endpoint", "127.0.0.1:4317", "--reverse", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--encryption", "--private_key", "C:\Users\Public\private_key.key" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 1) { Write-Host "reverse mode, encryption and no 
certificate" $process_info.ExitCode exit 1 } -$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--reverse","--log_type","file","--log_file","C:\Users\Public\cma.log","--encryption","--private_key","C:\Users\Public\private_key.key", "--public_cert", "C:" +$process_info = Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma", "--hostname", "toto", "--endpoint", "127.0.0.1:4317", "--reverse", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--encryption", "--private_key", "C:\Users\Public\private_key.key", "--public_cert", "C:" Wait-Process -Id $process_info.Id if ($process_info.ExitCode -ne 1) { Write-Host "reverse mode, encryption and bad certificate path" $process_info.ExitCode @@ -213,52 +324,52 @@ if ($process_info.ExitCode -ne 1) { } -$args = '/S','--install_cma','--hostname', "my_host_name_1", "--endpoint","127.0.0.1:4317" -$expected = @{ 'endpoint'='127.0.0.1:4317';'host'='my_host_name_1';'log_type'='EventLog'; 'log_level' = 'error'; 'encryption' = 0;'reversed_grpc_streaming'= 0 } -test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected +$exe_args = '/S', '--install_cma', '--hostname', "my_host_name_1", "--endpoint", "127.0.0.1:4317" +$expected = @{ 'endpoint' = '127.0.0.1:4317'; 'host' = 'my_host_name_1'; 'log_type' = 'event-log'; 'log_level' = 'error'; 'encryption' = 0; 'reversed_grpc_streaming' = 0 } +test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $exe_args $expected -$args = '/S','--install_cma','--hostname', "my_host_name_2", "--endpoint","127.0.0.2:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--log_level", "trace", "--log_max_file_size", "15", "--log_max_files", "10" -$expected = @{ 'endpoint'='127.0.0.2:4317';'host'='my_host_name_2';'log_type'='File'; 'log_level' = 'trace'; 'log_file'='C:\Users\Public\cma.log'; 'encryption' = 0;'reversed_grpc_streaming'= 0; 'log_max_file_size' = 15; 'log_max_files' = 10; } -test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected +$exe_args = '/S', '--install_cma', '--hostname', "my_host_name_2", "--endpoint", "127.0.0.2:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--log_level", "trace", "--log_max_file_size", "15", "--log_max_files", "10" +$expected = @{ 'endpoint' = '127.0.0.2:4317'; 'host' = 'my_host_name_2'; 'log_type' = 'File'; 'log_level' = 'trace'; 'log_file' = 'C:\Users\Public\cma.log'; 'encryption' = 0; 'reversed_grpc_streaming' = 0; 'log_max_file_size' = 15; 'log_max_files' = 10; } +test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $exe_args $expected -$args = '/S','--install_cma','--hostname', "my_host_name_2", "--endpoint","127.0.0.3:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--log_level", "trace", "--encryption" -$expected = @{ 'endpoint'='127.0.0.3:4317';'host'='my_host_name_2';'log_type'='File'; 'log_level' = 'trace'; 'log_file'='C:\Users\Public\cma.log'; 'encryption' = 1;'reversed_grpc_streaming'= 0 } -test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected +$exe_args = '/S', '--install_cma', '--hostname', "my_host_name_2", "--endpoint", "127.0.0.3:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--log_level", "trace", "--encryption" +$expected = @{ 'endpoint' = '127.0.0.3:4317'; 'host' = 'my_host_name_2'; 'log_type' = 'File'; 
'log_level' = 'trace'; 'log_file' = 'C:\Users\Public\cma.log'; 'encryption' = 1; 'reversed_grpc_streaming' = 0 } +test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $exe_args $expected -$args = '/S','--install_cma','--hostname', "my_host_name_2", "--endpoint","127.0.0.4:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--log_level", "trace", "--encryption", "--private_key", "C:\Users crypto\private.key", "--public_cert", "D:\tutu\titi.crt", "--ca", "C:\Users\Public\ca.crt", "--ca_name", "tls_ca_name" -$expected = @{ 'endpoint'='127.0.0.4:4317';'host'='my_host_name_2';'log_type'='File'; 'log_level' = 'trace'; 'log_file'='C:\Users\Public\cma.log'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi.crt'; 'private_key'='C:\Users crypto\private.key'; 'ca_certificate' = 'C:\Users\Public\ca.crt'; 'ca_name' = 'tls_ca_name' } -test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected +$exe_args = '/S', '--install_cma', '--hostname', "my_host_name_2", "--endpoint", "127.0.0.4:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--log_level", "trace", "--encryption", "--private_key", "C:\Users crypto\private.key", "--public_cert", "D:\tutu\titi.crt", "--ca", "C:\Users\Public\ca.crt", "--ca_name", "tls_ca_name" +$expected = @{ 'endpoint' = '127.0.0.4:4317'; 'host' = 'my_host_name_2'; 'log_type' = 'File'; 'log_level' = 'trace'; 'log_file' = 'C:\Users\Public\cma.log'; 'encryption' = 1; 'reversed_grpc_streaming' = 0; 'certificate' = 'D:\tutu\titi.crt'; 'private_key' = 'C:\Users crypto\private.key'; 'ca_certificate' = 'C:\Users\Public\ca.crt'; 'ca_name' = 'tls_ca_name' } +test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $exe_args $expected -$args = '/S','--install_cma','--hostname', "my_host_name_2", "--endpoint","127.0.0.5:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma_rev.log", "--log_level", "trace", "--encryption", "--reverse", "--private_key", "C:\Users crypto\private_rev.key", "--public_cert", "D:\tutu\titi_rev.crt", "--ca", "C:\Users\Public\ca_rev.crt", "--ca_name", "tls_ca_name_rev" -$expected = @{ 'endpoint'='127.0.0.5:4317';'host'='my_host_name_2';'log_type'='File'; 'log_level' = 'trace'; 'log_file'='C:\Users\Public\cma_rev.log'; 'encryption' = 1;'reversed_grpc_streaming'= 1; 'certificate'='D:\tutu\titi_rev.crt'; 'private_key'='C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' } -test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected +$exe_args = '/S', '--install_cma', '--hostname', "my_host_name_2", "--endpoint", "127.0.0.5:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma_rev.log", "--log_level", "trace", "--encryption", "--reverse", "--private_key", "C:\Users crypto\private_rev.key", "--public_cert", "D:\tutu\titi_rev.crt", "--ca", "C:\Users\Public\ca_rev.crt", "--ca_name", "tls_ca_name_rev" +$expected = @{ 'endpoint' = '127.0.0.5:4317'; 'host' = 'my_host_name_2'; 'log_type' = 'File'; 'log_level' = 'trace'; 'log_file' = 'C:\Users\Public\cma_rev.log'; 'encryption' = 1; 'reversed_grpc_streaming' = 1; 'certificate' = 'D:\tutu\titi_rev.crt'; 'private_key' = 'C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' } +test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $exe_args $expected Write-Host "############################ modifier test ############################" 
-$args = '/S','--hostname', "my_host_name_10", "--endpoint","127.0.0.10:4317", "--no_reverse" -$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='File'; 'log_level' = 'trace'; 'log_file'='C:\Users\Public\cma_rev.log'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev.crt'; 'private_key'='C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' } -test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected +$exe_args = '/S', '--hostname', "my_host_name_10", "--endpoint", "127.0.0.10:4317", "--no_reverse" +$expected = @{ 'endpoint' = '127.0.0.10:4317'; 'host' = 'my_host_name_10'; 'log_type' = 'File'; 'log_level' = 'trace'; 'log_file' = 'C:\Users\Public\cma_rev.log'; 'encryption' = 1; 'reversed_grpc_streaming' = 0; 'certificate' = 'D:\tutu\titi_rev.crt'; 'private_key' = 'C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' } +test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $exe_args $expected -$args = '/S',"--log_type", "file", "--log_file", "C:\Users\Public\cma_rev2.log", "--log_level", "debug", "--log_max_file_size", "50", "--log_max_files", "20" -$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='File'; 'log_level' = 'debug'; 'log_file'='C:\Users\Public\cma_rev2.log'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev.crt'; 'log_max_file_size' = 50; 'log_max_files' = 20;'private_key'='C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' } -test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected +$exe_args = '/S', "--log_type", "file", "--log_file", "C:\Users\Public\cma_rev2.log", "--log_level", "debug", "--log_max_file_size", "50", "--log_max_files", "20" +$expected = @{ 'endpoint' = '127.0.0.10:4317'; 'host' = 'my_host_name_10'; 'log_type' = 'File'; 'log_level' = 'debug'; 'log_file' = 'C:\Users\Public\cma_rev2.log'; 'encryption' = 1; 'reversed_grpc_streaming' = 0; 'certificate' = 'D:\tutu\titi_rev.crt'; 'log_max_file_size' = 50; 'log_max_files' = 20; 'private_key' = 'C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' } +test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $exe_args $expected -$args = '/S',"--log_type", "EventLog", "--log_level", "error" -$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='event-log'; 'log_level' = 'error'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev.crt'; 'private_key'='C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' } -test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected +$exe_args = '/S', "--log_type", "event-log", "--log_level", "error" +$expected = @{ 'endpoint' = '127.0.0.10:4317'; 'host' = 'my_host_name_10'; 'log_type' = 'event-log'; 'log_level' = 'error'; 'encryption' = 1; 'reversed_grpc_streaming' = 0; 'certificate' = 'D:\tutu\titi_rev.crt'; 'private_key' = 'C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' } +test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $exe_args $expected -$args = '/S',"--private_key", "C:\Users 
crypto\private_rev2.key", "--public_cert", "D:\tutu\titi_rev2.crt" -$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='event-log'; 'log_level' = 'error'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev2.crt'; 'private_key'='C:\Users crypto\private_rev2.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' } -test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected +$exe_args = '/S', "--private_key", "C:\Users crypto\private_rev2.key", "--public_cert", "D:\tutu\titi_rev2.crt" +$expected = @{ 'endpoint' = '127.0.0.10:4317'; 'host' = 'my_host_name_10'; 'log_type' = 'event-log'; 'log_level' = 'error'; 'encryption' = 1; 'reversed_grpc_streaming' = 0; 'certificate' = 'D:\tutu\titi_rev2.crt'; 'private_key' = 'C:\Users crypto\private_rev2.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' } +test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $exe_args $expected -$args = '/S',"--ca", "C:\Users\Public\ca_rev2.crt", "--ca_name", "tls_ca_name_rev2" -$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='event-log'; 'log_level' = 'error'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev2.crt'; 'private_key'='C:\Users crypto\private_rev2.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev2.crt'; 'ca_name' = 'tls_ca_name_rev2' } -test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected +$exe_args = '/S', "--ca", "C:\Users\Public\ca_rev2.crt", "--ca_name", "tls_ca_name_rev2" +$expected = @{ 'endpoint' = '127.0.0.10:4317'; 'host' = 'my_host_name_10'; 'log_type' = 'event-log'; 'log_level' = 'error'; 'encryption' = 1; 'reversed_grpc_streaming' = 0; 'certificate' = 'D:\tutu\titi_rev2.crt'; 'private_key' = 'C:\Users crypto\private_rev2.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev2.crt'; 'ca_name' = 'tls_ca_name_rev2' } +test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $exe_args $expected -$args = '/S',"--no_encryption" -$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='event-log'; 'log_level' = 'error'; 'encryption' = 0;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev2.crt'; 'private_key'='C:\Users crypto\private_rev2.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev2.crt'; 'ca_name' = 'tls_ca_name_rev2' } -test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected +$exe_args = '/S', "--no_encryption" +$expected = @{ 'endpoint' = '127.0.0.10:4317'; 'host' = 'my_host_name_10'; 'log_type' = 'event-log'; 'log_level' = 'error'; 'encryption' = 0; 'reversed_grpc_streaming' = 0; 'certificate' = 'D:\tutu\titi_rev2.crt'; 'private_key' = 'C:\Users crypto\private_rev2.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev2.crt'; 'ca_name' = 'tls_ca_name_rev2' } +test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $exe_args $expected diff --git a/.github/scripts/agent_robot_test.ps1 b/.github/scripts/agent_robot_test.ps1 index 37345457f0a..9719550041a 100644 --- a/.github/scripts/agent_robot_test.ps1 +++ b/.github/scripts/agent_robot_test.ps1 @@ -34,7 +34,7 @@ Write-Host "Work in" $pwd.ToString() $current_dir = (pwd).Path -$wsl_path = "/mnt/" + $current_dir.SubString(0,1).ToLower() + "/" + $current_dir.SubString(3).replace('\','/') +$wsl_path = "/mnt/" + $current_dir.SubString(0, 1).ToLower() + "/" + 
$current_dir.SubString(3).replace('\', '/') mkdir reports @@ -82,7 +82,7 @@ Start-Sleep -Seconds 1 Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name ca_certificate -Value "" Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name encryption -Value 0 Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name endpoint -Value 0.0.0.0:4320 -Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name reverse_connection -Value 1 +Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name reversed_grpc_streaming -Value 1 $agent_log_path = $current_dir + "\reports\reverse_centagent.log" Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name log_file -Value $agent_log_path @@ -100,7 +100,45 @@ Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name lo Start-Process -FilePath build_windows\agent\Release\centagent.exe -ArgumentList "--standalone" -RedirectStandardOutput reports\encrypted_reversed_centagent_stdout.log -RedirectStandardError reports\encrypted_reversed_centagent_stderr.log -wsl cd $wsl_path `&`& .github/scripts/wsl-collect-test-robot.sh broker-engine/cma.robot $my_host_name $my_ip $pwsh_path ${current_dir}.replace('\','/') +$uptime = (Get-WmiObject -Class Win32_OperatingSystem).LastBootUpTime #dtmf format +$d_uptime = [Management.ManagementDateTimeConverter]::ToDateTime($uptime) #datetime format +$ts_uptime = ([DateTimeOffset]$d_uptime).ToUnixTimeSeconds() #timestamp format + +$systeminfo_data = systeminfo /FO CSV | ConvertFrom-Csv +$snapshot = @{ + 'total' = $systeminfo_data.'Total Physical Memory' + 'free' = $systeminfo_data.'Available Physical Memory' + 'virtual_max' = $systeminfo_data.'Virtual Memory: Max Size' + 'virtual_free' = $systeminfo_data.'Virtual Memory: Available' +} + +$serv_list = Get-Service + +$serv_stat = @{ + 'services.running.count' = ($serv_list | Where-Object { $_.Status -eq "Running" } | measure).Count + 'services.stopped.count' = ($serv_list | Where-Object { $_.Status -eq "stopped" } | measure).Count +} + +$test_param = @{ + 'host' = $my_host_name + 'ip' = $my_ip + 'wsl_path' = $wsl_path + 'pwsh_path' = $pwsh_path + 'drive' = @() + 'current_dir' = $current_dir.replace('\', '/') + 'uptime' = $ts_uptime + 'mem_info' = $snapshot + 'serv_stat' = $serv_stat +} + +Get-PSDrive -PSProvider FileSystem | Select Name, Used, Free | ForEach-Object -Process { $test_param.drive += $_ } + +$json_test_param = $test_param | ConvertTo-Json -Compress + +Write-Host "json_test_param" $json_test_param +$quoted_json_test_param = "'" + $json_test_param + "'" + +wsl cd $wsl_path `&`& .github/scripts/wsl-collect-test-robot.sh broker-engine/cma.robot $quoted_json_test_param #something wrong in robot test => exit 1 => failure if (Test-Path -Path 'reports\windows-cma-failed' -PathType Container) { diff --git a/.github/scripts/wsl-collect-test-robot.sh b/.github/scripts/wsl-collect-test-robot.sh index ba54ad04fb0..6f2376c6828 100755 --- a/.github/scripts/wsl-collect-test-robot.sh +++ b/.github/scripts/wsl-collect-test-robot.sh @@ -4,10 +4,12 @@ set -x test_file=$1 export RUN_ENV=WSL -export HOST_NAME=$2 -export USED_ADDRESS=$3 -export PWSH_PATH=$4 -export WINDOWS_PROJECT_PATH=$5 +export JSON_TEST_PARAMS=$2 +export USED_ADDRESS=`echo $JSON_TEST_PARAMS | jq -r .ip` +export HOST_NAME=`echo $JSON_TEST_PARAMS | jq -r .host` +export PWSH_PATH=`echo $JSON_TEST_PARAMS | jq -r .pwsh_path` +export WINDOWS_PROJECT_PATH=`echo $JSON_TEST_PARAMS | jq -r 
.current_dir` + #in order to connect to windows we neeed to use windows ip @@ -17,7 +19,7 @@ echo "${USED_ADDRESS} ${HOST_NAME}" >> /etc/hosts echo "##### /etc/hosts: ######" cat /etc/hosts -echo "##### Starting tests #####" +echo "##### Starting tests ##### with params: $JSON_TEST_PARAMS" cd tests ./init-proto.sh diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml index 1966692a01a..1f76a89cc31 100644 --- a/.github/workflows/actionlint.yml +++ b/.github/workflows/actionlint.yml @@ -23,7 +23,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Download actionlint id: get_actionlint @@ -47,9 +47,9 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: '3.12' diff --git a/.github/workflows/centreon-collect.yml b/.github/workflows/centreon-collect.yml index d59282d0fe9..5fe81e19fbc 100644 --- a/.github/workflows/centreon-collect.yml +++ b/.github/workflows/centreon-collect.yml @@ -1,7 +1,7 @@ name: Centreon collect run-name: | ${{ - (github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.is_nightly == 'true')) + (github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.nightly_manual_trigger == 'true')) && format('collect nightly {0}', github.ref_name) || '' }} @@ -13,7 +13,7 @@ concurrency: on: workflow_dispatch: inputs: - is_nightly: + nightly_manual_trigger: description: 'Set to true for nightly run' required: true default: false @@ -21,7 +21,7 @@ on: legacy_engine: description: 'Compile Engine with legacy configuration library' required: true - default: true + default: false type: boolean packages_in_artifact: description: 'Save packages in artifacts' @@ -47,7 +47,7 @@ on: - custom-triplets/** - engine/** - grpc/** - - packaging/** + - packaging/centreon-collect/** - cmake.sh - cmake-vcpkg.sh - CMakeLists.txt @@ -55,6 +55,7 @@ on: - vcpkg.json - overlays/** - selinux/** + - "!selinux/centreon-common/**" - "!.veracode-exclusions" - "!veracode.json" - "!**/test/**" @@ -74,7 +75,7 @@ on: - custom-triplets/** - engine/** - grpc/** - - packaging/** + - packaging/centreon-collect/** - cmake.sh - cmake-vcpkg.sh - CMakeLists.txt @@ -82,78 +83,87 @@ on: - vcpkg.json - overlays/** - selinux/** + - "!selinux/centreon-common/**" - "!.veracode-exclusions" - "!veracode.json" - "!**/test/**" jobs: - dispatch-to-maintained-branches: - if: ${{ github.event_name == 'schedule' && github.ref_name == 'develop' }} - runs-on: ubuntu-24.04 - steps: - - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - - run: | - gh workflow run robot-nightly.yml -r "dev-24.04.x" - gh workflow run robot-nightly.yml -r "dev-23.10.x" - gh workflow run robot-nightly.yml -r "dev-23.04.x" - gh workflow run robot-nightly.yml -r "dev-22.10.x" - shell: bash - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - get-version: - uses: ./.github/workflows/get-version.yml + # dispatch-to-maintained-branches: + # if: ${{ github.run_attempt == 1 && github.event_name == 
'schedule' && github.ref_name == 'develop' }} + # runs-on: ubuntu-24.04 + # steps: + # - name: Checkout sources + # uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + # - name: Check current day of the week + # id: day_check + # run: echo "day_of_week=$(date +%u)" >> $GITHUB_OUTPUT + # shell: bash + + # - if: ${{ steps.day_check.outputs.day_of_week == '1' }} + # run: | + # NIGHTLY_TARGETS=("dev-22.10.x" "dev-23.04.x" "dev-23.10.x" "dev-24.04.x" "dev-24.10.x") + # for target in "${NIGHTLY_TARGETS[@]}"; do + # echo "[INFO] - Dispatching nightly run to $target branch." + # gh workflow run centreon-collect.yml -r "$target" -f is_nightly=true + # done + # shell: bash + # env: + # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + get-environment: + uses: ./.github/workflows/get-environment.yml with: version_file: CMakeLists.txt + nightly_manual_trigger: ${{ inputs.nightly_manual_trigger || false }} veracode-analysis: - needs: [get-version] + needs: [get-environment] if: ${{ github.event_name == 'schedule' && github.ref_name == 'develop' }} uses: ./.github/workflows/veracode-analysis.yml with: module_name: centreon-collect - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} - img_version: ${{ needs.get-version.outputs.img_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + img_version: ${{ needs.get-environment.outputs.img_version }} secrets: veracode_api_id: ${{ secrets.VERACODE_API_ID_COLL }} veracode_api_key: ${{ secrets.VERACODE_API_KEY_COLL }} veracode_srcclr_token: ${{ secrets.VERACODE_SRCCLR_TOKEN }} - docker_registry_id: ${{ secrets.DOCKER_REGISTRY_ID }} - docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + docker_registry_id: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + docker_registry_passwd: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} unit-test: - needs: [get-version] - if: ${{ github.event.inputs.unit_tests == 'true' && ! 
contains(fromJson('["stable"]'), needs.get-version.outputs.stability) }} - + needs: [get-environment] + if: | + github.event.inputs.unit_tests == 'true' && + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' strategy: fail-fast: false matrix: distrib: [alma8, alma9, debian-bookworm] - runs-on: [self-hosted, collect] - env: SCCACHE_PATH: "/usr/bin/sccache" SCCACHE_BUCKET: "centreon-github-sccache" SCCACHE_REGION: "eu-west-1" AWS_ACCESS_KEY_ID: ${{ secrets.COLLECT_S3_ACCESS_KEY }} AWS_SECRET_ACCESS_KEY: ${{ secrets.COLLECT_S3_SECRET_KEY }} - LEGACY_ENGINE: ${{ github.event.inputs.legacy_engine != 'false' && 'ON' || 'OFF' }} + LEGACY_ENGINE: ${{ github.event.inputs.legacy_engine != 'true' && 'OFF' || 'ON' }} container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/centreon-collect-${{ matrix.distrib }}:${{ needs.get-version.outputs.img_version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/centreon-collect-${{ matrix.distrib }}:${{ needs.get-environment.outputs.img_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: unit test ${{ matrix.distrib }} steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install sccache run: | @@ -239,8 +249,10 @@ jobs: shell: bash package: - needs: [get-version] - if: ${{ ! contains(fromJson('["stable"]'), needs.get-version.outputs.stability) }} + needs: [get-environment] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' strategy: fail-fast: false matrix: @@ -260,11 +272,6 @@ jobs: package_extension: deb runner: collect arch: amd64 - - image: centreon-collect-ubuntu-jammy - distrib: jammy - package_extension: deb - runner: collect - arch: amd64 - image: centreon-collect-debian-bookworm-arm64 distrib: bookworm package_extension: deb @@ -274,28 +281,43 @@ jobs: uses: ./.github/workflows/package-collect.yml with: - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} - img_version: ${{ needs.get-version.outputs.img_version }} - release: ${{ needs.get-version.outputs.release }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + img_version: ${{ needs.get-environment.outputs.img_version }} + release: ${{ needs.get-environment.outputs.release }} commit_hash: ${{ github.sha }} - stability: ${{ needs.get-version.outputs.stability }} - legacy_engine: ${{ github.event.inputs.legacy_engine != 'false' && 'ON' || 'OFF' }} + stability: ${{ needs.get-environment.outputs.stability }} + legacy_engine: ${{ github.event.inputs.legacy_engine != 'true' && 'OFF' || 'ON' }} + build_debug_packages: ${{ needs.get-environment.outputs.stability == 'testing' || contains(github.event.pull_request.labels.*.name, 'build-debug-packages') }} packages_in_artifact: ${{ github.event.inputs.packages_in_artifact == 'true' }} image: ${{ matrix.image }} distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} runner: ${{ matrix.runner }} arch: ${{ matrix.arch }} - secrets: inherit + is_nightly: ${{ needs.get-environment.outputs.is_nightly }} + secrets: + 
collect_s3_access_key: ${{ secrets.COLLECT_S3_ACCESS_KEY }} + collect_s3_secret_key: ${{ secrets.COLLECT_S3_SECRET_KEY }} + registry_username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + registry_password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + jira_base_url: ${{ secrets.JIRA_BASE_URL }} + jira_user_email: ${{ secrets.XRAY_JIRA_USER_EMAIL }} + jira_api_token: ${{ secrets.XRAY_JIRA_TOKEN }} robot-test: - needs: [get-version, package] + needs: [get-environment, package] if: | - (github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.is_nightly == 'true')) && + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' && + needs.get-environment.outputs.is_nightly == 'true' && ! cancelled() && ! contains(needs.*.result, 'failure') && - ! contains(needs.*.result, 'cancelled') + ! contains(needs.*.result, 'cancelled') && + github.repository == 'centreon/centreon-collect' strategy: fail-fast: false @@ -344,34 +366,39 @@ jobs: distrib: ${{ matrix.distrib }} arch: ${{ matrix.arch }} image: ${{ matrix.image }} - image_test: ${{ matrix.image }}:${{ needs.get-version.outputs.test_img_version }} - image_version: ${{ needs.get-version.outputs.img_version }} + image_test: ${{ matrix.image }}:${{ needs.get-environment.outputs.test_img_version }} + image_version: ${{ needs.get-environment.outputs.img_version }} package_cache_key: ${{ github.run_id }}-${{ github.sha }}-${{ matrix.package_extension }}-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} - package_cache_path: ./*.${{ matrix.package_extension}} + package_cache_path: ./*.${{ matrix.package_extension }} database_type: ${{ matrix.database_type }} - tests_params: ${{matrix.tests_params}} - test_group_name: ${{matrix.test_group_name}} + tests_params: ${{ matrix.tests_params }} + test_group_name: ${{ matrix.test_group_name }} + is_nightly: ${{ needs.get-environment.outputs.is_nightly }} secrets: - registry_username: ${{ secrets.DOCKER_REGISTRY_ID }} - registry_password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + registry_username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + registry_password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} collect_s3_access_key: ${{ secrets.COLLECT_S3_ACCESS_KEY }} collect_s3_secret_key: ${{ secrets.COLLECT_S3_SECRET_KEY }} xray_client_id: ${{ secrets.XRAY_CLIENT_ID }} xray_client_secret: ${{ secrets.XRAY_CLIENT_SECRET }} + jira_base_url: ${{ secrets.JIRA_BASE_URL }} + jira_user_email: ${{ secrets.XRAY_JIRA_USER_EMAIL }} + jira_api_token: ${{ secrets.XRAY_JIRA_TOKEN }} deliver-sources: runs-on: [self-hosted, common] - needs: [get-version, package] + needs: [get-environment, package] if: | github.event_name != 'workflow_dispatch' && - contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && + needs.get-environment.outputs.stability == 'stable' && ! cancelled() && ! contains(needs.*.result, 'failure') && - ! contains(needs.*.result, 'cancelled') + ! 
contains(needs.*.result, 'cancelled') && + github.repository == 'centreon/centreon-collect' steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 with: path: centreon-collect @@ -381,18 +408,20 @@ jobs: bucket_directory: centreon-collect module_directory: centreon-collect module_name: centreon-collect - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} token_download_centreon_com: ${{ secrets.TOKEN_DOWNLOAD_CENTREON_COM }} deliver-rpm: if: | - contains(fromJson('["unstable", "testing"]'), needs.get-version.outputs.stability) && + needs.get-environment.outputs.skip_workflow == 'false' && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && ! cancelled() && ! contains(needs.*.result, 'failure') && - ! contains(needs.*.result, 'cancelled') - needs: [get-version, robot-test] - environment: ${{ needs.get-version.outputs.environment }} + ! contains(needs.*.result, 'cancelled') && + github.repository == 'centreon/centreon-collect' + + needs: [get-environment, robot-test] runs-on: [self-hosted, common] strategy: matrix: @@ -406,28 +435,43 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Publish RPM packages uses: ./.github/actions/delivery with: module_name: collect distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} + + - name: Create Jira ticket if nightly build failure + if: | + needs.get-environment.outputs.is_nightly == 'true' && github.run_attempt == 1 && + failure() && + startsWith(github.ref_name, 'dev') + uses: ./.github/actions/create-jira-ticket + with: + jira_base_url: ${{ secrets.JIRA_BASE_URL }} + jira_user_email: ${{ secrets.XRAY_JIRA_USER_EMAIL }} + jira_api_token: ${{ secrets.XRAY_JIRA_TOKEN }} + module_name: "centreon-collect" + ticket_labels: '["Nightly", "Pipeline", "nightly-${{ github.ref_name }}", "${{ github.job }}"]' deliver-deb: if: | - contains(fromJson('["unstable", "testing"]'), needs.get-version.outputs.stability) && + needs.get-environment.outputs.skip_workflow == 'false' && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && ! cancelled() && ! contains(needs.*.result, 'failure') && - ! contains(needs.*.result, 'cancelled') - needs: [get-version, robot-test] - environment: ${{ needs.get-version.outputs.environment }} + ! 
contains(needs.*.result, 'cancelled') && + github.repository == 'centreon/centreon-collect' + + needs: [get-environment, robot-test] runs-on: [self-hosted, common] strategy: matrix: @@ -439,27 +483,44 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Publish DEB packages uses: ./.github/actions/delivery with: module_name: collect distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} + + - name: Create Jira ticket if nightly build failure + if: | + needs.get-environment.outputs.is_nightly == 'true' && github.run_attempt == 1 && + failure() && + startsWith(github.ref_name, 'dev') && + github.repository == 'centreon/centreon-collect' + uses: ./.github/actions/create-jira-ticket + with: + jira_base_url: ${{ secrets.JIRA_BASE_URL }} + jira_user_email: ${{ secrets.XRAY_JIRA_USER_EMAIL }} + jira_api_token: ${{ secrets.XRAY_JIRA_TOKEN }} + module_name: "centreon-collect" + ticket_labels: '["Nightly", "Pipeline", "nightly-${{ github.ref_name }}", "${{ github.job }}"]' promote: - needs: [get-version, deliver-rpm, deliver-deb] + needs: [get-environment, deliver-rpm, deliver-deb] if: | - (contains(fromJson('["stable", "testing"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch') && + needs.get-environment.outputs.skip_workflow == 'false' && + (contains(fromJson('["stable", "testing"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch') && ! cancelled() && ! contains(needs.*.result, 'failure') && - ! contains(needs.*.result, 'cancelled') + ! 
contains(needs.*.result, 'cancelled') && + github.repository == 'centreon/centreon-collect' + runs-on: [self-hosted, common] strategy: matrix: @@ -467,7 +528,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Promote ${{ matrix.distrib }} to stable uses: ./.github/actions/promote-to-stable @@ -475,8 +536,17 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: collect distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} - stability: ${{ needs.get-version.outputs.stability }} + major_version: ${{ needs.get-environment.outputs.major_version }} + stability: ${{ needs.get-environment.outputs.stability }} github_ref_name: ${{ github.ref_name }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} + + set-skip-label: + needs: [get-environment, deliver-rpm, deliver-deb, promote] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + uses: ./.github/workflows/set-pull-request-skip-label.yml diff --git a/.github/workflows/centreon-common.yml b/.github/workflows/centreon-common.yml new file mode 100644 index 00000000000..167dbeeb5c7 --- /dev/null +++ b/.github/workflows/centreon-common.yml @@ -0,0 +1,191 @@ +name: centreon-common + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - "packaging/centreon-common/**" + - "selinux/centreon-common/**" + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - "packaging/centreon-common/**" + - "selinux/centreon-common/**" + +jobs: + get-environment: + uses: ./.github/workflows/get-environment.yml + with: + version_file: .version.centreon-common + + package: + needs: [get-environment] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' + + strategy: + fail-fast: false + matrix: + include: + - package_extension: rpm + image: centreon-collect-alma8 + distrib: el8 + - package_extension: rpm + image: centreon-collect-alma9 + distrib: el9 + - package_extension: deb + image: centreon-collect-debian-bookworm + distrib: bookworm + + runs-on: ubuntu-24.04 + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.img_version }} + credentials: + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} + + name: package ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Generate selinux binaries + if: ${{ matrix.package_extension == 'rpm' }} + run: | + cd selinux/centreon-common + sed -i "s/@VERSION@/${{ needs.get-environment.outputs.major_version }}.${{ needs.get-environment.outputs.minor_version }}/g" *.te + make -f /usr/share/selinux/devel/Makefile + shell: bash + + - name: Remove selinux packaging files on debian + if: ${{ matrix.package_extension == 'deb' }} + run: rm -f 
packaging/centreon-common/*-selinux.yaml + shell: bash + + - name: Package Centreon + uses: ./.github/actions/package + with: + nfpm_file_pattern: "packaging/centreon-common/*.yaml" + distrib: ${{ matrix.distrib }} + package_extension: ${{ matrix.package_extension }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + release: ${{ needs.get-environment.outputs.release }} + arch: all + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-environment.outputs.stability }} + + deliver-rpm: + runs-on: [self-hosted, common] + needs: [get-environment, package] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + + strategy: + matrix: + distrib: [el8, el9] + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Delivery + uses: ./.github/actions/rpm-delivery + with: + module_name: common + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-environment.outputs.major_version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} + + deliver-deb: + runs-on: [self-hosted, common] + needs: [get-environment, package] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + + strategy: + matrix: + distrib: [bookworm] + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Delivery + uses: ./.github/actions/deb-delivery + with: + module_name: common + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-environment.outputs.major_version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} + + promote: + needs: [get-environment, deliver-deb, deliver-rpm] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + (contains(fromJson('["stable", "testing"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch') && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') && + github.repository == 'centreon/centreon-collect' + + runs-on: [self-hosted, common] + strategy: + matrix: + distrib: [el8, el9, bookworm] + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Promote ${{ matrix.distrib }} to stable + uses: ./.github/actions/promote-to-stable + with: + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + module_name: common + distrib: ${{ matrix.distrib }} + major_version: ${{ needs.get-environment.outputs.major_version }} + stability: ${{ needs.get-environment.outputs.stability }} + github_ref_name: ${{ github.ref_name }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} + + set-skip-label: + needs: [get-environment, deliver-rpm, deliver-deb, promote] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + uses: ./.github/workflows/set-pull-request-skip-label.yml diff --git a/.github/workflows/check-status.yml b/.github/workflows/check-status.yml index cd7a383daef..501e355f9a8 100644 --- a/.github/workflows/check-status.yml +++ b/.github/workflows/check-status.yml @@ -86,8 +86,10 @@ jobs: core.summary.addList(failedCheckRuns); core.summary.write() - core.setFailed(`${failure.length} workflow(s) failed`); - return; + if (failedCheckRuns.length > 0) { + core.setFailed(`${failedCheckRuns.length} job(s) failed`); + return; + } } if (pending.length === 1) { @@ -101,3 +103,80 @@ jobs: } core.setFailed("Timeout: some jobs are still in progress"); + + get-environment: + if: | + contains(fromJSON('["pull_request", "pull_request_target"]') , github.event_name) && + (startsWith(github.base_ref, 'release-') || startsWith(github.base_ref, 'hotfix-')) + uses: ./.github/workflows/get-environment.yml + with: + version_file: CMakeLists.txt + + check-cherry-pick: + needs: [get-environment, check-status] + runs-on: ubuntu-24.04 + if: | + contains(fromJSON('["pull_request", "pull_request_target"]') , github.event_name) && + needs.get-environment.outputs.target_stability == 'testing' && + ! 
contains(needs.get-environment.outputs.labels, 'skip-cherry-pick') + + steps: + - name: Check if the PR is a cherry-pick from dev branch + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + let linkedPrs = []; + let errorMessage = `This pull request is not a cherry-pick from ${{ needs.get-environment.outputs.linked_dev_branch }} or has no reference to a pull request which has been merged on ${{ needs.get-environment.outputs.linked_dev_branch }}\n`; + + try { + const pull = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number + }); + + const { title, body } = pull.data; + + [title, body].forEach((text) => { + const linkedPrMatches = text.matchAll(/(?:#|\/pull\/)(\d+)/g); + if (linkedPrMatches) { + [...linkedPrMatches].forEach((match) => { + linkedPrs.push(Number(match[1])); + }); + } + }); + + // remove duplicates + linkedPrs = [...new Set(linkedPrs)]; + console.log(`Linked pull requests found in PR title and body: ${linkedPrs.join(', ')}`); + } catch (e) { + throw new Error(`Failed to get information of pull request #${context.issue.number}: ${e}`); + } + + for await (const prNumber of linkedPrs) { + try { + const pull = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber + }); + + if (pull.data.base.ref === '${{ needs.get-environment.outputs.linked_dev_branch }}') { + if (pull.data.state === 'closed' && pull.data.merged === true) { + console.log(`This pull request is a cherry-pick from pull request #${prNumber} on ${{ needs.get-environment.outputs.linked_dev_branch }}`); + return; + } else { + errorMessage += `This pull request seems to be a cherry-pick from pull request #${prNumber} on ${{ needs.get-environment.outputs.linked_dev_branch }} but it is not merged yet\n`; + } + } else { + errorMessage += `Pull request #${prNumber} is linked to ${pull.data.base.ref} instead of ${{ needs.get-environment.outputs.linked_dev_branch }}\n`; + } + } catch (e) { + errorMessage += `Failed to get information on pull request #${prNumber}: ${e}\n`; + } + } + + errorMessage += `\nIf you are sure this PR does not need to be a cherry-pick from ${{ needs.get-environment.outputs.linked_dev_branch }} or must be merged urgently, `; + errorMessage += `open the pull request on ${{ needs.get-environment.outputs.linked_dev_branch }} and add label "skip-cherry-pick" to the PR and re-run all jobs of workflow check-status\n`; + + throw new Error(errorMessage); diff --git a/.github/workflows/docker-builder.yml b/.github/workflows/docker-builder.yml index 469b29e404d..ef149f95b91 100644 --- a/.github/workflows/docker-builder.yml +++ b/.github/workflows/docker-builder.yml @@ -17,13 +17,17 @@ on: - '.github/docker/Dockerfile.centreon-collect-*' jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + if: github.repository == 'centreon/centreon-collect' + uses: ./.github/workflows/get-environment.yml with: version_file: CMakeLists.txt - create-and-push-docker: - needs: [get-version] + dockerize: + needs: [get-environment] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' strategy: fail-fast: false @@ -32,59 +36,59 @@ jobs: - runner: collect dockerfile: centreon-collect-alma8 image: centreon-collect-alma8 - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect dockerfile: 
centreon-collect-alma9 image: centreon-collect-alma9 - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect dockerfile: centreon-collect-alma9-test image: centreon-collect-alma9-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} - runner: collect dockerfile: centreon-collect-mysql-alma9 image: centreon-collect-mysql-alma9 - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect dockerfile: centreon-collect-mysql-alma9-test image: centreon-collect-mysql-alma9-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} - runner: collect dockerfile: centreon-collect-debian-bullseye image: centreon-collect-debian-bullseye - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect dockerfile: centreon-collect-debian-bullseye-test image: centreon-collect-debian-bullseye-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} - runner: collect dockerfile: centreon-collect-debian-bookworm image: centreon-collect-debian-bookworm - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect dockerfile: centreon-collect-debian-bookworm-test image: centreon-collect-debian-bookworm-test - tag: ${{ needs.get-version.outputs.test_img_version }} - - runner: collect - dockerfile: centreon-collect-ubuntu-jammy - image: centreon-collect-ubuntu-jammy - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} + # - runner: collect + # dockerfile: centreon-collect-ubuntu-jammy + # image: centreon-collect-ubuntu-jammy + # tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect-arm64 dockerfile: centreon-collect-debian-bullseye image: centreon-collect-debian-bullseye-arm64 - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect-arm64 dockerfile: centreon-collect-debian-bullseye-test image: centreon-collect-debian-bullseye-arm64-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} - runner: collect-arm64 dockerfile: centreon-collect-debian-bookworm image: centreon-collect-debian-bookworm-arm64 - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect-arm64 dockerfile: centreon-collect-debian-bookworm-test image: centreon-collect-debian-bookworm-arm64-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', matrix.runner)) }} @@ -92,7 +96,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Login to Registry uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 @@ -105,13 +109,13 @@ jobs: uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ${{ vars.DOCKER_PROXY_REGISTRY_URL }} - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ 
secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PUSH_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PUSH_TOKEN }} - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Build image ${{ matrix.image }}:${{ matrix.tag }} - uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 + uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 with: file: .github/docker/Dockerfile.${{ matrix.dockerfile }} context: . @@ -122,3 +126,12 @@ jobs: pull: true push: true tags: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ matrix.tag }} + + set-skip-label: + needs: [get-environment, dockerize] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + uses: ./.github/workflows/set-pull-request-skip-label.yml diff --git a/.github/workflows/docker-gorgone-testing.yml b/.github/workflows/docker-gorgone-testing.yml index 36e7d1b6094..396ca164538 100644 --- a/.github/workflows/docker-gorgone-testing.yml +++ b/.github/workflows/docker-gorgone-testing.yml @@ -19,11 +19,15 @@ on: - ".github/workflows/docker-gorgone-testing.yml" jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + if: github.repository == 'centreon/centreon-collect' + uses: ./.github/workflows/get-environment.yml dockerize: - needs: [get-version] + needs: [get-environment] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' runs-on: ubuntu-24.04 strategy: @@ -54,12 +58,20 @@ jobs: file: .github/docker/Dockerfile.gorgone-testing-${{ matrix.distrib }} context: . build-args: | - "REGISTRY_URL=${{ vars.DOCKER_PROXY_REGISTRY_URL }}" - "VERSION=${{ needs.get-version.outputs.major_version }}" - "IS_CLOUD=${{ needs.get-version.outputs.release_cloud }}" + "VERSION=${{ needs.get-environment.outputs.major_version }}" + "IS_CLOUD=${{ needs.get-environment.outputs.is_cloud }}" pull: true push: true - tags: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/gorgone-testing-${{ matrix.distrib }}:${{ needs.get-version.outputs.major_version }} + tags: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/gorgone-testing-${{ matrix.distrib }}:${{ needs.get-environment.outputs.major_version }} secrets: | "ARTIFACTORY_INTERNAL_REPO_USERNAME=${{ secrets.ARTIFACTORY_INTERNAL_REPO_USERNAME }}" "ARTIFACTORY_INTERNAL_REPO_PASSWORD=${{ secrets.ARTIFACTORY_INTERNAL_REPO_PASSWORD }}" + + set-skip-label: + needs: [get-environment, dockerize] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') + uses: ./.github/workflows/set-pull-request-skip-label.yml diff --git a/.github/workflows/get-environment.yml b/.github/workflows/get-environment.yml new file mode 100644 index 00000000000..7861921acea --- /dev/null +++ b/.github/workflows/get-environment.yml @@ -0,0 +1,473 @@ +on: + workflow_call: + inputs: + version_file: + required: false + type: string + default: CMakeLists.txt + nightly_manual_trigger: + required: false + type: boolean + outputs: + latest_major_version: + description: "latest major version" + value: ${{ jobs.get-environment.outputs.latest_major_version }} + is_cloud: + description: "context of release (cloud or not cloud)" + value: ${{ jobs.get-environment.outputs.is_cloud }} + linked_dev_branch: + description: "linked develop branch" + value: ${{ jobs.get-environment.outputs.linked_dev_branch }} + linked_stable_branch: + description: "linked stable branch" + value: ${{ jobs.get-environment.outputs.linked_stable_branch }} + major_version: + description: "major version" + value: ${{ jobs.get-environment.outputs.major_version }} + minor_version: + description: "minor version" + value: ${{ jobs.get-environment.outputs.minor_version }} + release: + description: "release" + value: ${{ jobs.get-environment.outputs.release }} + stability: + description: "branch stability (stable, testing, unstable, canary)" + value: ${{ jobs.get-environment.outputs.stability }} + target_stability: + description: "Final target branch stability (stable, testing, unstable, canary or not defined if not a pull request)" + value: ${{ jobs.get-environment.outputs.target_stability }} + release_type: + description: "type of release (hotfix, release or not defined if not a release)" + value: ${{ jobs.get-environment.outputs.release_type }} + is_targeting_feature_branch: + description: "if it is a PR, check if targeting a feature branch" + value: ${{ jobs.get-environment.outputs.is_targeting_feature_branch }} + is_nightly: + description: "if the current workflow run is considered a nightly" + value: ${{ jobs.get-environment.outputs.is_nightly }} + img_version: + description: "docker image version (vcpkg checksum)" + value: ${{ jobs.get-environment.outputs.img_version }} + test_img_version: + description: "test docker image version (checksum of database sql, script and dockerfiles)" + value: ${{ jobs.get-environment.outputs.test_img_version }} + skip_workflow: + description: "if the current workflow should be skipped" + value: ${{ jobs.get-environment.outputs.skip_workflow }} + labels: + description: "list of labels on the PR" + value: ${{ jobs.get-environment.outputs.labels }} + +jobs: + get-environment: + runs-on: ubuntu-24.04 + outputs: + latest_major_version: ${{ steps.latest_major_version.outputs.latest_major_version }} + is_cloud: ${{ steps.detect_cloud_version.outputs.result }} + linked_dev_branch: ${{ steps.linked_branches.outputs.linked_dev_branch }} + linked_stable_branch: ${{ steps.linked_branches.outputs.linked_stable_branch }} + major_version: ${{ steps.get_version.outputs.major_version }} + minor_version: ${{ steps.get_version.outputs.minor_version }} + release: ${{ steps.get_release.outputs.release }} + stability: ${{ steps.get_stability.outputs.stability }} + target_stability: ${{ steps.get_stability.outputs.target_stability }} + release_type: ${{ steps.get_release_type.outputs.release_type }} + is_targeting_feature_branch: ${{ steps.get_stability.outputs.is_targeting_feature_branch }} + is_nightly: ${{ steps.get_nightly_status.outputs.is_nightly }} 
+ img_version: ${{ steps.get_docker_images_version.outputs.img_version }} + test_img_version: ${{ steps.get_docker_images_version.outputs.test_img_version }} + skip_workflow: ${{ steps.skip_workflow.outputs.result }} + labels: ${{ steps.has_skip_label.outputs.labels }} + + steps: + - name: Check if PR has skip label + id: has_skip_label + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + let hasSkipLabel = false; + let labels = []; + + if (${{ contains(fromJSON('["pull_request", "pull_request_target"]') , github.event_name) }} === true) { + try { + const fetchedLabels = await github.rest.issues.listLabelsOnIssue({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number + }); + fetchedLabels.data.forEach(({ name }) => { + labels.push(name); + if (name === '${{ format('skip-workflow-{0}', github.workflow) }}') { + hasSkipLabel = true; + } + }); + } catch (e) { + core.warning(`failed to list labels: ${e}`); + } + } + + core.setOutput('labels', labels); + + return hasSkipLabel; + + - name: Checkout sources (current branch) + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + with: + fetch-depth: ${{ steps.has_skip_label.outputs.result == 'true' && 100 || 1 }} + + # get latest major version to detect cloud / on-prem versions + - name: Checkout sources (develop branch) + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + with: + ref: develop + path: centreon-develop + sparse-checkout: .version + + - if: ${{ steps.has_skip_label.outputs.result == 'true' }} + name: Get workflow triggered paths + id: get_workflow_triggered_paths + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const fs = require('fs'); + + let paths = []; + + const workflowFilePath = '${{ github.workflow_ref }}'.replace('${{ github.repository }}/', '').split('@').shift(); + + if (fs.existsSync(workflowFilePath)) { + const workflowFileContent = fs.readFileSync(workflowFilePath, 'utf8'); + const workflowFileContentLines = workflowFileContent.split('\n'); + + let hasReadOn = false; + let hasReadPullRequest = false; + let hasReadPaths = false; + for (const line of workflowFileContentLines) { + if (line.match(/^on:\s*$/)) { + hasReadOn = true; + continue; + } + if (line.match(/^\s{2}pull_request(_target)?:\s*$/)) { + hasReadPullRequest = true; + continue; + } + if (line.match(/^\s{4}paths:\s*$/)) { + hasReadPaths = true; + continue; + } + + if (hasReadOn && hasReadPullRequest && hasReadPaths) { + const matches = line.match(/^\s{6}-\s['"](.+)['"]\s*$/); + if (matches) { + paths.push(matches[1].trim()); + } else { + break; + } + } + } + } + + if (paths.length === 0) { + paths = ['**']; + } + + console.log(paths); + + return paths; + + - if: ${{ steps.has_skip_label.outputs.result == 'true' }} + name: Get push changes + id: get_push_changes + uses: tj-actions/changed-files@bab30c2299617f6615ec02a68b9a40d10bd21366 # v45.0.5 + with: + since_last_remote_commit: true + json: true + escape_json: false + files: ${{ join(fromJSON(steps.get_workflow_triggered_paths.outputs.result), ';') }} + files_separator: ';' + + - name: Check if current workflow should be skipped + id: skip_workflow + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + if (/^bump-.+/.test('${{ github.head_ref || github.ref_name }}')) { + core.notice('skipping workflow because it is a bump branch'); + return true; + } + + if (${{ 
steps.has_skip_label.outputs.result }} === false) { + return false; + } + + const label = '${{ format('skip-workflow-{0}', github.workflow) }}'; + if ('${{ steps.get_push_changes.outputs.any_changed }}' === 'true') { + try { + await github.rest.issues.removeLabel({ + name: label, + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number + }); + core.notice(`label ${label} removed because changes were detected on last push.`); + } catch (e) { + core.warning(`failed to remove label ${label}: ${e}`); + } + + return false; + } + + return true; + + - name: Store latest major version + id: latest_major_version + run: | + . centreon-develop/.version + echo "latest_major_version=$MAJOR" >> $GITHUB_OUTPUT + shell: bash + + - if: ${{ github.event_name == 'pull_request' }} + name: Get nested pull request path + id: pr_path + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const prPath = ['${{ github.head_ref }}', '${{ github.base_ref }}']; + + const result = await github.rest.pulls.list({ + owner: context.repo.owner, + repo: context.repo.repo, + per_page: 100, + state: 'open' + }); + + let found = true; + while (found) { + found = false; + result.data.forEach(({ head: { ref: headRef }, base: { ref: baseRef} }) => { + if (headRef === prPath[prPath.length - 1] && ! prPath.includes(baseRef)) { + found = true; + prPath.push(baseRef); + } + }); + } + + return prPath; + + - name: Get stability + id: get_stability + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const getStability = (branchName) => { + switch (true) { + case /(^develop$)|(^dev-\d{2}\.\d{2}\.x$)|(^prepare-release-cloud.*)/.test(branchName): + return 'unstable'; + case /(^release.+)|(^hotfix.+)/.test(branchName): + return 'testing'; + case /(^master$)|(^\d{2}\.\d{2}\.x$)/.test(branchName): + return 'stable'; + default: + return 'canary'; + } + }; + + core.setOutput('stability', getStability('${{ github.head_ref || github.ref_name }}')); + + let isTargetingFeatureBranch = false; + if ("${{ github.event_name }}" === "pull_request") { + let targetStability = 'canary'; + const prPath = ${{ steps.pr_path.outputs.result || '[]' }}; + prPath.shift(); // remove current branch + + if (prPath.length && getStability(prPath[0]) === 'canary') { + isTargetingFeatureBranch = true; + } + + prPath.every((branchName) => { + console.log(`checking stability of ${branchName}`) + targetStability = getStability(branchName); + + if (targetStability !== 'canary') { + return false; + } + + return true; + }); + + core.setOutput('target_stability', targetStability); + } + + core.setOutput('is_targeting_feature_branch', isTargetingFeatureBranch); + + - name: Get version from ${{ inputs.version_file }} + id: get_version + run: | + if [[ "${{ inputs.version_file }}" == *.version* ]]; then + . .version + . ${{ inputs.version_file }} + VERSION="$MAJOR.$MINOR" + elif [[ "${{ inputs.version_file }}" == CMakeLists.txt ]]; then + MAJOR=$(awk '$1 ~ "COLLECT_MAJOR" {maj=substr($2, 1, length($2)-1)} $1 ~ "COLLECT_MINOR" {min=substr($2, 1, length($2)-1) ; print maj "." 
min}' CMakeLists.txt) + MINOR=$(awk '$1 ~ "COLLECT_PATCH" {print substr($2, 1, length($2) - 1)}' CMakeLists.txt) + VERSION="$MAJOR.$MINOR" + else + echo "Unable to parse version file ${{ inputs.version_file }}" + exit 1 + fi + + if grep -E '^[2-9][0-9]\.[0-9][0-9]\.[0-9]+' <<<"$VERSION" >/dev/null 2>&1 ; then + n=${VERSION//[!0-9]/ } + a=(${n//\./ }) + echo "major_version=${a[0]}.${a[1]}" >> $GITHUB_OUTPUT + MAJOR=${a[0]}.${a[1]} + echo "minor_version=${a[2]}" >> $GITHUB_OUTPUT + else + echo "Cannot parse version number from ${{ inputs.version_file }}" + exit 1 + fi + shell: bash + + - name: "Get release: 1 for testing / stable, . for others" + id: get_release + run: | + if [[ "${{ steps.get_stability.outputs.stability }}" == "testing" || "${{ steps.get_stability.outputs.stability }}" == "stable" ]]; then + RELEASE="1" + else + RELEASE="$(date +%s).$(echo ${{ github.sha }} | cut -c -7)" + fi + + echo "release=$RELEASE" >> $GITHUB_OUTPUT + shell: bash + + - name: "Get release type: hotfix, release or not defined if not a release" + id: get_release_type + run: | + RELEASE_TYPE=$(echo "${{ github.head_ref || github.ref_name }}" | cut -d '-' -f 1) + if [[ "$RELEASE_TYPE" == "hotfix" || "$RELEASE_TYPE" == "release" ]]; then + echo "release_type=$RELEASE_TYPE" >> $GITHUB_OUTPUT + fi + shell: bash + + - name: "Detect cloud version" + id: detect_cloud_version + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + if ("${{ github.ref_name }}" === "master") { + return true; + } + + if ("${{ github.event_name }}" === "pull_request") { + const prPath = ${{ steps.pr_path.outputs.result || '[]' }}; + const finalTargetBranch = prPath.pop(); + if (['develop', 'master'].includes(finalTargetBranch)) { + return true; + } else if (/\d{2}\.\d{2}\.x$/.test(finalTargetBranch)) { + return false; + } + } + + const developMajorVersion = "${{ steps.latest_major_version.outputs.latest_major_version }}"; + const currentMajorVersion = "${{ steps.get_version.outputs.major_version }}"; + + if (Number(currentMajorVersion) >= Number(developMajorVersion)) { + return true; + } + + return false; + + - name: Detect linked dev and stable branches + id: linked_branches + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + let linkedDevBranch = 'develop'; + let linkedStableBranch = 'master'; + if ('${{ steps.detect_cloud_version.outputs.result }}' === 'false') { + linkedDevBranch = 'dev-' + '${{ steps.get_version.outputs.major_version }}' + '.x'; + linkedStableBranch = '${{ steps.get_version.outputs.major_version }}' + '.x'; + } + core.setOutput('linked_dev_branch', linkedDevBranch); + core.setOutput('linked_stable_branch', linkedStableBranch); + + - name: Detect nightly status + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + id: get_nightly_status + env: + NIGHTLY_MANUAL_TRIGGER: ${{ inputs.nightly_manual_trigger }} + with: + script: | + const getNightlyInput = () => { + const nightly_manual_trigger = process.env.NIGHTLY_MANUAL_TRIGGER; + console.log(nightly_manual_trigger); + if (typeof nightly_manual_trigger === 'undefined' || nightly_manual_trigger === '') { + return 'false'; + } else if (context.eventName === 'schedule' || context.eventName === 'workflow_dispatch' && nightly_manual_trigger === 'true' ) { + return 'true'; + } + return 'false'; + }; + core.setOutput('is_nightly', getNightlyInput()); + + - name: Get docker images version + id: get_docker_images_version + run: | + IMG_VERSION=$( cat `ls 
.github/docker/Dockerfile.centreon-collect-* | grep -v test` vcpkg.json | md5sum | awk '{print substr($1, 0, 8)}') + echo "img_version=$IMG_VERSION" >> $GITHUB_OUTPUT + + TEST_IMG_VERSION=$(cat .github/docker/Dockerfile.centreon-collect-*-test .github/scripts/collect-prepare-test-robot.sh resources/*.sql | md5sum | cut -c1-8) + echo "test_img_version=$TEST_IMG_VERSION" >> $GITHUB_OUTPUT + + - name: Display info in job summary + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const outputTable = [ + [{data: 'Name', header: true}, {data: 'Value', header: true}], + ['latest_major_version', '${{ steps.latest_major_version.outputs.latest_major_version }}'], + ['is_cloud', '${{ steps.detect_cloud_version.outputs.result }}'], + ['linked_dev_branch', '${{ steps.linked_branches.outputs.linked_dev_branch }}'], + ['linked_stable_branch', '${{ steps.linked_branches.outputs.linked_stable_branch }}'], + ['major_version', '${{ steps.get_version.outputs.major_version }}'], + ['minor_version', '${{ steps.get_version.outputs.minor_version }}'], + ['release', '${{ steps.get_release.outputs.release }}'], + ['stability', '${{ steps.get_stability.outputs.stability }}'], + ['release_type', '${{ steps.get_release_type.outputs.release_type || 'not defined because this is not a release' }}'], + ['is_targeting_feature_branch', '${{ steps.get_stability.outputs.is_targeting_feature_branch }}'], + ['is_nightly', '${{ steps.get_nightly_status.outputs.is_nightly }}'], + ['img_version', '${{ steps.get_docker_images_version.outputs.img_version }}'], + ['test_img_version', '${{ steps.get_docker_images_version.outputs.test_img_version }}'], + ['skip_workflow', '${{ steps.skip_workflow.outputs.result }}'], + ['labels', '${{ steps.has_skip_label.outputs.labels }}'], + ]; + + outputTable.push(['target_stability', '${{ steps.get_stability.outputs.target_stability || 'not defined because current run is not triggered by pull request event' }}']); + + core.summary + .addHeading(`${context.workflow} environment outputs`) + .addTable(outputTable); + + if ("${{ github.event_name }}" === "pull_request") { + const prPath = ${{ steps.pr_path.outputs.result || '[]' }}; + const mainBranchName = prPath.pop(); + let codeBlock = ` + %%{ init: { 'gitGraph': { 'mainBranchName': '${mainBranchName}', 'showCommitLabel': false } } }%% + gitGraph + commit`; + prPath.reverse().forEach((branchName) => { + codeBlock = `${codeBlock} + branch ${branchName} + checkout ${branchName} + commit`; + }); + + core.summary + .addHeading('Git workflow') + .addCodeBlock( + codeBlock, + "mermaid" + ); + } + + core.summary.write(); diff --git a/.github/workflows/get-version.yml b/.github/workflows/get-version.yml deleted file mode 100644 index b4689794d98..00000000000 --- a/.github/workflows/get-version.yml +++ /dev/null @@ -1,226 +0,0 @@ -on: - workflow_call: - inputs: - version_file: - required: false - type: string - default: CMakeLists.txt - outputs: - major_version: - description: "major version" - value: ${{ jobs.get-version.outputs.major_version }} - minor_version: - description: "minor version" - value: ${{ jobs.get-version.outputs.minor_version }} - img_version: - description: "docker image version (vcpkg checksum)" - value: ${{ jobs.get-version.outputs.img_version }} - test_img_version: - description: "test docker image version (checksum of database sql, script and dockerfiles)" - value: ${{ jobs.get-version.outputs.test_img_version }} - version: - description: "major version" - value: ${{ 
jobs.get-version.outputs.version }} - release: - description: "release" - value: ${{ jobs.get-version.outputs.release }} - stability: - description: "branch stability (stable, testing, unstable, canary)" - value: ${{ jobs.get-version.outputs.stability }} - environment: - description: "branch stability (stable, testing, unstable, canary)" - value: ${{ jobs.get-version.outputs.environment }} - release_type: - description: "type of release (hotfix, release)" - value: ${{ jobs.get-version.outputs.release_type }} - release_cloud: - description: "context of release (cloud or not cloud)" - value: ${{ jobs.get-version.outputs.release_cloud }} - -jobs: - get-version: - runs-on: ubuntu-24.04 - outputs: - major_version: ${{ steps.get_version.outputs.major_version }} - minor_version: ${{ steps.get_version.outputs.minor_version }} - img_version: ${{ steps.get_version.outputs.img_version }} - test_img_version: ${{ steps.get_version.outputs.test_img_version }} - version: ${{ steps.get_version.outputs.version }} - release: ${{ steps.get_version.outputs.release }} - stability: ${{ steps.get_version.outputs.stability }} - environment: ${{ steps.get_version.outputs.env }} - release_type: ${{ steps.get_version.outputs.release_type }} - release_cloud: ${{ steps.get_version.outputs.release_cloud}} - - steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - with: - ref: develop - path: centreon-collect-develop - sparse-checkout: .version - - - name: store latest major version - id: latest_major_version - run: | - . centreon-collect-develop/.version - echo "latest_major_version=$MAJOR" >> $GITHUB_OUTPUT - shell: bash - - - name: install gh cli on self-hosted runner - run: | - if ! command -v gh &> /dev/null; then - echo "Installing GH CLI." - type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y) - curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg - sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg - echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null - sudo apt update - sudo apt install gh -y - else - echo "GH CLI is already installed." - fi - shell: bash - - - id: get_version - run: | - set -x - - if [[ "${{ inputs.version_file }}" == */.version ]]; then - . .version - . ${{ inputs.version_file }} - VERSION="$MAJOR.$MINOR" - elif [[ "${{ inputs.version_file }}" == CMakeLists.txt ]]; then - MAJOR=$(awk '$1 ~ "COLLECT_MAJOR" {maj=substr($2, 1, length($2)-1)} $1 ~ "COLLECT_MINOR" {min=substr($2, 1, length($2)-1) ; print maj "." 
min}' CMakeLists.txt) - MINOR=$(awk '$1 ~ "COLLECT_PATCH" {print substr($2, 1, length($2) - 1)}' CMakeLists.txt) - VERSION="$MAJOR.$MINOR" - else - echo "Unable to parse ${{ inputs.version_file }}" - exit 1 - fi - - echo "VERSION=$VERSION" - - if egrep '^[2-9][0-9]\.[0-9][0-9]\.[0-9]+' <<<"$VERSION" >/dev/null 2>&1 ; then - n=${VERSION//[!0-9]/ } - a=(${n//\./ }) - echo "major_version=${a[0]}.${a[1]}" >> $GITHUB_OUTPUT - MAJOR=${a[0]}.${a[1]} - echo "minor_version=${a[2]}" >> $GITHUB_OUTPUT - else - echo "Cannot parse version number from ${{ inputs.version_file }}" - exit 1 - fi - - IMG_VERSION=$( cat `ls .github/docker/Dockerfile.centreon-collect-* | grep -v test` vcpkg.json | md5sum | awk '{print substr($1, 0, 8)}') - TEST_IMG_VERSION=$(cat .github/docker/Dockerfile.centreon-collect-*-test .github/scripts/collect-prepare-test-robot.sh resources/*.sql | md5sum | cut -c1-8) - echo "img_version=$IMG_VERSION" >> $GITHUB_OUTPUT - echo "test_img_version=$TEST_IMG_VERSION" >> $GITHUB_OUTPUT - echo "version=$VERSION" >> $GITHUB_OUTPUT - - if [[ -z "$GITHUB_HEAD_REF" ]]; then - BRANCHNAME="$GITHUB_REF_NAME" - else - BRANCHNAME="$GITHUB_HEAD_REF" - fi - - echo "BRANCHNAME is: $BRANCHNAME" - - # Set default release values - GITHUB_RELEASE_CLOUD=0 - GITHUB_RELEASE_TYPE=$(echo $BRANCHNAME |cut -d '-' -f 1) - - # if current branch major version has a matching dev-$MAJOR branch ==> onprem version - if git ls-remote -q | grep -E "refs/heads/dev-$MAJOR.x$" >/dev/null 2>&1; then - GITHUB_RELEASE_CLOUD=0 - # if current branch major version is greater or equal than the develop branch major version ==> cloud version - elif [[ "$(printf '%s\n' "${{ steps.latest_major_version.outputs.latest_major_version }}" "$MAJOR" | sort -V | head -n1)" == "${{ steps.latest_major_version.outputs.latest_major_version }}" ]]; then - GITHUB_RELEASE_CLOUD=1 - fi - - case "$BRANCHNAME" in - master) - echo "release=1" >> $GITHUB_OUTPUT - GITHUB_RELEASE_CLOUD=1 - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - [2-9][0-9].[0-9][0-9].x) - echo "release=1" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - develop) - echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT - GITHUB_RELEASE_CLOUD=1 - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - dev-[2-9][0-9].[0-9][0-9].x) - echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - release* | hotfix*) - # Handle workflow_dispatch run triggers and run a dispatch ONLY for cloud release - GITHUB_RELEASE_BRANCH_BASE_REF_NAME="$(gh pr view $BRANCHNAME -q .baseRefName --json headRefName,baseRefName,state)" - echo "GITHUB_RELEASE_BRANCH_BASE_REF_NAME is: $GITHUB_RELEASE_BRANCH_BASE_REF_NAME" - GITHUB_RELEASE_BRANCH_PR_STATE="$(gh pr view $BRANCHNAME -q .state --json headRefName,baseRefName,state)" - echo "GITHUB_RELEASE_BRANCH_PR_STATE is: $GITHUB_RELEASE_BRANCH_PR_STATE" - - # Check if the release context (cloud and hotfix or cloud and release) - if [[ "$GITHUB_RELEASE_BRANCH_BASE_REF_NAME" == "master" ]] && [[ "$GITHUB_RELEASE_BRANCH_PR_STATE" == "OPEN" ]]; then - # Get release pull request ID - GITHUB_RELEASE_BRANCH_PR_NUMBER="$(gh pr view $BRANCHNAME -q .[] --json number)" - # Set release cloud to 1 (0=not-cloud, 1=cloud) - GITHUB_RELEASE_CLOUD=1 - # Debug - echo "GITHUB_RELEASE_TYPE is: $GITHUB_RELEASE_TYPE" - echo "GITHUB_RELEASE_BRANCH_PR_NUMBER is: $GITHUB_RELEASE_BRANCH_PR_NUMBER" # We do leave 
this here as debug help. - echo "GITHUB_RELEASE_CLOUD is: $GITHUB_RELEASE_CLOUD" - # Github ouputs - echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - else - echo "release=1" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - fi - ;; - prepare-release-cloud*) - # Set release cloud to 1 (0=not-cloud, 1=cloud) - GITHUB_RELEASE_CLOUD=1 - # Debug - echo "GITHUB_RELEASE_TYPE is: $GITHUB_RELEASE_TYPE" - echo "GITHUB_RELEASE_CLOUD is: $GITHUB_RELEASE_CLOUD" - # Github ouputs - echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - *) - echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - esac - - echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT - - case "$BRANCHNAME" in - develop | dev-[2-9][0-9].[0-9][0-9].x | prepare-release-cloud*) - STABILITY="unstable" - ENV="development" - ;; - release* | hotfix*) - STABILITY="testing" - ENV="testing" - ;; - master | [2-9][0-9].[0-9][0-9].x) - STABILITY="stable" - ENV="production" - ;; - *) - STABILITY="canary" - ;; - esac - echo "stability=$STABILITY" >> $GITHUB_OUTPUT - echo "env=$VERSION-$ENV" >> $GITHUB_OUTPUT - echo "GH_ENV: $VERSION-$ENV" - shell: bash - env: - GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/gorgone.yml b/.github/workflows/gorgone.yml index acd9b069695..3857ed82eec 100644 --- a/.github/workflows/gorgone.yml +++ b/.github/workflows/gorgone.yml @@ -6,6 +6,12 @@ concurrency: on: workflow_dispatch: + inputs: + unit_tests: + description: 'Execute the unit tests' + required: true + default: true + type: boolean pull_request: types: - opened @@ -13,8 +19,10 @@ on: - reopened - ready_for_review paths: + - ".version.centreon-gorgone" - ".github/workflows/gorgone.yml" - "gorgone/**" + - "perl-libs/**" - "!gorgone/tests/**" - "!gorgone/veracode.json" - "!gorgone/.veracode-exclusions" @@ -25,7 +33,10 @@ on: - master - "[2-9][0-9].[0-9][0-9].x" paths: + - ".version.centreon-gorgone" + - ".github/workflows/gorgone.yml" - "gorgone/**" + - "perl-libs/**" - "!gorgone/tests/**" - "!gorgone/veracode.json" - "!gorgone/.veracode-exclusions" @@ -34,30 +45,79 @@ env: base_directory: gorgone jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml with: - version_file: gorgone/.version + version_file: .version.centreon-gorgone veracode-analysis: - needs: [get-version] + needs: [get-environment] + if: ${{ needs.get-environment.outputs.is_targeting_feature_branch != 'true' && github.event.pull_request.draft != 'true' }} uses: ./.github/workflows/veracode-analysis.yml with: module_directory: gorgone module_name: centreon-gorgone - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} - img_version: ${{ needs.get-version.outputs.img_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + img_version: ${{ needs.get-environment.outputs.img_version }} secrets: veracode_api_id: ${{ secrets.VERACODE_API_ID_GORG }} veracode_api_key: ${{ secrets.VERACODE_API_KEY_GORG }} veracode_srcclr_token: ${{ secrets.VERACODE_SRCCLR_TOKEN }} - docker_registry_id: ${{ secrets.DOCKER_REGISTRY_ID }} - 
docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + docker_registry_id: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + docker_registry_passwd: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} + + unit-test-perl: + needs: [get-environment] + if: | + github.event.inputs.unit_tests == 'true' && + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' + strategy: + fail-fast: false + matrix: + image: [unit-tests-alma8, unit-tests-alma9, unit-tests-bullseye-arm64, unit-tests-bookworm] + include: + - runner_name: ubuntu-22.04 + - package_extension: rpm + image: unit-tests-alma8 + distrib: el8 + - package_extension: rpm + image: unit-tests-alma9 + distrib: el9 + - package_extension: deb + image: unit-tests-bullseye-arm64 + distrib: bullseye-arm64 + runner_name: ["self-hosted", "collect-arm64"] + - package_extension: deb + image: unit-tests-bookworm + distrib: bookworm + runs-on: ${{ matrix.runner_name }} + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }} + credentials: + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Run unit tests + run: yath -L test ./perl-libs/lib/ ./gorgone/tests/unit/ + + - name: Upload logs as artifacts if tests failed + if: failure() + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + with: + name: centreon-collect-perl-unit-tests-${{ matrix.distrib }} + path: ./lastlog.jsonl + retention-days: 1 package: - needs: [get-version] - if: ${{ needs.get-version.outputs.stability != 'stable' }} + needs: [get-environment] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' strategy: fail-fast: false @@ -77,10 +137,10 @@ jobs: runs-on: ubuntu-24.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.major_version }} credentials: - username: ${{ secrets.HARBOR_CENTREON_PUSH_USERNAME }} - password: ${{ secrets.HARBOR_CENTREON_PUSH_TOKEN }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} @@ -102,7 +162,7 @@ jobs: if: ${{ matrix.package_extension == 'rpm' }} run: | cd gorgone/selinux - sed -i "s/@VERSION@/${{ needs.get-version.outputs.major_version }}.${{ needs.get-version.outputs.minor_version }}/g" centreon-gorgoned.te + sed -i "s/@VERSION@/${{ needs.get-environment.outputs.major_version }}.${{ needs.get-environment.outputs.minor_version }}/g" centreon-gorgoned.te make -f /usr/share/selinux/devel/Makefile shell: bash @@ -114,22 +174,26 @@ jobs: - name: Package uses: ./.github/actions/package with: - nfpm_file_pattern: "gorgone/packaging/*.yaml" + nfpm_file_pattern: "gorgone/packaging/*.yaml perl-libs/packaging/*.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} - release: ${{ needs.get-version.outputs.release }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + release: ${{ 
needs.get-environment.outputs.release }} arch: all commit_hash: ${{ github.sha }} cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} - stability: ${{ needs.get-version.outputs.stability }} + stability: ${{ needs.get-environment.outputs.stability }} - test-gorgone: - needs: [get-version, package] + robot-test-gorgone: + needs: [get-environment, package] + if: | + github.repository == 'centreon/centreon-collect' && + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' strategy: fail-fast: false @@ -148,10 +212,10 @@ jobs: runs-on: ubuntu-24.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.major_version }} credentials: - username: ${{ secrets.HARBOR_CENTREON_PUSH_USERNAME }} - password: ${{ secrets.HARBOR_CENTREON_PUSH_TOKEN }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} services: mariadb: @@ -167,7 +231,7 @@ jobs: - name: Get linked branch of centreon repository id: centreon_repo_linked_branch run: | - CENTREON_REPO_LINKED_BRANCH=$(git ls-remote -h https://github.com/centreon/centreon.git | grep -E "refs/heads/dev-${{ needs.get-version.outputs.major_version }}\.x$" >/dev/null 2>&1 && echo "dev-${{ needs.get-version.outputs.major_version }}.x" || echo develop) + CENTREON_REPO_LINKED_BRANCH=$(git ls-remote -h https://github.com/centreon/centreon.git | grep -E "refs/heads/dev-${{ needs.get-environment.outputs.major_version }}\.x$" >/dev/null 2>&1 && echo "dev-${{ needs.get-environment.outputs.major_version }}.x" || echo develop) GIT_BRANCH_EXISTS=$(git ls-remote -h https://github.com/centreon/centreon.git | grep -E "refs/heads/${{ github.head_ref || github.ref_name }}$" >/dev/null 2>&1 && echo yes || echo no) if [[ "$GIT_BRANCH_EXISTS" == "yes" ]]; then @@ -190,7 +254,7 @@ jobs: centreon/www/install/createTables.sql centreon/www/install/createTablesCentstorage.sql - - name: get cached gorgone package + - name: get cached gorgone and perl-libs package uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ./*.${{ matrix.package_extension }} @@ -208,9 +272,9 @@ jobs: run: | if [[ "${{ matrix.package_extension }}" == "deb" ]]; then apt update - apt install -y ./centreon-gorgone*${{ steps.parse-distrib.outputs.package_distrib_name }}* + apt install -y ./centreon-gorgone*${{ steps.parse-distrib.outputs.package_distrib_name }}* ./centreon-perl-libs-common*${{ steps.parse-distrib.outputs.package_distrib_name }}* else - dnf install -y ./centreon-gorgone*${{ steps.parse-distrib.outputs.package_distrib_name }}* ./centreon-gorgone-centreon-config*${{ steps.parse-distrib.outputs.package_distrib_name }}* + dnf install -y ./centreon-gorgone*${{ steps.parse-distrib.outputs.package_distrib_name }}* ./centreon-perl-libs-common*${{ steps.parse-distrib.outputs.package_distrib_name }}* # in el8 at least, there is a package for the configuration and a package for the actual code. # this is not the case for debian, and for now I don't know why it was made any different between the 2 Os. 
fi @@ -229,7 +293,7 @@ jobs: - name: Upload gorgone and robot debug artifacts if: failure() - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: gorgone-debug-${{ matrix.distrib }} path: | @@ -240,8 +304,13 @@ jobs: deliver-sources: runs-on: [self-hosted, common] - needs: [get-version, package] - if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + needs: [get-environment, package] + if: | + github.event_name != 'workflow_dispatch' && + needs.get-environment.outputs.stability == 'stable' && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') steps: - name: Checkout sources @@ -253,14 +322,19 @@ jobs: bucket_directory: centreon-gorgone module_directory: gorgone module_name: centreon-gorgone - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} token_download_centreon_com: ${{ secrets.TOKEN_DOWNLOAD_CENTREON_COM }} deliver-rpm: runs-on: [self-hosted, common] - needs: [get-version, package] - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-environment, unit-test-perl, robot-test-gorgone] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') strategy: matrix: @@ -275,17 +349,22 @@ jobs: with: module_name: gorgone distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.major_version }} + version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} deliver-deb: runs-on: [self-hosted, common] - needs: [get-version, package] - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-environment, unit-test-perl, robot-test-gorgone] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') strategy: matrix: @@ -300,20 +379,23 @@ jobs: with: module_name: gorgone distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.major_version }} + version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.distrib }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} promote: - needs: [get-version, deliver-rpm, deliver-deb] + needs: [get-environment, deliver-rpm, deliver-deb] if: | - (contains(fromJson('["stable", "testing"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch') && + needs.get-environment.outputs.skip_workflow == 'false' && + (contains(fromJson('["stable", "testing"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch') && ! cancelled() && ! contains(needs.*.result, 'failure') && - ! contains(needs.*.result, 'cancelled') + ! contains(needs.*.result, 'cancelled') && + github.repository == 'centreon/centreon-collect' + runs-on: [self-hosted, common] strategy: matrix: @@ -329,8 +411,17 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: gorgone distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} - stability: ${{ needs.get-version.outputs.stability }} + major_version: ${{ needs.get-environment.outputs.major_version }} + stability: ${{ needs.get-environment.outputs.stability }} github_ref_name: ${{ github.ref_name }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} + + set-skip-label: + needs: [get-environment, deliver-rpm, deliver-deb, promote] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') + uses: ./.github/workflows/set-pull-request-skip-label.yml diff --git a/.github/workflows/libzmq.yml b/.github/workflows/libzmq.yml index bc342cc8d09..2f0a5937a3b 100644 --- a/.github/workflows/libzmq.yml +++ b/.github/workflows/libzmq.yml @@ -19,11 +19,14 @@ on: - '.github/workflows/libzmq.yml' jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml package-rpm: - needs: [get-version] + needs: [get-environment] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' strategy: fail-fast: false @@ -36,13 +39,13 @@ jobs: distrib: el9 arch: amd64 - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.major_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} @@ -73,7 +76,10 @@ jobs: key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} package-deb: - needs: [get-version] + needs: [get-environment] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' strategy: fail-fast: false @@ -81,26 +87,26 @@ jobs: include: - image: packaging-nfpm-bookworm distrib: bookworm - runner: ubuntu-22.04 + runner: ubuntu-24.04 arch: amd64 - image: packaging-nfpm-jammy distrib: jammy - runner: ubuntu-22.04 + runner: ubuntu-24.04 arch: amd64 runs-on: ${{ matrix.runner }} container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.major_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} ${{ matrix.arch }} steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Parse distrib name id: parse-distrib @@ -138,9 +144,13 @@ jobs: key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} deliver-rpm: - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package-rpm] - environment: ${{ needs.get-version.outputs.environment }} + needs: [get-environment, package-rpm] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') runs-on: [self-hosted, common] strategy: matrix: @@ -154,24 +164,28 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Publish RPM packages uses: ./.github/actions/delivery with: module_name: libzmq distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} deliver-deb: - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package-deb] - environment: ${{ needs.get-version.outputs.environment }} + needs: [get-environment, package-deb] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') runs-on: [self-hosted, common] strategy: matrix: @@ -183,27 +197,29 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Publish DEB packages uses: ./.github/actions/delivery with: module_name: libzmq distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} promote: - needs: [get-version, deliver-rpm, deliver-deb] + needs: [get-environment, deliver-rpm, deliver-deb] if: | - (contains(fromJson('["stable", "testing"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch') && + needs.get-environment.outputs.skip_workflow == 'false' && + (contains(fromJson('["stable", "testing"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch') && ! cancelled() && ! contains(needs.*.result, 'failure') && - ! contains(needs.*.result, 'cancelled') + ! 
contains(needs.*.result, 'cancelled') && + github.repository == 'centreon/centreon-collect' runs-on: [self-hosted, common] strategy: matrix: @@ -211,7 +227,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Promote ${{ matrix.distrib }} to stable uses: ./.github/actions/promote-to-stable @@ -219,8 +235,17 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: libzmq distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} - stability: ${{ needs.get-version.outputs.stability }} + major_version: ${{ needs.get-environment.outputs.major_version }} + stability: ${{ needs.get-environment.outputs.stability }} github_ref_name: ${{ github.ref_name }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} + + set-skip-label: + needs: [get-environment, deliver-rpm, deliver-deb, promote] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + uses: ./.github/workflows/set-pull-request-skip-label.yml diff --git a/.github/workflows/lua-curl.yml b/.github/workflows/lua-curl.yml index a0051595337..ed05b63b055 100644 --- a/.github/workflows/lua-curl.yml +++ b/.github/workflows/lua-curl.yml @@ -24,12 +24,14 @@ env: release: 21 # 10 for openssl 1.1.1 / 20 for openssl system / 21 for openssl system and possible issue with ~ jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml package: - needs: [get-version] - if: ${{ needs.get-version.outputs.stability != 'stable' }} + needs: [get-environment] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + needs.get-environment.outputs.stability != 'stable' strategy: fail-fast: false @@ -69,19 +71,19 @@ jobs: runs-on: ${{ matrix.runner }} container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.img_version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.img_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} ${{ matrix.arch }} steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Checkout sources of lua-curl - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 with: repository: Lua-cURL/Lua-cURLv3 path: lua-curl-src @@ -126,12 +128,17 @@ jobs: rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} - stability: ${{ needs.get-version.outputs.stability }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-rpm: - if: ${{ contains(fromJson('["testing", "unstable"]'), 
needs.get-version.outputs.stability) }} - needs: [get-version, package] - runs-on: ubuntu-22.04 + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + needs: [get-environment, package] + runs-on: ubuntu-24.04 strategy: matrix: include: @@ -143,24 +150,29 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Publish RPM packages uses: ./.github/actions/rpm-delivery with: module_name: lua-curl distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.major_version }} + version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-curl-${{ matrix.distrib }}-${{ matrix.arch }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} deliver-deb: - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package] - runs-on: ubuntu-22.04 + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + needs: [get-environment, package] + runs-on: ubuntu-24.04 strategy: matrix: include: @@ -171,27 +183,29 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Publish DEB packages uses: ./.github/actions/deb-delivery with: module_name: lua-curl distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.major_version }} + version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-curl-${{ matrix.distrib }}-${{ matrix.arch }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} promote: - needs: [get-version, deliver-rpm, deliver-deb] + needs: [get-environment, deliver-rpm, deliver-deb] if: | - (contains(fromJson('["stable", "testing"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch') && + needs.get-environment.outputs.skip_workflow == 'false' && + (contains(fromJson('["stable", "testing"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch') && ! cancelled() && ! contains(needs.*.result, 'failure') && - ! contains(needs.*.result, 'cancelled') + ! 
contains(needs.*.result, 'cancelled') && + github.repository == 'centreon/centreon-collect' runs-on: [self-hosted, common] strategy: matrix: @@ -199,7 +213,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Promote ${{ matrix.distrib }} to stable uses: ./.github/actions/promote-to-stable @@ -207,8 +221,17 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: lua-curl distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} - stability: ${{ needs.get-version.outputs.stability }} + major_version: ${{ needs.get-environment.outputs.major_version }} + stability: ${{ needs.get-environment.outputs.stability }} github_ref_name: ${{ github.ref_name }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} + + set-skip-label: + needs: [get-environment, deliver-rpm, deliver-deb, promote] + if: | + needs.get-environment.outputs.skip_workflow == 'false' && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + uses: ./.github/workflows/set-pull-request-skip-label.yml diff --git a/.github/workflows/package-collect.yml b/.github/workflows/package-collect.yml index 4f101ca71fb..21baac43e8f 100644 --- a/.github/workflows/package-collect.yml +++ b/.github/workflows/package-collect.yml @@ -24,6 +24,9 @@ on: legacy_engine: required: true type: string + build_debug_packages: + required: true + type: boolean packages_in_artifact: required: true type: boolean @@ -42,8 +45,30 @@ on: arch: required: true type: string - - + is_nightly: + required: false + type: string + secrets: + collect_s3_access_key: + required: true + collect_s3_secret_key: + required: true + registry_username: + required: true + registry_password: + required: true + rpm_gpg_key: + required: true + rpm_gpg_signing_key_id: + required: true + rpm_gpg_signing_passphrase: + required: true + jira_base_url: + required: true + jira_user_email: + required: true + jira_api_token: + required: true jobs: package: runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.runner)) }} @@ -52,20 +77,20 @@ jobs: SCCACHE_PATH: "/usr/bin/sccache" SCCACHE_BUCKET: "centreon-github-sccache" SCCACHE_REGION: "eu-west-1" - AWS_ACCESS_KEY_ID: ${{ secrets.COLLECT_S3_ACCESS_KEY }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.COLLECT_S3_SECRET_KEY }} + AWS_ACCESS_KEY_ID: ${{ secrets.collect_s3_access_key }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.collect_s3_secret_key }} container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ inputs.image }}:${{ inputs.img_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.registry_username }} + password: ${{ secrets.registry_password }} name: package ${{ inputs.distrib }} ${{ inputs.arch }} steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install sccache run: | @@ -100,7 +125,7 @@ jobs: - name: Remove selinux packaging files on debian if: ${{ inputs.package_extension == 'deb' }} - run: rm -f packaging/*-selinux.yaml + run: rm -f 
packaging/centreon-collect/*-selinux.yaml shell: bash - name: Compile sources @@ -162,7 +187,7 @@ jobs: run: | for file in $(find build/{broker,engine,clib,connectors} -name '*.so' -type f); do echo "Making a debug file of $file" - objcopy --only-keep-debug $file $file.debug + objcopy --merge-notes --only-keep-debug $file $file.debug objcopy --strip-debug $file objcopy --add-gnu-debuglink $file.debug $file done @@ -177,15 +202,19 @@ jobs: "build/agent/centagent") for file in "${exe[@]}"; do echo "Making a debug file of $file" - objcopy --only-keep-debug $file $file.debug + objcopy --merge-notes --only-keep-debug $file $file.debug objcopy --strip-debug $file objcopy --add-gnu-debuglink $file.debug $file done shell: bash + - if: ${{ inputs.build_debug_packages == false }} + run: rm -f packaging/centreon-collect/*debuginfo*.yaml + shell: bash + - uses: ./.github/actions/package with: - nfpm_file_pattern: "packaging/*.yaml" + nfpm_file_pattern: "packaging/centreon-collect/*.yaml" distrib: ${{ inputs.distrib }} package_extension: ${{ inputs.package_extension }} major_version: ${{ inputs.major_version }} @@ -194,20 +223,37 @@ jobs: arch: ${{ inputs.arch }} commit_hash: ${{ inputs.commit_hash }} cache_key: ${{ github.run_id }}-${{ github.sha }}-${{ inputs.package_extension}}-centreon-collect-${{ inputs.distrib }}-${{ inputs.arch }}-${{ github.head_ref || github.ref_name }} - rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} - rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} - rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + rpm_gpg_key: ${{ secrets.rpm_gpg_key }} + rpm_gpg_signing_key_id: ${{ secrets.rpm_gpg_signing_key_id }} + rpm_gpg_signing_passphrase: ${{ secrets.rpm_gpg_signing_passphrase }} stability: ${{ inputs.stability }} - name: Cleaning not needed packages - shell: bash run: rm -rf *-debuginfo*.${{ inputs.package_extension }} + shell: bash # set condition to true if artifacts are needed - if: inputs.packages_in_artifact == true name: Upload package artifacts - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: packages-${{ inputs.distrib }}-${{ inputs.arch }} - path: ./*.${{ inputs.package_extension}} + path: ./*.${{ inputs.package_extension }} retention-days: 1 + + create-jira-nightly-ticket: + needs: [package] + runs-on: ubuntu-24.04 + if: | + inputs.is_nightly == 'true' && github.run_attempt == 1 && + (failure() || cancelled()) && + startsWith(github.ref_name, 'dev') + steps: + - name: Create Jira ticket if nightly build failure + uses: ./.github/actions/create-jira-ticket + with: + jira_base_url: ${{ secrets.jira_base_url }} + jira_user_email: ${{ secrets.jira_user_email }} + jira_api_token: ${{ secrets.jira_api_token }} + module_name: "centreon-collect" + ticket_labels: '["Nightly", "Pipeline", "nightly-${{ github.ref_name }}", "package"]' diff --git a/.github/workflows/rebase-master.yml b/.github/workflows/rebase-master.yml index 03520557266..57a76f8eebd 100644 --- a/.github/workflows/rebase-master.yml +++ b/.github/workflows/rebase-master.yml @@ -12,11 +12,11 @@ on: jobs: main: name: Sync Stable Branches - runs-on: ubuntu-22.04 - if: github.event.pull_request.merged == true + runs-on: ubuntu-24.04 + if: github.event.pull_request.merged == true && github.repository == 'centreon/centreon-collect' steps: - name: git checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: 
actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 with: fetch-depth: 0 token: ${{ secrets.CENTREON_TECHNIQUE_PAT }} diff --git a/.github/workflows/rebase-version.yml b/.github/workflows/rebase-version.yml index c89b3fe98b5..8e812f122d4 100644 --- a/.github/workflows/rebase-version.yml +++ b/.github/workflows/rebase-version.yml @@ -12,11 +12,11 @@ on: jobs: main: name: Sync Stable Branches - runs-on: ubuntu-22.04 - if: github.event.pull_request.merged == true + runs-on: ubuntu-24.04 + if: github.event.pull_request.merged == true && github.repository == 'centreon/centreon-collect' steps: - name: git checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 with: fetch-depth: 0 token: ${{ secrets.CENTREON_TECHNIQUE_PAT }} diff --git a/.github/workflows/release-trigger-builds.yml b/.github/workflows/release-trigger-builds.yml index 3769c527747..3f74e5f412e 100644 --- a/.github/workflows/release-trigger-builds.yml +++ b/.github/workflows/release-trigger-builds.yml @@ -8,7 +8,7 @@ on: description: "Cloud release branch to trigger" required: true dispatch_content: - description: "Regular (only centreon named components) or Full (every component, including php and extra libs)" + description: "Regular (only centreon named components) or Full (every component, including docker builders, php and extra libs)" required: true type: choice options: @@ -17,9 +17,10 @@ on: jobs: release-trigger-builds: - runs-on: ubuntu-22.04 + if: github.repository == 'centreon/centreon-collect' + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Github CLI run: | @@ -47,8 +48,8 @@ jobs: #COMPONENTS_OSS_FULL=("awie" "dsm" "gorgone" "ha" "open-tickets" "web") #COMPONENTS_MODULES=("anomaly-detection" "autodiscovery" "bam" "cloud-business-extensions" "cloud-extensions" "it-edition-extensions" "lm" "map" "mbi" "ppm") #COMPONENTS_MODULES_FULL=("anomaly-detection" "autodiscovery" "bam" "cloud-business-extensions" "cloud-extensions" "it-edition-extensions" "lm" "map" "mbi" "ppm" "php-pecl-gnupg" "sourceguardian-loader") - COMPONENTS_COLLECT=("Centreon collect" "gorgone") - COMPONENTS_COLLECT_FULL=("Centreon collect" "gorgone") + COMPONENTS_COLLECT=("Centreon collect" "centreon-common" "gorgone" "Centreon Monitoring Agent Windows build and packaging") + COMPONENTS_COLLECT_FULL=("Centreon collect" "centreon-common" "gorgone" "Centreon Monitoring Agent Windows build and packaging" "docker-builder" "docker-gorgone-testing" "libzmq" "lua-curl") RUNS_URL="" # Accept release prefixed or develop branches, nothing else @@ -61,7 +62,7 @@ jobs: fi if [[ "${{ inputs.dispatch_content }}" == "FULL" ]]; then - echo "Requested ${{ inputs.dispatch_content }} content, triggering all component workflows." + echo "Requested ${{ inputs.dispatch_content }} content, triggering all components and docker builders, php and extra libs." 
for COMPONENT in "${COMPONENTS_COLLECT_FULL[@]}"; do gh workflow run "$COMPONENT" -r ${{ inputs.dispatch_target_release_branch }} done diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 594d0392f0a..74b462ec145 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,8 +20,8 @@ on: jobs: release: - if: ${{ github.event.pull_request.merged == true }} - runs-on: ubuntu-22.04 + if: ${{ github.event.pull_request.merged == true && github.repository == 'centreon/centreon-collect' }} + runs-on: ubuntu-24.04 steps: - name: Check base_ref run: | @@ -37,7 +37,7 @@ jobs: shell: bash - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 with: fetch-depth: 0 diff --git a/.github/workflows/robot-test.yml b/.github/workflows/robot-test.yml index d6f8bc622e3..fd3f7560940 100644 --- a/.github/workflows/robot-test.yml +++ b/.github/workflows/robot-test.yml @@ -31,6 +31,9 @@ on: test_group_name: required: true type: string + is_nightly: + required: false + type: string secrets: registry_username: required: true @@ -44,13 +47,18 @@ on: required: true xray_client_secret: required: true - + jira_base_url: + required: true + jira_user_email: + required: true + jira_api_token: + required: true jobs: test-image-to-cache: - runs-on: ${{ contains(inputs.image, 'arm') && fromJson('["self-hosted", "collect-arm64"]') || 'ubuntu-22.04' }} + runs-on: ${{ contains(inputs.image, 'arm') && fromJson('["self-hosted", "collect-arm64"]') || 'ubuntu-24.04' }} steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Login to Registry uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 @@ -84,13 +92,13 @@ jobs: robot-test-list: needs: [test-image-to-cache] - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: features: ${{ steps.list-features.outputs.features }} steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: List features id: list-features @@ -100,7 +108,7 @@ jobs: robot-test: needs: [robot-test-list] - runs-on: ${{ contains(inputs.image, 'arm') && fromJson('["self-hosted", "collect-arm64"]') || 'ubuntu-22.04' }} + runs-on: ${{ contains(inputs.image, 'arm') && fromJson('["self-hosted", "collect-arm64"]') || 'ubuntu-24.04' }} strategy: fail-fast: false @@ -111,7 +119,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 with: fetch-depth: 0 @@ -182,7 +190,7 @@ jobs: - name: Upload Test Results if: ${{ failure() }} - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: reports-${{inputs.test_group_name}}-${{ steps.feature-path.outputs.feature_name_with_dash }} path: reports @@ -191,10 +199,10 @@ jobs: robot-test-report: needs: [robot-test] if: ${{ failure() }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Download Artifacts uses: 
actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -204,7 +212,7 @@ jobs: merge-multiple: true - name: Upload the regrouped artifact - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: reports-${{inputs.test_group_name}} path: reports/ @@ -243,12 +251,12 @@ jobs: shell: bash # setup-python v5.0.0 relies on node20 which is not supported by el7 distributions - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 if: ${{ inputs.distrib == 'el7'}} with: python-version: "3.10" - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 if: ${{ inputs.distrib != 'el7' }} with: python-version: "3.10" @@ -265,3 +273,23 @@ jobs: gh_access_token: ${{ secrets.GITHUB_TOKEN }} report_path: reports show_passed_tests: false + + create-jira-nightly-ticket: + needs: [robot-test-list, robot-test, robot-test-report] + runs-on: ubuntu-24.04 + if: | + inputs.is_nightly == 'true' && github.run_attempt == 1 && + (failure() || cancelled()) && + startsWith(github.ref_name, 'dev') + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Create Jira ticket if nightly build failure + uses: ./.github/actions/create-jira-ticket + with: + jira_base_url: ${{ secrets.jira_base_url }} + jira_user_email: ${{ secrets.jira_user_email }} + jira_api_token: ${{ secrets.jira_api_token }} + module_name: "centreon-collect" + ticket_labels: '["Nightly", "Pipeline", "nightly-${{ github.ref_name }}", "robot-test"]' diff --git a/.github/workflows/set-pull-request-external-label.yml b/.github/workflows/set-pull-request-external-label.yml new file mode 100644 index 00000000000..bedeb755b95 --- /dev/null +++ b/.github/workflows/set-pull-request-external-label.yml @@ -0,0 +1,32 @@ +name: set-pull-request-external-label + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + pull_request_target: + +jobs: + set-pull-request-external-label: + if: | + github.event.pull_request.head.repo.fork && + ! 
contains(github.event.pull_request.labels.*.name, 'external') + runs-on: ubuntu-24.04 + + steps: + - name: Set PR external label + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const label = 'external'; + try { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + labels: [label] + }); + } catch (e) { + core.warning(`failed to add label ${label}: ${e}`); + } diff --git a/.github/workflows/set-pull-request-skip-label.yml b/.github/workflows/set-pull-request-skip-label.yml new file mode 100644 index 00000000000..ffab0b955e2 --- /dev/null +++ b/.github/workflows/set-pull-request-skip-label.yml @@ -0,0 +1,26 @@ +name: set-pull-request-skip-label + +on: + workflow_call: + +jobs: + set-pull-request-skip-label: + if: ${{ success() && contains(fromJSON('["pull_request", "pull_request_target"]') , github.event_name) }} + runs-on: ubuntu-24.04 + + steps: + - name: Set PR skip label + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const label = '${{ format('skip-workflow-{0}', github.workflow) }}'; + try { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + labels: [label] + }); + } catch (e) { + core.warning(`failed to add label ${label}: ${e}`); + } diff --git a/.github/workflows/veracode-analysis.yml b/.github/workflows/veracode-analysis.yml index 23361521e81..64832006e35 100644 --- a/.github/workflows/veracode-analysis.yml +++ b/.github/workflows/veracode-analysis.yml @@ -32,7 +32,7 @@ on: jobs: routing: name: Check before analysis - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: development_stage: ${{ steps.routing-mode.outputs.development_stage }} skip_analysis: ${{ steps.routing-mode.outputs.skip_analysis }} @@ -72,7 +72,7 @@ jobs: password: ${{ secrets.docker_registry_passwd }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - if: ${{ inputs.module_name == 'centreon-collect' }} name: Compiling Cpp sources @@ -169,7 +169,7 @@ jobs: name: Sandbox scan needs: [routing, build] if: needs.routing.outputs.development_stage != 'Development' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Promote latest scan diff --git a/.github/workflows/windows-agent-robot-test.yml b/.github/workflows/windows-agent-robot-test.yml index 30abb02db7b..91256393044 100644 --- a/.github/workflows/windows-agent-robot-test.yml +++ b/.github/workflows/windows-agent-robot-test.yml @@ -10,29 +10,41 @@ on: - cron: '30 0 * * *' jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml with: version_file: CMakeLists.txt build-collect: - needs: [get-version] + needs: [get-environment] uses: ./.github/workflows/package-collect.yml with: - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} - img_version: ${{ needs.get-version.outputs.img_version }} - release: ${{ needs.get-version.outputs.release }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + img_version: ${{ needs.get-environment.outputs.img_version }} + release: ${{ needs.get-environment.outputs.release }} commit_hash: ${{ github.sha }} - 
stability: ${{ needs.get-version.outputs.stability }} - legacy_engine: 'ON' + stability: ${{ needs.get-environment.outputs.stability }} + legacy_engine: 'OFF' + build_debug_packages: false packages_in_artifact: false image: centreon-collect-debian-bullseye distrib: bullseye package_extension: deb runner: collect arch: amd64 - secrets: inherit + is_nightly: ${{ needs.get-environment.outputs.is_nightly }} + secrets: + collect_s3_access_key: ${{ secrets.COLLECT_S3_ACCESS_KEY }} + collect_s3_secret_key: ${{ secrets.COLLECT_S3_SECRET_KEY }} + registry_username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + registry_password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + jira_base_url: ${{ secrets.JIRA_BASE_URL }} + jira_user_email: ${{ secrets.XRAY_JIRA_USER_EMAIL }} + jira_api_token: ${{ secrets.XRAY_JIRA_TOKEN }} build-agent-and-execute-test: @@ -48,8 +60,7 @@ jobs: run: git config --system core.autocrlf false - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: distrib availables run: wsl --list --online @@ -68,6 +79,7 @@ jobs: python3 python3-pip rrdtool + jq - name: IP info run: | @@ -123,7 +135,7 @@ jobs: - name: Upload Test Results if: ${{ failure() }} - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: reports-cma-windows path: reports diff --git a/.github/workflows/windows-agent.yml b/.github/workflows/windows-agent.yml index ffd3033a623..2fcdbf04ff2 100644 --- a/.github/workflows/windows-agent.yml +++ b/.github/workflows/windows-agent.yml @@ -16,6 +16,7 @@ on: pull_request: paths: - agent/** + - common/** - custom-triplets/** - CMakeLists.txt - CMakeListsWindows.txt @@ -28,19 +29,20 @@ on: - "[2-9][0-9].[0-9][0-9].x" paths: - agent/** + - common/** - custom-triplets/** - CMakeLists.txt - CMakeListsWindows.txt - vcpkg.json jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml with: version_file: CMakeLists.txt build-and-test-agent: - needs: [get-version] + needs: [get-environment] runs-on: windows-latest env: AWS_ACCESS_KEY_ID: ${{ secrets.COLLECT_S3_ACCESS_KEY }} @@ -57,9 +59,9 @@ jobs: run: git config --system core.autocrlf false - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - name: Compile Agent + - name: Compile Agent only run: .github/scripts/windows-agent-compile.ps1 shell: powershell @@ -73,33 +75,112 @@ jobs: cd build_windows tests/ut_agent + - name: Sign agent + if: | + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') + uses: azure/trusted-signing-action@v0.4.0 + with: + azure-tenant-id: ${{ secrets.AZURE_TENANT_ID }} + azure-client-id: ${{ secrets.AZURE_CLIENT_ID }} + azure-client-secret: ${{ secrets.AZURE_CLIENT_SECRET }} + endpoint: https://weu.codesigning.azure.net/ + trusted-signing-account-name: Centreon-signature-RD + certificate-profile-name: Cert-Signature-RD + files-folder: build_windows\agent\Release + files-folder-filter: centagent.exe + files-folder-recurse: false + file-digest: SHA256 + timestamp-rfc3161: http://timestamp.acs.microsoft.com + timestamp-digest: SHA256 + + - name: Build modifier + run: | + cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTING=On -DWINDOWS=On -DBUILD_FROM_CACHE=On -S. -DVCPKG_CRT_LINKAGE=dynamic -DBUILD_SHARED_LIBS=OFF -DWITH_BUILD_AGENT_MODIFIER=On -Bbuild_windows + cmake --build build_windows --config Release + + - name: Sign modifier + if: | + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + uses: azure/trusted-signing-action@v0.4.0 + with: + azure-tenant-id: ${{ secrets.AZURE_TENANT_ID }} + azure-client-id: ${{ secrets.AZURE_CLIENT_ID }} + azure-client-secret: ${{ secrets.AZURE_CLIENT_SECRET }} + endpoint: https://weu.codesigning.azure.net/ + trusted-signing-account-name: Centreon-signature-RD + certificate-profile-name: Cert-Signature-RD + files-folder: agent\installer + files-folder-filter: centreon-monitoring-agent-modify.exe + file-digest: SHA256 + timestamp-rfc3161: http://timestamp.acs.microsoft.com + timestamp-digest: SHA256 + + - name: Build installer + run: | + cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTING=On -DWINDOWS=On -DBUILD_FROM_CACHE=On -S. -DVCPKG_CRT_LINKAGE=dynamic -DBUILD_SHARED_LIBS=OFF -DWITH_BUILD_AGENT_INSTALLER=On -Bbuild_windows + cmake --build build_windows --config Release + + - name: Sign installer + if: | + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + uses: azure/trusted-signing-action@v0.4.0 + with: + azure-tenant-id: ${{ secrets.AZURE_TENANT_ID }} + azure-client-id: ${{ secrets.AZURE_CLIENT_ID }} + azure-client-secret: ${{ secrets.AZURE_CLIENT_SECRET }} + endpoint: https://weu.codesigning.azure.net/ + trusted-signing-account-name: Centreon-signature-RD + certificate-profile-name: Cert-Signature-RD + files-folder: agent\installer + files-folder-filter: centreon-monitoring-agent.exe + file-digest: SHA256 + timestamp-rfc3161: http://timestamp.acs.microsoft.com + timestamp-digest: SHA256 + - name: Installer test run: .github/scripts/agent_installer_test.ps1 shell: powershell - name: Upload package artifacts if: | - inputs.installer_in_artifact == true - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + inputs.installer_in_artifact == true || + (github.event_name != 'workflow_dispatch' && + contains(fromJson('["stable"]'), needs.get-environment.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled')) + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: packages-centreon-monitoring-agent-windows - path: agent\installer\centreon-monitoring-agent.exe + path: | + agent\installer\centreon-monitoring-agent.exe + build_windows\agent\Release\centagent.exe - name: Deliver if: | - contains(fromJson('["unstable", "testing"]'), needs.get-version.outputs.stability) && + contains(fromJson('["unstable", "testing"]'), needs.get-environment.outputs.stability) && ! cancelled() && ! contains(needs.*.result, 'failure') && - ! contains(needs.*.result, 'cancelled') + ! contains(needs.*.result, 'cancelled') && + github.repository == 'centreon/centreon-collect' run: | - Write-Host "[DEBUG] deliver to testing - Major version: ${{ needs.get-version.outputs.major_version }}" - Write-Host "[DEBUG] deliver to testing - Minor version: ${{ needs.get-version.outputs.minor_version }}" + Write-Host "[DEBUG] deliver to testing - Major version: ${{ needs.get-environment.outputs.major_version }}" + Write-Host "[DEBUG] deliver to testing - Minor version: ${{ needs.get-environment.outputs.minor_version }}" - $VERSION = "${{ needs.get-version.outputs.version }}" + $VERSION = "${{ needs.get-environment.outputs.major_version }}.${{ needs.get-environment.outputs.minor_version }}" $MODULE_NAME = "monitoring-agent-$VERSION" - $STABILITY = "${{ needs.get-version.outputs.stability }}" + $STABILITY = "${{ needs.get-environment.outputs.stability }}" - $TARGET_PATH = "installers/monitoring-agent/${{ needs.get-version.outputs.major_version }}/$STABILITY/$MODULE_NAME/" + $TARGET_PATH = "installers/monitoring-agent/${{ needs.get-environment.outputs.major_version }}/$STABILITY/$MODULE_NAME/" $VERSION_EXE = "centreon-monitoring-agent-${VERSION}.exe" @@ -110,17 +191,17 @@ jobs: - name: Promote testing to stable if: | - needs.get-version.outputs.stability == 'stable' && github.event_name != 'workflow_dispatch' && ! cancelled() + needs.get-environment.outputs.stability == 'stable' && github.event_name != 'workflow_dispatch' && ! 
cancelled() && github.repository == 'centreon/centreon-collect' run: | - Write-Host "[DEBUG] promote to stable - Major version: ${{ needs.get-version.outputs.major_version }}" - Write-Host "[DEBUG] promote to stable - Minor version: ${{ needs.get-version.outputs.minor_version }}" + Write-Host "[DEBUG] promote to stable - Major version: ${{ needs.get-environment.outputs.major_version }}" + Write-Host "[DEBUG] promote to stable - Minor version: ${{ needs.get-environment.outputs.minor_version }}" - $VERSION= "${{ needs.get-version.outputs.version }}" - $MODULE_NAME= "monitoring-agent-${{ needs.get-version.outputs.version }}" - $STABILITY= "${{ needs.get-version.outputs.stability }}" + $VERSION= "${{ needs.get-environment.outputs.major_version }}.${{ needs.get-environment.outputs.minor_version }}" + $MODULE_NAME= "monitoring-agent-${{ needs.get-environment.outputs.major_version }}.${{ needs.get-environment.outputs.minor_version }}" + $STABILITY= "${{ needs.get-environment.outputs.stability }}" - $SRC_PATH = "installers/monitoring-agent/${{ needs.get-version.outputs.major_version }}/testing/$MODULE_NAME/" - $TARGET_PATH = "installers/monitoring-agent/${{ needs.get-version.outputs.major_version }}/$STABILITY/$MODULE_NAME/" + $SRC_PATH = "installers/monitoring-agent/${{ needs.get-environment.outputs.major_version }}/testing/$MODULE_NAME/" + $TARGET_PATH = "installers/monitoring-agent/${{ needs.get-environment.outputs.major_version }}/$STABILITY/$MODULE_NAME/" $VERSION_EXE = "centreon-monitoring-agent-${VERSION}.exe" diff --git a/.gitignore b/.gitignore index 4ce637ed29b..1864d994e7f 100644 --- a/.gitignore +++ b/.gitignore @@ -147,3 +147,5 @@ tests/resources/*_pb2.py tests/resources/*_pb2_grpc.py tests/resources/grpc_stream.proto tests/resources/opentelemetry + +compile_commands.json diff --git a/.version b/.version index 36f30465402..c5ecd6a5c6b 100644 --- a/.version +++ b/.version @@ -1,2 +1,2 @@ -MAJOR=24.11 +MAJOR=25.01 MINOR=0 diff --git a/gorgone/.version b/.version.centreon-common similarity index 100% rename from gorgone/.version rename to .version.centreon-common diff --git a/.version.centreon-gorgone b/.version.centreon-gorgone new file mode 100644 index 00000000000..de743bd193d --- /dev/null +++ b/.version.centreon-gorgone @@ -0,0 +1 @@ +MINOR=0 \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index e8433be844c..8559b31d75c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -117,8 +117,8 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm") endif() # Version. -set(COLLECT_MAJOR 24) -set(COLLECT_MINOR 11) +set(COLLECT_MAJOR 25) +set(COLLECT_MINOR 01) set(COLLECT_PATCH 0) set(COLLECT_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}") diff --git a/CMakeListsWindows.txt b/CMakeListsWindows.txt index f3d9d8de57a..c693c450963 100644 --- a/CMakeListsWindows.txt +++ b/CMakeListsWindows.txt @@ -59,6 +59,9 @@ set(VCPKG_INCLUDE_DIR ${OPENSSL_INCLUDE_DIR}) include(GNUInstallDirs) option(WITH_TESTING "Build unit tests." OFF) +option(WITH_BUILD_AGENT_INSTALLER "Build agent windows installer." OFF) +option(WITH_BUILD_AGENT_MODIFIER "Build agent windows config update program (not needed if WITH_BUILD_INSTALLER=ON)." 
OFF) + set(protobuf_MODULE_COMPATIBLE True) diff --git a/agent/CMakeLists.txt b/agent/CMakeLists.txt index d2af2dfaae8..385a2addd1a 100644 --- a/agent/CMakeLists.txt +++ b/agent/CMakeLists.txt @@ -107,12 +107,15 @@ set(NATIVE_INC "${PROJECT_SOURCE_DIR}/${NATIVE_DIR}/inc/com/centreon/agent") set(NATIVE_SRC "${PROJECT_SOURCE_DIR}/${NATIVE_DIR}/src") set( SRC_COMMON + ${NATIVE_SRC}/agent_info.cc ${NATIVE_SRC}/check_cpu.cc ${SRC_DIR}/agent.grpc.pb.cc ${SRC_DIR}/agent.pb.cc ${SRC_DIR}/bireactor.cc ${SRC_DIR}/check.cc ${SRC_DIR}/check_exec.cc + ${SRC_DIR}/drive_size.cc + ${SRC_DIR}/check_health.cc ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.cc ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.pb.cc ${SRC_DIR}/opentelemetry/proto/metrics/v1/metrics.pb.cc @@ -125,6 +128,11 @@ set( SRC_COMMON set( SRC_WINDOWS ${SRC_DIR}/config_win.cc + ${NATIVE_SRC}/check_uptime.cc + ${NATIVE_SRC}/check_drive_size.cc + ${NATIVE_SRC}/check_memory.cc + ${NATIVE_SRC}/check_service.cc + ${NATIVE_SRC}/ntdll.cc ) set( SRC_LINUX @@ -189,9 +197,12 @@ else() gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts absl::any absl::log absl::base absl::bits Boost::program_options - fmt::fmt) + fmt::fmt + pdh) - add_subdirectory(installer) + if(WITH_BUILD_AGENT_INSTALLER OR WITH_BUILD_AGENT_MODIFIER) + add_subdirectory(installer) + endif() endif() diff --git a/agent/doc/agent-doc.md b/agent/doc/agent-doc.md index 7f279210860..062b962f3f5 100644 --- a/agent/doc/agent-doc.md +++ b/agent/doc/agent-doc.md @@ -22,8 +22,10 @@ We don't care about the duration of tests, we work with time points. In the previous example, the second check for the first service will be scheduled at 12:00:10 even if all the other checks have not yet started. If check durations are too long, we might exceed the maximum number of concurrent checks. In that case, waiting checks are executed as soon as a running one ends. -This means that the second check may start later than the scheduled time point (12:00:10) if the other first checks are too long. The order of checks is always respected even in case of a bottleneck. -For example, a check lambda has a start_expected to 12:00, because of bottleneck, it starts at 12:15. Next start_expected of check lambda will then be 12:15 + check_period. + +This means that the second check may start later than the scheduled time point (12:00:10) if the first checks take too long. + +When a check completes, it is reinserted into _waiting_check_queue (a set ordered by expected_start); its next expected start is its previous expected start plus check_period, and it actually runs as soon as a concurrency slot is available (a small sketch of this drift-free scheduling follows).
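As an illustration only (not the agent's actual code), here is a minimal sketch of how a drift-free next start can be derived from a fixed start point and a period, in the spirit of the time_step helper added to check.hh; the next_expected_start function below is hypothetical:

```c++
#include <chrono>

using time_point = std::chrono::system_clock::time_point;
using duration = std::chrono::system_clock::duration;

// Hypothetical helper: the next expected start is always
// start_point + k * period, never "now + period", so delays do not accumulate.
time_point next_expected_start(time_point start_point, duration period) {
  time_point now = std::chrono::system_clock::now();
  if (now <= start_point)
    return start_point;
  // first multiple of period (counted from start_point) that is >= now,
  // same rounding trick as time_step::increment_to_after_now()
  auto k = (now - start_point + period - std::chrono::microseconds(1)) / period;
  return start_point + k * period;
}
```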
The new class can get running index with check::_get_running_check_index() + An example, checks starts a first measure, the timeout expires, a second measure starts, the first measure ends,we don't take into account his result and we wait for the end off second one. * status: plugins status equivalent. Values are 0:Ok, 1: warning, 2: critical, 3: unknown (https://nagios-plugins.org/doc/guidelines.html#AEN41) * perfdata: a list of com::centreon::common::perfdata objects * outputs: equivalent of plugins output as "CPU 54% OK" -BEWARE, in some cases, we can have recursion, check::on_completion can call start_check - A little example: ```c++ class dummy_check : public check { @@ -48,7 +49,9 @@ class dummy_check : public check { public: void start_check(const duration& timeout) override { - check::start_check(timeout); + if (!check::start_check(timeout)) { + return; + } _command_timer.expires_from_now(_command_duration); _command_timer.async_wait([me = shared_from_this(), this, running_index = _get_running_check_index()]( @@ -71,6 +74,7 @@ class dummy_check : public check { : check(g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), serv, command_name, command_line, @@ -79,4 +83,48 @@ class dummy_check : public check { _command_duration(command_duration), _command_timer(*g_io_context) {} }; -``` \ No newline at end of file +``` + +### native_check_cpu (linux version) +It uses /proc/stat to measure cpu statistics. When start_check is called, a first snapshot of /proc/stat is done. Then a timer is started and will expires at max time_out or check_interval minus 1 second. When this timer expires, we do a second snapshot and create plugin output and perfdata from this difference. +The arguments accepted by this check (in json format) are: +* cpu-detailed: + * if false, produces only average cpu usage perfdata per processor and one for the average + * if true, produces per processor and average one perfdata for user, nice, system, idle, iowait, irq, soft_irq, steal, guest, guest_nice and total used counters + +Output is inspired from centreon local cpu and cpu-detailed plugins +Examples of output: +* OK: CPU(s) average usage is 24.08% +* CRITICAL: CPU'0' Usage: 24.66%, User 17.58%, Nice 0.00%, System 5.77%, Idle 75.34%, IOWait 0.39%, Interrupt 0.00%, Soft Irq 0.91%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% WARNING: CPU'2' Usage: 24.18%, User 17.69%, Nice 0.00%, System 5.99%, Idle 75.82%, IOWait 0.38%, Interrupt 0.00%, Soft Irq 0.12%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% CRITICAL: CPU(s) average Usage: 24.08%, User 17.65%, Nice 0.00%, System 5.80%, Idle 75.92%, IOWait 0.36%, Interrupt 0.00%, Soft Irq 0.27%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% + +Example of perfdatas in not cpu-detailed mode: +* cpu.utilization.percentage +* 0#core.cpu.utilization.percentage +* 1#core.cpu.utilization.percentage + +Example of perfdatas in cpu-detailed mode: +* 0~user#core.cpu.utilization.percentage +* 0~system#core.cpu.utilization.percentage +* 1~interrupt#core.cpu.utilization.percentage +* iowait#cpu.utilization.percentage +* used#cpu.utilization.percentage + +### native_check_cpu (windows version) +metrics aren't the same as linux version. We collect user, idle, kernel , interrupt and dpc times. + +There are two methods, you can use internal microsoft function NtQuerySystemInformation. Yes Microsoft says that they can change signature or data format at any moment, but it's quite stable for many years. 
+### native_check_cpu (windows version) +The metrics are not the same as in the Linux version. We collect user, idle, kernel, interrupt and DPC times. + +There are two methods. The first uses the internal Microsoft function NtQuerySystemInformation; Microsoft warns that its signature or data format may change at any moment, but it has been stable for many years. One subtlety: idle time is included in kernel time, so we subtract the former from the latter; DPC time is already included in interrupt time, so we do not add it when computing the total time. +The second method relies on performance data counters (the PDH API); it gives us percentages directly, even though their sum is not exactly 100%. That is why the first method is the default one. +The choice between the two methods is made with the 'use-nt-query-system-information' boolean parameter. + +### check_drive_size +We have to get the free space of server drives. For network drives, this call can block if the network fails and, unfortunately, there is no asynchronous API to do that. So a dedicated thread (drive_size_thread) computes these statistics. In order to be OS independent and testable, drive_size_thread relies on a functor that does the job: drive_size_thread::os_fs_stats. This functor is initialized in the main function; the drive_size_thread is stopped at the end of the main function. + +So it works like this: +* check_drive_size posts a query in the drive_size_thread queue +* drive_size_thread calls os_fs_stats +* drive_size_thread posts the result to the io_context +* the io_context calls check_drive_size::_completion_handler + +### check_health +This little check sends the agent's statistics to the poller. In order to do that, each check shares a common checks_statistics object. +This object is created by the scheduler each time the agent receives a configuration from the poller. It contains the last check interval and last check duration of each command. The first time check_health is executed, it can return an unknown state if no other check has completed yet. \ No newline at end of file diff --git a/agent/inc/com/centreon/agent/agent_info.hh b/agent/inc/com/centreon/agent/agent_info.hh new file mode 100644 index 00000000000..33ecb345146 --- /dev/null +++ b/agent/inc/com/centreon/agent/agent_info.hh @@ -0,0 +1,31 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_AGENT_INFO_HH +#define CENTREON_AGENT_AGENT_INFO_HH + +#include "agent.pb.h" + +namespace com::centreon::agent { + +void read_os_version(); + +void fill_agent_info(const std::string& supervised_host, + ::com::centreon::agent::AgentInfo* agent_info); +} // namespace com::centreon::agent +#endif \ No newline at end of file diff --git a/agent/inc/com/centreon/agent/bireactor.hh b/agent/inc/com/centreon/agent/bireactor.hh index 16af5594c81..1cb4347a286 100644 --- a/agent/inc/com/centreon/agent/bireactor.hh +++ b/agent/inc/com/centreon/agent/bireactor.hh @@ -28,7 +28,12 @@ class bireactor : public bireactor_class, public std::enable_shared_from_this> { private: - static std::set> _instances; + /** + * @brief we store reactor instances in this container until OnDone is called + * by the grpc layers. We allocate this container and never free it because + * threads terminate in an unknown order.
+ */ + static std::set>* _instances; static std::mutex _instances_m; bool _write_pending; diff --git a/agent/inc/com/centreon/agent/check.hh b/agent/inc/com/centreon/agent/check.hh index c2808293e0e..e4c1511b7ec 100644 --- a/agent/inc/com/centreon/agent/check.hh +++ b/agent/inc/com/centreon/agent/check.hh @@ -30,6 +30,108 @@ using engine_to_agent_request_ptr = using time_point = std::chrono::system_clock::time_point; using duration = std::chrono::system_clock::duration; +class checks_statistics { + struct check_stat { + std::string cmd_name; + duration last_check_interval; + duration last_check_duration; + }; + + using statistic_container = multi_index::multi_index_container< + check_stat, + multi_index::indexed_by< + multi_index::hashed_unique< + BOOST_MULTI_INDEX_MEMBER(check_stat, std::string, cmd_name)>, + boost::multi_index::ordered_non_unique, + boost::multi_index::ordered_non_unique>>; + + statistic_container _stats; + + public: + using pointer = std::shared_ptr; + + void add_interval_stat(const std::string& cmd_name, + const duration& check_interval); + + void add_duration_stat(const std::string& cmd_name, + const duration& check_interval); + + const auto& get_ordered_by_interval() const { return _stats.get<1>(); } + const auto& get_ordered_by_duration() const { return _stats.get<2>(); } + + size_t size() const { return _stats.size(); } +}; + +/** + * @brief nagios status values + * + */ +enum e_status : unsigned { ok = 0, warning = 1, critical = 2, unknown = 3 }; + +/** + * @brief in order to have a non derive scheduling, we use this class to iterate + * time to time in case of we want to schedule an event every 30s for example + * + */ +class time_step { + time_point _start_point; + duration _step; + uint64_t _step_index = 0; + + public: + /** + * @brief Construct a new time step object + * + * @param start_point this time_point is the first time_point of the sequence + * @param step value() will return start_point + step * step_index + */ + time_step(time_point start_point, duration step) + : _start_point(start_point), _step(step) {} + + time_step() : _start_point(), _step() {} + + /** + * @brief increment time of one duration (one step) + * + * @return time_step& + */ + time_step& operator++() { + ++_step_index; + return *this; + } + + /** + * @brief set _step_index to the first step after or equal to now + * + */ + void increment_to_after_now() { + time_point now = std::chrono::system_clock::now(); + _step_index = + (now - _start_point + _step - std::chrono::microseconds(1)) / _step; + } + + /** + * @brief set _step_index to the first step after or equal to min_tp + * + */ + void increment_to_after_min(time_point min_tp) { + _step_index = + (min_tp - _start_point + _step - std::chrono::microseconds(1)) / _step; + } + + time_point value() const { return _start_point + _step_index * _step; } + + uint64_t get_step_index() const { return _step_index; } + + duration get_step() const { return _step; } +}; + /** * @brief base class for check * start_expected is set by scheduler and increased by check_period on each @@ -46,8 +148,9 @@ class check : public std::enable_shared_from_this { private: //_start_expected is set on construction on config receive - // it's updated on check_start and added of check_period on check completion - time_point _start_expected; + // it's updated on check_start and added of multiple of check_interval + // (check_period / nb_check) on check completion + time_step _start_expected; const std::string& _service; const std::string& _command_name; const 
std::string& _command_line; @@ -67,6 +170,10 @@ class check : public std::enable_shared_from_this { unsigned _running_check_index = 0; completion_handler _completion_handler; + // statistics used by check_health + time_point _last_start; + checks_statistics::pointer _stat; + protected: std::shared_ptr _io_context; std::shared_ptr _logger; @@ -79,30 +186,44 @@ class check : public std::enable_shared_from_this { virtual void _timeout_timer_handler(const boost::system::error_code& err, unsigned start_check_index); + bool _start_check(const duration& timeout); + + virtual void _on_timeout(){}; + public: using pointer = std::shared_ptr; + static const std::array status_label; + check(const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point exp, + time_point first_start_expected, + duration check_interval, const std::string& serv, const std::string& command_name, const std::string& cmd_line, const engine_to_agent_request_ptr& cnf, - completion_handler&& handler); + completion_handler&& handler, + const checks_statistics::pointer& stat); virtual ~check() = default; struct pointer_start_compare { bool operator()(const check::pointer& left, const check::pointer& right) const { - return left->_start_expected < right->_start_expected; + return left->_start_expected.value() < right->_start_expected.value(); } }; - void add_duration_to_start_expected(const duration& to_add); + void increment_start_expected_to_after_min_timepoint(time_point min_tp) { + _start_expected.increment_to_after_min(min_tp); + } + + void add_check_interval_to_start_expected() { ++_start_expected; } + + time_point get_start_expected() const { return _start_expected.value(); } - time_point get_start_expected() const { return _start_expected; } + const time_step & get_raw_start_expected() const { return _start_expected; } const std::string& get_service() const { return _service; } @@ -117,7 +238,18 @@ class check : public std::enable_shared_from_this { const std::list& perfdata, const std::list& outputs); - virtual void start_check(const duration& timeout); + virtual void start_check(const duration& timeout) = 0; + + static std::optional get_double(const std::string& cmd_name, + const char* field_name, + const rapidjson::Value& val, + bool must_be_positive); + + static std::optional get_bool(const std::string& cmd_name, + const char* field_name, + const rapidjson::Value& val); + + const checks_statistics& get_stats() const { return *_stat; } }; } // namespace com::centreon::agent diff --git a/agent/inc/com/centreon/agent/check_exec.hh b/agent/inc/com/centreon/agent/check_exec.hh index c458194bb18..37b932c1d6f 100644 --- a/agent/inc/com/centreon/agent/check_exec.hh +++ b/agent/inc/com/centreon/agent/check_exec.hh @@ -84,33 +84,38 @@ class check_exec : public check { protected: using check::completion_handler; - void _timeout_timer_handler(const boost::system::error_code& err, - unsigned start_check_index) override; + void _on_timeout() override; void _init(); public: check_exec(const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point exp, + time_point first_start_expected, + duration check_interval, const std::string& serv, const std::string& cmd_name, const std::string& cmd_line, const engine_to_agent_request_ptr& cnf, - check::completion_handler&& handler); + check::completion_handler&& handler, + const checks_statistics::pointer& stat); static std::shared_ptr load( const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point exp, + time_point first_start_expected, + 
duration check_interval, const std::string& serv, const std::string& cmd_name, const std::string& cmd_line, const engine_to_agent_request_ptr& cnf, - check::completion_handler&& handler); + check::completion_handler&& handler, + const checks_statistics::pointer& stat); void start_check(const duration& timeout) override; + int get_pid() const; + void on_completion(unsigned running_index); }; diff --git a/agent/inc/com/centreon/agent/check_health.hh b/agent/inc/com/centreon/agent/check_health.hh new file mode 100644 index 00000000000..d62dafd3392 --- /dev/null +++ b/agent/inc/com/centreon/agent/check_health.hh @@ -0,0 +1,63 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_HEALTH_CHECK_HH +#define CENTREON_AGENT_HEALTH_CHECK_HH + +#include "check.hh" + +namespace com::centreon::agent { + +class check_health : public check { + unsigned _warning_check_interval; + unsigned _critical_check_interval; + unsigned _warning_check_duration; + unsigned _critical_check_duration; + + std::string _info_output; + + // we use this timer to delay measure in order to have some checks yet done + // when we will compute the first statistics + asio::system_timer _measure_timer; + + void _measure_timer_handler(const boost::system::error_code& err, + unsigned start_check_index); + + public: + check_health(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat); + + static void help(std::ostream& help_stream); + + void start_check(const duration& timeout) override; + + e_status compute(std::string* output, std::list* perfs); +}; + +} // namespace com::centreon::agent + +#endif // CENTREON_AGENT_HEALTH_CHECK_HH diff --git a/agent/inc/com/centreon/agent/config.hh b/agent/inc/com/centreon/agent/config.hh index 0cd7b9d4821..0a7669ccfb1 100644 --- a/agent/inc/com/centreon/agent/config.hh +++ b/agent/inc/com/centreon/agent/config.hh @@ -18,6 +18,7 @@ #ifndef CENTREON_AGENT_CONFIG_HH #define CENTREON_AGENT_CONFIG_HH +#include #include "com/centreon/common/grpc/grpc_config.hh" namespace com::centreon::agent { @@ -43,10 +44,38 @@ class config { std::string _ca_name; std::string _host; bool _reverse_connection; + unsigned _second_max_reconnect_backoff; + + static std::unique_ptr _global_conf; public: + static const config& load(const std::string& path) { + _global_conf = std::make_unique(path); + return *_global_conf; + } + + /** + * @brief used only for UT + * + * @param reverse_connection + * @return const config& + */ + static const config& load(bool reverse_connection) { + _global_conf = std::make_unique(reverse_connection); + return *_global_conf; + } + + static const config& 
instance() { return *_global_conf; } + config(const std::string& path); + /** + * @brief used only for UT + * + * @param reverse_connection + */ + config(bool reverse_connection) : _reverse_connection(reverse_connection) {} + const std::string& get_endpoint() const { return _endpoint; } spdlog::level::level_enum get_log_level() const { return _log_level; }; log_type get_log_type() const { return _log_type; } @@ -63,6 +92,9 @@ class config { const std::string& get_ca_name() const { return _ca_name; } const std::string& get_host() const { return _host; } bool use_reverse_connection() const { return _reverse_connection; } + unsigned get_second_max_reconnect_backoff() const { + return _second_max_reconnect_backoff; + } }; }; // namespace com::centreon::agent diff --git a/agent/inc/com/centreon/agent/drive_size.hh b/agent/inc/com/centreon/agent/drive_size.hh new file mode 100644 index 00000000000..94c4d3d4609 --- /dev/null +++ b/agent/inc/com/centreon/agent/drive_size.hh @@ -0,0 +1,277 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_NATIVE_DRIVE_SIZE_BASE_HH +#define CENTREON_AGENT_NATIVE_DRIVE_SIZE_BASE_HH + +#include "check.hh" +#include "re2/re2.h" + +namespace com::centreon::agent { +namespace check_drive_size_detail { + +/** + * @brief these flags are passed in check parameter:filter-storage-type and + * filter-type + * + */ +enum e_drive_fs_type : uint64_t { + hr_unknown = 0, + hr_storage_ram = 1 << 0, + hr_storage_virtual_memory = 1 << 1, + hr_storage_fixed_disk = 1 << 2, + hr_storage_removable_disk = 1 << 3, + hr_storage_floppy_disk = 1 << 4, + hr_storage_compact_disc = 1 << 5, + hr_storage_ram_disk = 1 << 6, + hr_storage_flash_memory = 1 << 7, + hr_storage_network_disk = 1 << 8, + hr_fs_other = 1 << 9, + hr_fs_unknown = 1 << 10, + hr_fs_berkeley_ffs = 1 << 11, + hr_fs_sys5_fs = 1 << 12, + hr_fs_fat = 1 << 13, + hr_fs_hpfs = 1 << 14, + hr_fs_hfs = 1 << 15, + hr_fs_mfs = 1 << 16, + hr_fs_ntfs = 1 << 17, + hr_fs_vnode = 1 << 18, + hr_fs_journaled = 1 << 19, + hr_fs_iso9660 = 1 << 20, + hr_fs_rock_ridge = 1 << 21, + hr_fs_nfs = 1 << 22, + hr_fs_netware = 1 << 23, + hr_fs_afs = 1 << 24, + hr_fs_dfs = 1 << 25, + hr_fs_appleshare = 1 << 26, + hr_fs_rfs = 1 << 27, + hr_fs_dgcfs = 1 << 28, + hr_fs_bfs = 1 << 29, + hr_fs_fat32 = 1 << 30, + hr_fs_linux_ext2 = 1U << 31, + hr_fs_linux_ext4 = 1ULL << 32, + hr_fs_exfat = 1ULL << 33 +}; + +/** + * @brief user can check only some fs by using filters + * This is the goal of this class + * In order to improve perf, results of previous tests are saved + * in cache sets. 
That's why is_allowed is not const + * + */ +class filter { + using string_set = absl::flat_hash_set; + + string_set _cache_allowed_fs ABSL_GUARDED_BY(_protect); + string_set _cache_excluded_fs ABSL_GUARDED_BY(_protect); + string_set _cache_allowed_mountpoint ABSL_GUARDED_BY(_protect); + string_set _cache_excluded_mountpoint ABSL_GUARDED_BY(_protect); + + mutable absl::Mutex _protect; + + unsigned _fs_type_filter; + + std::unique_ptr _filter_fs, _filter_exclude_fs; + std::unique_ptr _filter_mountpoint, _filter_exclude_mountpoint; + + public: + filter(const rapidjson::Value& args); + + bool is_allowed(const std::string_view& fs, + const std::string_view& mount_point, + e_drive_fs_type fs_type); + + bool is_fs_yet_allowed(const std::string_view& fs) const; + + bool is_fs_yet_excluded(const std::string_view& fs) const; +}; + +/** + * @brief tupple where we store statistics of a fs + * + */ +struct fs_stat { + fs_stat() = default; + fs_stat(std::string&& fs_in, uint64_t used_in, uint64_t total_in) + : fs(fs_in), mount_point(fs), used(used_in), total(total_in) {} + + fs_stat(std::string&& fs_in, + std::string&& mount_point_in, + uint64_t used_in, + uint64_t total_in) + : fs(fs_in), + mount_point(mount_point_in), + used(used_in), + total(total_in) {} + + fs_stat(const std::string_view& fs_in, + const std::string_view& mount_point_in, + uint64_t used_in, + uint64_t total_in) + : fs(fs_in), + mount_point(mount_point_in), + used(used_in), + total(total_in) {} + + std::string fs; + std::string mount_point; + uint64_t used; + uint64_t total; + + bool is_used_more_than_threshold(uint64_t threshold) const { + return used >= threshold; + } + + bool is_free_less_than_threshold(uint64_t threshold) const { + return total - used < threshold; + } + + bool is_used_more_than_prct_threshold(uint64_t percent_hundredth) const { + if (!total) { + return true; + } + return (used * 10000) / total >= percent_hundredth; + } + + bool is_free_less_than_prct_threshold(uint64_t percent_hundredth) const { + if (!total) { + return true; + } + return ((total - used) * 10000) / total < percent_hundredth; + } + + double get_used_prct() const { + if (!total) + return 0.0; + return static_cast(used * 100) / total; + } + + double get_free_prct() const { + if (!total) + return 0.0; + return static_cast((total - used) * 100) / total; + } +}; + +/** + * @brief get fs statistics can block on network drives, so we use this thread + * to do the job and not block main thread + * + */ +class drive_size_thread + : public std::enable_shared_from_this { + std::shared_ptr _io_context; + + using completion_handler = std::function)>; + + struct async_data { + std::shared_ptr request_filter; + completion_handler handler; + time_point timeout; + }; + + std::list _queue ABSL_GUARDED_BY(_queue_m); + absl::Mutex _queue_m; + + bool _active = true; + + std::shared_ptr _logger; + + bool has_to_stop_wait() const { return !_active || !_queue.empty(); } + + public: + typedef std::list ( + *get_fs_stats)(filter&, const std::shared_ptr& logger); + + static get_fs_stats os_fs_stats; + + drive_size_thread(const std::shared_ptr& io_context, + const std::shared_ptr& logger) + : _io_context(io_context), _logger(logger) {} + + void run(); + + void kill(); + + template + void async_get_fs_stats(const std::shared_ptr& request_filter, + const time_point& timeout, + handler_type&& handler); +}; + +} // namespace check_drive_size_detail + +/** + * @brief drive size check object (same for linux and windows) + * + */ +class check_drive_size : public check { + 
std::shared_ptr _filter; + bool _prct_threshold; + bool _free_threshold; + uint64_t _warning; // value in bytes or percent * 100 + uint64_t _critical; + + typedef e_status (check_drive_size::*fs_stat_test)( + const check_drive_size_detail::fs_stat&) const; + + fs_stat_test _fs_test; + + e_status _used_test(const check_drive_size_detail::fs_stat& fs) const; + e_status _prct_used_test(const check_drive_size_detail::fs_stat& fs) const; + + e_status _free_test(const check_drive_size_detail::fs_stat& fs) const; + e_status _prct_free_test(const check_drive_size_detail::fs_stat& fs) const; + + e_status _no_test(const check_drive_size_detail::fs_stat& fs) const; + + void _completion_handler( + unsigned start_check_index, + const std::list& result); + + public: + check_drive_size(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat); + + virtual ~check_drive_size() = default; + + std::shared_ptr shared_from_this() { + return std::static_pointer_cast( + check::shared_from_this()); + } + + static void help(std::ostream& help_stream); + + void start_check(const duration& timeout) override; + + static void thread_kill(); +}; + +} // namespace com::centreon::agent + +#endif // CENTREON_AGENT_NATIVE_DRIVE_SIZE_HH diff --git a/agent/inc/com/centreon/agent/native_check_base.hh b/agent/inc/com/centreon/agent/native_check_base.hh new file mode 100644 index 00000000000..158cad781d1 --- /dev/null +++ b/agent/inc/com/centreon/agent/native_check_base.hh @@ -0,0 +1,161 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_NATIVE_CHECK_MEMORY_BASE_HH +#define CENTREON_AGENT_NATIVE_CHECK_MEMORY_BASE_HH + +#include "check.hh" + +namespace com::centreon::agent { + +namespace native_check_detail { + +/** + * @brief we store the result of a measure in this struct + * + * @tparam nb_metric + */ +template +class snapshot { + protected: + std::array _metrics; + + public: + virtual ~snapshot() = default; + + uint64_t get_metric(unsigned data_index) const { + return _metrics[data_index]; + } + + double get_proportional_value(unsigned data_index, + unsigned total_data_index) const { + const uint64_t& total = _metrics[total_data_index]; + if (!total) { + return 0.0; + } + return (static_cast(_metrics[data_index]) / total); + } + + virtual void dump_to_output(std::string* output) const = 0; +}; + +/** + * @brief this class compare a measure with threshold and returns a plugins + * status + * + * @tparam nb_metric + */ +template +class measure_to_status { + e_status _status; + unsigned _data_index; + double _threshold; + unsigned _total_data_index; + bool _percent; + bool _free_threshold; + + public: + measure_to_status(e_status status, + unsigned data_index, + double threshold, + unsigned total_data_index, + bool _percent, + bool free_threshold); + + virtual ~measure_to_status() = default; + + unsigned get_data_index() const { return _data_index; } + unsigned get_total_data_index() const { return _total_data_index; } + e_status get_status() const { return _status; } + double get_threshold() const { return _threshold; } + + virtual void compute_status(const snapshot& to_test, + e_status* status) const; +}; + +/** + * @brief this struct will be used to create metrics + * + */ +struct metric_definition { + std::string_view name; + unsigned data_index; + unsigned total_data_index; + bool percent; +}; + +} // namespace native_check_detail + +/** + * @brief native check base (to inherit) + * + * @tparam nb_metric + */ +template +class native_check_base : public check { + protected: + /** + * @brief key used to store measure_to_status + * @tparam 1 index (phys, virtual..) + * @tparam 2 total index (phys, virtual..) 
+ * @tparam 3 e_status warning or critical + * + */ + using mem_to_status_key = std::tuple; + + boost::container::flat_map< + mem_to_status_key, + std::unique_ptr>> + _measure_to_status; + + const char* _no_percent_unit = nullptr; + + public: + native_check_base(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat); + + std::shared_ptr> shared_from_this() { + return std::static_pointer_cast>( + check::shared_from_this()); + } + + void start_check(const duration& timeout) override; + + virtual std::shared_ptr> + measure() = 0; + + e_status compute(const native_check_detail::snapshot& data, + std::string* output, + std::list* perfs) const; + + virtual const std::vector& + get_metric_definitions() const = 0; +}; + +} // namespace com::centreon::agent + +#endif diff --git a/agent/inc/com/centreon/agent/native_check_cpu_base.hh b/agent/inc/com/centreon/agent/native_check_cpu_base.hh new file mode 100644 index 00000000000..0460cbf008c --- /dev/null +++ b/agent/inc/com/centreon/agent/native_check_cpu_base.hh @@ -0,0 +1,247 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_NATIVE_CHECK_CPU_BASE_HH +#define CENTREON_AGENT_NATIVE_CHECK_CPU_BASE_HH + +#include "check.hh" + +namespace com::centreon::agent { + +namespace check_cpu_detail { +// all data is indexed by processor number, this fake index points to cpus +// average +constexpr unsigned average_cpu_index = std::numeric_limits::max(); + +/** + * @brief this class contains all counter for one core + * + * @tparam nb_metric number of metrics given by the kernel + */ +template +class per_cpu_time_base { + protected: + std::array _metrics; + uint64_t _total_used = 0; + uint64_t _total = 0; + + public: + per_cpu_time_base(); + + double get_proportional_value(unsigned data_index) const { + if (!_total || data_index >= nb_metric) { + return 0.0; + } + return (static_cast(_metrics[data_index])) / _total; + } + + double get_proportional_used() const { + if (!_total) { + return 0.0; + } + return (static_cast(_total_used)) / _total; + } + + /** + * @brief Set the metric object + * + * @param index index of the metric like user or cpu + * @param value + */ + void set_metric(unsigned index, uint64_t value) { + if (index < nb_metric) { + _metrics[index] = value; + } + } + + /** + * @brief Set the metric object and add value to the total + * + * @param index index of the metric like user or cpu + * @param value + */ + void set_metric_total(unsigned index, uint64_t value) { + if (index < nb_metric) { + _metrics[index] = value; + _total += value; + } + } + + /** + * @brief Set the metric object and add value to the total and total_used + * + * @param index index of the metric like user or cpu + * @param value + */ + void set_metric_total_used(unsigned index, uint64_t value) { + if (index < nb_metric) { + _metrics[index] = value; + _total_used += value; + _total += value; + } + } + + void set_total(uint64_t total) { _total = total; } + + void set_total_used(uint64_t total_used) { _total_used = total_used; } + + uint64_t get_total() const { return _total; } + + void dump(const unsigned& cpu_index, + const std::string_view metric_label[], + std::string* output) const; + + void dump_values(std::string* output) const; + + void subtract(const per_cpu_time_base& to_subtract); + + void add(const per_cpu_time_base& to_add); +}; + +template +using index_to_cpu = + boost::container::flat_map>; + +/** + * @brief contains one per_cpu_time_base per core and a total one + * + * @tparam nb_metric number of metrics given by the kernel + */ +template +class cpu_time_snapshot { + protected: + index_to_cpu _data; + + public: + index_to_cpu subtract(const cpu_time_snapshot& to_subtract) const; + + const index_to_cpu& get_values() const { return _data; } + + void dump(std::string* output) const; +}; + +/** + * @brief this little class compare cpu usages values to threshold and set + * plugin status + * + */ +template +class cpu_to_status { + e_status _status; + unsigned _data_index; + bool _average; + double _threshold; + + public: + cpu_to_status(e_status status, + unsigned data_index, + bool average, + double threshold) + : _status(status), + _data_index(data_index), + _average(average), + _threshold(threshold) {} + + unsigned get_proc_stat_index() const { return _data_index; } + bool is_critical() const { return _status == e_status::critical; } + bool is_average() const { return _average; } + double get_threshold() const { return _threshold; } + e_status get_status() const { return _status; } + + void compute_status( + const index_to_cpu& to_test, + 
boost::container::flat_map* per_cpu_status) const; +}; + +} // namespace check_cpu_detail + +/** + * @brief native cpu check base class + * + * @tparam nb_metric + */ +template +class native_check_cpu : public check { + protected: + unsigned _nb_core; + + /** + * @brief key used to store cpu_to_status + * @tparam 1 index (user, system, iowait.... and idle for all except idle) + * @tparam 2 true if average, false if per core + * @tparam 3 e_status warning or critical + * + */ + using cpu_to_status_key = std::tuple; + + boost::container::flat_map> + _cpu_to_status; + + bool _cpu_detailed; + + asio::system_timer _measure_timer; + + void _measure_timer_handler( + const boost::system::error_code& err, + unsigned start_check_index, + std::unique_ptr>&& + first_measure); + + e_status _compute( + const check_cpu_detail::cpu_time_snapshot& first_measure, + const check_cpu_detail::cpu_time_snapshot& second_measure, + const std::string_view summary_labels[], + const std::string_view perfdata_labels[], + std::string* output, + std::list* perfs); + + public: + native_check_cpu(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat); + + virtual ~native_check_cpu() = default; + + std::shared_ptr> shared_from_this() { + return std::static_pointer_cast>( + check::shared_from_this()); + } + + virtual std::unique_ptr> + get_cpu_time_snapshot(bool first_measure) = 0; + + void start_check(const duration& timeout) override; + + virtual e_status compute( + const check_cpu_detail::cpu_time_snapshot& first_measure, + const check_cpu_detail::cpu_time_snapshot& second_measure, + std::string* output, + std::list* perfs) = 0; +}; +} // namespace com::centreon::agent + +#endif diff --git a/agent/inc/com/centreon/agent/scheduler.hh b/agent/inc/com/centreon/agent/scheduler.hh index bc96f39477b..623b31bb617 100644 --- a/agent/inc/com/centreon/agent/scheduler.hh +++ b/agent/inc/com/centreon/agent/scheduler.hh @@ -37,16 +37,19 @@ class scheduler : public std::enable_shared_from_this { const std::shared_ptr&, const std::shared_ptr& /*logger*/, time_point /* start expected*/, + duration /* check interval */, const std::string& /*service*/, const std::string& /*cmd_name*/, const std::string& /*cmd_line*/, const engine_to_agent_request_ptr& /*engine to agent request*/, - check::completion_handler&&)>; + check::completion_handler&&, + const checks_statistics::pointer& /*stat*/)>; private: - using check_queue = std::set; + using check_queue = + absl::btree_set; - check_queue _check_queue; + check_queue _waiting_check_queue; // running check counter that must not exceed max_concurrent_check unsigned _active_check = 0; bool _alive = true; @@ -72,6 +75,8 @@ class scheduler : public std::enable_shared_from_this { metric_sender _metric_sender; asio::system_timer _send_timer; asio::system_timer _check_timer; + time_step + _check_time_step; // time point used when too many checks are running check_builder _check_builder; // in order to send check_results at regular intervals, we work with absolute // time points that we increment @@ -154,12 +159,14 @@ class scheduler : public std::enable_shared_from_this { static std::shared_ptr default_check_builder( const std::shared_ptr& io_context, const std::shared_ptr& logger, - 
time_point start_expected, + time_point first_start_expected, + duration check_interval, const std::string& service, const std::string& cmd_name, const std::string& cmd_line, const engine_to_agent_request_ptr& conf, - check::completion_handler&& handler); + check::completion_handler&& handler, + const checks_statistics::pointer& stat); engine_to_agent_request_ptr get_last_message_to_agent() const { return _conf; @@ -182,10 +189,10 @@ scheduler::scheduler( const std::shared_ptr& config, sender&& met_sender, chck_builder&& builder) - : _metric_sender(met_sender), - _io_context(io_context), + : _io_context(io_context), _logger(logger), _supervised_host(supervised_host), + _metric_sender(met_sender), _send_timer(*io_context), _check_timer(*io_context), _check_builder(builder), diff --git a/agent/inc/com/centreon/agent/version.hh.in b/agent/inc/com/centreon/agent/version.hh.in index f4c2d2e0136..205199267dd 100644 --- a/agent/inc/com/centreon/agent/version.hh.in +++ b/agent/inc/com/centreon/agent/version.hh.in @@ -25,4 +25,6 @@ constexpr unsigned CENTREON_AGENT_VERSION_MAJOR = @COLLECT_MAJOR@; constexpr unsigned CENTREON_AGENT_VERSION_MINOR = @COLLECT_MINOR@.0; constexpr unsigned CENTREON_AGENT_VERSION_PATCH = @COLLECT_PATCH@.0; +#define CENTREON_AGENT_VERSION "@COLLECT_MAJOR@.@COLLECT_MINOR@.@COLLECT_PATCH@" + #endif // !CCE_VERSION_HH diff --git a/agent/installer/CMakeLists.txt b/agent/installer/CMakeLists.txt index 7f9cd769439..b22bac207c6 100644 --- a/agent/installer/CMakeLists.txt +++ b/agent/installer/CMakeLists.txt @@ -40,6 +40,7 @@ configure_file("${PROJECT_SOURCE_DIR}/version.nsi.in" "${PROJECT_SOURCE_DIR}/ver file(GLOB COMMON_INSTALLERS_FILES "${PROJECT_SOURCE_DIR}/version.nsi" "${PROJECT_SOURCE_DIR}/dlg_helper.nsi" "${PROJECT_SOURCE_DIR}/resources/*") + message(NOTICE "---------------- Generate installer in ${PROJECT_SOURCE_DIR} ---------------") # modify binary called from the application manager @@ -51,14 +52,19 @@ add_custom_command( WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" COMMAND ${MKNSIS} "${PROJECT_SOURCE_DIR}/centreon-monitoring-agent-modify.nsi") +add_custom_target("centreon-monitoring-agent-modifier" ALL DEPENDS "${PROJECT_SOURCE_DIR}/centreon-monitoring-agent-modify.exe") + +configure_file("${PROJECT_SOURCE_DIR}/compile_installer.ps1.in" "${PROJECT_SOURCE_DIR}/compile_installer.ps1") #final installer add_custom_command( - DEPENDS "${PROJECT_SOURCE_DIR}/centreon-monitoring-agent.nsi" "${PROJECT_SOURCE_DIR}/centreon-monitoring-agent-modify.exe" ${COMMON_INSTALLERS_FILES} + DEPENDS "${PROJECT_SOURCE_DIR}/centreon-monitoring-agent.nsi" "centreon-monitoring-agent-modifier" ${COMMON_INSTALLERS_FILES} "${PROJECT_SOURCE_DIR}/compile_installer.ps1" COMMENT "--------- Generating cma configuration installer --------" OUTPUT "${PROJECT_SOURCE_DIR}/centreon-monitoring-agent.exe" WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" - COMMAND ${MKNSIS} "${PROJECT_SOURCE_DIR}/centreon-monitoring-agent.nsi") + COMMAND pwsh.exe -File "${PROJECT_SOURCE_DIR}/compile_installer.ps1") -add_custom_target("centreon-monitoring-agent-installer" ALL DEPENDS "${PROJECT_SOURCE_DIR}/centreon-monitoring-agent.exe") +if (WITH_BUILD_AGENT_INSTALLER) + add_custom_target("centreon-monitoring-agent-installer" ALL DEPENDS "${PROJECT_SOURCE_DIR}/centreon-monitoring-agent.exe") +endif() diff --git a/agent/installer/centreon-monitoring-agent.nsi b/agent/installer/centreon-monitoring-agent.nsi index 163d57a1ee0..4ad01fbc9b4 100644 --- a/agent/installer/centreon-monitoring-agent.nsi +++ 
b/agent/installer/centreon-monitoring-agent.nsi @@ -83,11 +83,12 @@ VIAddVersionKey "ProductVersion" "${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD InstallDir "$PROGRAMFILES64\${COMPANYNAME}\${APPNAME}" !define PLUGINS_DIR "$PROGRAMFILES64\${COMPANYNAME}\Plugins" +!define PLUGINS_FULL_PATH "${PLUGINS_DIR}\centreon_plugins.exe" !define HELPURL "https://www.centreon.com/" Var plugins_url - +Var plugins_download_failure !macro verify_user_is_admin @@ -126,20 +127,17 @@ Function get_plugins_url ClearErrors inetc::get /header "Accept: application/vnd.github+json" ${NSCLIENT_URL} $json_content_path /End ${If} ${Errors} - MessageBox MB_OK|MB_ICONSTOP "Failed to get plugin information from ${NSCLIENT_URL}" - Abort + MessageBox MB_YESNO "Failed to get latest Centreon plugins from ${NSCLIENT_URL}.$\nDo you want to install local Centreon plugins (version ${PLUGINS_VERSION})?" /SD IDYES IDYES continue_with_embedded_plugins IDNO continue_without_plugins ${EndIf} Pop $0 ${If} $0 != "OK" - MessageBox MB_OK|MB_ICONSTOP "Failed to get plugin information from ${NSCLIENT_URL}: $0" - Abort + MessageBox MB_YESNO "Failed to get latest Centreon plugins from ${NSCLIENT_URL}.$\nDo you want to install local Centreon plugins (version ${PLUGINS_VERSION})?" /SD IDYES IDYES continue_with_embedded_plugins IDNO continue_without_plugins ${EndIf} #parse json response nsJSON::Set /file $json_content_path ${If} ${Errors} - MessageBox MB_OK|MB_ICONSTOP "bad json received from ${NSCLIENT_URL}" - Abort + MessageBox MB_YESNO "Bad json received from ${NSCLIENT_URL}.$\nDo you want to install local Centreon plugins (version ${PLUGINS_VERSION})?" /SD IDYES IDYES continue_with_embedded_plugins IDNO continue_without_plugins ${EndIf} nsJSON::Get /count `assets` /end @@ -158,8 +156,13 @@ Function get_plugins_url ${EndIf} ${Next} - MessageBox MB_OK|MB_ICONSTOP "No Plugins Asset found at ${NSCLIENT_URL}" - Abort + MessageBox MB_YESNO "No Plugins found at ${NSCLIENT_URL} $\nDo you want to install local Centreon plugins (version ${PLUGINS_VERSION})?" /SD IDYES IDYES continue_with_embedded_plugins IDNO continue_without_plugins + continue_without_plugins: + StrCpy $plugins_download_failure 2 + Return + continue_with_embedded_plugins: + StrCpy $plugins_download_failure 1 + Return FunctionEnd @@ -167,14 +170,56 @@ FunctionEnd * @brief this section download plugings from the asset of the last centreon-nsclient-build release */ Section "Plugins" PluginsInstSection - Call get_plugins_url CreateDirectory ${PLUGINS_DIR} - DetailPrint "download plugins from $plugins_url" - inetc::get /caption "plugins" /banner "Downloading plugins..." "$plugins_url" "${PLUGINS_DIR}/centreon_plugins.exe" - ${If} ${Silent} - System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console - System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout - FileWrite $0 "Centreon plugins installed$\n" + ${IfNot} ${Silent} + Call get_plugins_url + ${If} $plugins_download_failure == 1 + DetailPrint "Install centreon plugins version ${PLUGINS_VERSION}" + File /oname=${PLUGINS_FULL_PATH} "centreon_plugins.exe" + ${ElseIf} $plugins_download_failure == 2 + DetailPrint 'centreon plugins not installed' + ${Else} + DetailPrint "download plugins from $plugins_url" + ClearErrors + inetc::get /caption "plugins" /banner "Downloading plugins..." 
"$plugins_url" "${PLUGINS_DIR}/centreon_plugins.exe" + ${If} ${Errors} + MessageBox MB_YESNO "Failed to download latest Centreon plugins.$\nDo you want to install local Centreon plugins (version ${PLUGINS_VERSION})?" /SD IDYES IDYES ui_continue_with_embedded_plugins IDNO ui_continue_without_plugins + ui_continue_with_embedded_plugins: + File /oname=${PLUGINS_FULL_PATH} "centreon_plugins.exe" + DetailPrint "Local Centreon plugins (version ${PLUGINS_VERSION}) installed" + ui_continue_without_plugins: + DetailPrint 'Centreon plugins have not been installed' + ${EndIf} + ${EndIf} + + ${Else} + ${If} $silent_install_plugins == 2 + File /oname=${PLUGINS_FULL_PATH} "centreon_plugins.exe" + System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console + System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout + FileWrite $0 "Local Centreon plugins (version ${PLUGINS_VERSION}) installed$\n" + ${Else} + Call get_plugins_url + ${If} $plugins_download_failure > 0 + File /oname=${PLUGINS_FULL_PATH} "centreon_plugins.exe" + System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console + System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout + FileWrite $0 "Failed to download latest Centreon plugins => local Centreon plugins (version ${PLUGINS_VERSION}) installed$\n" + ${Else} + ClearErrors + inetc::get /caption "plugins" /banner "Downloading plugins..." "$plugins_url" "${PLUGINS_DIR}/centreon_plugins.exe" + ${If} ${Errors} + File /oname=${PLUGINS_FULL_PATH} "centreon_plugins.exe" + System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console + System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout + FileWrite $0 "Failed to download latest Centreon plugins => local Centreon plugins (version ${PLUGINS_VERSION}) installed$\n" + ${Else} + System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console + System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout + FileWrite $0 "Centreon plugins installed$\n" + ${EndIf} + ${EndIf} + ${EndIf} ${EndIf} SectionEnd @@ -243,7 +288,8 @@ function .onInit SetErrorLevel 0 ${GetParameters} $cmdline_parameters Strcpy $1 "--install_cma Set this flag if you want to install centreon monitoring agent$\n\ ---install_plugins Set this flag if you want to install centreon plugins$\n" +--install_plugins Set this flag if you want to download and install latest version of centreon plugins$\n\ +--install_embedded_plugins Set this flag if you want to install the plugins embedded in the installer$\n" Call show_help Call show_version Call silent_verify_admin @@ -257,7 +303,7 @@ function .onInit SectionSetFlags ${CMAInstSection} 0 ${EndIf} - ${If} $silent_install_plugins == 1 + ${If} $silent_install_plugins > 0 SectionSetFlags ${PluginsInstSection} ${SF_SELECTED} ${Else} SectionSetFlags ${PluginsInstSection} 0 @@ -279,7 +325,7 @@ Function setup_cma_show FunctionEnd /** - * @brief show cma log dialogbox ig user has choosen to install cma + * @brief show cma log dialogbox if user has choosen to install cma */ Function setup_log_show ${If} ${SectionIsSelected} ${CMAInstSection} @@ -288,7 +334,7 @@ Function setup_log_show FunctionEnd /** - * @brief show cma encryption dialogbox ig user has choosen to install cma + * @brief show cma encryption dialogbox if user has choosen to install cma */ Function setup_cma_encryption_show ${If} ${SectionIsSelected} ${CMAInstSection} diff --git a/agent/installer/compile_installer.ps1.in 
b/agent/installer/compile_installer.ps1.in new file mode 100644 index 00000000000..76f0be9437c --- /dev/null +++ b/agent/installer/compile_installer.ps1.in @@ -0,0 +1,68 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +# This script builds the CMA Windows installer. +# It queries the latest centreon-nsclient-build release on GitHub, downloads the +# centreon_plugins.exe asset (so it can be embedded in the installer), then runs +# makensis with PLUGINS_VERSION set to the release tag in order to compile +# centreon-monitoring-agent.nsi. + + +$plugins_release_url = "https://api.github.com/repos/centreon/centreon-nsclient-build/releases/latest" Write-Host "Downloading plugins release info from $plugins_release_url" + +$release_info = Invoke-WebRequest -Uri $plugins_release_url -Headers @{Accept = 'application/vnd.github+json' } | ConvertFrom-Json $assets = $release_info.assets $tag_name = ($release_info.tag_name) + +$asset_url = "" foreach ($asset in $assets) { if ($asset.name -eq "centreon_plugins.exe") { $asset_url = $asset.browser_download_url break } } + if ($asset_url -eq "") { Write-Host "Failed to get asset url" exit 1 } + Write-Host "Downloading plugin from $asset_url" $request_result = Invoke-WebRequest -Uri $asset_url -OutFile "centreon_plugins.exe" -PassThru + if ($request_result.StatusCode -ne 200) { Write-Host "Failed to download plugins from $asset_url" exit 1 } + $compile_res = Start-Process -FilePath "${MKNSIS}" -ArgumentList @("/DPLUGINS_VERSION=$tag_name", "${PROJECT_SOURCE_DIR}/centreon-monitoring-agent.nsi") -Wait -PassThru + if ($compile_res.ExitCode -ne 0) { Write-Host "Failed to compile installer" exit 1 } +exit 0 diff --git a/agent/installer/dlg_helper.nsi b/agent/installer/dlg_helper.nsi index c6a864426a9..6a35e5c9fee 100644 --- a/agent/installer/dlg_helper.nsi +++ b/agent/installer/dlg_helper.nsi @@ -158,7 +158,7 @@ Function init_log_dlg ${NSD_CB_SelectString} $hCtl_log_dlg_log_level $0 ReadRegStr $0 HKLM ${CMA_REG_KEY} "log_type" ${If} $0 == "" - StrCpy $0 "EventLog" + StrCpy $0 "Event-Log" ${EndIf} ${NSD_CB_SelectString} $hCtl_log_dlg_log_type $0 ReadRegDWORD $0 HKLM ${CMA_REG_KEY} "log_max_file_size" diff --git a/agent/installer/silent.nsi b/agent/installer/silent.nsi index f7e0c9477dd..0bc14085920 100644 --- a/agent/installer/silent.nsi +++ b/agent/installer/silent.nsi @@ -47,7 +47,7 @@ Function show_help FileWrite $0 "usage: centreon-monitoring-agent.exe args$\n" FileWrite $0 "This installer works into mode:$\n" FileWrite $0 " - Without argument: interactive windows 
UI$\n" - FileWrite $0 " - Silent mode with the /S flag$\n" + FileWrite $0 " - Silent mode with the /S flag in first position, before others arguments$\n" FileWrite $0 "Silent mode arguments:$\n" ${If} $1 != "" FileWrite $0 "$1$\n" @@ -55,7 +55,7 @@ Function show_help FileWrite $0 "--hostname The name of the host as defined in the Centreon interface.$\n" FileWrite $0 "--endpoint IP address of DNS name of the poller the agent will connect to.$\n" FileWrite $0 " In case of Poller-initiated connection mode, it is the interface and port on which the agent will accept connections from the poller. 0.0.0.0 means all interfaces.$\n" - FileWrite $0 " The format is :" + FileWrite $0 " The format is :$\n" FileWrite $0 "--reverse Add this flag for Poller-initiated connection mode.$\n" FileWrite $0 "$\n" FileWrite $0 "--log_type event_log or file. In case of logging in a file, log_file param is mandatory $\n" @@ -125,6 +125,11 @@ Function cmd_line_to_registry Call silent_fatal_error ${EndIf} WriteRegStr HKLM ${CMA_REG_KEY} "host" "$0" + ${If} ${Errors} + StrCpy $1 "Failed to write registry key for host" + Call silent_fatal_error + ${EndIf} + ClearErrors ${GetOptions} $cmdline_parameters "--endpoint" $0 ${If} ${Errors} @@ -175,7 +180,7 @@ Function cmd_line_to_registry ${EndIf} ${Else} - WriteRegStr HKLM ${CMA_REG_KEY} "log_type" "EventLog" + WriteRegStr HKLM ${CMA_REG_KEY} "log_type" "event-log" ${EndIf} ClearErrors ${GetOptions} $cmdline_parameters "--log_level" $0 @@ -404,11 +409,17 @@ Function silent_update_conf FunctionEnd /** - * @brief checks --install_plugins and --install_cma cmdline flags + * @brief checks --install_plugins, --install_embedded_plugins and --install_cma cmdline flags */ Function installer_parse_cmd_line Push $0 + ClearErrors + ${GetOptions} $cmdline_parameters "--install_embedded_plugins" $0 + ${IfNot} ${Errors} + StrCpy $silent_install_plugins 2 + ${EndIf} + ClearErrors ${GetOptions} $cmdline_parameters "--install_plugins" $0 ${IfNot} ${Errors} @@ -474,4 +485,4 @@ Function un.silent_verify_admin SetErrorLevel 1 Quit ${EndIf} -FunctionEnd \ No newline at end of file +FunctionEnd diff --git a/agent/native_linux/inc/com/centreon/agent/check_cpu.hh b/agent/native_linux/inc/com/centreon/agent/check_cpu.hh index f11f02b039e..9481f61fa0a 100644 --- a/agent/native_linux/inc/com/centreon/agent/check_cpu.hh +++ b/agent/native_linux/inc/com/centreon/agent/check_cpu.hh @@ -19,6 +19,94 @@ #ifndef CENTREON_AGENT_CHECK_CPU_HH #define CENTREON_AGENT_CHECK_CPU_HH -namespace com::centreon::agent {} +#include "native_check_cpu_base.hh" +namespace com::centreon::agent { + +namespace check_cpu_detail { + +enum e_proc_stat_index { + user = 0, + nice, + system, + idle, + iowait, + irq, + soft_irq, + steal, + guest, + guest_nice, + nb_field +}; + +/** + * @brief this class is the result of /proc/stat one line parsing like + * cpu0 2930565 15541 1250726 10453908 54490 0 27068 0 0 0 + * if _cpu_index == std::numeric_limits::max(), it represents the sum + * of all cpus + * + */ +class per_cpu_time : public per_cpu_time_base { + unsigned _cpu_index = 0; + + public: + per_cpu_time() = default; + per_cpu_time(const std::string_view& line); + unsigned get_cpu_index() const { return _cpu_index; } +}; + +/** + * @brief datas of /proc/stat + * + */ +class proc_stat_file : public cpu_time_snapshot { + public: + proc_stat_file(size_t nb_to_reserve) + : proc_stat_file("/proc/stat", nb_to_reserve) {} + + proc_stat_file(const char* proc_file, size_t nb_to_reserve); +}; + +}; // namespace check_cpu_detail + +/** + * 
@brief native linux check_cpu + * every _measure_interval, we read /proc/stat and we calculate cpu usage + * when a check starts, we read last measure and passed it to completion_handler + * If we not have yet done a measure, we wait to timeout to calculate cpu usage + */ +class check_cpu + : public native_check_cpu { + public: + check_cpu(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat); + + static void help(std::ostream& help_stream); + + std::shared_ptr shared_from_this() { + return std::static_pointer_cast(check::shared_from_this()); + } + + std::unique_ptr> + get_cpu_time_snapshot(bool first_measure) override; + + e_status compute( + const check_cpu_detail::cpu_time_snapshot< + check_cpu_detail::e_proc_stat_index::nb_field>& first_measure, + const check_cpu_detail::cpu_time_snapshot< + check_cpu_detail::e_proc_stat_index::nb_field>& second_measure, + std::string* output, + std::list* perfs) override; +}; +} // namespace com::centreon::agent #endif diff --git a/agent/native_linux/src/agent_info.cc b/agent/native_linux/src/agent_info.cc new file mode 100644 index 00000000000..42d6d26f6a2 --- /dev/null +++ b/agent/native_linux/src/agent_info.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
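The check_cpu class declared above computes usage from two cumulative counter snapshots taken one measure interval apart. A minimal self-contained sketch of that arithmetic, with a simplified cpu_counters struct standing in for the real per_cpu_time_base (names here are illustrative, not the agent's API):

#include <cstdint>

// simplified stand-in for one /proc/stat snapshot of a single cpu
struct cpu_counters {
  uint64_t total;       // sum of all fields of the cpu line
  uint64_t total_used;  // total minus the idle field
};

// usage over the interval, in [0;1]: the counters only grow, so the ratio of
// the "used" delta to the total delta is the cpu load between the two reads
inline double cpu_usage(const cpu_counters& first, const cpu_counters& second) {
  uint64_t delta_total = second.total - first.total;
  if (delta_total == 0)
    return 0.0;  // no tick elapsed between the two snapshots
  return static_cast<double>(second.total_used - first.total_used) /
         delta_total;
}
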
+ * + * For more information : contact@centreon.com + */ + +#include "agent_info.hh" +#include "version.hh" + +static std::string _os; +static std::string _os_version; + +/** + * @brief read os version + * to call at the beginning of program + * + */ +void com::centreon::agent::read_os_version() { + std::fstream os_release("/etc/os-release", std::fstream::in); + if (os_release.is_open()) { + enum { os_found = 1, version_found = 2, all_found = 3 }; + unsigned found = 0; + std::string line; + while (std::getline(os_release, line) && found != all_found) { + if (!line.compare(0, 3, "ID=")) { + line.erase(0, 3); + boost::algorithm::trim_if(line, [](const char c) { + return c == '"' || c == ' ' || c == '\''; + }); + _os = line; + found |= os_found; + } else if (!line.compare(0, 11, "VERSION_ID=")) { + line.erase(0, 11); + boost::algorithm::trim_if(line, [](const char c) { + return c == '"' || c == ' ' || c == '\''; + }); + _os_version = line; + found |= version_found; + } + } + } +} + +/** + * @brief fill agent_info with agent and os versions + * + * @param supervised_host host configured + * @param agent_info pointer to object to fill + */ +void com::centreon::agent::fill_agent_info( + const std::string& supervised_host, + ::com::centreon::agent::AgentInfo* agent_info) { + agent_info->mutable_centreon_version()->set_major( + CENTREON_AGENT_VERSION_MAJOR); + agent_info->mutable_centreon_version()->set_minor( + CENTREON_AGENT_VERSION_MINOR); + agent_info->mutable_centreon_version()->set_patch( + CENTREON_AGENT_VERSION_PATCH); + agent_info->set_host(supervised_host); + agent_info->set_os(_os); + agent_info->set_os_version(_os_version); +} diff --git a/agent/native_linux/src/check_cpu.cc b/agent/native_linux/src/check_cpu.cc index ff1a150ce8e..1959d1acd2f 100644 --- a/agent/native_linux/src/check_cpu.cc +++ b/agent/native_linux/src/check_cpu.cc @@ -1 +1,296 @@ -class dummy{}; +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
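read_os_version(), shown in agent_info.cc above, keeps only the value part of the ID= and VERSION_ID= lines of /etc/os-release and strips the quotes and spaces around it. A small self-contained sketch of that trimming step, using an illustrative sample value:

#include <boost/algorithm/string/trim.hpp>
#include <cassert>
#include <string>

int main() {
  // as found in /etc/os-release, e.g. VERSION_ID="12"
  std::string value = "\"12\"";
  boost::algorithm::trim_if(
      value, [](char c) { return c == '"' || c == ' ' || c == '\''; });
  assert(value == "12");  // the value fill_agent_info() reports as os_version
  return 0;
}
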
+ * + * For more information : contact@centreon.com + */ + +#include "check_cpu.hh" + +#include "native_check_cpu_base.cc" + +using namespace com::centreon::agent; +using namespace com::centreon::agent::check_cpu_detail; + +namespace com::centreon::agent::check_cpu_detail { +template class per_cpu_time_base; +} + +/** + * @brief Construct a new per cpu time::per cpu time object + * it parses a line like cpu0 2930565 15541 1250726 10453908 54490 0 27068 0 0 0 + * + * @param line + */ +per_cpu_time::per_cpu_time(const std::string_view& line) { + using namespace std::literals; + auto split_res = absl::StrSplit(line, ' ', absl::SkipEmpty()); + auto field_iter = split_res.begin(); + + if ((*field_iter).substr(0, 3) != "cpu"sv) { + throw std::invalid_argument("no cpu"); + } + if (!absl::SimpleAtoi(field_iter->substr(3), &_cpu_index)) { + _cpu_index = check_cpu_detail::average_cpu_index; + } + + auto to_fill = _metrics.begin(); + auto end = _metrics.end(); + for (++field_iter; field_iter != split_res.end(); ++field_iter, ++to_fill) { + unsigned counter; + if (!absl::SimpleAtoi(*field_iter, &counter)) { + throw std::invalid_argument("not a number"); + } + // On some OS we may have more fields than user to guest_nice, we have to + // take them into account only for total compute + if (to_fill < end) { + *to_fill = counter; + } + _total += counter; + } + + // On some OS, we might have fewer fields than expected, so we initialize + // the remaining fields + for (; to_fill < end; ++to_fill) + *to_fill = 0; + + // Calculate the 'used' CPU time by subtracting idle time from total time + _total_used = _total - _metrics[e_proc_stat_index::idle]; +} + +/** + * @brief Construct a new proc stat file::proc stat file object + * + * @param proc_file path of the proc file usually: /proc/stat, other for unit + * tests + * @param nb_to_reserve nb host cores + */ +proc_stat_file::proc_stat_file(const char* proc_file, size_t nb_to_reserve) { + _data.reserve(nb_to_reserve + 1); + std::ifstream proc_stat(proc_file); + char line_buff[1024]; + while (1) { + try { + proc_stat.getline(line_buff, sizeof(line_buff)); + line_buff[1023] = 0; + per_cpu_time to_ins(line_buff); + _data.emplace(to_ins.get_cpu_index(), to_ins); + } catch (const std::exception&) { + return; + } + } +} + +using linux_cpu_to_status = cpu_to_status; + +using cpu_to_status_constructor = + std::function; + +#define BY_TYPE_CPU_TO_STATUS(TYPE_METRIC) \ + {"warning-core-" #TYPE_METRIC, \ + [](double threshold) { \ + return linux_cpu_to_status( \ + e_status::warning, e_proc_stat_index::TYPE_METRIC, false, threshold); \ + }}, \ + {"critical-core-" #TYPE_METRIC, \ + [](double threshold) { \ + return linux_cpu_to_status(e_status::critical, \ + e_proc_stat_index::TYPE_METRIC, false, \ + threshold); \ + }}, \ + {"warning-average-" #TYPE_METRIC, \ + [](double threshold) { \ + return linux_cpu_to_status(e_status::warning, \ + e_proc_stat_index::TYPE_METRIC, true, \ + threshold); \ + }}, \ + { \ + "critical-average-" #TYPE_METRIC, [](double threshold) { \ + return linux_cpu_to_status(e_status::critical, \ + e_proc_stat_index::TYPE_METRIC, true, \ + threshold); \ + } \ + } + +/** + * @brief this map is used to generate cpus values comparator from check + * configuration fields + * + */ +static const absl::flat_hash_map + _label_to_cpu_to_status = { + {"warning-core", + [](double threshold) { + return linux_cpu_to_status(e_status::warning, + e_proc_stat_index::nb_field, false, + threshold); + }}, + {"critical-core", + [](double threshold) { + return 
linux_cpu_to_status(e_status::critical, + e_proc_stat_index::nb_field, false, + threshold); + }}, + {"warning-average", + [](double threshold) { + return linux_cpu_to_status( + e_status::warning, e_proc_stat_index::nb_field, true, threshold); + }}, + {"critical-average", + [](double threshold) { + return linux_cpu_to_status(e_status::critical, + e_proc_stat_index::nb_field, true, + threshold); + }}, + BY_TYPE_CPU_TO_STATUS(user), + BY_TYPE_CPU_TO_STATUS(nice), + BY_TYPE_CPU_TO_STATUS(system), + BY_TYPE_CPU_TO_STATUS(iowait), + BY_TYPE_CPU_TO_STATUS(guest)}; + +/** + * @brief Construct a new check cpu::check cpu object + * + * @param io_context + * @param logger + * @param first_start_expected start expected + * @param check_interval check interval between two checks (not only this but + * also others) + * @param serv service + * @param cmd_name + * @param cmd_line + * @param args native plugin arguments + * @param cnf engine configuration received object + * @param handler called at measure completion + */ +check_cpu::check_cpu(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat) + : native_check_cpu( + io_context, + logger, + first_start_expected, + check_interval, + serv, + cmd_name, + cmd_line, + args, + cnf, + std::move(handler), + stat) + +{ + com::centreon::common::rapidjson_helper arg(args); + if (args.IsObject()) { + for (auto member_iter = args.MemberBegin(); member_iter != args.MemberEnd(); + ++member_iter) { + auto cpu_to_status_search = _label_to_cpu_to_status.find( + absl::AsciiStrToLower(member_iter->name.GetString())); + if (cpu_to_status_search != _label_to_cpu_to_status.end()) { + std::optional val = get_double( + cmd_name, member_iter->name.GetString(), member_iter->value, true); + if (val) { + check_cpu_detail::cpu_to_status cpu_checker = + cpu_to_status_search->second(*val / 100); + _cpu_to_status.emplace( + std::make_tuple(cpu_checker.get_proc_stat_index(), + cpu_checker.is_average(), + cpu_checker.get_status()), + cpu_checker); + } + } else if (member_iter->name != "cpu-detailed") { + SPDLOG_LOGGER_ERROR(logger, "command: {}, unknown parameter: {}", + cmd_name, member_iter->name); + } + } + } +} + +std::unique_ptr< + check_cpu_detail::cpu_time_snapshot> +check_cpu::get_cpu_time_snapshot([[maybe_unused]] bool first_measure) { + return std::make_unique(_nb_core); +} + +constexpr std::array + _sz_summary_labels = {", User ", ", Nice ", ", System ", + ", Idle ", ", IOWait ", ", Interrupt ", + ", Soft Irq ", ", Steal ", ", Guest ", + ", Guest Nice "}; + +constexpr std::array + _sz_perfdata_name = {"user", "nice", "system", "idle", + "iowait", "interrupt", "softirq", "steal", + "guest", "guestnice"}; + +/** + * @brief compute the difference between second_measure and first_measure and + * generate status, output and perfdatas + * + * @param first_measure first snapshot of /proc/stat + * @param second_measure second snapshot of /proc/stat + * @param output out plugin output + * @param perfs perfdatas + * @return e_status plugin out status + */ +e_status check_cpu::compute( + const check_cpu_detail::cpu_time_snapshot< + check_cpu_detail::e_proc_stat_index::nb_field>& first_measure, + const check_cpu_detail::cpu_time_snapshot< + 
check_cpu_detail::e_proc_stat_index::nb_field>& second_measure, + std::string* output, + std::list* perfs) { + output->reserve(256 * _nb_core); + + return _compute(first_measure, second_measure, _sz_summary_labels.data(), + _sz_perfdata_name.data(), output, perfs); +} + +void check_cpu::help(std::ostream& help_stream) { + help_stream << R"( +- cpu params: + warning-core: threshold for warning status on core usage in percentage + critical-core: threshold for critical status on core usage in percentage + warning-average: threshold for warning status on average usage in percentage + critical-average: threshold for critical status on average usage in percentage + warning-core-user: threshold for warning status on core user usage in percentage + critical-core-user: threshold for critical status on core user usage in percentage + warning-average-user: threshold for warning status on average user usage in percentage + critical-average-user: threshold for critical status on average user usage in percentage + warning-core-nice: threshold for warning status on core nice usage in percentage + critical-core-nice: threshold for critical status on core nice usage in percentage + warning-average-nice: threshold for warning status on average nice usage in percentage + critical-average-nice: threshold for critical status on average nice usage in percentage + warning-core-system: threshold for warning status on core system usage in percentage + critical-core-system: threshold for critical status on core system usage in percentage + warning-average-system: threshold for warning status on average system usage in percentage + critical-average-system: threshold for critical status on average system usage in percentage + warning-core-iowait: threshold for warning status on core iowait usage in percentage + critical-core-iowait: threshold for critical status on core iowait usage in percentage + warning-average-iowait: threshold for warning status on average iowait usage in percentage + critical-average-iowait: threshold for critical status on average iowait usage in percentage + warning-core-guest: threshold for warning status on core guest usage in percentage + critical-core-guest: threshold for critical status on core guest usage in percentage + warning-average-guest: threshold for warning status on average guest usage in percentage + critical-average-guest: threshold for critical status on average guest usage in percentage + )"; +} diff --git a/agent/native_windows/inc/com/centreon/agent/check_cpu.hh b/agent/native_windows/inc/com/centreon/agent/check_cpu.hh index f11f02b039e..94654fffad5 100644 --- a/agent/native_windows/inc/com/centreon/agent/check_cpu.hh +++ b/agent/native_windows/inc/com/centreon/agent/check_cpu.hh @@ -19,6 +19,133 @@ #ifndef CENTREON_AGENT_CHECK_CPU_HH #define CENTREON_AGENT_CHECK_CPU_HH -namespace com::centreon::agent {} +#include "ntdll.hh" +#include "native_check_cpu_base.hh" + +namespace com::centreon::agent { + +namespace check_cpu_detail { +enum e_proc_stat_index { user = 0, system, idle, interrupt, dpc, nb_field }; + +/** + * @brief this class contains all counter for one core contained in a + * M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION structure + */ +class kernel_per_cpu_time + : public per_cpu_time_base { + public: + kernel_per_cpu_time() = default; + + kernel_per_cpu_time(const M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION& info); +}; + +/** + * we can collect cpu metrics in two manners, the first one is to use +microsoft + * internal NtQuerySystemInformation, the second one is 
to use the official + * Performance Data Helper + * So we have two classes to collect metrics +** / + +/** + * @brief metrics collected by NtQuerySystemInformation + * + */ +class kernel_cpu_time_snapshot + : public cpu_time_snapshot { + public: + kernel_cpu_time_snapshot(unsigned nb_core); + + // used by TU + template + kernel_cpu_time_snapshot(processor_performance_info_iter begin, + processor_performance_info_iter end); + + void dump(std::string* output) const; +}; + +template +kernel_cpu_time_snapshot::kernel_cpu_time_snapshot( + processor_performance_info_iter begin, + processor_performance_info_iter end) { + unsigned cpu_index = 0; + for (processor_performance_info_iter it = begin; it != end; + ++it, ++cpu_index) { + _data[cpu_index] = kernel_per_cpu_time(*it); + } + + per_cpu_time_base& total = + _data[average_cpu_index]; + for (auto to_add_iter = _data.begin(); + to_add_iter != _data.end() && to_add_iter->first != average_cpu_index; + ++to_add_iter) { + total.add(to_add_iter->second); + } +} + +struct pdh_counters; + +/** + * @brief metrics collected by Performance Data Helper + * + */ +class pdh_cpu_time_snapshot + : public cpu_time_snapshot { + public: + pdh_cpu_time_snapshot(unsigned nb_core, + const pdh_counters& counters, + bool first_measure); +}; + +} // namespace check_cpu_detail + +/** + * @brief native windows check cpu + * + */ +class check_cpu + : public native_check_cpu { + // this check can collect metrics in two manners, the first one is to use the + // unofficial NtQuerySystemInformation, the second one is to use the official + // Performance Data Helper + bool _use_nt_query_system_information = true; + + std::unique_ptr _pdh_counters; + + public: + check_cpu(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat); + + ~check_cpu(); + + static void help(std::ostream& help_stream); + + std::shared_ptr shared_from_this() { + return std::static_pointer_cast(check::shared_from_this()); + } + + std::unique_ptr> + check_cpu::get_cpu_time_snapshot(bool first_measure) override; + + e_status compute( + const check_cpu_detail::cpu_time_snapshot< + check_cpu_detail::e_proc_stat_index::nb_field>& first_measure, + const check_cpu_detail::cpu_time_snapshot< + check_cpu_detail::e_proc_stat_index::nb_field>& second_measure, + std::string* output, + std::list* perfs) override; +}; + +} // namespace com::centreon::agent #endif diff --git a/agent/native_windows/inc/com/centreon/agent/check_memory.hh b/agent/native_windows/inc/com/centreon/agent/check_memory.hh new file mode 100644 index 00000000000..76a11b928a2 --- /dev/null +++ b/agent/native_windows/inc/com/centreon/agent/check_memory.hh @@ -0,0 +1,100 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
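The kernel collection path declared above relies on SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION counters, where KernelTime also contains IdleTime, which is consistent with the implementation further down in check_cpu.cc. A minimal self-contained sketch of the counter arithmetic this implies (perf_sample is a simplified stand-in, not the agent's type):

#include <cstdint>

// simplified stand-in for one core's cumulative times (100 ns units)
struct perf_sample {
  int64_t idle;
  int64_t kernel;  // on Windows this counter already includes idle time
  int64_t user;
  int64_t interrupt;
};

// busy time derived the same way kernel_per_cpu_time does it:
// system = kernel - idle, total = user + system + idle + interrupt
inline int64_t busy_time(const perf_sample& s) {
  int64_t system_time = s.kernel - s.idle;
  int64_t total = s.user + system_time + s.idle + s.interrupt;
  return total - s.idle;
}
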
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_NATIVE_CHECK_MEMORY_HH +#define CENTREON_AGENT_NATIVE_CHECK_MEMORY_HH + +#include "native_check_base.hh" + +struct _PERFORMANCE_INFORMATION; + +namespace com::centreon::agent { +namespace native_check_detail { + +enum e_memory_metric : unsigned { + phys_total, + phys_free, + phys_used, + swap_total, + swap_free, + swap_used, + virtual_total, + virtual_free, + virtual_used, + nb_metric +}; + +/** + * @brief this class compute a measure of memory metrics and store in _metrics + * member + * + */ +class w_memory_info + : public snapshot { + unsigned _output_flags = 0; + + public: + enum output_flags : unsigned { dump_swap = 1, dump_virtual }; + + w_memory_info(unsigned flags); + w_memory_info(const MEMORYSTATUSEX& mem_status, + const struct _PERFORMANCE_INFORMATION& perf_mem_status, + unsigned flags = 0); + void init(const MEMORYSTATUSEX& mem_status, + const struct _PERFORMANCE_INFORMATION& perf_mem_status); + + void dump_to_output(std::string* output) const override; +}; + +} // namespace native_check_detail + +/** + * @brief native final check object + * + */ +class check_memory : public native_check_base< + native_check_detail::e_memory_metric::nb_metric> { + protected: + unsigned _output_flags = 0; + + public: + check_memory(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat); + + std::shared_ptr> + measure() override; + + static void help(std::ostream& help_stream); + + const std::vector& + get_metric_definitions() const override; +}; + +} // namespace com::centreon::agent + +#endif \ No newline at end of file diff --git a/agent/native_windows/inc/com/centreon/agent/check_service.hh b/agent/native_windows/inc/com/centreon/agent/check_service.hh new file mode 100644 index 00000000000..d35de66a8f3 --- /dev/null +++ b/agent/native_windows/inc/com/centreon/agent/check_service.hh @@ -0,0 +1,194 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
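The percentage thresholds accepted by check_memory (the *-prct options) are expressed against the matching total metric. A hedged, simplified sketch of how a warning-usage-prct value of 80 is meant to be evaluated against the metrics enumerated above; the helper below is illustrative, not the agent's measure_to_status class:

#include <cstdint>

// true when physical memory usage exceeds the warning percentage
// (e.g. warning_usage_prct = 80 means more than 80% of phys_total is used)
inline bool physical_usage_warning(uint64_t phys_used,
                                   uint64_t phys_total,
                                   double warning_usage_prct) {
  if (phys_total == 0)
    return false;  // nothing to compare against
  return static_cast<double>(phys_used) / phys_total >
         warning_usage_prct / 100.0;
}
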
+ * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_NATIVE_CHECK_SERVICE_HH +#define CENTREON_AGENT_NATIVE_CHECK_SERVICE_HH + +#include "native_check_base.hh" + +namespace com::centreon::agent { +namespace native_check_detail { + +enum e_service_metric : unsigned { + stopped, + start_pending, + stop_pending, + running, + continue_pending, + pause_pending, + paused, + total, + nb_service_metric +}; + +/** + * @brief service filter + * it can filter services by their name and also by their start_auto + */ +class service_filter { + using string_set = absl::flat_hash_set; + + string_set _name_cache_allowed; + string_set _name_cache_excluded; + string_set _display_cache_allowed; + string_set _display_cache_excluded; + + std::unique_ptr _name_filter, _name_filter_exclude; + std::unique_ptr _display_filter, _display_filter_exclude; + + std::optional _start_auto; + + public: + service_filter(const rapidjson::Value& args); + + bool is_allowed(bool start_auto, + const std::string_view& service_name, + const std::string_view& service_display); + + bool use_start_auto_filter() const { return _start_auto.has_value(); } +}; + +/** + * @brief service enumerator + * enumerate services and call a callback on each service allowed by filter + */ +class service_enumerator { + public: + using listener = std::function; + using constructor = std::function()>; + + private: + template + void _enumerate_services(service_filter& filter, + listener&& callback, + const std::shared_ptr& logger); + + protected: + static constexpr size_t service_array_size = 512; + + SC_HANDLE _sc_manager_handler = nullptr; + DWORD _resume_handle = 0; + + using serv_array = ENUM_SERVICE_STATUSA[service_array_size]; + + virtual bool _enumerate_services(serv_array& services, + DWORD* services_returned); + + virtual bool _query_service_config( + LPCSTR service_name, + QUERY_SERVICE_CONFIGA& serv_conf, + const std::shared_ptr& logger); + + public: + service_enumerator(); + + void reset_resume_handle() { _resume_handle = 0; } + + virtual ~service_enumerator(); + + void enumerate_services(service_filter& filter, + listener&& callback, + const std::shared_ptr& logger); +}; + +/** + * snapshot of services informations, used to create output and perfdatas + */ +class w_service_info : public snapshot { + std::string _output; + unsigned _state_to_warning; + unsigned _state_to_critical; + e_status _status = e_status::ok; + + public: + w_service_info(service_enumerator& service_enumerator, + service_filter& filter, + unsigned state_to_warning, + unsigned state_to_critical, + const std::shared_ptr& logger); + + void on_service(const ENUM_SERVICE_STATUSA& service_status); + + e_status get_status() const { return _status; } + + void dump_to_output(std::string* output) const override; +}; + +} // namespace native_check_detail + +/** + * @brief native final check object + * + */ +class check_service + : public native_check_base< + native_check_detail::e_service_metric::nb_service_metric> { + /** + * @brief these enums are indexed by service states values + * https://learn.microsoft.com/en-us/windows/win32/api/winsvc/ns-winsvc-service_status + */ + enum state_mask : unsigned { + stopped = 1, + start_pending = 2, + stop_pending = 4, + running = 8, + continue_pending = 16, + pause_pending = 32, + paused = 64 + }; + + static const std::array, 7> + _label_state; + + unsigned _state_to_warning = 0; + unsigned _state_to_critical = 0; + native_check_detail::service_filter _filter; + std::unique_ptr _enumerator; + + public: + /** + * 
in order to mock services API, this static constructor is used to replace + * service_enumarator by a mock + */ + static native_check_detail::service_enumerator::constructor + _enumerator_constructor; + + check_service(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat); + + std::shared_ptr> + measure() override; + + static void help(std::ostream& help_stream); + + const std::vector& + get_metric_definitions() const override; +}; + +} // namespace com::centreon::agent +#endif diff --git a/agent/native_windows/inc/com/centreon/agent/check_uptime.hh b/agent/native_windows/inc/com/centreon/agent/check_uptime.hh new file mode 100644 index 00000000000..3a43d32f1c1 --- /dev/null +++ b/agent/native_windows/inc/com/centreon/agent/check_uptime.hh @@ -0,0 +1,56 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_CHECK_UPTIME_HH +#define CENTREON_AGENT_CHECK_UPTIME_HH + +#include "check.hh" + +namespace com::centreon::agent { + +/** + * @brief check uptime + * + */ +class check_uptime : public check { + unsigned _second_warning_threshold; + unsigned _second_critical_threshold; + + public: + check_uptime(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat); + + static void help(std::ostream& help_stream); + + void start_check(const duration& timeout) override; + + e_status compute(uint64_t ms_uptime, + std::string* output, + common::perfdata* perfs); +}; +} // namespace com::centreon::agent +#endif \ No newline at end of file diff --git a/agent/native_windows/inc/com/centreon/agent/ntdll.hh b/agent/native_windows/inc/com/centreon/agent/ntdll.hh new file mode 100644 index 00000000000..c4f57edc3d3 --- /dev/null +++ b/agent/native_windows/inc/com/centreon/agent/ntdll.hh @@ -0,0 +1,51 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
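check_uptime receives the uptime in milliseconds (ms_uptime in compute() above) while its thresholds are stored in seconds. A small sketch of the unit conversion and of one possible human-readable breakdown; the agent's actual output format is not reproduced here:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// break a millisecond uptime down to days/hours/minutes/seconds
inline void format_uptime(uint64_t ms_uptime, char* buff, std::size_t buff_len) {
  uint64_t seconds = ms_uptime / 1000;  // thresholds are compared in seconds
  unsigned days = static_cast<unsigned>(seconds / 86400);
  unsigned hours = static_cast<unsigned>((seconds % 86400) / 3600);
  unsigned minutes = static_cast<unsigned>((seconds % 3600) / 60);
  std::snprintf(buff, buff_len, "%ud %uh %um %llus", days, hours, minutes,
                static_cast<unsigned long long>(seconds % 60));
}
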
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_NTDLL_HH +#define CENTREON_AGENT_NTDLL_HH + +namespace com::centreon::agent { + +/**As winternl.h may be included, we define our own + * SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION */ +struct M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION { + LARGE_INTEGER IdleTime; + LARGE_INTEGER KernelTime; + LARGE_INTEGER UserTime; + LARGE_INTEGER DpcTime; + LARGE_INTEGER InterruptTime; + ULONG InterruptCount; +}; + +void load_nt_dll(); + +typedef LONG(WINAPI* NtQuerySystemInformationPtr)(ULONG SystemInformationClass, + PVOID SystemInformation, + ULONG SystemInformationLength, + PULONG ReturnLength); + +extern NtQuerySystemInformationPtr nt_query_system_information; + +typedef NTSTATUS(NTAPI* RtlGetVersionPtr)( + POSVERSIONINFOEXW lpVersionInformation); + +extern RtlGetVersionPtr rtl_get_version; + +} // namespace com::centreon::agent + +#endif diff --git a/agent/native_windows/src/agent_info.cc b/agent/native_windows/src/agent_info.cc new file mode 100644 index 00000000000..f2250aca5bd --- /dev/null +++ b/agent/native_windows/src/agent_info.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "agent_info.hh" +#include "ntdll.hh" +#include "version.hh" + +static std::string _os; +static std::string _os_version; + +/** + * @brief read os version + * to call at the beginning of program + * + */ +void com::centreon::agent::read_os_version() { + RTL_OSVERSIONINFOEXW osvi; + ZeroMemory(&osvi, sizeof(osvi)); + osvi.dwOSVersionInfoSize = sizeof(osvi); + if (rtl_get_version(&osvi) == 0) { + _os = osvi.wProductType == VER_NT_SERVER ? "windows-server" : "windows"; + _os_version = std::to_string(osvi.dwMajorVersion) + "." + + std::to_string(osvi.dwMinorVersion) + "." 
+ + std::to_string(osvi.dwBuildNumber); + } +} + +/** + * @brief fill agent_info with agent and os versions + * + * @param supervised_host host configured + * @param agent_info pointer to object to fill + */ +void com::centreon::agent::fill_agent_info( + const std::string& supervised_host, + ::com::centreon::agent::AgentInfo* agent_info) { + agent_info->mutable_centreon_version()->set_major( + CENTREON_AGENT_VERSION_MAJOR); + agent_info->mutable_centreon_version()->set_minor( + CENTREON_AGENT_VERSION_MINOR); + agent_info->mutable_centreon_version()->set_patch( + CENTREON_AGENT_VERSION_PATCH); + agent_info->set_host(supervised_host); + agent_info->set_os(_os); + agent_info->set_os_version(_os_version); +} \ No newline at end of file diff --git a/agent/native_windows/src/check_cpu.cc b/agent/native_windows/src/check_cpu.cc index e69de29bb2d..90e83d924f4 100644 --- a/agent/native_windows/src/check_cpu.cc +++ b/agent/native_windows/src/check_cpu.cc @@ -0,0 +1,509 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include +#include + +#include "check_cpu.hh" +#include "com/centreon/common/rapidjson_helper.hh" +#include "com/centreon/exceptions/msg_fmt.hh" +#include "native_check_cpu_base.cc" + +using namespace com::centreon::agent; +using namespace com::centreon::agent::check_cpu_detail; + +/************************************************************************** + Kernel measure method +***************************************************************************/ + +/** + * @brief Construct a kernel_per_cpu_time object from a + * SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION + * + * @param info SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION collected by + * NtQuerySystemInformation + */ +kernel_per_cpu_time::kernel_per_cpu_time( + const M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION& info) { + _metrics[e_proc_stat_index::user] = info.UserTime.QuadPart; + _metrics[e_proc_stat_index::system] = + info.KernelTime.QuadPart - info.IdleTime.QuadPart; + _metrics[e_proc_stat_index::idle] = info.IdleTime.QuadPart; + _metrics[e_proc_stat_index::interrupt] = info.InterruptTime.QuadPart; + _metrics[e_proc_stat_index::dpc] = info.DpcTime.QuadPart; + _total = _metrics[e_proc_stat_index::user] + + _metrics[e_proc_stat_index::system] + + _metrics[e_proc_stat_index::idle] + + _metrics[e_proc_stat_index::interrupt]; + _total_used = _total - _metrics[e_proc_stat_index::idle]; +} + +/** + * @brief Construct a new kernel cpu time snapshot::kernel cpu time snapshot + * object it loads alls CPUs time and compute the average + * + * @param nb_core + */ +kernel_cpu_time_snapshot::kernel_cpu_time_snapshot(unsigned nb_core) { + std::unique_ptr buffer( + new M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION[nb_core]); + ULONG buffer_size = + sizeof(M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION) * nb_core; + ULONG return_length = 0; + + memset(buffer.get(), 0, buffer_size); + + if (nt_query_system_information( + 8 
/*SystemProcessorPerformanceInformationClass*/ + , + buffer.get(), buffer_size, &return_length) != 0) { + throw std::runtime_error("Failed to get kernel cpu time"); + } + + for (unsigned i = 0; i < nb_core; ++i) { + _data[i] = kernel_per_cpu_time(buffer[i]); + } + per_cpu_time_base& total = + _data[average_cpu_index]; + for (auto to_add_iter = _data.begin(); + to_add_iter != _data.end() && to_add_iter->first != average_cpu_index; + ++to_add_iter) { + total.add(to_add_iter->second); + } +} + +/** + * @brief used for debug, dump all values + * + * @param output + */ +void kernel_cpu_time_snapshot::dump(std::string* output) const { + cpu_time_snapshot::dump(output); +} + +/************************************************************************** + Pdh measure method +***************************************************************************/ + +namespace com::centreon::agent::check_cpu_detail { +struct pdh_counters { + HQUERY query; + HCOUNTER user; + HCOUNTER idle; + HCOUNTER kernel; + HCOUNTER interrupt; + HCOUNTER dpc; + + pdh_counters(); + + ~pdh_counters(); +}; +} // namespace com::centreon::agent::check_cpu_detail + +pdh_counters::pdh_counters() : query(nullptr) { + if (PdhOpenQuery(nullptr, 0, &query) != ERROR_SUCCESS) { + throw std::runtime_error("Failed to open pdh query"); + } + + if (PdhAddEnglishCounterA(query, "\\Processor(*)\\% User Time", 0, &user) != + ERROR_SUCCESS) { + throw std::runtime_error("Failed to add counter user"); + } + + if (PdhAddEnglishCounterA(query, "\\Processor(*)\\% Idle Time", 0, &idle) != + ERROR_SUCCESS) { + throw std::runtime_error("Failed to add counter idle"); + } + + if (PdhAddEnglishCounterA(query, "\\Processor(*)\\% Privileged Time", 0, + &kernel) != ERROR_SUCCESS) { + throw std::runtime_error("Failed to add counter kernel"); + } + + if (PdhAddEnglishCounterA(query, "\\Processor(*)\\% Interrupt Time", 0, + &interrupt) != ERROR_SUCCESS) { + throw std::runtime_error("Failed to add counter interrupt"); + } + + if (PdhAddEnglishCounterA(query, "\\Processor(*)\\% DPC Time", 0, &dpc) != + ERROR_SUCCESS) { + throw std::runtime_error("Failed to add counter dpc"); + } +} + +pdh_counters::~pdh_counters() { + if (query) + PdhCloseQuery(query); +} + +/** + * @brief Construct a new pdh cpu time snapshot::pdh cpu time snapshot object + * when we use pdh, we collect data twice, the first time we only collect query, + * the second collect and get counters values + * @param nb_core + * @param first_measure if true, we only collect query data + */ +pdh_cpu_time_snapshot::pdh_cpu_time_snapshot(unsigned nb_core, + const pdh_counters& counters, + bool first_measure) { + if (PdhCollectQueryData(counters.query) != ERROR_SUCCESS) { + throw std::runtime_error("Failed to collect query data"); + } + + if (first_measure) { + return; + } + + DWORD orginal_buffer_size = 0; + DWORD item_count = 0; + unsigned cpu_index = 0; + + PDH_STATUS status = + PdhGetFormattedCounterArrayA(counters.user, PDH_FMT_DOUBLE, + &orginal_buffer_size, &item_count, nullptr); + if (status != PDH_MORE_DATA) { + throw exceptions::msg_fmt("Failed to get user pdh counter array size: {:x}", + static_cast(status)); + } + + orginal_buffer_size = + (orginal_buffer_size / sizeof(PDH_FMT_COUNTERVALUE_ITEM_A)) * + sizeof(PDH_FMT_COUNTERVALUE_ITEM_A) + + sizeof(PDH_FMT_COUNTERVALUE_ITEM_A); + std::unique_ptr buffer( + new PDH_FMT_COUNTERVALUE_ITEM_A[orginal_buffer_size / + sizeof(PDH_FMT_COUNTERVALUE_ITEM_A)]); + const PDH_FMT_COUNTERVALUE_ITEM_A* buffer_end = buffer.get() + nb_core + 1; + + DWORD buffer_size = 
orginal_buffer_size; + if (PdhGetFormattedCounterArrayA(counters.user, PDH_FMT_DOUBLE, &buffer_size, + &item_count, + buffer.get()) == ERROR_SUCCESS) { + for (const PDH_FMT_COUNTERVALUE_ITEM_A* it = buffer.get(); it < buffer_end; + ++it) { + if (!absl::SimpleAtoi(it->szName, &cpu_index)) { + cpu_index = average_cpu_index; + } + // we multiply by 100 to store 2 decimal after comma in an integer + _data[cpu_index].set_metric_total_used(e_proc_stat_index::user, + it->FmtValue.doubleValue * 100); + } + } + + buffer_size = orginal_buffer_size; + if (PdhGetFormattedCounterArrayA(counters.kernel, PDH_FMT_DOUBLE, + &buffer_size, &item_count, + buffer.get()) == ERROR_SUCCESS) { + for (const PDH_FMT_COUNTERVALUE_ITEM_A* it = buffer.get(); it < buffer_end; + ++it) { + if (!absl::SimpleAtoi(it->szName, &cpu_index)) { + cpu_index = average_cpu_index; + } + _data[cpu_index].set_metric_total_used(e_proc_stat_index::system, + it->FmtValue.doubleValue * 100); + } + } + + buffer_size = orginal_buffer_size; + if (PdhGetFormattedCounterArrayA(counters.idle, PDH_FMT_DOUBLE, &buffer_size, + &item_count, + buffer.get()) == ERROR_SUCCESS) { + for (const PDH_FMT_COUNTERVALUE_ITEM_A* it = buffer.get(); it < buffer_end; + ++it) { + if (!absl::SimpleAtoi(it->szName, &cpu_index)) { + cpu_index = average_cpu_index; + } + _data[cpu_index].set_metric_total(e_proc_stat_index::idle, + it->FmtValue.doubleValue * 100); + } + } + + buffer_size = orginal_buffer_size; + if (PdhGetFormattedCounterArrayA(counters.interrupt, PDH_FMT_DOUBLE, + &buffer_size, &item_count, + buffer.get()) == ERROR_SUCCESS) { + for (const PDH_FMT_COUNTERVALUE_ITEM_A* it = buffer.get(); it < buffer_end; + ++it) { + if (!absl::SimpleAtoi(it->szName, &cpu_index)) { + cpu_index = average_cpu_index; + } + _data[cpu_index].set_metric_total_used(e_proc_stat_index::interrupt, + it->FmtValue.doubleValue * 100); + } + } + + buffer_size = orginal_buffer_size; + if (PdhGetFormattedCounterArrayA(counters.dpc, PDH_FMT_DOUBLE, &buffer_size, + &item_count, + buffer.get()) == ERROR_SUCCESS) { + for (const PDH_FMT_COUNTERVALUE_ITEM_A* it = buffer.get(); it < buffer_end; + ++it) { + if (!absl::SimpleAtoi(it->szName, &cpu_index)) { + cpu_index = average_cpu_index; + } + _data[cpu_index].set_metric(e_proc_stat_index::dpc, + it->FmtValue.doubleValue * 100); + } + } +} + +/************************************************************************** + Check cpu +***************************************************************************/ +using windows_cpu_to_status = cpu_to_status; + +using cpu_to_status_constructor = + std::function; + +#define BY_TYPE_CPU_TO_STATUS(TYPE_METRIC) \ + {"warning-core-" #TYPE_METRIC, \ + [](double threshold) { \ + return windows_cpu_to_status( \ + e_status::warning, e_proc_stat_index::TYPE_METRIC, false, threshold); \ + }}, \ + {"critical-core-" #TYPE_METRIC, \ + [](double threshold) { \ + return windows_cpu_to_status(e_status::critical, \ + e_proc_stat_index::TYPE_METRIC, false, \ + threshold); \ + }}, \ + {"warning-average-" #TYPE_METRIC, \ + [](double threshold) { \ + return windows_cpu_to_status(e_status::warning, \ + e_proc_stat_index::TYPE_METRIC, true, \ + threshold); \ + }}, \ + { \ + "critical-average-" #TYPE_METRIC, [](double threshold) { \ + return windows_cpu_to_status(e_status::critical, \ + e_proc_stat_index::TYPE_METRIC, true, \ + threshold); \ + } \ + } + +/** + * @brief this map is used to generate cpus values comparator from check + * configuration fields + * + */ +static const absl::flat_hash_map + _label_to_cpu_to_status = { + 
{"warning-core", + [](double threshold) { + return windows_cpu_to_status(e_status::warning, + e_proc_stat_index::nb_field, false, + threshold); + }}, + {"critical-core", + [](double threshold) { + return windows_cpu_to_status(e_status::critical, + e_proc_stat_index::nb_field, false, + threshold); + }}, + {"warning-average", + [](double threshold) { + return windows_cpu_to_status( + e_status::warning, e_proc_stat_index::nb_field, true, threshold); + }}, + {"critical-average", + [](double threshold) { + return windows_cpu_to_status(e_status::critical, + e_proc_stat_index::nb_field, true, + threshold); + }}, + BY_TYPE_CPU_TO_STATUS(user), + BY_TYPE_CPU_TO_STATUS(system)}; + +/** + * @brief Construct a new check cpu::check cpu object + * + * @param io_context + * @param logger + * @param first_start_expected start expected + * @param check_interval check interval between two checks (not only this but + * also others) + * @param serv service + * @param cmd_name + * @param cmd_line + * @param args native plugin arguments + * @param cnf engine configuration received object + * @param handler called at measure completion + */ +check_cpu::check_cpu(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat) + : native_check_cpu( + io_context, + logger, + first_start_expected, + check_interval, + serv, + cmd_name, + cmd_line, + args, + cnf, + std::move(handler), + stat) + +{ + try { + if (args.IsObject()) { + for (auto member_iter = args.MemberBegin(); + member_iter != args.MemberEnd(); ++member_iter) { + auto cpu_to_status_search = _label_to_cpu_to_status.find( + absl::AsciiStrToLower(member_iter->name.GetString())); + if (cpu_to_status_search != _label_to_cpu_to_status.end()) { + std::optional threshold = + check::get_double(cmd_name, member_iter->name.GetString(), + member_iter->value, true); + if (threshold) { + check_cpu_detail::cpu_to_status cpu_checker = + cpu_to_status_search->second(*threshold / 100); + _cpu_to_status.emplace( + std::make_tuple(cpu_checker.get_proc_stat_index(), + cpu_checker.is_average(), + cpu_checker.get_status()), + cpu_checker); + } + } else if (member_iter->name == "use-nt-query-system-information") { + std::optional val = get_bool( + cmd_name, "use-nt-query-system-information", member_iter->value); + if (val) { + _use_nt_query_system_information = *val; + } + } else if (member_iter->name != "cpu-detailed") { + SPDLOG_LOGGER_ERROR(logger, "command: {}, unknown parameter: {}", + cmd_name, member_iter->name); + } + } + } + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(_logger, "check_cpu fail to parse check params: {}", + e.what()); + throw; + } + + if (!_use_nt_query_system_information) { + _pdh_counters = std::make_unique(); + } +} + +check_cpu::~check_cpu() {} + +std::unique_ptr< + check_cpu_detail::cpu_time_snapshot> +check_cpu::get_cpu_time_snapshot(bool first_measure) { + if (_use_nt_query_system_information) { + return std::make_unique( + _nb_core); + } else { + return std::make_unique( + _nb_core, *_pdh_counters, first_measure); + } +} + +constexpr std::array + _sz_summary_labels = {", User ", ", System ", ", Idle ", ", Interrupt ", + ", Dpc Interrupt "}; + +constexpr std::array + _sz_perfdata_name = {"user", "system", "idle", "interrupt", + 
"dpc_interrupt"}; + +/** + * @brief compute the difference between second_measure and first_measure and + * generate status, output and perfdatas + * + * @param first_measure first snapshot of /proc/stat + * @param second_measure second snapshot of /proc/stat + * @param output out plugin output + * @param perfs perfdatas + * @return e_status plugin out status + */ +e_status check_cpu::compute( + const check_cpu_detail::cpu_time_snapshot< + check_cpu_detail::e_proc_stat_index::nb_field>& first_measure, + const check_cpu_detail::cpu_time_snapshot< + check_cpu_detail::e_proc_stat_index::nb_field>& second_measure, + std::string* output, + std::list* perfs) { + output->reserve(256 * _nb_core); + + return _compute(first_measure, second_measure, _sz_summary_labels.data(), + _sz_perfdata_name.data(), output, perfs); +} + +void check_cpu::help(std::ostream& help_stream) { + help_stream << R"( +- cpu params: + use-nt-query-system-information (default true): true: use NtQuerySystemInformation instead of performance counters + cpu-detailed (default false): true: add detailed cpu usage metrics + warning-core: threshold for warning status on core usage in percentage + critical-core: threshold for critical status on core usage in percentage + warning-average: threshold for warning status on average usage in percentage + critical-average: threshold for critical status on average usage in percentage + warning-core-user: threshold for warning status on core user usage in percentage + critical-core-user: threshold for critical status on core user usage in percentage + warning-average-user: threshold for warning status on average user usage in percentage + critical-average-user: threshold for critical status on average user usage in percentage + warning-core-system: threshold for warning status on core system usage in percentage + critical-core-system: threshold for critical status on core system usage in percentage + warning-average-system: threshold for warning status on average system usage in percentage + critical-average-system: threshold for critical status on average system usage in percentage + An example of configuration: + { + "check": "cpu_percentage", + "args": { + "cpu-detailed": true, + "warning-core": 80, + "critical-core": 90, + "warning-average": 60, + "critical-average": 70 + } + } + Examples of output: + OK: CPU(s) average usage is 50.00% + WARNING: CPU'0' Usage: 40.00%, User 25.00%, System 10.00%, Idle 60.00%, Interrupt 5.00%, Dpc Interrupt 1.00% CRITICAL: CPU'1' Usage: 60.00%, User 45.00%, System 10.00%, Idle 40.00%, Interrupt 5.00%, Dpc Interrupt 0.00% WARNING: CPU(s) average Usage: 50.00%, User 35.00%, System 10.00%, Idle 50.00%, Interrupt 5.00%, Dpc Interrupt 0.50% + Metrics: + Normal mode: + #core.cpu.utilization.percentage + cpu.utilization.percentage + cpu-detailed mode: + ~user#core.cpu.utilization.percentage + ~system#core.cpu.utilization.percentage + ~idle#core.cpu.utilization.percentage + ~interrupt#core.cpu.utilization.percentage + ~dpc_interrupt#core.cpu.utilization.percentage + ~used#core.cpu.utilization.percentage + user#cpu.utilization.percentage + system#cpu.utilization.percentage + idle#cpu.utilization.percentage + interrupt#cpu.utilization.percentage + dpc_interrupt#cpu.utilization.percentage +)"; +} diff --git a/agent/native_windows/src/check_drive_size.cc b/agent/native_windows/src/check_drive_size.cc new file mode 100644 index 00000000000..9a37f6e7feb --- /dev/null +++ b/agent/native_windows/src/check_drive_size.cc @@ -0,0 +1,153 @@ +/** + * Copyright 2024 Centreon + 
* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "drive_size.hh" + +namespace com::centreon::agent::check_drive_size_detail { + +static const absl::flat_hash_map + _sz_filesystem_map = {{"fat", e_drive_fs_type::hr_fs_fat}, + {"fat32", e_drive_fs_type::hr_fs_fat32}, + {"ntfs", e_drive_fs_type::hr_fs_ntfs}, + {"exfat", e_drive_fs_type::hr_fs_exfat}}; + +/** + * @brief Get the type of drive and type of filesystem + * + * @param fs_root like C:\ + * @param logger + * @return e_drive_fs_type + */ +static e_drive_fs_type get_fs_type( + const std::string& fs_root, + const std::shared_ptr& logger) { + // drive type + uint64_t fs_type = e_drive_fs_type::hr_unknown; + UINT drive_type = GetDriveTypeA(fs_root.c_str()); + switch (drive_type) { + case DRIVE_FIXED: + fs_type = e_drive_fs_type::hr_storage_fixed_disk; + break; + case DRIVE_REMOVABLE: + fs_type = e_drive_fs_type::hr_storage_removable_disk; + break; + case DRIVE_REMOTE: + fs_type = e_drive_fs_type::hr_storage_network_disk; + break; + case DRIVE_CDROM: + fs_type = e_drive_fs_type::hr_storage_compact_disc; + break; + case DRIVE_RAMDISK: + fs_type = e_drive_fs_type::hr_storage_ram_disk; + break; + default: + fs_type = e_drive_fs_type::hr_unknown; + SPDLOG_LOGGER_ERROR(logger, "{} unknown drive type {}", fs_root, + drive_type); + break; + } + + // format type + char file_system_name[MAX_PATH]; // Tampon pour le nom du syst�me de + // fichiers + + BOOL result = + GetVolumeInformation(fs_root.c_str(), nullptr, 0, nullptr, nullptr, + nullptr, file_system_name, sizeof(file_system_name)); + + if (!result) { + SPDLOG_LOGGER_ERROR(logger, "{} unable to get file system type", fs_root); + } else { + std::string lower_fs_name = file_system_name; + absl::AsciiStrToLower(&lower_fs_name); + auto fs_search = _sz_filesystem_map.find(lower_fs_name); + if (fs_search != _sz_filesystem_map.end()) { + fs_type |= fs_search->second; + } else { + fs_type |= e_drive_fs_type::hr_fs_unknown; + SPDLOG_LOGGER_ERROR(logger, "{} unknown file system type {}", fs_root, + file_system_name); + } + } + return static_cast(fs_type); +} + +/** + * @brief Get the used and total space of all drives allowed by filt + * + * @param filt fs filter (drive and fs type) + * @param logger + * @return std::list + */ +std::list os_fs_stats(filter& filt, + const std::shared_ptr& logger) { + DWORD drives = GetLogicalDrives(); + std::list result; + + std::string fs_to_test; + for (char letter = 'A'; letter <= 'Z'; ++letter) { + // test if drive bit is set + if (drives & (1 << (letter - 'A'))) { + fs_to_test.clear(); + fs_to_test.push_back(letter); + fs_to_test.push_back(':'); + fs_to_test.push_back('\\'); + + // first use cache of filter + if (filt.is_fs_yet_excluded(fs_to_test)) { + continue; + } + + if (!filt.is_fs_yet_allowed(fs_to_test)) { + // not in cache so test it + if (!filt.is_allowed(fs_to_test, "", get_fs_type(fs_to_test, logger))) { + SPDLOG_LOGGER_TRACE(logger, "{} refused by filter", fs_to_test); + continue; + 
} else { + SPDLOG_LOGGER_TRACE(logger, "{} allowed by filter", fs_to_test); + } + } + + ULARGE_INTEGER total_number_of_bytes; + ULARGE_INTEGER total_number_of_free_bytes; + + BOOL success = GetDiskFreeSpaceEx(fs_to_test.c_str(), nullptr, + &total_number_of_bytes, + &total_number_of_free_bytes); + + if (success) { + SPDLOG_LOGGER_TRACE(logger, "{} total: {}, free {}", fs_to_test, + total_number_of_bytes.QuadPart, + total_number_of_free_bytes.QuadPart); + + result.emplace_back(std::move(fs_to_test), + total_number_of_bytes.QuadPart - + total_number_of_free_bytes.QuadPart, + total_number_of_bytes.QuadPart); + + } else { + SPDLOG_LOGGER_ERROR(logger, "unable to get free space of {}", + fs_to_test); + } + } + } + + return result; +} + +} // namespace com::centreon::agent::check_drive_size_detail diff --git a/agent/native_windows/src/check_memory.cc b/agent/native_windows/src/check_memory.cc new file mode 100644 index 00000000000..ec94ba4c4da --- /dev/null +++ b/agent/native_windows/src/check_memory.cc @@ -0,0 +1,541 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include +#include + +#include "check_memory.hh" +#include "native_check_base.cc" + +using namespace com::centreon::agent; +using namespace com::centreon::agent::native_check_detail; + +namespace com::centreon::agent::native_check_detail { +/** + * @brief little struct used to format memory output (B, KB, MB or GB) + * + */ +struct byte_memory_metric { + uint64_t byte_value; +}; +} // namespace com::centreon::agent::native_check_detail + +namespace fmt { + +/** + * @brief formatter of byte_memory_metric + * + * @tparam + */ +template <> +struct formatter< + com::centreon::agent::native_check_detail::byte_memory_metric> { + constexpr auto parse(format_parse_context& ctx) + -> format_parse_context::iterator { + return ctx.begin(); + } + auto format( + const com::centreon::agent::native_check_detail::byte_memory_metric& v, + format_context& ctx) const -> format_context::iterator { + if (v.byte_value < 1024) { + return fmt::format_to(ctx.out(), "{} B", v.byte_value); + } + if (v.byte_value < 1024 * 1024) { + return fmt::format_to( + ctx.out(), "{} KB", + static_cast(v.byte_value * 100 / 1024) / 100); + } + + if (v.byte_value < 1024 * 1024 * 1024) { + return fmt::format_to( + ctx.out(), "{} MB", + static_cast(v.byte_value * 100 / 1024 / 1024) / 100); + } + if (v.byte_value < 1024ull * 1024 * 1024 * 1024) { + return fmt::format_to( + ctx.out(), "{} GB", + static_cast(v.byte_value * 100 / 1024ull / 1024 / 1024) / + 100); + } + return fmt::format_to( + ctx.out(), "{} TB", + static_cast(v.byte_value * 100 / 1024ull / 1024 / 1024 / 1024) / + 100); + } +}; +} // namespace fmt + +namespace com::centreon::agent::native_check_detail { + +/** + * @brief Construct a new w_memory info + * it measures memory usage and fill _metrics + * + */ +w_memory_info::w_memory_info(unsigned flags) : _output_flags(flags) { + MEMORYSTATUSEX mem_status; + 
mem_status.dwLength = sizeof(mem_status); + if (!GlobalMemoryStatusEx(&mem_status)) { + throw std::runtime_error("fail to get memory status"); + } + + PERFORMANCE_INFORMATION perf_mem_status; + perf_mem_status.cb = sizeof(perf_mem_status); + if (!GetPerformanceInfo(&perf_mem_status, sizeof(perf_mem_status))) { + throw std::runtime_error("fail to get memory status"); + } + + init(mem_status, perf_mem_status); +} + +/** + * @brief mock for tests + * + * @param mem_status + */ +w_memory_info::w_memory_info(const MEMORYSTATUSEX& mem_status, + const PERFORMANCE_INFORMATION& perf_mem_status, + unsigned flags) + : _output_flags(flags) { + init(mem_status, perf_mem_status); +} + +/** + * @brief fills _metrics + * + * @param mem_status + */ +void w_memory_info::init(const MEMORYSTATUSEX& mem_status, + const PERFORMANCE_INFORMATION& perf_mem_status) { + _metrics[e_memory_metric::phys_total] = mem_status.ullTotalPhys; + _metrics[e_memory_metric::phys_free] = mem_status.ullAvailPhys; + _metrics[e_memory_metric::phys_used] = + mem_status.ullTotalPhys - mem_status.ullAvailPhys; + _metrics[e_memory_metric::swap_total] = + perf_mem_status.PageSize * + (perf_mem_status.CommitLimit - perf_mem_status.PhysicalTotal); + _metrics[e_memory_metric::swap_used] = + perf_mem_status.PageSize * + (perf_mem_status.CommitTotal + perf_mem_status.PhysicalAvailable - + perf_mem_status.PhysicalTotal); + _metrics[e_memory_metric::swap_free] = _metrics[e_memory_metric::swap_total] - + _metrics[e_memory_metric::swap_used]; + _metrics[e_memory_metric::virtual_total] = mem_status.ullTotalPageFile; + _metrics[e_memory_metric::virtual_free] = mem_status.ullAvailPageFile; + _metrics[e_memory_metric::virtual_used] = + _metrics[e_memory_metric::virtual_total] - + _metrics[e_memory_metric::virtual_free]; +} + +/** + * @brief plugins output + * + * @param output + */ +void w_memory_info::dump_to_output(std::string* output) const { + fmt::format_to(std::back_inserter(*output), + "Ram total: {}, used (-buffers/cache): {} ({:.2f}%), " + "free: {} ({:.2f}%)", + byte_memory_metric{_metrics[e_memory_metric::phys_total]}, + byte_memory_metric{_metrics[e_memory_metric::phys_used]}, + get_proportional_value(e_memory_metric::phys_used, + e_memory_metric::phys_total) * + 100, + byte_memory_metric{_metrics[e_memory_metric::phys_free]}, + get_proportional_value(e_memory_metric::phys_free, + e_memory_metric::phys_total) * + 100); + + if (_output_flags & output_flags::dump_swap) { + fmt::format_to(std::back_inserter(*output), + " Swap total: {}, used: {} ({:.2f}%), free: {} ({:.2f}%)", + byte_memory_metric{_metrics[e_memory_metric::swap_total]}, + byte_memory_metric{_metrics[e_memory_metric::swap_used]}, + get_proportional_value(e_memory_metric::swap_used, + e_memory_metric::swap_total) * + 100, + byte_memory_metric{_metrics[e_memory_metric::swap_free]}, + get_proportional_value(e_memory_metric::swap_free, + e_memory_metric::swap_total) * + 100); + } + + if (_output_flags & output_flags::dump_virtual) { + fmt::format_to(std::back_inserter(*output), + " Virtual total: {}, used: {} ({:.2f}%), free: {} ({:.2f}%)", + byte_memory_metric{_metrics[e_memory_metric::virtual_total]}, + byte_memory_metric{_metrics[e_memory_metric::virtual_used]}, + get_proportional_value(e_memory_metric::virtual_used, + e_memory_metric::virtual_total) * + 100, + byte_memory_metric{_metrics[e_memory_metric::virtual_free]}, + get_proportional_value(e_memory_metric::virtual_free, + e_memory_metric::virtual_total) * + 100); + } +} + +} // namespace 
com::centreon::agent::native_check_detail + +using windows_mem_to_status = measure_to_status; + +using mem_to_status_constructor = + std::function(double /*threshold*/)>; + +/** + * @brief status threshold defines + * + */ +static const absl::flat_hash_map + _label_to_mem_to_status = { + // phys + {"critical-usage", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::phys_used, threshold, + e_memory_metric::phys_total, false, false); + }}, + {"warning-usage", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::phys_used, threshold, + e_memory_metric::phys_total, false, false); + }}, + {"critical-usage-free", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::phys_free, threshold, + e_memory_metric::phys_total, false, true); + }}, + {"warning-usage-free", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::phys_free, threshold, + e_memory_metric::phys_total, false, true); + }}, + {"critical-usage-prct", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::phys_used, threshold / 100, + e_memory_metric::phys_total, true, false); + }}, + {"warning-usage-prct", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::phys_used, threshold / 100, + e_memory_metric::phys_total, true, false); + }}, + {"critical-usage-free-prct", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::phys_free, threshold / 100, + e_memory_metric::phys_total, true, true); + }}, + {"warning-usage-free-prct", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::phys_free, threshold / 100, + e_memory_metric::phys_total, true, true); + }}, + // swap + {"critical-swap", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::swap_used, threshold, + e_memory_metric::swap_total, false, false); + }}, + {"warning-swap", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::swap_used, threshold, + e_memory_metric::swap_total, false, false); + }}, + {"critical-swap-free", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::swap_free, threshold, + e_memory_metric::swap_total, false, true); + }}, + {"warning-swap-free", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::swap_free, threshold, + e_memory_metric::swap_total, false, true); + }}, + {"critical-swap-prct", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::swap_used, threshold / 100, + e_memory_metric::swap_total, true, false); + }}, + {"warning-swap-prct", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::swap_used, threshold / 100, + e_memory_metric::swap_total, true, false); + }}, + {"critical-swap-free-prct", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::swap_free, threshold / 100, + e_memory_metric::swap_total, true, true); + }}, + {"warning-swap-free-prct", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::swap_free, threshold / 100, + e_memory_metric::swap_total, true, true); + }}, + // virtual memory + {"critical-virtual", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::virtual_used, threshold, + 
e_memory_metric::virtual_total, false, false); + }}, + {"warning-virtual", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::virtual_used, threshold, + e_memory_metric::virtual_total, false, false); + }}, + {"critical-virtual-free", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::virtual_free, threshold, + e_memory_metric::virtual_total, false, true); + }}, + {"warning-virtual-free", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::virtual_free, threshold, + e_memory_metric::virtual_total, false, true); + }}, + {"critical-virtual-prct", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::virtual_used, + threshold / 100, e_memory_metric::virtual_total, true, false); + }}, + {"warning-virtual-prct", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::virtual_used, + threshold / 100, e_memory_metric::virtual_total, true, false); + }}, + {"critical-virtual-free-prct", + [](double threshold) { + return std::make_unique( + e_status::critical, e_memory_metric::virtual_free, + threshold / 100, e_memory_metric::virtual_total, true, true); + }}, + {"warning-virtual-free-prct", + [](double threshold) { + return std::make_unique( + e_status::warning, e_memory_metric::virtual_free, + threshold / 100, e_memory_metric::virtual_total, true, true); + }} + +}; + +/** + * @brief Construct a new check memory::check memory object + * + * @param io_context + * @param logger + * @param first_start_expected + * @param check_interval + * @param serv + * @param cmd_name + * @param cmd_line + * @param args + * @param cnf + * @param handler + */ +check_memory::check_memory(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat) + : native_check_base(io_context, + logger, + first_start_expected, + check_interval, + serv, + cmd_name, + cmd_line, + args, + cnf, + std::move(handler), + stat) { + _no_percent_unit = "B"; + if (args.IsObject()) { + for (auto member_iter = args.MemberBegin(); member_iter != args.MemberEnd(); + ++member_iter) { + std::string key = absl::AsciiStrToLower(member_iter->name.GetString()); + if (key == "swap") { + std::optional val = get_bool( + cmd_name, member_iter->name.GetString(), member_iter->value); + if (val && *val) { + _output_flags |= w_memory_info::output_flags::dump_swap; + } + continue; + } + if (key == "virtual") { + std::optional val = get_bool( + cmd_name, member_iter->name.GetString(), member_iter->value); + if (val && *val) { + _output_flags |= w_memory_info::output_flags::dump_virtual; + } + continue; + } + + auto mem_to_status_search = _label_to_mem_to_status.find(key); + if (mem_to_status_search != _label_to_mem_to_status.end()) { + std::optional val = get_double( + cmd_name, member_iter->name.GetString(), member_iter->value, true); + if (val) { + std::unique_ptr mem_checker = + mem_to_status_search->second(*val); + _measure_to_status.emplace( + std::make_tuple(mem_checker->get_data_index(), + mem_checker->get_total_data_index(), + mem_checker->get_status()), + std::move(mem_checker)); + } + } else { + SPDLOG_LOGGER_ERROR(logger, "command: {}, unknown parameter {}", + 
cmd_name, member_iter->name); + } + } + } +} + +/** + * @brief create a w_memory_info + * + * @return std::shared_ptr< + * native_check_detail::snapshot> + */ +std::shared_ptr> +check_memory::measure() { + return std::make_shared(_output_flags); +} + +/** + * @brief metric defines + * + */ +static const std::vector + metric_definitions = { + {"memory.usage.bytes", e_memory_metric::phys_used, + e_memory_metric::phys_total, false}, + {"memory.free.bytes", e_memory_metric::phys_free, + e_memory_metric::phys_total, false}, + {"memory.usage.percentage", e_memory_metric::phys_used, + e_memory_metric::phys_total, true}, + + {"swap.usage.bytes", e_memory_metric::swap_used, + e_memory_metric::swap_total, false}, + {"swap.free.bytes", e_memory_metric::swap_free, + e_memory_metric::swap_total, false}, + {"swap.usage.percentage", e_memory_metric::swap_used, + e_memory_metric::swap_total, true}, + + {"virtual-memory.usage.bytes", e_memory_metric::virtual_used, + e_memory_metric::virtual_total, false}, + {"virtual-memory.free.bytes", e_memory_metric::virtual_free, + e_memory_metric::virtual_total, false}, + {"virtual-memory.usage.percentage", e_memory_metric::virtual_used, + e_memory_metric::virtual_total, true}, +}; + +const std::vector& +check_memory::get_metric_definitions() const { + return metric_definitions; +} + +void check_memory::help(std::ostream& help_stream) { + help_stream << R"( +- memory params: + swap (default false): true: add swap to output + virtual (default false): true: add virtual memory to output + critical-usage: threshold for critical status on physical memory usage in bytes + warning-usage: threshold for warning status on physyical memory usage in bytes + critical-usage-free: threshold for critical status on free physical memory in bytes, if free memory is lower than threshold, service is critical + warning-usage-free: threshold for warning status on free physical memory in bytes + critical-usage-prct: threshold for critical status on memory usage in percentage + warning-usage-prct: threshold for warning status on memory usage in percentage + critical-usage-free-prct: threshold for critical status on free memory in percentage + warning-usage-free-prct: threshold for warning status on free memory in percentage + critical-swap: threshold for critical status on swap usage in bytes + warning-swap: threshold for warning status on swap usage in bytes + critical-swap-free: threshold for critical status on free swap in bytes + warning-swap-free: threshold for warning status on free swap in bytes + critical-swap-prct: threshold for critical status on swap usage in percentage + warning-swap-prct: threshold for warning status on swap usage in percentage + critical-swap-free-prct: threshold for critical status on free swap in percentage + warning-swap-free-prct: threshold for warning status on free swap in percentage + critical-virtual: threshold for critical status on virtual memory usage in bytes + warning-virtual: threshold for warning status on virtual memory usage in bytes + critical-virtual-free: threshold for critical status on free virtual memory in bytes + warning-virtual-free: threshold for warning status on free virtual memory in bytes + critical-virtual-prct: threshold for critical status on virtual memory usage in percentage + warning-virtual-prct: threshold for warning status on virtual memory usage in percentage + critical-virtual-free-prct: threshold for critical status on free virtual memory in percentage + warning-virtual-free-prct: threshold for warning status on free 
virtual memory in percentage + An example of configuration: + { + "check": "memory", + "args": { + "swap": true, + "virtual": true, + "warning-usage-prct": 80, + "critical-usage-prct": 90 + } + } + Examples of output: + OK: Ram total: 16 GB, used (-buffers/cache): 15.99 GB (99.96%), free: 7 MB (0.04%) + With swap flag: + OK: Ram total: 16 GB, used (-buffers/cache): 15.99 GB (99.96%), free: 7 MB (0.04%) Swap total: 44 GB, used: 4 GB (9.11%), free: 39.99 GB (90.89%) + With swap and virtual flag: + OK: Ram total: 16 GB, used (-buffers/cache): 15.99 GB (99.96%), free: 7 MB (0.04%) Swap total: 44 GB, used: 4 GB (9.11%), free: 39.99 GB (90.89%) Virtual total: 24 GB, used: 18 GB (75.00%), free: 6 GB (25.00%) + Metrics: + memory.usage.bytes + memory.free.bytes + memory.usage.percentage + swap.usage.bytes + swap.free.bytes + swap.usage.percentage + virtual-memory.usage.bytes + virtual-memory.free.bytes + virtual-memory.usage.percentage +)"; +} + +namespace com::centreon::agent { +template class native_check_base< + native_check_detail::e_memory_metric::nb_metric>; +} diff --git a/agent/native_windows/src/check_service.cc b/agent/native_windows/src/check_service.cc new file mode 100644 index 00000000000..cacda0e0cdb --- /dev/null +++ b/agent/native_windows/src/check_service.cc @@ -0,0 +1,719 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include + +#include "check_service.hh" +#include "native_check_base.cc" + +using namespace com::centreon::agent; +using namespace com::centreon::agent::native_check_detail; + +namespace com::centreon::agent::native_check_detail { + +/*********************************************************************************************** + * service_enumerator + ***********************************************************************************************/ + +/** + * @brief Constructor + */ +service_enumerator::service_enumerator() { + _sc_manager_handler = OpenSCManager(nullptr, nullptr, GENERIC_READ); + if (!_sc_manager_handler) { + throw exceptions::msg_fmt("OpenSCManager failed"); + } +} + +/** + * @brief Destructor + */ +service_enumerator::~service_enumerator() { + CloseServiceHandle(_sc_manager_handler); +} + +/** + * @brief Enumerate services (just call a version of _enumerate_services) + */ +void service_enumerator::enumerate_services( + service_filter& filter, + service_enumerator::listener&& callback, + const std::shared_ptr& logger) { + if (filter.use_start_auto_filter()) { + _enumerate_services(filter, std::move(callback), logger); + } else { + _enumerate_services(filter, std::move(callback), logger); + } +} + +/** + * @brief Abstract layer used to enumerate services (overloaded in tests to do a + * mock) + */ +bool service_enumerator::_enumerate_services(serv_array& services, + DWORD* services_returned) { + DWORD buff_size = sizeof(services); + return EnumServicesStatusA(_sc_manager_handler, SERVICE_TYPE_ALL, + SERVICE_STATE_ALL, services, sizeof(services), + &buff_size, services_returned, &_resume_handle); +} + +/** + * @brief Query the service configuration (overloaded in tests to do a mock) + */ +bool service_enumerator::_query_service_config( + LPCSTR service_name, + QUERY_SERVICE_CONFIGA& serv_conf, + const std::shared_ptr& logger) { + SC_HANDLE serv_handle = + OpenService(_sc_manager_handler, service_name, GENERIC_READ); + if (!serv_handle) { + SPDLOG_LOGGER_ERROR(logger, " fail to open service {}", service_name); + return false; + } + DWORD bytes_needed = 0; + if (!QueryServiceConfigA(serv_handle, &serv_conf, sizeof(serv_conf), + &bytes_needed)) { + SPDLOG_LOGGER_ERROR(logger, " fail to query service config {}", + service_name); + CloseServiceHandle(serv_handle); + return false; + } + CloseServiceHandle(serv_handle); + return true; +} + +/** + * @brief Enumerate services + * @tparam start_auto if true, start_auto config parameter will be checked + * @param filter service filter + * @param callback callback to call on each service + * @param logger logger + */ +template +void service_enumerator::_enumerate_services( + service_filter& filter, + service_enumerator::listener&& callback, + const std::shared_ptr& logger) { + ENUM_SERVICE_STATUSA services[512]; + + _resume_handle = 0; + + DWORD bytes_needed = 0; + DWORD services_count = 0; + + while (true) { + BOOL success = _enumerate_services(services, &services_count); + if (success || GetLastError() == ERROR_MORE_DATA) { + LPENUM_SERVICE_STATUSA services_end = services + services_count; + for (LPENUM_SERVICE_STATUS serv = services; serv < services_end; ++serv) { + if constexpr (start_auto) { + QUERY_SERVICE_CONFIGA serv_conf; + if (!_query_service_config(serv->lpServiceName, serv_conf, logger)) { + continue; + } + + bool this_serv_auto_start = + (serv_conf.dwStartType & + (SERVICE_AUTO_START | SERVICE_BOOT_START | + SERVICE_SYSTEM_START)) != 0; + if 
(!filter.is_allowed(this_serv_auto_start, serv->lpServiceName, + serv->lpDisplayName)) { + continue; + } + callback(*serv); + } else { + if (!filter.is_allowed(false, serv->lpServiceName, + serv->lpDisplayName)) { + continue; + } + callback(*serv); + } + } + } + if (success) { + break; + } + } +} + +/*********************************************************************************************** + * w_service_info + **********************************************************************************************/ + +/** + * service status printed in plugin output + */ +static constexpr std::array _labels = { + "", "stopped", "starting", "stopping", + "running", "continuing", "pausing", "paused"}; + +/** + * @brief Constructor + * @param service_enumerator service enumerator + * @param filter service filter + * @param state_to_warning state to warning, if a service state is in this mask, + * output status will be at less warning + * @param state_to_critical state to critical + * @param logger logger + */ +w_service_info::w_service_info(service_enumerator& service_enumerator, + service_filter& filter, + unsigned state_to_warning, + unsigned state_to_critical, + const std::shared_ptr& logger) + : _state_to_warning(state_to_warning), + _state_to_critical(state_to_critical) { + memset(&_metrics, 0, sizeof(_metrics)); + service_enumerator.enumerate_services( + filter, + [this](const ENUM_SERVICE_STATUSA& service_status) { + on_service(service_status); + }, + logger); + if (_metrics[e_service_metric::total] == 0) { + _status = e_status::critical; + } +} + +/** + * @brief callback called by enumerator + */ +void w_service_info::on_service(const ENUM_SERVICE_STATUSA& service_status) { + unsigned state = service_status.ServiceStatus.dwCurrentState & 7; + unsigned state_flag = 1 << (state - 1); + if (state & _state_to_critical) { + _status = e_status::critical; + if (!_output.empty()) { + _output.push_back(' '); + } + _output += fmt::format("CRITICAL: {} is {}", service_status.lpServiceName, + _labels[state]); + } else if (state & _state_to_warning) { + if (_status == e_status::ok) { + _status = e_status::warning; + } + if (!_output.empty()) { + _output.push_back(' '); + } + _output += fmt::format("WARNING: {} is {}", service_status.lpServiceName, + _labels[state]); + } + ++_metrics[state - 1]; + ++_metrics[e_service_metric::total]; +} + +/** + * plugin output + */ +void w_service_info::dump_to_output(std::string* output) const { + uint64_t total = _metrics[e_service_metric::total]; + if (total == 0) { + output->append("no service found"); + } else if (total == _metrics[e_service_metric::running]) { + output->append("all services are running"); + } else { + output->append("services: "); + bool first = true; + for (unsigned i = 0; i < e_service_metric::total; ++i) { + if (_metrics[i] > 0) { + if (first) { + first = false; + } else { + output->append(", "); + } + output->append(fmt::format("{} {}", _metrics[i], _labels[i + 1])); + } + } + } + if (!_output.empty()) { + output->push_back(' '); + output->append(_output); + } +} + +/*********************************************************************************************** + * service_filter + **********************************************************************************************/ + +/** + * @brief Constructor that initializes the service filter based on the provided + * arguments. + * @param args JSON value containing the plugin config. + * @throws exceptions::msg_fmt if any of the filter parameters are invalid. 
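 *
 * Illustrative arguments (the keys are the ones parsed below; the regex values
 * are only examples):
 * @code {.json}
 * { "start-auto": true, "filter-name": "centreon.*", "exclude-display": ".*test.*" }
 * @endcode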
+ */ +service_filter::service_filter(const rapidjson::Value& args) { + if (args.IsObject()) { + for (auto member_iter = args.MemberBegin(); member_iter != args.MemberEnd(); + ++member_iter) { + std::string key = absl::AsciiStrToLower(member_iter->name.GetString()); + if (key == "start-auto") { + const rapidjson::Value& val = member_iter->value; + if (val.IsBool()) { + _start_auto = val.GetBool(); + } else { + throw exceptions::msg_fmt("start-auto must be a boolean"); + } + } else if (key == "filter-name") { + const rapidjson::Value& val = member_iter->value; + if (val.IsString()) { + std::string value = val.GetString(); + absl::AsciiStrToLower(&value); + _name_filter = std::make_unique(value); + if (!_name_filter->ok()) { + throw exceptions::msg_fmt("filter-name: {} is not a valid regex", + val.GetString()); + } + } else { + throw exceptions::msg_fmt("filter-name must be a string"); + } + } else if (key == "exclude-name") { + const rapidjson::Value& val = member_iter->value; + if (val.IsString()) { + std::string value = val.GetString(); + absl::AsciiStrToLower(&value); + _name_filter_exclude = std::make_unique(value); + if (!_name_filter_exclude->ok()) { + throw exceptions::msg_fmt("exclude-name: {} is not a valid regex", + val.GetString()); + } + } else { + throw exceptions::msg_fmt("exclude-name must be a string"); + } + } else if (key == "filter-display") { + const rapidjson::Value& val = member_iter->value; + if (val.IsString()) { + std::string value = val.GetString(); + absl::AsciiStrToLower(&value); + _display_filter = std::make_unique(value); + if (!_display_filter->ok()) { + throw exceptions::msg_fmt("filter-display: {} is not a valid regex", + val.GetString()); + } + } else { + throw exceptions::msg_fmt("filter-display must be a string"); + } + } else if (key == "exclude-display") { + const rapidjson::Value& val = member_iter->value; + if (val.IsString()) { + std::string value = val.GetString(); + absl::AsciiStrToLower(&value); + _display_filter_exclude = std::make_unique(value); + if (!_display_filter_exclude->ok()) { + throw exceptions::msg_fmt( + "exclude-display: {} is not a valid regex", val.GetString()); + } + } else { + throw exceptions::msg_fmt("exclude-display must be a string"); + } + } + } + } +} + +/** + * @brief remove all negative chars + * @param sz string to clean + */ +static void remove_accents(std::string* sz) { + for (char& chr : *sz) { + if (chr < 0) { + chr = '_'; + } + } +} + +/** + * @brief Check if a service is allowed by the filter. + * @param start_auto Whether the service is set to start automatically. + * @param service_name The name of the service. 
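 * @param service_display The display name of the service.
 * @return true if the service passes the start-auto, name and display filters.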
+ */ +bool service_filter::is_allowed(bool start_auto, + const std::string_view& service_name, + const std::string_view& service_display) { + std::string lower_service_name(service_name.data(), service_name.length()); + absl::AsciiStrToLower(&lower_service_name); + + std::string lower_display(service_display.data(), service_display.length()); + absl::AsciiStrToLower(&lower_display); + + // accented characters are not supported by RE2 so we remove them + remove_accents(&lower_display); + + if (_start_auto && _start_auto.value() != start_auto) { + return false; + } + if (_name_cache_excluded.find(lower_service_name) != + _name_cache_excluded.end()) { + return false; + } + if (_display_cache_excluded.find(lower_display) != + _display_cache_excluded.end()) { + return false; + } + + auto check_display = [&]() { + if (_display_filter_exclude && + RE2::FullMatch(lower_display, *_display_filter_exclude)) { + _display_cache_excluded.emplace(lower_display); + return false; + } + if (_display_filter && !RE2::FullMatch(lower_display, *_display_filter)) { + _display_cache_excluded.emplace(lower_display); + return false; + } + _display_cache_allowed.emplace(lower_display); + return true; + }; + + auto check_name = [&]() { + if (_name_filter_exclude && + RE2::FullMatch(lower_service_name, *_name_filter_exclude)) { + _name_cache_excluded.emplace(lower_service_name); + return false; + } + if (_name_filter && !RE2::FullMatch(lower_service_name, *_name_filter)) { + _name_cache_excluded.emplace(lower_service_name); + return false; + } + _name_cache_allowed.emplace(lower_service_name); + return true; + }; + + if (_name_cache_allowed.find(lower_service_name) != + _name_cache_allowed.end()) { + if (_display_cache_allowed.find(lower_display) != + _display_cache_allowed.end()) { + return true; + } + return check_display(); + } + + if (_display_cache_allowed.find(lower_display) != + _display_cache_allowed.end()) { + return check_name(); + } + + return check_name() && check_display(); +} + +/*********************************************************************************************** + * w_service_info_to_status + **********************************************************************************************/ + +/** + * The goal of this class is to convert the status field of w_service_info into + * check_status. 
It compares nothing + * */ +class w_service_info_to_status + : public measure_to_status { + public: + w_service_info_to_status() + : measure_to_status(e_status::ok, + 0, + 0, + 0, + false, + false) {} + + void compute_status( + const snapshot& to_test, + e_status* status) const override { + e_status serv_status = + static_cast(to_test).get_status(); + if (serv_status > *status) { + *status = serv_status; + } + } +}; + +} // namespace com::centreon::agent::native_check_detail + +/*********************************************************************************************** + * check_service + **********************************************************************************************/ + +/** + * we can allow different service states, so we use check_service::state_mask to + * filter or set status to critical + */ +const std::array, 7> + check_service::_label_state = { + std::make_pair("stopped", check_service::state_mask::stopped), + std::make_pair("starting", check_service::state_mask::start_pending), + std::make_pair("stopping", check_service::state_mask::stop_pending), + std::make_pair("running", check_service::state_mask::running), + std::make_pair("continuing", + check_service::state_mask::continue_pending), + std::make_pair("pausing", check_service::state_mask::pause_pending), + std::make_pair("paused", check_service::state_mask::paused)}; + +using w_service_to_status = + measure_to_status; + +using service_to_status_constructor = + std::function(double /*threshold*/)>; + +static const absl::flat_hash_map + _label_to_service_status = { + {"warning-total-running", + [](double threshold) { + return std::make_unique( + e_status::warning, e_service_metric::running, threshold, + e_service_metric::nb_service_metric, false, true); + }}, + {"critical-total-running", + [](double threshold) { + return std::make_unique( + e_status::critical, e_service_metric::running, threshold, + e_service_metric::nb_service_metric, false, true); + }}, + {"warning-total-paused", + [](double threshold) { + return std::make_unique( + e_status::warning, e_service_metric::paused, threshold, + e_service_metric::nb_service_metric, false, false); + }}, + {"critical-total-paused", + [](double threshold) { + return std::make_unique( + e_status::critical, e_service_metric::paused, threshold, + e_service_metric::nb_service_metric, false, false); + }}, + {"warning-total-stopped", + [](double threshold) { + return std::make_unique( + e_status::warning, e_service_metric::stopped, threshold, + e_service_metric::nb_service_metric, false, false); + }}, + {"critical-total-stopped", + [](double threshold) { + return std::make_unique( + e_status::critical, e_service_metric::stopped, threshold, + e_service_metric::nb_service_metric, false, false); + }} + +}; + +/** + * default service enumerator constructor + */ +service_enumerator::constructor check_service::_enumerator_constructor = []() { + return std::make_unique(); +}; + +/** + * @brief constructor + */ +check_service::check_service( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat) + : native_check_base(io_context, + logger, + first_start_expected, + check_interval, + serv, + cmd_name, + cmd_line, + args, + cnf, + std::move(handler), + stat), + 
_filter(args), + _enumerator(_enumerator_constructor()) { + _measure_to_status.emplace( + std::make_tuple(e_service_metric::nb_service_metric, + e_service_metric::nb_service_metric, e_status::ok), + std::make_unique()); + + if (!args.IsObject()) { + return; + } + + for (auto member_iter = args.MemberBegin(); member_iter != args.MemberEnd(); + ++member_iter) { + std::string key = absl::AsciiStrToLower(member_iter->name.GetString()); + + if (key == "warning-state") { + const rapidjson::Value& val = member_iter->value; + if (val.IsString()) { + re2::RE2 filter_typ_re(val.GetString()); + if (!filter_typ_re.ok()) { + throw exceptions::msg_fmt( + "command: {} warning-state: {} is not a valid regex", cmd_name, + val.GetString()); + } else { + for (const auto& [label, flag] : _label_state) { + if (RE2::FullMatch(label, filter_typ_re)) { + _state_to_warning |= flag; + } + } + } + } else { + throw exceptions::msg_fmt("command: {} warning-state must be a string", + cmd_name); + } + } else if (key == "critical-state") { + const rapidjson::Value& val = member_iter->value; + if (val.IsString()) { + re2::RE2 filter_typ_re(val.GetString()); + if (!filter_typ_re.ok()) { + throw exceptions::msg_fmt( + "command: {} critical-state: {} is not a valid regex", cmd_name, + val.GetString()); + } else { + for (const auto& [label, flag] : _label_state) { + if (RE2::FullMatch(label, filter_typ_re)) { + _state_to_critical |= flag; + } + } + } + } else { + throw exceptions::msg_fmt("command: {} critical-state must be a string", + cmd_name); + } + } else { + auto threshold = _label_to_service_status.find(key); + if (threshold != _label_to_service_status.end()) { + std::optional val = get_double( + cmd_name, member_iter->name.GetString(), member_iter->value, true); + if (val) { + std::unique_ptr to_ins = threshold->second(*val); + _measure_to_status.emplace( + std::make_tuple(to_ins->get_data_index(), + e_service_metric::nb_service_metric, + to_ins->get_status()), + std::move(to_ins)); + } + } else if (key != "filter-name" && key != "exclude-name" && + key != "filter-display" && key != "exclude-display" && + key != "start-auto") { + SPDLOG_LOGGER_ERROR(logger, "command: {}, unknown parameter: {}", + cmd_name, member_iter->name); + } + } + } +} + +/** + * @brief create a snapshot of services state + */ +std::shared_ptr> +check_service::measure() { + // used to reset service list walking + _enumerator->reset_resume_handle(); + return std::make_shared( + *_enumerator, _filter, _state_to_warning, _state_to_critical, _logger); +} + +static const std::vector + metric_definitions = { + {"services.stopped.count", e_service_metric::stopped, + e_service_metric::total, false}, + {"services.starting.count", e_service_metric::start_pending, + e_service_metric::total, false}, + {"services.stopping.count", e_service_metric::stop_pending, + e_service_metric::total, false}, + {"services.running.count", e_service_metric::running, + e_service_metric::total, false}, + {"services.continuing.count", e_service_metric::continue_pending, + e_service_metric::total, false}, + {"services.pausing.count", e_service_metric::pause_pending, + e_service_metric::total, false}, + {"services.paused.count", e_service_metric::paused, + e_service_metric::total, false}}; + +const std::vector& +check_service::get_metric_definitions() const { + return metric_definitions; +} + +/** + * @brief some help + */ +void check_service::help(std::ostream& help_stream) { + help_stream << R"( +- service params: + warning-state: regex to match service state that will trigger 
a warning + states are: + - stopped + - starting + - stopping + - running + - continuing + - pausing + - paused + critical-state: regex to match service state that will trigger a critical + warning-total-running: running service number threshold below which the service will pass in the warning state + critical-total-running: running service number threshold below which the service will pass in the critical state + warning-total-paused: number of services in the pause state above which the service goes into the warning state + critical-total-paused: number of services in the pause state above which the service goes into the critical state + warning-total-stopped: number of services in the stop state above which the service goes into the warning state + critical-total-stopped: number of services in the stop state above which the service goes into the critical state + start-auto: true: only services that start automatically will be counted + filter-name: regex to filter service names + exclude-name: regex to exclude service names + filter-display: regex to filter service display names as they appear in service manager + exclude-display: regex to exclude service display names + An example of a configuration file: + { + "check": "service", + "args": { + "warning-state": "stopped", + "critical-state": "running", + "warning-total-running": 20, + "critical-total-running": 150, + "start-auto": true, + "filter-name": ".*", + "exclude-name": ".*" + } + } + Examples of output: + OK: all services are running + In case of a too restricted filter: + CRITICAL: no service found + In case on some services not in running state: + OK: services: 1 stopped, 1 starting, 1 stopping + In case of a service in a critical state: + CRITICAL: services: 1 stopped, 1 starting, 1 stopping CRITICAL: logon is stopped CRITICAL: httpd is stopping + Metrics: + services.stopped.count + services.starting.count + services.stopping.count + services.running.count + services.continuing.count + services.pausing.count + services.paused.count +)"; +} + +namespace com::centreon::agent { +template class native_check_base< + native_check_detail::e_service_metric::nb_service_metric>; +} diff --git a/agent/native_windows/src/check_uptime.cc b/agent/native_windows/src/check_uptime.cc new file mode 100644 index 00000000000..31256ac3e26 --- /dev/null +++ b/agent/native_windows/src/check_uptime.cc @@ -0,0 +1,188 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include +#include + +#include "absl/container/flat_hash_map.h" +#include "check_uptime.hh" + +#include "com/centreon/common/rapidjson_helper.hh" + +using namespace com::centreon::agent; + +static const absl::flat_hash_map _unit_multiplier = + {{"m", 60}, {"minute", 60}, {"h", 3600}, {"hour", 3600}, + {"d", 86400}, {"day", 86400}, {"w", 604800}, {"week", 604800}}; + +/** + * @brief Construct a new check uptime::check uptime object + * + * @param io_context + * @param logger + * @param first_start_expected + * @param check_interval + * @param serv + * @param cmd_name + * @param cmd_line + * @param args + * @param cnf + * @param handler + */ +check_uptime::check_uptime(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat) + : check(io_context, + logger, + first_start_expected, + check_interval, + serv, + cmd_name, + cmd_line, + cnf, + std::move(handler), + stat), + _second_warning_threshold(0), + _second_critical_threshold(0) { + com::centreon::common::rapidjson_helper arg(args); + try { + if (args.IsObject()) { + _second_warning_threshold = arg.get_unsigned("warning-uptime", 0); + _second_critical_threshold = arg.get_unsigned("critical-uptime", 0); + std::string unit = arg.get_string("unit", "s"); + boost::to_lower(unit); + auto multiplier = _unit_multiplier.find(unit); + if (multiplier != _unit_multiplier.end()) { + _second_warning_threshold *= multiplier->second; + _second_critical_threshold *= multiplier->second; + } + } + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(_logger, "check_uptime, fail to parse arguments: {}", + e.what()); + throw; + } +} + +/** + * @brief get uptime with GetTickCount64 + * + * @param timeout unused + */ +void check_uptime::start_check([[maybe_unused]] const duration& timeout) { + if (!_start_check(timeout)) { + return; + } + std::string output; + common::perfdata perf; + e_status status = compute(GetTickCount64(), &output, &perf); + + _io_context->post([me = shared_from_this(), this, out = std::move(output), + status, performance = std::move(perf)]() { + on_completion(_get_running_check_index(), status, {performance}, {out}); + }); +} + +/** + * @brief calculate status, output and perfdata from uptime + * + * @param ms_uptime + * @param output + * @param perfs + * @return e_status + */ +e_status check_uptime::compute(uint64_t ms_uptime, + std::string* output, + common::perfdata* perf) { + uint64_t uptime = ms_uptime / 1000; + uint64_t uptime_bis = uptime; + + std::string sz_uptime; + if (uptime > 86400) { + sz_uptime = fmt::format("{}d ", uptime / 86400); + uptime %= 86400; + } + if (uptime > 3600 || !sz_uptime.empty()) { + absl::StrAppend(&sz_uptime, uptime / 3600, "h "); + uptime %= 3600; + } + if (uptime > 60 || !sz_uptime.empty()) { + absl::StrAppend(&sz_uptime, uptime / 60, "m "); + uptime %= 60; + } + absl::StrAppend(&sz_uptime, uptime, "s"); + + using namespace std::literals; + e_status status = e_status::ok; + if (_second_critical_threshold && uptime_bis < _second_critical_threshold) { + *output = "CRITICAL: System uptime is: " + sz_uptime; + status = e_status::critical; + } else if (_second_warning_threshold && + uptime_bis < _second_warning_threshold) { + *output = 
"WARNING: System uptime is: " + sz_uptime; + status = e_status::warning; + } else { + *output = "OK: System uptime is: " + sz_uptime; + } + + perf->name("uptime"sv); + perf->unit("s"); + perf->value(uptime_bis); + perf->min(0); + if (_second_critical_threshold) { + perf->critical_low(0); + perf->critical(_second_critical_threshold); + } + if (_second_warning_threshold) { + perf->warning_low(0); + perf->warning(_second_warning_threshold); + } + return status; +} + +void check_uptime::help(std::ostream& help_stream) { + help_stream << + R"( +- uptime params:" + unit (defaults s): can be s, second, m, minute, h, hour, d, day, w, week + warning-uptime: warning threshold, if computer has been up for less than this time, service will be in warning state + critical-uptime: critical threshold + An example of configuration: + { + "check": "uptime", + "args": { + "unit": "day", + "warning-uptime": 1, + "critical-uptime": 2 + } + } + Examples of output: + OK: System uptime is: 5d 1h 1m 1s + CRITICAL: System uptime is: 1d 4h 0m 0s + Metrics: + uptime +)"; +} diff --git a/agent/native_windows/src/ntdll.cc b/agent/native_windows/src/ntdll.cc new file mode 100644 index 00000000000..df0207ffa8c --- /dev/null +++ b/agent/native_windows/src/ntdll.cc @@ -0,0 +1,59 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include "ntdll.hh" + +namespace com::centreon::agent { + +// ntdll.dll handle +static HMODULE _ntdll = nullptr; + +NtQuerySystemInformationPtr nt_query_system_information = nullptr; +RtlGetVersionPtr rtl_get_version = nullptr; + +/** + * @brief load ntdll.dll and get NtQuerySystemInformation and RtlGetVersion + * address + * + */ +void load_nt_dll() { + if (!_ntdll) { + _ntdll = LoadLibraryA("ntdll.dll"); + if (!_ntdll) { + throw std::runtime_error("Failed to load ntdll.dll"); + } + } + + // get NtQuerySystemInformation Pointer + nt_query_system_information = (NtQuerySystemInformationPtr)GetProcAddress( + _ntdll, "NtQuerySystemInformation"); + if (!nt_query_system_information) { + FreeLibrary(_ntdll); + _ntdll = nullptr; + throw std::runtime_error( + "Failed to get address of NtQuerySystemInformation"); + } + + rtl_get_version = (RtlGetVersionPtr)GetProcAddress(_ntdll, "RtlGetVersion"); + if (!rtl_get_version) { + FreeLibrary(_ntdll); + _ntdll = nullptr; + throw std::runtime_error("Failed to get address of RtlGetVersion"); + } +} +} // namespace com::centreon::agent \ No newline at end of file diff --git a/agent/precomp_inc/precomp.hh b/agent/precomp_inc/precomp.hh index cffb6f5b781..e53dd163dca 100644 --- a/agent/precomp_inc/precomp.hh +++ b/agent/precomp_inc/precomp.hh @@ -21,9 +21,11 @@ #include #include +#include #include #include #include +#include #include #include @@ -31,17 +33,34 @@ #include #include +#include +#include +#include +#include +#include +#include #include #include +#include + #include namespace asio = boost::asio; #include +#include +#include +#include +#include +#include #include #include #include +#include "com/centreon/exceptions/msg_fmt.hh" + +namespace multi_index = boost::multi_index; + #endif diff --git a/agent/proto/agent.proto b/agent/proto/agent.proto index 5a9190d2c12..f555d0e169a 100644 --- a/agent/proto/agent.proto +++ b/agent/proto/agent.proto @@ -63,6 +63,8 @@ message AgentInfo { //host name of the computer of the agent string host=1; Version centreon_version=2; + string os=3; //can be alma, windows, etc + string os_version=4; } //Agent configuration sent by Engine diff --git a/agent/src/bireactor.cc b/agent/src/bireactor.cc index e26346be55c..6a7f07dd4e8 100644 --- a/agent/src/bireactor.cc +++ b/agent/src/bireactor.cc @@ -29,8 +29,9 @@ using namespace com::centreon::agent; * @tparam bireactor_class */ template -std::set>> - bireactor::_instances; +std::set>>* + bireactor::_instances = + new std::set>>; template std::mutex bireactor::_instances_m; @@ -42,11 +43,11 @@ bireactor::bireactor( const std::string_view& class_name, const std::string& peer) : _write_pending(false), - _alive(true), _class_name(class_name), _peer(peer), _io_context(io_context), - _logger(logger) { + _logger(logger), + _alive(true) { SPDLOG_LOGGER_DEBUG(_logger, "create {} this={:p} peer:{}", _class_name, static_cast(this), _peer); } @@ -61,7 +62,7 @@ template void bireactor::register_stream( const std::shared_ptr& strm) { std::lock_guard l(_instances_m); - _instances.insert(strm); + _instances->insert(strm); } template @@ -162,7 +163,7 @@ void bireactor::OnDone() { std::lock_guard l(_instances_m); SPDLOG_LOGGER_DEBUG(logger, "{:p} server::OnDone() to {}", static_cast(me.get()), peer); - _instances.erase(std::static_pointer_cast>(me)); + _instances->erase(std::static_pointer_cast>(me)); }); } @@ -186,7 +187,7 @@ void bireactor::OnDone(const ::grpc::Status& status) { static_cast(me.get()), peer, status.error_message(), 
status.error_details()); } - _instances.erase(std::static_pointer_cast>(me)); + _instances->erase(std::static_pointer_cast>(me)); }); } diff --git a/agent/src/check.cc b/agent/src/check.cc index 27c29701f16..a730ad4a4c4 100644 --- a/agent/src/check.cc +++ b/agent/src/check.cc @@ -20,12 +20,53 @@ using namespace com::centreon::agent; +/** + * @brief update check interval of a check + * + * @param cmd_name name of command (entered by user in centreon UI) + * @param last_check_interval + */ +void checks_statistics::add_interval_stat(const std::string& cmd_name, + const duration& last_check_interval) { + auto it = _stats.find(cmd_name); + if (it == _stats.end()) { + _stats.insert({cmd_name, last_check_interval, {}}); + } else { + _stats.get<0>().modify(it, [last_check_interval](check_stat& it) { + it.last_check_interval = last_check_interval; + }); + } +} + +/** + * @brief update check duration of a check + * + * @param cmd_name name of command (entered by user in centreon UI) + * @param last_check_duration + */ +void checks_statistics::add_duration_stat(const std::string& cmd_name, + const duration& last_check_duration) { + auto it = _stats.find(cmd_name); + if (it == _stats.end()) { + _stats.insert({cmd_name, {}, last_check_duration}); + } else { + _stats.get<0>().modify(it, [last_check_duration](check_stat& it) { + it.last_check_duration = last_check_duration; + }); + } +} + +const std::array check::status_label = { + "OK: ", "WARNING: ", "CRITICAL: ", "UNKNOWN: "}; + /** * @brief Construct a new check::check object * * @param io_context * @param logger - * @param exp + * @param first_start_expected start expected + * @param check_interval check interval between two checks (not only this but + * also others) * @param serv * @param command_name * @param cmd_line @@ -34,37 +75,41 @@ using namespace com::centreon::agent; */ check::check(const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point exp, + time_point first_start_expected, + duration check_interval, const std::string& serv, const std::string& command_name, const std::string& cmd_line, const engine_to_agent_request_ptr& cnf, - completion_handler&& handler) - : _start_expected(exp), + completion_handler&& handler, + const checks_statistics::pointer& stat) + : _start_expected(first_start_expected, check_interval), _service(serv), _command_name(command_name), _command_line(cmd_line), _conf(cnf), - _io_context(io_context), - _logger(logger), _time_out_timer(*io_context), - _completion_handler(handler) {} + _completion_handler(handler), + _stat(stat), + _io_context(io_context), + _logger(logger) {} /** - * @brief scheduler uses this method to increase start_expected + * @brief start timeout timer and init some flags used by timeout and completion + * must be called first by daughter check class + * @code {.c++} + * void my_check::start_check(const duration & timeout) { + * if (!_start_check(timeout)) + * return; + * ....do job.... 
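 *    // when the job is done (directly or from an asio completion handler),
 *    // call on_completion(_get_running_check_index(), status, perfdata, outputs)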
+ * } + * @endcode * - * @param to_add - */ -void check::add_duration_to_start_expected(const duration& to_add) { - _start_expected += to_add; -} - -/** - * @brief start a asynchronous check * * @param timeout + * @return true if check can be done, false otherwise */ -void check::start_check(const duration& timeout) { +bool check::_start_check(const duration& timeout) { if (_running_check) { SPDLOG_LOGGER_ERROR(_logger, "check for service {} is already running", _service); @@ -73,14 +118,21 @@ void check::start_check(const duration& timeout) { to_call(me, 3, std::list(), {"a check is already running"}); }); - return; + return false; } - // we refresh start expected in order that next call will occur at now + check - // period - _start_expected = std::chrono::system_clock::now(); _running_check = true; _start_timeout_timer(timeout); SPDLOG_LOGGER_TRACE(_logger, "start check for service {}", _service); + + time_point now = std::chrono::system_clock::now(); + + if (_last_start.time_since_epoch().count() != 0) { + _stat->add_interval_stat(_command_name, now - _last_start); + } + + _last_start = now; + + return true; } /** @@ -111,6 +163,7 @@ void check::_timeout_timer_handler(const boost::system::error_code& err, if (start_check_index == _running_check_index) { SPDLOG_LOGGER_ERROR(_logger, "check timeout for service {} cmd: {}", _service, _command_name); + this->_on_timeout(); on_completion(start_check_index, 3 /*unknown*/, std::list(), {"Timeout at execution of " + _command_line}); @@ -133,11 +186,84 @@ void check::on_completion( const std::list& perfdata, const std::list& outputs) { if (start_check_index == _running_check_index) { - SPDLOG_LOGGER_TRACE(_logger, "end check for service {} cmd: {}", _service, - _command_name); + SPDLOG_LOGGER_TRACE(_logger, + "end check for service {} cmd: {} status:{} output: {}", + _service, _command_name, status, + outputs.empty() ? 
"" : outputs.front()); _time_out_timer.cancel(); _running_check = false; ++_running_check_index; + _stat->add_duration_stat(_command_name, + std::chrono::system_clock::now() - _last_start); _completion_handler(shared_from_this(), status, perfdata, outputs); } } + +/** + * @brief get a double value from a json number or a string containing a number + * + * @param cmd_name used to trace exception + * @param field_name used to trace exception + * @param val rapidjson value + * @param must_be_positive if true, value must be positive + * @throw exception-object if value is not a number + * @return std::optional set if value is not an empty string + */ +std::optional check::get_double(const std::string& cmd_name, + const char* field_name, + const rapidjson::Value& val, + bool must_be_positive) { + double value; + if (val.IsNumber()) { + value = val.GetDouble(); + } else if (val.IsString()) { + const char* to_conv = val.GetString(); + if (!*to_conv) { + return {}; + } + if (!absl::SimpleAtod(to_conv, &value)) { + throw exceptions::msg_fmt("command: {}, parameter {} is not a number", + cmd_name, field_name); + } + } else { + throw exceptions::msg_fmt("command: {}, parameter {} is not a number", + cmd_name, field_name); + } + if (must_be_positive && value < 0) { + throw exceptions::msg_fmt("command: {}, {} is negative for parameter {}", + cmd_name, value, field_name); + } + return value; +} + +/** + * @brief get a boolean value from a json object + * It can be a boolean value or a string containing a boolean + * + * @param cmd_name + * @param field_name + * @param val + * @throw exception-object if value is not a boolean + * @return std::optional + */ +std::optional check::get_bool(const std::string& cmd_name, + const char* field_name, + const rapidjson::Value& val) { + bool value; + if (val.IsBool()) { + value = val.GetBool(); + } else if (val.IsString()) { + const char* to_conv = val.GetString(); + if (!*to_conv) { + return {}; + } + if (!absl::SimpleAtob(to_conv, &value)) { + throw exceptions::msg_fmt("command: {}, parameter {} is not a boolean", + cmd_name, field_name); + } + } else { + throw exceptions::msg_fmt("command: {}, parameter {} is not a boolean", + cmd_name, field_name); + } + return value; +} diff --git a/agent/src/check_exec.cc b/agent/src/check_exec.cc index bd475ef5d08..27a1250f9aa 100644 --- a/agent/src/check_exec.cc +++ b/agent/src/check_exec.cc @@ -116,20 +116,24 @@ void detail::process::_on_completion() { check_exec::check_exec(const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point exp, + time_point first_start_expected, + duration check_interval, const std::string& serv, const std::string& cmd_name, const std::string& cmd_line, const engine_to_agent_request_ptr& cnf, - check::completion_handler&& handler) + check::completion_handler&& handler, + const checks_statistics::pointer& stat) : check(io_context, logger, - exp, + first_start_expected, + check_interval, serv, cmd_name, cmd_line, cnf, - std::move(handler)) {} + std::move(handler), + stat) {} /** * @brief create and initialize a check_exec object (don't use constructor) @@ -137,7 +141,9 @@ check_exec::check_exec(const std::shared_ptr& io_context, * @tparam handler_type * @param io_context * @param logger - * @param exp start expected + * @param first_start_expected start expected + * @param check_interval check interval between two checks (not only this but + * also others) * @param serv * @param cmd_name * @param cmd_line @@ -148,15 +154,17 @@ check_exec::check_exec(const std::shared_ptr& 
io_context, std::shared_ptr check_exec::load( const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point exp, + time_point first_start_expected, + duration check_interval, const std::string& serv, const std::string& cmd_name, const std::string& cmd_line, const engine_to_agent_request_ptr& cnf, - check::completion_handler&& handler) { - std::shared_ptr ret = - std::make_shared(io_context, logger, exp, serv, cmd_name, - cmd_line, cnf, std::move(handler)); + check::completion_handler&& handler, + const checks_statistics::pointer& stat) { + std::shared_ptr ret = std::make_shared( + io_context, logger, first_start_expected, check_interval, serv, cmd_name, + cmd_line, cnf, std::move(handler), stat); ret->_init(); return ret; } @@ -185,7 +193,9 @@ void check_exec::_init() { * @param timeout */ void check_exec::start_check(const duration& timeout) { - check::start_check(timeout); + if (!check::_start_check(timeout)) { + return; + } if (!_process) { _io_context->post([me = check::shared_from_this(), start_check_index = _get_running_check_index()]() { @@ -220,24 +230,25 @@ void check_exec::start_check(const duration& timeout) { } } +/** + * @brief get process id of the check (only used by tests) + * + * @return int + */ +int check_exec::get_pid() const { + if (!_process) { + return 0; + } + return _process->get_pid(); +} /** * @brief process is killed in case of timeout and handler is called * * @param err * @param start_check_index */ -void check_exec::_timeout_timer_handler(const boost::system::error_code& err, - unsigned start_check_index) { - if (err) { - return; - } - if (start_check_index == _get_running_check_index()) { - _process->kill(); - check::_timeout_timer_handler(err, start_check_index); - } else { - SPDLOG_LOGGER_ERROR(_logger, "start_check_index={}, running_index={}", - start_check_index, _get_running_check_index()); - } +void check_exec::_on_timeout() { + _process->kill(); } /** diff --git a/agent/src/check_health.cc b/agent/src/check_health.cc new file mode 100644 index 00000000000..4414de8ce52 --- /dev/null +++ b/agent/src/check_health.cc @@ -0,0 +1,303 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 + * + * For more information : contact@centreon.com + */ + +#include "check_health.hh" +#include +#include "com/centreon/common/rapidjson_helper.hh" +#include "config.hh" +#include "version.hh" + +using namespace com::centreon::agent; + +/** + * @brief Construct a new check_health object + * + * @param io_context + * @param logger + * @param first_start_expected + * @param check_interval + * @param serv + * @param cmd_name + * @param cmd_line + * @param args + * @param cnf + * @param handler + */ +check_health::check_health(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat) + : check(io_context, + logger, + first_start_expected, + check_interval, + serv, + cmd_name, + cmd_line, + cnf, + std::move(handler), + stat), + _warning_check_interval(0), + _critical_check_interval(0), + _warning_check_duration(0), + _critical_check_duration(0), + _measure_timer(*io_context) { + com::centreon::common::rapidjson_helper arg(args); + try { + if (args.IsObject()) { + _warning_check_interval = arg.get_unsigned("warning-interval", 0); + _critical_check_interval = arg.get_unsigned("critical-interval", 0); + _warning_check_duration = arg.get_unsigned("warning-runtime", 0); + _critical_check_duration = arg.get_unsigned("critical-runtime", 0); + } + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(_logger, "check_health, failed to parse arguments: {}", + e.what()); + throw; + } + + if (config::instance().use_reverse_connection()) { + _info_output = "Version: " CENTREON_AGENT_VERSION + " - Connection mode: Poller initiated - Current " + "configuration: {} checks - Average runtime: {}s"; + } else { + _info_output = "Version: " CENTREON_AGENT_VERSION + " - Connection mode: Agent initiated - Current " + "configuration: {} checks - Average runtime: {}s"; + } +} + +/** + * @brief start the timer that triggers the measure + * + * @param timeout + */ +void check_health::start_check([[maybe_unused]] const duration& timeout) { + if (!_start_check(timeout)) { + return; + } + + // wait a little (half a scheduling step) so that other checks have already produced statistics + _measure_timer.expires_from_now(get_raw_start_expected().get_step() / 2); + _measure_timer.async_wait( + [me = shared_from_this(), start_check_index = _get_running_check_index()]( + const boost::system::error_code& err) mutable { + std::static_pointer_cast(me)->_measure_timer_handler( + err, start_check_index); + }); +} + +/** + * @brief timer handler that does the job + * + * @param err set if canceled + * @param start_check_index used by on_completion + */ +void check_health::_measure_timer_handler(const boost::system::error_code& err, + unsigned start_check_index) { + if (err) { + return; + } + std::string output; + std::list perf; + e_status status = compute(&output, &perf); + + on_completion(start_check_index, status, perf, {output}); +} + +/** + * @brief calculate status, output and perfdata from statistics + * + * @param output + * @param perf + * @return e_status + */ +e_status check_health::compute(std::string* output, + std::list* perf) { + e_status ret = e_status::ok; + + const checks_statistics& stats = get_stats(); + + if (stats.size() == 0) { + *output = "UNKNOWN: No check yet performed"; + return e_status::unknown; + } + + 
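+  // each offending check is inserted in this set so that it is reported only
+  // once, even when it breaches both the runtime and the interval thresholds;
+  // the loop below sums the last runtimes to build the average shown in the
+  // summary line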
absl::flat_hash_set written_to_output; + + unsigned average_runtime = 0; + for (const auto& stat : stats.get_ordered_by_duration()) { + average_runtime += std::chrono::duration_cast( + stat.last_check_duration) + .count(); + } + + auto append_state_to_output = [&](e_status status, std::string* temp_output, + const auto& iter) { + if (written_to_output.insert(iter->cmd_name).second) { + if (temp_output->empty()) { + *temp_output = status_label[status]; + } else { + temp_output->push_back(','); + temp_output->push_back(' '); + } + if (status > ret) { + ret = status; + } + absl::StrAppend(temp_output, iter->cmd_name, " runtime:", + std::chrono::duration_cast( + iter->last_check_duration) + .count(), + "s interval:", + std::chrono::duration_cast( + iter->last_check_interval) + .count(), + "s"); + } + }; + + std::string critical_output; + if (_critical_check_duration > 0) { + auto critical_duration = std::chrono::seconds(_critical_check_duration); + for (auto iter = stats.get_ordered_by_duration().rbegin(); + iter != stats.get_ordered_by_duration().rend() && + iter->last_check_duration > critical_duration; + ++iter) { + append_state_to_output(e_status::critical, &critical_output, iter); + } + } + + if (_critical_check_interval > 0) { + auto critical_interval = std::chrono::seconds(_critical_check_interval); + for (auto iter = stats.get_ordered_by_interval().rbegin(); + iter != stats.get_ordered_by_interval().rend() && + iter->last_check_interval > critical_interval; + ++iter) { + append_state_to_output(e_status::critical, &critical_output, iter); + } + } + + std::string warning_output; + if (_warning_check_duration) { + auto warning_duration = std::chrono::seconds(_warning_check_duration); + for (auto iter = stats.get_ordered_by_duration().rbegin(); + iter != stats.get_ordered_by_duration().rend() && + iter->last_check_duration > warning_duration; + ++iter) { + append_state_to_output(e_status::warning, &warning_output, iter); + } + } + + if (_warning_check_interval) { + auto warning_interval = std::chrono::seconds(_warning_check_interval); + for (auto iter = stats.get_ordered_by_interval().rbegin(); + iter != stats.get_ordered_by_interval().rend() && + iter->last_check_interval > warning_interval; + ++iter) { + append_state_to_output(e_status::warning, &warning_output, iter); + } + } + + unsigned max_check_interval = + std::chrono::duration_cast( + stats.get_ordered_by_interval().rbegin()->last_check_interval) + .count(); + unsigned max_check_duration = + std::chrono::duration_cast( + stats.get_ordered_by_duration().rbegin()->last_check_duration) + .count(); + + auto& interval_perf = perf->emplace_back(); + interval_perf.name("interval"); + interval_perf.unit("s"); + interval_perf.value(max_check_interval); + if (_warning_check_interval > 0) { + interval_perf.warning_low(0); + interval_perf.warning(_warning_check_interval); + } + if (_critical_check_interval > 0) { + interval_perf.critical_low(0); + interval_perf.critical(_critical_check_interval); + } + + auto& duration_perf = perf->emplace_back(); + duration_perf.name("runtime"); + duration_perf.unit("s"); + duration_perf.value(max_check_duration); + if (_warning_check_duration > 0) { + duration_perf.warning_low(0); + duration_perf.warning(_warning_check_duration); + } + if (_critical_check_duration > 0) { + duration_perf.critical_low(0); + duration_perf.critical(_critical_check_duration); + } + + if (ret != e_status::ok) { + if (!critical_output.empty()) { + output->append(critical_output); + if (!warning_output.empty()) { + *output += " - 
"; + output->append(warning_output); + } + } else if (!warning_output.empty()) { + output->append(warning_output); + } + *output += " - "; + } else { + *output = "OK: "; + } + fmt::format_to(std::back_inserter(*output), _info_output, get_stats().size(), + average_runtime / get_stats().size()); + + return ret; +} + +void check_health::help(std::ostream& help_stream) { + help_stream << R"( +- health params: + - warning-interval (s): warning if a check interval is greater than this value + - critical-interval (s): critical if a check interval is greater than this value + - warning-runtime (s): warning if a check duration is greater than this value + - critical-runtime (s): critical if a check duration is greater than this value + An example of configuration: + { + "check": "health", + "args": { + "warning-runtime": 30, + "critical-runtime": 50, + "warning-interval": 60, + "critical-interval": "90" + } + } + Examples of output: + CRITICAL: command2 runtime:25s interval:15s - WARNING: command1 runtime:20s interval:10s - Version: 24.11.0 - Connection mode: Poller initiated - Current configuration: 2 checks - Average runtime: 22s + Metrics: + runtime + interval + +)"; +} diff --git a/agent/src/config.cc b/agent/src/config.cc index d15de69aead..a6cb759e614 100644 --- a/agent/src/config.cc +++ b/agent/src/config.cc @@ -17,10 +17,8 @@ */ #include -#include #include "com/centreon/common/rapidjson_helper.hh" -#include "com/centreon/exceptions/msg_fmt.hh" #include "config.hh" using namespace com::centreon::agent; @@ -61,7 +59,7 @@ const std::string_view config::config_schema(R"( "description": "Name of the SSL certification authority", "type": "string" }, - "reverse_connection": { + "reversed_grpc_streaming": { "description": "Set to true to make Engine connect to the agent. Requires the agent to be configured as a server. Default: false", "type": "boolean" }, @@ -89,6 +87,11 @@ const std::string_view config::config_schema(R"( "description:": "Maximum number of log files to keep. Supernumerary files will be deleted. To be valid, log_files_max_size must be also be provided", "type": "integer", "min": 1 + }, + "second_max_reconnect_backoff": { + "description": "Maximum time between subsequent connection attempts, in seconds. 
Default: 60s", + "type": "integer", + "min": 0 } }, "required": [ @@ -99,6 +102,8 @@ const std::string_view config::config_schema(R"( )"); +std::unique_ptr config::_global_conf; + config::config(const std::string& path) { static common::json_validator validator(config_schema); rapidjson::Document file_content_d; @@ -144,5 +149,7 @@ config::config(const std::string& path) { if (_host.empty()) { _host = boost::asio::ip::host_name(); } - _reverse_connection = json_config.get_bool("reverse_connection", false); + _reverse_connection = json_config.get_bool("reversed_grpc_streaming", false); + _second_max_reconnect_backoff = + json_config.get_unsigned("second_max_reconnect_backoff", 60); } diff --git a/agent/src/config_win.cc b/agent/src/config_win.cc index 9fe35068904..8abe509d395 100644 --- a/agent/src/config_win.cc +++ b/agent/src/config_win.cc @@ -18,13 +18,12 @@ #include -#include - -#include "com/centreon/exceptions/msg_fmt.hh" #include "config.hh" using namespace com::centreon::agent; +std::unique_ptr config::_global_conf; + /** * @brief Construct a new config::config object * @@ -61,12 +60,13 @@ config::config(const std::string& registry_key) { return result == ERROR_SUCCESS && value; }; - auto get_unsigned = [&](const char* value_name) -> uint32_t { + auto get_unsigned = [&](const char* value_name, + unsigned default_value = 0) -> uint32_t { uint32_t value; DWORD size = sizeof(value); LSTATUS result = RegGetValueA(h_key, nullptr, value_name, RRF_RT_DWORD, nullptr, &value, &size); - return result == ERROR_SUCCESS ? value : 0; + return result == ERROR_SUCCESS ? value : default_value; }; _endpoint = get_sz_reg_or_default("endpoint", ""); @@ -103,7 +103,9 @@ config::config(const std::string& registry_key) { if (_host.empty()) { _host = boost::asio::ip::host_name(); } - _reverse_connection = get_bool("reverse_connection"); + _reverse_connection = get_bool("reversed_grpc_streaming"); + _second_max_reconnect_backoff = + get_unsigned("second_max_reconnect_backoff", 60); RegCloseKey(h_key); } diff --git a/agent/src/drive_size.cc b/agent/src/drive_size.cc new file mode 100644 index 00000000000..f2e58b14a1d --- /dev/null +++ b/agent/src/drive_size.cc @@ -0,0 +1,637 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include "drive_size.hh" +#include "com/centreon/common/rapidjson_helper.hh" + +using namespace com::centreon::agent; + +static std::shared_ptr< + com::centreon::agent::check_drive_size_detail::drive_size_thread> + _worker; +static std::thread* _worker_thread = nullptr; + +namespace com::centreon::agent::check_drive_size_detail { + +/******************************************************************************** + * filter + *********************************************************************************/ + +/** + * @brief as filter parameter is a regex, we need to apply the regex on each + * line of this array + * + */ +constexpr std::array, 35> + _fs_type = { + std::make_pair("hrunknown", hr_unknown), + std::make_pair("hrstorageram", hr_storage_ram), + std::make_pair("hrstoragevirtualmemory", hr_storage_virtual_memory), + std::make_pair("hrstoragefixeddisk", hr_storage_fixed_disk), + std::make_pair("hrstorageremovabledisk", hr_storage_removable_disk), + std::make_pair("hrstoragefloppydisk", hr_storage_floppy_disk), + std::make_pair("hrstoragecompactdisc", hr_storage_compact_disc), + std::make_pair("hrstorageramdisk", hr_storage_ram_disk), + std::make_pair("hrstorageflashmemory", hr_storage_flash_memory), + std::make_pair("hrstoragenetworkdisk", hr_storage_network_disk), + std::make_pair("hrfsother", hr_fs_other), + std::make_pair("hrfsunknown", hr_fs_unknown), + std::make_pair("hrfsberkeleyffs", hr_fs_berkeley_ffs), + std::make_pair("hrfssys5fs", hr_fs_sys5_fs), + std::make_pair("hrfsfat", hr_fs_fat), + std::make_pair("hrfshpfs", hr_fs_hpfs), + std::make_pair("hrfshfs", hr_fs_hfs), + std::make_pair("hrfsmfs", hr_fs_mfs), + std::make_pair("hrfsntfs", hr_fs_ntfs), + std::make_pair("hrfsvnode", hr_fs_vnode), + std::make_pair("hrfsjournaled", hr_fs_journaled), + std::make_pair("hrfsiso9660", hr_fs_iso9660), + std::make_pair("hrfsrockridge", hr_fs_rock_ridge), + std::make_pair("hrfsnfs", hr_fs_nfs), + std::make_pair("hrfsnetware", hr_fs_netware), + std::make_pair("hrfsafs", hr_fs_afs), + std::make_pair("hrfsdfs", hr_fs_dfs), + std::make_pair("hrfsappleshare", hr_fs_appleshare), + std::make_pair("hrfsrfs", hr_fs_rfs), + std::make_pair("hrfsdgcfs", hr_fs_dgcfs), + std::make_pair("hrfsbfs", hr_fs_bfs), + std::make_pair("hrfsfat32", hr_fs_fat32), + std::make_pair("hrfslinuxext2", hr_fs_linux_ext2), + std::make_pair("hrfslinuxext4", hr_fs_linux_ext4), + std::make_pair("hrfsexfat", hr_fs_exfat)}; + +/** + * @brief Construct a new filter::filter object + * + * + * @param args json array that can contain these keys: + * filter-storage-type or filter-type + * filter-fs + * filter-exclude-fs + * filter-mountpoint + * filter-exclude-mountpoint + */ +filter::filter(const rapidjson::Value& args) : _fs_type_filter(0xFFFFFFFFU) { + if (args.IsObject()) { + for (auto member_iter = args.MemberBegin(); member_iter != args.MemberEnd(); + ++member_iter) { + if (member_iter->name == "filter-storage-type" || + member_iter->name == "filter-type") { + if (member_iter->value.IsString() && + member_iter->value.GetStringLength() > 0) { + std::string sz_regexp(member_iter->value.GetString()); + boost::to_lower(sz_regexp); + re2::RE2 filter_typ_re(sz_regexp); + if (!filter_typ_re.ok()) { + throw exceptions::msg_fmt( + "invalid regex for filter-storage-type: {}", + member_iter->value.GetString()); + } + _fs_type_filter = 0; + for (const auto& [label, flag] : _fs_type) { + if (RE2::FullMatch(label, filter_typ_re)) { + _fs_type_filter |= flag; + } + } + } + } 
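+      // the remaining filters below are plain regexes matched against the
+      // filesystem name or the mount point; an invalid expression makes the
+      // constructor throw, so the check is not created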
else if (member_iter->name == "filter-fs" && + member_iter->value.IsString() && + member_iter->value.GetStringLength() > 0) { + _filter_fs = std::make_unique(member_iter->value.GetString()); + if (!_filter_fs->ok()) { + throw exceptions::msg_fmt("invalid regex for filter-fs: {}", + member_iter->value.GetString()); + } + } else if (member_iter->name == "exclude-fs" && + member_iter->value.IsString() && + member_iter->value.GetStringLength() > 0) { + _filter_exclude_fs = + std::make_unique(member_iter->value.GetString()); + if (!_filter_exclude_fs->ok()) { // NOLINT + throw exceptions::msg_fmt("invalid regex for filter-exclude-fs: {}", + member_iter->value.GetString()); + } + } else if (member_iter->name == "filter-mountpoint" && + member_iter->value.IsString() && + member_iter->value.GetStringLength() > 0) { + _filter_mountpoint = + std::make_unique(member_iter->value.GetString()); + if (!_filter_mountpoint->ok()) { + throw exceptions::msg_fmt("invalid regex for filter-mountpoint: {}", + member_iter->value.GetString()); + } + } else if (member_iter->name == "exclude-mountpoint" && + member_iter->value.IsString() && + member_iter->value.GetStringLength() > 0) { + _filter_exclude_mountpoint = + std::make_unique(member_iter->value.GetString()); + if (!_filter_exclude_mountpoint->ok()) { + throw exceptions::msg_fmt( + "invalid regex for filter-exclude-mountpoint: {}", + member_iter->value.GetString()); + } + } + } + } +} + +/** + * @brief test if fs has yet been tested and yet allowed + * + * @param fs + * @return true tested and allowed + * @return false not tested + */ +bool filter::is_fs_yet_allowed(const std::string_view& fs) const { + absl::MutexLock l(&_protect); + return _cache_allowed_fs.find(fs) != _cache_allowed_fs.end(); +} + +/** + * @brief test if fs has yet been tested and yet excluded + * + * @param fs + * @return true tested and excluded + * @return false not tested + */ +bool filter::is_fs_yet_excluded(const std::string_view& fs) const { + absl::MutexLock l(&_protect); + return _cache_excluded_fs.find(fs) != _cache_excluded_fs.end(); +} + +/** + * @brief test a fs + * + * @param fs + * @param mount_point (linux only) + * @param fs_type e_drive_fs_type mask + * @return true allowed by filter + * @return false + */ +bool filter::is_allowed(const std::string_view& fs, + const std::string_view& mount_point, + e_drive_fs_type fs_type) { + if (!(_fs_type_filter & fs_type)) { + return false; + } + + absl::MutexLock l(&_protect); + + bool yet_allowed = _cache_allowed_fs.find(fs) != _cache_allowed_fs.end(); + bool yet_excluded = _cache_excluded_fs.find(fs) != _cache_excluded_fs.end(); + if (yet_excluded) { + return false; + } + + if (!yet_allowed) { + if (_filter_exclude_fs && RE2::FullMatch(fs, *_filter_exclude_fs)) { + _cache_excluded_fs.emplace(fs); + return false; + } + + if (_filter_fs) { + if (RE2::FullMatch(fs, *_filter_fs)) { + _cache_allowed_fs.emplace(fs); + } else { + _cache_excluded_fs.emplace(fs); + return false; + } + } else { + _cache_allowed_fs.emplace(fs); + } + } + + yet_allowed = _cache_allowed_mountpoint.find(mount_point) != + _cache_allowed_mountpoint.end(); + yet_excluded = _cache_excluded_mountpoint.find(mount_point) != + _cache_excluded_mountpoint.end(); + if (yet_excluded) { + return false; + } + + if (!yet_allowed) { + if (_filter_exclude_mountpoint && + RE2::FullMatch(mount_point, *_filter_exclude_mountpoint)) { + _cache_excluded_mountpoint.emplace(mount_point); + return false; + } + + if (_filter_mountpoint) { + if (RE2::FullMatch(mount_point, 
*_filter_mountpoint)) { + _cache_allowed_mountpoint.emplace(mount_point); + } else { + _cache_excluded_mountpoint.emplace(mount_point); + return false; + } + } else { + _cache_allowed_mountpoint.emplace(mount_point); + } + } + + return true; +} + +/******************************************************************************** + * drive_size_thread + *********************************************************************************/ + +drive_size_thread::get_fs_stats drive_size_thread::os_fs_stats; + +/** + * @brief function run in a separate thread started by + * check_drive_size::start_check + * + */ +void drive_size_thread::run() { + auto keep_object_alive = shared_from_this(); + while (_active) { + absl::MutexLock l(&_queue_m); + _queue_m.Await(absl::Condition(this, &drive_size_thread::has_to_stop_wait)); + if (!_active) { + return; + } + time_point now = std::chrono::system_clock::now(); + while (!_queue.empty()) { + if (_queue.begin()->timeout < now) { + _queue.pop_front(); + } else { + break; + } + } + + if (!_queue.empty()) { + auto to_execute = _queue.begin(); + std::list stats = + os_fs_stats(*to_execute->request_filter, _logger); + // main code of this program is not thread safe, so we use io_context + // launched from main thread to call callback + _io_context->post( + [result = std::move(stats), + completion_handler = std::move(to_execute->handler)]() { + completion_handler(result); + }); + _queue.erase(to_execute); + } + } +} + +/** + * @brief wake up thread and tell him it's time to die + * + */ +void drive_size_thread::kill() { + absl::MutexLock l(&_queue_m); + _active = false; +} + +/** + * @brief start an asynchronous check + * + * @tparam handler_type + * @param request_filter + * @param timeout + * @param handler + */ +template +void drive_size_thread::async_get_fs_stats( + const std::shared_ptr& request_filter, + const time_point& timeout, + handler_type&& handler) { + absl::MutexLock lck(&_queue_m); + _queue.push_back( + {request_filter, std::forward(handler), timeout}); +} + +} // namespace com::centreon::agent::check_drive_size_detail + +/******************************************************************************** + * check_drive_size + *********************************************************************************/ + +check_drive_size::check_drive_size( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat) + : check(io_context, + logger, + first_start_expected, + check_interval, + serv, + cmd_name, + cmd_line, + cnf, + std::move(handler), + stat), + _filter(std::make_shared(args)), + _prct_threshold(false), + _free_threshold(false), + _warning(0), + _critical(0), + _fs_test(&check_drive_size::_no_test) { + using namespace std::literals; + try { + if (args.IsObject()) { + common::rapidjson_helper helper(args); + + if (args.HasMember("unit")) { + _prct_threshold = helper.get_string("unit", "%") == "%"sv; + } else { + _prct_threshold = helper.get_string("units", "%") == "%"sv; + } + _free_threshold = helper.get_bool("free", false); + + _warning = helper.get_uint64_t("warning", 0); + _critical = helper.get_uint64_t("critical", 0); + if (_prct_threshold) { + if (_warning || _critical) { + _warning *= 100; + _critical *= 100; + _fs_test = 
_free_threshold ? &check_drive_size::_prct_free_test + : &check_drive_size::_prct_used_test; + } + } else { + if (_warning || _critical) { + _fs_test = _free_threshold ? &check_drive_size::_free_test + : &check_drive_size::_used_test; + } + } + } + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR( + _logger, "check_drive_size fail to parse check params: {}", e.what()); + throw; + } +} + +/** + * @brief used in case of no threshold + * + * @param fs + * @return e_status + */ +e_status check_drive_size::_no_test( + [[maybe_unused]] const check_drive_size_detail::fs_stat& fs) const { + return e_status::ok; +} + +/** + * @brief test used fs with fixed thresholds + * + * @param fs + * @return e_status + */ +e_status check_drive_size::_used_test( + const check_drive_size_detail::fs_stat& fs) const { + if (_critical && fs.is_used_more_than_threshold(_critical)) { + return e_status::critical; + } + if (_warning && fs.is_used_more_than_threshold(_warning)) { + return e_status::warning; + } + return e_status::ok; +} + +/** + * @brief test used fs with percent thresholds + * + * @param fs + * @return e_status + */ +e_status check_drive_size::_prct_used_test( + const check_drive_size_detail::fs_stat& fs) const { + if (_critical && fs.is_used_more_than_prct_threshold(_critical)) { + return e_status::critical; + } + if (_warning && fs.is_used_more_than_prct_threshold(_warning)) { + return e_status::warning; + } + return e_status::ok; +} + +/** + * @brief test free fs with fixed thresholds + * + * @param fs + * @return e_status + */ +e_status check_drive_size::_free_test( + const check_drive_size_detail::fs_stat& fs) const { + if (_critical && fs.is_free_less_than_threshold(_critical)) { + return e_status::critical; + } + if (_warning && fs.is_free_less_than_threshold(_warning)) { + return e_status::warning; + } + return e_status::ok; +} + +/** + * @brief test free fs with percent thresholds + * + * @param fs + * @return e_status + */ +e_status check_drive_size::_prct_free_test( + const check_drive_size_detail::fs_stat& fs) const { + if (_critical && fs.is_free_less_than_prct_threshold(_critical)) { + return e_status::critical; + } + if (_warning && fs.is_free_less_than_prct_threshold(_warning)) { + return e_status::warning; + } + return e_status::ok; +} + +/** + * @brief start a check + * start _worker thread if not yet done and pass query to it + * + * @param timeout + */ +void check_drive_size::start_check(const duration& timeout) { + if (!check::_start_check(timeout)) { + return; + } + + if (!_worker_thread) { + _worker = std::make_shared( + _io_context, _logger); + _worker_thread = new std::thread([worker = _worker] { worker->run(); }); + } + + unsigned running_check_index = _get_running_check_index(); + + _worker->async_get_fs_stats( + _filter, std::chrono::system_clock::now() + timeout, + [me = shared_from_this(), running_check_index]( + const std::list& result) { + me->_completion_handler(running_check_index, result); + }); +} + +/** + * @brief called by _worker once work is done + * As it is not thread safe, _worker use io_context to post result + * + * @param start_check_index + * @param result + */ +void check_drive_size::_completion_handler( + unsigned start_check_index, + const std::list& result) { + e_status status = e_status::ok; + + std::string output; + std::list perfs; + + for (const auto& fs : result) { + e_status fs_status = (this->*_fs_test)(fs); + if (fs_status > status) { + status = fs_status; + } + if (fs_status != e_status::ok) { + if (!output.empty()) { + 
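+          // a space separates the report of each filesystem in warning or
+          // critical state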
 output.push_back(' '); + } + output += fs_status == e_status::critical ? "CRITICAL: " : "WARNING: "; + if (_prct_threshold) { + output += fmt::format("{} Total: {}G Used: {:.2f}% Free: {:.2f}%", + fs.mount_point, fs.total / 1024 / 1024 / 1024, + fs.get_used_prct(), fs.get_free_prct()); + } else { + output += fmt::format("{} Total: {}G Used: {}G Free: {}G", + fs.mount_point, fs.total / 1024 / 1024 / 1024, + fs.used / 1024 / 1024 / 1024, + (fs.total - fs.used) / 1024 / 1024 / 1024); + } + } + + centreon::common::perfdata& perf = perfs.emplace_back(); + perf.name((_free_threshold ? "free_" : "used_") + fs.mount_point); + + if (_prct_threshold) { + perf.unit("%"); + perf.min(0); + perf.max(100.0); + if (_warning) { + perf.warning_low(0); + perf.warning(static_cast(_warning) / 100); + } + if (_critical) { + perf.critical_low(0); + perf.critical(static_cast(_critical) / 100); + } + perf.value(_free_threshold ? fs.get_free_prct() : fs.get_used_prct()); + } else { + perf.unit("B"); + perf.min(0); + perf.max(fs.total); + if (_warning) { + perf.warning_low(0); + perf.warning(_warning); + } + if (_critical) { + perf.critical_low(0); + perf.critical(_critical); + } + perf.value(_free_threshold ? (fs.total - fs.used) : fs.used); + } + } + if (output.empty()) { + using namespace std::literals; + if (perfs.empty()) { + output = "No storage found (filters issue)"sv; + status = e_status::critical; + } else { + output = "OK: All storages are ok"sv; + } + } + + on_completion(start_check_index, status, perfs, {output}); +} + +/** + * @brief stop _worker + * + */ +void check_drive_size::thread_kill() { + if (_worker_thread) { + _worker->kill(); + _worker_thread->join(); + delete _worker_thread; + _worker_thread = nullptr; + } +} + +void check_drive_size::help(std::ostream& help_stream) { + help_stream << + R"( +- storage params: + unit (default %): unit of the thresholds. If different from %, thresholds are expressed in bytes + free (default used): true: thresholds apply to free space and the service becomes warning if free space is lower than the threshold + false: thresholds apply to used space and the service becomes warning if used space is higher than the threshold + warning: warning threshold + critical: critical threshold + filters: + filter-storage-type: case-insensitive regex to filter storage types; it covers drive types (fixed, network...) as well as fs types (fat32, ntfs...) 
+ types recognized by agent: + hrunknown + hrstoragefixeddisk + hrstorageremovabledisk + hrstoragecompactdisc + hrstorageramdisk + hrstoragenetworkdisk + hrfsunknown + hrfsfat + hrfsntfs + hrfsfat32 + hrfsexfat + filter-fs: regex to filter filesystem + Example: [C-D]:\\.* + exclude-fs: regex to exclude filesystem + An example of configuration: + { + "check": "storage", + "args": { + "unit": "%", + "free": false, + "warning": 80, + "critical": 90, + "filter-storage-type": "hrstoragefixeddisk", + "filter-fs": "[C-D]:\\" + } + } + Examples of output: + WARNING: C:\ Total: 322G Used: 39.54% Free: 60.46% CRITICAL: D:\ Total: 5G Used: 50.60% Free: 49.40% + Metrics: + if free flag = true + free_C:\ + free_D:\ + if free flag = false + used_C:\ + used_D:\ +)"; +} diff --git a/agent/src/main.cc b/agent/src/main.cc index 34d11ab1874..07719168736 100644 --- a/agent/src/main.cc +++ b/agent/src/main.cc @@ -20,7 +20,12 @@ #include #include +#include "agent_info.hh" +#include "check_cpu.hh" +#include "check_health.hh" + #include "config.hh" +#include "drive_size.hh" #include "streaming_client.hh" #include "streaming_server.hh" @@ -103,12 +108,16 @@ int main(int argc, char* argv[]) { "Usage: {} \nSchema of the config " "file is:\n{}", argv[0], config::config_schema); + std::cout << std::endl << "Native checks options:" << std::endl; + check_cpu::help(std::cout); + check_health::help(std::cout); return 1; } - std::unique_ptr conf; try { - conf = std::make_unique(argv[1]); + // mandatory to convert arg to a string to ensure of the choice of load + // method by compiler + config::load(std::string(argv[1])); } catch (const std::exception& e) { SPDLOG_ERROR("fail to parse config file {}: {}", argv[1], e.what()); return 1; @@ -122,20 +131,21 @@ int main(int argc, char* argv[]) { const std::string logger_name = "centreon-monitoring-agent"; - if (conf->get_log_type() == config::to_file) { + const config& conf = config::instance(); + if (conf.get_log_type() == config::to_file) { try { - if (!conf->get_log_file().empty()) { - if (conf->get_log_files_max_size() > 0 && - conf->get_log_files_max_number() > 0) { + if (!conf.get_log_file().empty()) { + if (conf.get_log_files_max_size() > 0 && + conf.get_log_files_max_number() > 0) { g_logger = spdlog::rotating_logger_mt( - logger_name, conf->get_log_file(), - conf->get_log_files_max_size() * 0x100000, - conf->get_log_files_max_number()); + logger_name, conf.get_log_file(), + conf.get_log_files_max_size() * 0x100000, + conf.get_log_files_max_number()); } else { SPDLOG_INFO( "no log-max-file-size option or no log-max-files option provided " "=> logs will not be rotated by centagent"); - g_logger = spdlog::basic_logger_mt(logger_name, conf->get_log_file()); + g_logger = spdlog::basic_logger_mt(logger_name, conf.get_log_file()); } } else { SPDLOG_ERROR( @@ -143,18 +153,19 @@ int main(int argc, char* argv[]) { g_logger = spdlog::stdout_color_mt(logger_name); } } catch (const std::exception& e) { - SPDLOG_CRITICAL("Can't log to {}: {}", conf->get_log_file(), e.what()); + SPDLOG_CRITICAL("Can't log to {}: {}", conf.get_log_file(), e.what()); return 2; } } else { g_logger = spdlog::stdout_color_mt(logger_name); } - g_logger->set_level(conf->get_log_level()); + g_logger->set_level(conf.get_log_level()); g_logger->flush_on(spdlog::level::warn); - spdlog::flush_every(std::chrono::seconds(1)); + // don't use it because spdlog mutex would hang child process + // spdlog::flush_every(std::chrono::seconds(1)); SPDLOG_LOGGER_INFO(g_logger, "centreon-monitoring-agent start, you can 
decrease log " @@ -170,23 +181,25 @@ int main(int argc, char* argv[]) { _signals.async_wait(signal_handler); grpc_conf = std::make_shared( - conf->get_endpoint(), conf->use_encryption(), - read_file(conf->get_public_cert_file()), - read_file(conf->get_private_key_file()), - read_file(conf->get_ca_certificate_file()), conf->get_ca_name(), true, - 30); + conf.get_endpoint(), conf.use_encryption(), + read_file(conf.get_public_cert_file()), + read_file(conf.get_private_key_file()), + read_file(conf.get_ca_certificate_file()), conf.get_ca_name(), true, 30, + conf.get_second_max_reconnect_backoff()); } catch (const std::exception& e) { SPDLOG_CRITICAL("fail to parse input params: {}", e.what()); return -1; } - if (conf->use_reverse_connection()) { + read_os_version(); + + if (conf.use_reverse_connection()) { _streaming_server = streaming_server::load(g_io_context, g_logger, - grpc_conf, conf->get_host()); + grpc_conf, conf.get_host()); } else { _streaming_client = streaming_client::load(g_io_context, g_logger, - grpc_conf, conf->get_host()); + grpc_conf, conf.get_host()); } try { @@ -196,6 +209,9 @@ int main(int argc, char* argv[]) { return -1; } + // kill check_drive_size thread if used + check_drive_size::thread_kill(); + SPDLOG_LOGGER_INFO(g_logger, "centreon-monitoring-agent end"); return 0; diff --git a/agent/src/main_win.cc b/agent/src/main_win.cc index e551c5164fa..c6c10608eca 100644 --- a/agent/src/main_win.cc +++ b/agent/src/main_win.cc @@ -17,15 +17,32 @@ */ #include +#include "agent_info.hh" +#include "check_cpu.hh" +#include "check_health.hh" +#include "check_memory.hh" +#include "check_service.hh" +#include "check_uptime.hh" +#include "drive_size.hh" + #include #include #include #include #include "config.hh" +#include "drive_size.hh" +#include "ntdll.hh" #include "streaming_client.hh" #include "streaming_server.hh" +namespace com::centreon::agent::check_drive_size_detail { + +std::list os_fs_stats(filter& filter, + const std::shared_ptr& logger); + +} + using namespace com::centreon::agent; #define SERVICE_NAME "CentreonMonitoringAgent" @@ -98,6 +115,22 @@ static std::string read_file(const std::string& file_path) { return ""; } +void show_help() { + std::cout << "usage: centagent.exe [options]" << std::endl; + std::cout << "Options:" << std::endl; + std::cout << " --standalone: run the agent in standalone mode not from " + "service manager (mandatory for start it from command line)" + << std::endl; + std::cout << " --help: show this help" << std::endl; + std::cout << std::endl << "native checks options:" << std::endl; + check_cpu::help(std::cout); + check_memory::help(std::cout); + check_uptime::help(std::cout); + check_drive_size::help(std::cout); + check_service::help(std::cout); + check_health::help(std::cout); +} + /** * @brief this program can be started in two ways * from command line: main function @@ -106,17 +139,20 @@ static std::string read_file(const std::string& file_path) { * @return int exit status returned to command line (0 success) */ int _main(bool service_start) { - const char* registry_path = "SOFTWARE\\Centreon\\" SERVICE_NAME; + std::string registry_path = "SOFTWARE\\Centreon\\" SERVICE_NAME; - std::unique_ptr conf; try { - conf = std::make_unique(registry_path); + config::load(registry_path); } catch (const std::exception& e) { SPDLOG_ERROR("fail to read conf from registry {}: {}", registry_path, e.what()); return 1; } + // init os specific drive_size getter + check_drive_size_detail::drive_size_thread::os_fs_stats = + check_drive_size_detail::os_fs_stats; + 
if (service_start) SPDLOG_INFO("centreon-monitoring-agent service start"); else @@ -130,37 +166,39 @@ int _main(bool service_start) { g_logger = std::make_shared("", sink); }; + const config& conf = config::instance(); + try { - if (conf->get_log_type() == config::to_file) { - if (!conf->get_log_file().empty()) { - if (conf->get_log_files_max_size() > 0 && - conf->get_log_files_max_number() > 0) { + if (conf.get_log_type() == config::to_file) { + if (!conf.get_log_file().empty()) { + if (conf.get_log_files_max_size() > 0 && + conf.get_log_files_max_number() > 0) { g_logger = spdlog::rotating_logger_mt( - logger_name, conf->get_log_file(), - conf->get_log_files_max_size() * 0x100000, - conf->get_log_files_max_number()); + logger_name, conf.get_log_file(), + conf.get_log_files_max_size() * 0x100000, + conf.get_log_files_max_number()); } else { SPDLOG_INFO( "no log-max-file-size option or no log-max-files option provided " "=> logs will not be rotated by centagent"); - g_logger = spdlog::basic_logger_mt(logger_name, conf->get_log_file()); + g_logger = spdlog::basic_logger_mt(logger_name, conf.get_log_file()); } } else { SPDLOG_ERROR( "log-type=file needs the option log-file => log to event log"); create_event_logger(); } - } else if (conf->get_log_type() == config::to_stdout) { + } else if (conf.get_log_type() == config::to_stdout) { g_logger = spdlog::stdout_color_mt(logger_name); } else { create_event_logger(); } } catch (const std::exception& e) { - SPDLOG_CRITICAL("Can't log to {}: {}", conf->get_log_file(), e.what()); + SPDLOG_CRITICAL("Can't log to {}: {}", conf.get_log_file(), e.what()); return 2; } - g_logger->set_level(conf->get_log_level()); + g_logger->set_level(conf.get_log_level()); g_logger->flush_on(spdlog::level::warn); @@ -173,32 +211,38 @@ int _main(bool service_start) { _signals.async_wait(signal_handler); grpc_conf = std::make_shared( - conf->get_endpoint(), conf->use_encryption(), - read_file(conf->get_public_cert_file()), - read_file(conf->get_private_key_file()), - read_file(conf->get_ca_certificate_file()), conf->get_ca_name(), true, - 30); + conf.get_endpoint(), conf.use_encryption(), + read_file(conf.get_public_cert_file()), + read_file(conf.get_private_key_file()), + read_file(conf.get_ca_certificate_file()), conf.get_ca_name(), true, 30, + conf.get_second_max_reconnect_backoff()); } catch (const std::exception& e) { SPDLOG_CRITICAL("fail to parse input params: {}", e.what()); return -1; } - if (conf->use_reverse_connection()) { - _streaming_server = streaming_server::load(g_io_context, g_logger, - grpc_conf, conf->get_host()); - } else { - _streaming_client = streaming_client::load(g_io_context, g_logger, - grpc_conf, conf->get_host()); - } - try { + load_nt_dll(); + read_os_version(); + + if (conf.use_reverse_connection()) { + _streaming_server = streaming_server::load(g_io_context, g_logger, + grpc_conf, conf.get_host()); + } else { + _streaming_client = streaming_client::load(g_io_context, g_logger, + grpc_conf, conf.get_host()); + } + g_io_context->run(); } catch (const std::exception& e) { SPDLOG_LOGGER_CRITICAL(g_logger, "unhandled exception: {}", e.what()); return -1; } + // kill check_drive_size thread if used + check_drive_size::thread_kill(); + SPDLOG_LOGGER_INFO(g_logger, "centreon-monitoring-agent end"); return 0; @@ -227,6 +271,15 @@ int main(int argc, char* argv[]) { return _main(false); } + if (argc > 1 && !lstrcmpi(argv[1], "--help")) { + show_help(); + return 0; + } + + SPDLOG_INFO( + "centagent.exe will start in service mode, if you launch it from 
command " + "line, use --standalone flag"); + SERVICE_TABLE_ENTRY DispatchTable[] = { {SERVICE_NAME, (LPSERVICE_MAIN_FUNCTION)SvcMain}, {NULL, NULL}}; diff --git a/agent/src/native_check_base.cc b/agent/src/native_check_base.cc new file mode 100644 index 00000000000..593f89b4069 --- /dev/null +++ b/agent/src/native_check_base.cc @@ -0,0 +1,211 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "native_check_base.hh" +#include "com/centreon/common/rapidjson_helper.hh" + +using namespace com::centreon::agent; +using namespace com::centreon::agent::native_check_detail; + +/** + * @brief construct a snapshot to status converter + * + * @tparam nb_metric + * @param status e_warning or e_critical + * @param data_index index of the data to compare + * @param threshold + * @param total_data_index index of the total data in order to do a percent + * compare + * @param free_threshold if true, status is set if value < threshold + */ +template +measure_to_status::measure_to_status(e_status status, + unsigned data_index, + double threshold, + unsigned total_data_index, + bool percent, + bool free_threshold) + : _status(status), + _data_index(data_index), + _threshold(threshold), + _total_data_index(total_data_index), + _percent(percent), + _free_threshold(free_threshold) {} + +template +void measure_to_status::compute_status( + const snapshot& to_test, + e_status* status) const { + if (_status <= *status) { + return; + } + double value = + _percent ? 
to_test.get_proportional_value(_data_index, _total_data_index) + : to_test.get_metric(_data_index); + if (_free_threshold) { + if (value < _threshold) { + *status = _status; + } + } else { + if (value > _threshold) { + *status = _status; + } + } +} + +/** + * @brief Construct a new check native_check_base + * + * @param io_context + * @param logger + * @param first_start_expected start expected + * @param check_interval check interval between two checks (not only this but + * also others) + * @param serv service + * @param cmd_name + * @param cmd_line + * @param args native plugin arguments + * @param cnf engine configuration received object + * @param handler called at measure completion + */ +template +native_check_base::native_check_base( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat) + : check(io_context, + logger, + first_start_expected, + check_interval, + serv, + cmd_name, + cmd_line, + cnf, + std::move(handler), + stat) {} + +/** + * @brief start a measure + * + * @param timeout + */ +template +void native_check_base::start_check(const duration& timeout) { + if (!check::_start_check(timeout)) { + return; + } + + try { + std::shared_ptr> mem_metrics = + measure(); + + _io_context->post([me = shared_from_this(), + start_check_index = _get_running_check_index(), + metrics = mem_metrics]() mutable { + std::string output; + output.reserve(1024); + std::list perfs; + e_status status = me->compute(*metrics, &output, &perfs); + me->on_completion(start_check_index, status, perfs, {output}); + }); + + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(_logger, "fail to get memory info: {}", e.what()); + _io_context->post([me = shared_from_this(), + start_check_index = _get_running_check_index(), + err = e.what()] { + me->on_completion(start_check_index, e_status::unknown, {}, {err}); + }); + } +} + +/** + * @brief compute status, output and metrics from a measure + * + * @tparam nb_metric + * @param data memory measure + * @param output plugins output + * @param perfs perfdatas + * @return e_status plugins status output + */ +template +e_status native_check_base::compute( + const native_check_detail::snapshot& data, + std::string* output, + std::list* perfs) const { + e_status status = e_status::ok; + + for (const auto& mem_status : _measure_to_status) { + mem_status.second->compute_status(data, &status); + } + + *output = status_label[status]; + data.dump_to_output(output); + + const auto& metric_definitions = get_metric_definitions(); + + for (const auto& metric : metric_definitions) { + common::perfdata& to_add = perfs->emplace_back(); + to_add.name(metric.name); + if (metric.percent) { + to_add.unit("%"); + to_add.min(0); + to_add.max(100); + to_add.value(data.get_proportional_value(metric.data_index, + metric.total_data_index) * + 100); + } else { + if (_no_percent_unit) { + to_add.unit(_no_percent_unit); + } + if (metric.total_data_index != nb_metric) { + to_add.min(0); + to_add.max(data.get_metric(metric.total_data_index)); + } + to_add.value(data.get_metric(metric.data_index)); + } + // we search measure_to_status to get warning and critical thresholds + // warning + auto mem_to_status_search = _measure_to_status.find(std::make_tuple( + 
metric.data_index, metric.total_data_index, e_status::warning)); + if (mem_to_status_search != _measure_to_status.end()) { + to_add.warning_low(0); + to_add.warning(metric.percent + ? 100 * mem_to_status_search->second->get_threshold() + : mem_to_status_search->second->get_threshold()); + } + // critical + mem_to_status_search = _measure_to_status.find(std::make_tuple( + metric.data_index, metric.total_data_index, e_status::critical)); + if (mem_to_status_search != _measure_to_status.end()) { + to_add.critical_low(0); + to_add.critical(metric.percent + ? 100 * mem_to_status_search->second->get_threshold() + : mem_to_status_search->second->get_threshold()); + } + } + return status; +} \ No newline at end of file diff --git a/agent/src/native_check_cpu_base.cc b/agent/src/native_check_cpu_base.cc new file mode 100644 index 00000000000..01f04e50023 --- /dev/null +++ b/agent/src/native_check_cpu_base.cc @@ -0,0 +1,451 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "native_check_cpu_base.hh" +#include "com/centreon/common/rapidjson_helper.hh" + +using namespace com::centreon::agent; +using namespace com::centreon::agent::check_cpu_detail; + +/** + * @brief Construct a new per cpu time base::per cpu time base object + * all values are set to zero + * @tparam nb_metric + */ +template +per_cpu_time_base::per_cpu_time_base() { + _metrics.fill(0); +} + +/** + * @brief dump all values into plugin output + * + * @tparam nb_metric + * @param cpu_index cpu index or average_cpu_index for all cpus + * @param metric_label label for each metric + * @param output output string + */ +template +void per_cpu_time_base::dump(const unsigned& cpu_index, + const std::string_view metric_label[], + std::string* output) const { + if (cpu_index == average_cpu_index) { + *output += fmt::format("CPU(s) average Usage: {:.2f}%", + (static_cast(_total_used) / _total) * 100); + } else { + *output += fmt::format("CPU'{}' Usage: {:.2f}%", cpu_index, + (static_cast(_total_used) / _total) * 100); + } + + for (unsigned field_index = 0; field_index < nb_metric; ++field_index) { + *output += metric_label[field_index]; + *output += + fmt::format("{:.2f}%", get_proportional_value(field_index) * 100); + } +} + +/** + * @brief used for debugging + * + * @param output + */ +template +void per_cpu_time_base::dump_values(std::string* output) const { + for (unsigned field_index = 0; field_index < nb_metric; ++field_index) { + absl::StrAppend(output, " ", _metrics[field_index]); + } + absl::StrAppend(output, " used:", _total_used); + absl::StrAppend(output, " total:", _total); +} + +/** + * @brief subtract a per_cpu_time_base from this + * + * @tparam nb_metric + * @param to_subtract + */ +template +void per_cpu_time_base::subtract( + const per_cpu_time_base& to_subtract) { + typename std::array::iterator dest = _metrics.begin(); + typename std::array::const_iterator src = + to_subtract._metrics.begin(); + for (; dest < 
_metrics.end(); ++dest, ++src) { + *dest -= *src; + } + _total_used -= to_subtract._total_used; + _total -= to_subtract._total; +} + +/** + * @brief add a per_cpu_time_base to this + * + * @tparam nb_metric + * @param to_add + */ +template +void per_cpu_time_base::add(const per_cpu_time_base& to_add) { + typename std::array::iterator dest = _metrics.begin(); + typename std::array::const_iterator src = + to_add._metrics.begin(); + for (; dest < _metrics.end(); ++dest, ++src) { + *dest += *src; + } + _total_used += to_add._total_used; + _total += to_add._total; +} + +/** + * @brief subtract a cpu snapshot from this + * + * @tparam nb_metric + * @param to_subtract + * @return index_to_cpu + */ +template +index_to_cpu cpu_time_snapshot::subtract( + const cpu_time_snapshot& to_subtract) const { + index_to_cpu result; + // in case of pdh, first measure is empty, so we use only second sample + if (to_subtract._data.empty()) { + return _data; + } + for (const auto& left_it : _data) { + const auto& right_it = to_subtract._data.find(left_it.first); + if (right_it == to_subtract._data.end()) { + continue; + } + per_cpu_time_base& res = result[left_it.first]; + res = left_it.second; + res.subtract(right_it->second); + } + return result; +} + +/** + * @brief used for debug, dump all values + * + * @tparam nb_metric + * @param cpus + * @param output + */ +template +void cpu_time_snapshot::dump(std::string* output) const { + output->reserve(output->size() + _data.size() * 256); + for (const auto& cpu : _data) { + output->push_back(cpu.first + '0'); + output->append(":{"); + for (unsigned i = 0; i < nb_metric; ++i) { + absl::StrAppend(output, " ", cpu.second.get_proportional_value(i)); + } + absl::StrAppend(output, " used:", cpu.second.get_proportional_used()); + output->push_back('\n'); + cpu.second.dump_values(output); + + output->append("}\n"); + } +} + +/** + * @brief update status of each cpu or all cpus if metric > threshold + * + * @tparam nb_metric + * @param to_test metrics + * @param per_cpu_status out: status per cpu index + */ +template +void cpu_to_status::compute_status( + const index_to_cpu& to_test, + boost::container::flat_map* per_cpu_status) const { + auto check_threshold = + [&, this](const typename index_to_cpu::value_type& values) { + double val = _data_index >= nb_metric + ? 
values.second.get_proportional_used() + : values.second.get_proportional_value(_data_index); + if (val > _threshold) { + auto& to_update = (*per_cpu_status)[values.first]; + // if ok (=0) and _status is warning (=1) or critical(=2), we update + if (_status > to_update) { + to_update = _status; + } + } + }; + + if (_average) { + auto avg = to_test.find(average_cpu_index); + if (avg == to_test.end()) { + return; + } + check_threshold(*avg); + } else { + for (const auto& by_cpu : to_test) { + if (by_cpu.first == average_cpu_index) { + continue; + } + check_threshold(by_cpu); + } + } +} + +/** + * @brief Construct a new check native_check_cpu cpu object + * + * @param io_context + * @param logger + * @param first_start_expected start expected + * @param check_interval check interval between two checks (not only this but + * also others) + * @param serv service + * @param cmd_name + * @param cmd_line + * @param args native plugin arguments + * @param cnf engine configuration received object + * @param handler called at measure completion + */ +template +native_check_cpu::native_check_cpu( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point first_start_expected, + duration check_interval, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const rapidjson::Value& args, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler, + const checks_statistics::pointer& stat) + : check(io_context, + logger, + first_start_expected, + check_interval, + serv, + cmd_name, + cmd_line, + cnf, + std::move(handler), + stat), + + _nb_core(std::thread::hardware_concurrency()), + _cpu_detailed(false), + _measure_timer(*io_context) { + if (args.IsObject()) { + com::centreon::common::rapidjson_helper arg(args); + _cpu_detailed = arg.get_bool("cpu-detailed", false); + } +} + +/** + * @brief start a measure + * measure duration is the min of timeout - 1s, check_interval - 1s + * + * @param timeout + */ +template +void native_check_cpu::start_check(const duration& timeout) { + if (!check::_start_check(timeout)) { + return; + } + + try { + std::unique_ptr> begin = + get_cpu_time_snapshot(true); + + time_point end_measure = std::chrono::system_clock::now() + timeout; + time_point end_measure_period = + get_start_expected() + + std::chrono::seconds(get_conf()->config().check_interval()); + + if (end_measure > end_measure_period) { + end_measure = end_measure_period; + } + + end_measure -= std::chrono::seconds(1); + + _measure_timer.expires_at(end_measure); + _measure_timer.async_wait( + [me = shared_from_this(), first_measure = std::move(begin), + start_check_index = _get_running_check_index()]( + const boost::system::error_code& err) mutable { + me->_measure_timer_handler(err, start_check_index, + std::move(first_measure)); + }); + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(_logger, "{} fail to start check: {}", + get_command_name(), e.what()); + _io_context->post([me = shared_from_this(), + start_check_index = _get_running_check_index(), + err = e.what()] { + me->on_completion(start_check_index, e_status::unknown, {}, {err}); + }); + } +} + +/** + * @brief called at measure timer expiration + * Then we take a new snapshot of /proc/stat, compute difference with + * first_measure and generate output and perfdatas + * + * @param err asio error + * @param start_check_index passed to on_completion to validate result + * @param first_measure first snapshot to compare + */ +template +void 
native_check_cpu::_measure_timer_handler( + const boost::system::error_code& err, + unsigned start_check_index, + std::unique_ptr>&& + first_measure) { + if (err) { + return; + } + + std::string output; + std::list perfs; + + std::unique_ptr> new_measure = + get_cpu_time_snapshot(false); + + e_status worst = compute(*first_measure, *new_measure, &output, &perfs); + + on_completion(start_check_index, worst, perfs, {output}); +} + +/** + * @brief compute the difference between second_measure and first_measure and + * generate status, output and perfdatas + * + * @param first_measure first snapshot of /proc/stat + * @param second_measure second snapshot of /proc/stat + * @param output out plugin output + * @param perfs perfdatas + * @return e_status plugin out status + */ +template +e_status native_check_cpu::_compute( + const check_cpu_detail::cpu_time_snapshot& first_measure, + const check_cpu_detail::cpu_time_snapshot& second_measure, + const std::string_view summary_labels[], + const std::string_view perfdata_labels[], + std::string* output, + std::list* perfs) { + index_to_cpu delta = second_measure.subtract(first_measure); + + // we need to know per cpu status to provide no ok cpu details + boost::container::flat_map by_proc_status; + + for (const auto& checker : _cpu_to_status) { + checker.second.compute_status(delta, &by_proc_status); + } + + e_status worst = e_status::ok; + for (const auto& to_cmp : by_proc_status.sequence()) { + if (to_cmp.second > worst) { + worst = to_cmp.second; + } + } + + if (worst == e_status::ok) { // all is ok + auto average_data = delta.find(check_cpu_detail::average_cpu_index); + if (average_data != delta.end()) { + *output = fmt::format("OK: CPU(s) average usage is {:.2f}%", + average_data->second.get_proportional_used() * 100); + } else { + *output = "OK: CPUs usages are ok."; + } + } else { + bool first = true; + // not all cpus ok => display detail per cpu nok + for (const auto& cpu_status : by_proc_status) { + if (cpu_status.second != e_status::ok) { + if (first) { + first = false; + } else { + output->push_back(' '); + } + *output += status_label[cpu_status.second]; + delta[cpu_status.first].dump(cpu_status.first, summary_labels, output); + } + } + } + + auto fill_perfdata = [&, this]( + const std::string_view& label, unsigned index, + unsigned cpu_index, + const per_cpu_time_base& per_cpu_data) { + double val = index >= nb_metric + ? 
per_cpu_data.get_proportional_used() + : per_cpu_data.get_proportional_value(index); + bool is_average = cpu_index == check_cpu_detail::average_cpu_index; + common::perfdata to_add; + to_add.name(label); + to_add.unit("%"); + to_add.min(0); + to_add.max(100); + to_add.value(val * 100); + // we search cpu_to_status to get warning and critical thresholds + // warning + auto cpu_to_status_search = _cpu_to_status.find( + std::make_tuple(index, is_average, e_status::warning)); + if (cpu_to_status_search != _cpu_to_status.end()) { + to_add.warning_low(0); + to_add.warning(100 * cpu_to_status_search->second.get_threshold()); + } + // critical + cpu_to_status_search = _cpu_to_status.find( + std::make_tuple(index, is_average, e_status::critical)); + if (cpu_to_status_search != _cpu_to_status.end()) { + to_add.critical_low(0); + to_add.critical(100 * cpu_to_status_search->second.get_threshold()); + } + perfs->emplace_back(std::move(to_add)); + }; + + if (_cpu_detailed) { + for (const auto& by_core : delta) { + std::string cpu_name; + const char* suffix; + if (by_core.first != check_cpu_detail::average_cpu_index) { + absl::StrAppend(&cpu_name, by_core.first, "~"); + suffix = "#core.cpu.utilization.percentage"; + } else { + suffix = "#cpu.utilization.percentage"; + } + for (unsigned stat_ind = 0; stat_ind < nb_metric; ++stat_ind) { + fill_perfdata((cpu_name + perfdata_labels[stat_ind].data()) + suffix, + stat_ind, by_core.first, by_core.second); + } + fill_perfdata((cpu_name + "used") + suffix, nb_metric, by_core.first, + by_core.second); + } + + } else { + for (const auto& by_core : delta) { + std::string cpu_name; + if (by_core.first != check_cpu_detail::average_cpu_index) { + absl::StrAppend(&cpu_name, by_core.first, + "#core.cpu.utilization.percentage"); + } else { + cpu_name = "cpu.utilization.percentage"; + } + + fill_perfdata(cpu_name, nb_metric, by_core.first, by_core.second); + } + } + return worst; +} diff --git a/agent/src/scheduler.cc b/agent/src/scheduler.cc index 42b30224b8c..08741b12a2c 100644 --- a/agent/src/scheduler.cc +++ b/agent/src/scheduler.cc @@ -17,10 +17,19 @@ */ #include "scheduler.hh" +#include +#include "check.hh" +#include "check_cpu.hh" +#include "check_health.hh" +#ifdef _WIN32 +#include "check_memory.hh" +#include "check_service.hh" +#include "check_uptime.hh" +#endif #include "check_exec.hh" #include "com/centreon/common/rapidjson_helper.hh" #include "com/centreon/common/utf8.hh" -#include "com/centreon/exceptions/msg_fmt.hh" +#include "drive_size.hh" using namespace com::centreon::agent; @@ -31,6 +40,8 @@ using namespace com::centreon::agent; void scheduler::_start() { _init_export_request(); _next_send_time_point = std::chrono::system_clock::now(); + _check_time_step = + time_step(_next_send_time_point, std::chrono::milliseconds(100)); update(_conf); _start_send_timer(); _start_check_timer(); @@ -97,11 +108,13 @@ scheduler::default_config() { * */ void scheduler::_start_check_timer() { - if (_check_queue.empty() || + if (_waiting_check_queue.empty() || _active_check >= _conf->config().max_concurrent_checks()) { - _check_timer.expires_from_now(std::chrono::milliseconds(100)); + _check_time_step.increment_to_after_now(); + _check_timer.expires_at(_check_time_step.value()); } else { - _check_timer.expires_at((*_check_queue.begin())->get_start_expected()); + _check_timer.expires_at( + (*_waiting_check_queue.begin())->get_start_expected()); } _check_timer.async_wait( [me = shared_from_this()](const boost::system::error_code& err) { @@ -129,13 +142,14 @@ void 
scheduler::_check_timer_handler(const boost::system::error_code& err) { */ void scheduler::_start_waiting_check() { time_point now = std::chrono::system_clock::now(); - if (!_check_queue.empty()) { - for (check_queue::iterator to_check = _check_queue.begin(); - !_check_queue.empty() && to_check != _check_queue.end() && + if (!_waiting_check_queue.empty()) { + for (check_queue::iterator to_check = _waiting_check_queue.begin(); + !_waiting_check_queue.empty() && + to_check != _waiting_check_queue.end() && (*to_check)->get_start_expected() <= now && _active_check < _conf->config().max_concurrent_checks();) { _start_check(*to_check); - to_check = _check_queue.erase(to_check); + to_check = _waiting_check_queue.erase(to_check); } } } @@ -149,7 +163,7 @@ void scheduler::_start_waiting_check() { * @param conf */ void scheduler::update(const engine_to_agent_request_ptr& conf) { - _check_queue.clear(); + _waiting_check_queue.clear(); _active_check = 0; size_t nb_check = conf->config().services().size(); @@ -163,11 +177,16 @@ void scheduler::update(const engine_to_agent_request_ptr& conf) { conf->config().check_interval()); if (nb_check > 0) { - duration check_interval = + // reset stats so as not to keep statistics of deleted checks + checks_statistics::pointer stat = std::make_shared<checks_statistics>(); + + duration time_between_check = std::chrono::microseconds(conf->config().check_interval() * 1000000) / nb_check; time_point next = std::chrono::system_clock::now(); + _check_time_step = time_step(next, time_between_check); + auto last_inserted_iter = _waiting_check_queue.end(); for (const auto& serv : conf->config().services()) { if (_logger->level() == spdlog::level::trace) { SPDLOG_LOGGER_TRACE( @@ -180,16 +199,19 @@ void scheduler::update(const engine_to_agent_request_ptr& conf) { } try { auto check_to_schedule = _check_builder( - _io_context, _logger, next, serv.service_description(), - serv.command_name(), serv.command_line(), conf, + _io_context, _logger, next, time_between_check, + serv.service_description(), serv.command_name(), + serv.command_line(), conf, [me = shared_from_this()]( const std::shared_ptr<check>& check, unsigned status, const std::list<com::centreon::common::perfdata>& perfdata, const std::list<std::string>& outputs) { me->_check_handler(check, status, perfdata, outputs); - }); - _check_queue.emplace(check_to_schedule); - next += check_interval; + }, + stat); + last_inserted_iter = _waiting_check_queue.emplace_hint( + last_inserted_iter, check_to_schedule); + next += time_between_check; } catch (const std::exception& e) { SPDLOG_LOGGER_ERROR( _logger, "service: {} command:{} won't be scheduled cause: {}", @@ -199,6 +221,8 @@ void scheduler::update(const engine_to_agent_request_ptr& conf) { } _conf = conf; + + _start_waiting_check(); } /** @@ -249,13 +273,19 @@ void scheduler::_check_handler( --_active_check; if (_alive) { - // repush for next check - check->add_duration_to_start_expected( - std::chrono::seconds(_conf->config().check_interval())); - - _check_queue.insert(check); - // we have decreased _active_check, so we can launch another check - _start_waiting_check(); + time_point min_next_start = + check->get_start_expected() + + std::chrono::seconds(_conf->config().check_interval()); + time_point now = std::chrono::system_clock::now(); + if (min_next_start < now) + min_next_start = now; + + // repush for next check and search for a free start slot in the queue + check->increment_start_expected_to_after_min_timepoint(min_next_start); + while (!_waiting_check_queue.insert(check).second) { + // slot already reserved => try next +
check->add_check_interval_to_start_expected(); + } } } @@ -280,10 +310,10 @@ void scheduler::stop() { * @param outputs */ void scheduler::_store_result_in_metrics( - const check::pointer& check, - unsigned status, - const std::list& perfdata, - const std::list& outputs) { + [[maybe_unused]] const check::pointer& check, + [[maybe_unused]] unsigned status, + [[maybe_unused]] const std::list& perfdata, + [[maybe_unused]] const std::list& outputs) { // auto scope_metrics = // get_scope_metrics(check->get_host(), check->get_service()); // unsigned now = std::chrono::duration_cast( @@ -435,6 +465,11 @@ void scheduler::_add_metric_to_scope( attrib_type->set_key("auto"); break; } + case com::centreon::common::perfdata::gauge: { + auto attrib_type = data_point->add_attributes(); + attrib_type->set_key("gauge"); + break; + } } if (perf.critical() <= std::numeric_limits::max()) { _add_exemplar(perf.critical_mode() ? "crit_ge" : "crit_gt", perf.critical(), @@ -510,12 +545,14 @@ void scheduler::_add_exemplar( std::shared_ptr scheduler::default_check_builder( const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point start_expected, + time_point first_start_expected, + duration check_interval, const std::string& service, const std::string& cmd_name, const std::string& cmd_line, const engine_to_agent_request_ptr& conf, - check::completion_handler&& handler) { + check::completion_handler&& handler, + const checks_statistics::pointer& stat) { using namespace std::literals; // test native checks where cmd_lin is a json try { @@ -523,16 +560,46 @@ std::shared_ptr scheduler::default_check_builder( common::rapidjson_helper::read_from_string(cmd_line); common::rapidjson_helper native_params(native_check_info); std::string_view check_type = native_params.get_string("check"); - const rapidjson::Value& args = native_params.get_member("args"); - - if (check_type == "cpu"sv) { + const rapidjson::Value* args; + if (native_params.has_member("args")) { + args = &native_params.get_member("args"); + } else { + static const rapidjson::Value no_arg; + args = &no_arg; + } + if (check_type == "cpu_percentage"sv) { + return std::make_shared( + io_context, logger, first_start_expected, check_interval, service, + cmd_name, cmd_line, *args, conf, std::move(handler), stat); + } else if (check_type == "health"sv) { + return std::make_shared( + io_context, logger, first_start_expected, check_interval, service, + cmd_name, cmd_line, *args, conf, std::move(handler), stat); +#ifdef _WIN32 + } else if (check_type == "uptime"sv) { + return std::make_shared( + io_context, logger, first_start_expected, check_interval, service, + cmd_name, cmd_line, *args, conf, std::move(handler), stat); + } else if (check_type == "storage"sv) { + return std::make_shared( + io_context, logger, first_start_expected, check_interval, service, + cmd_name, cmd_line, *args, conf, std::move(handler), stat); + } else if (check_type == "memory"sv) { + return std::make_shared( + io_context, logger, first_start_expected, check_interval, service, + cmd_name, cmd_line, *args, conf, std::move(handler), stat); + } else if (check_type == "service"sv) { + return std::make_shared( + io_context, logger, first_start_expected, check_interval, service, + cmd_name, cmd_line, *args, conf, std::move(handler), stat); +#endif } else { throw exceptions::msg_fmt("command {}, unknown native check:{}", cmd_name, cmd_line); } - } catch (const std::exception&) { - return check_exec::load(io_context, logger, start_expected, service, - cmd_name, cmd_line, conf, 
std::move(handler)); + return check_exec::load(io_context, logger, first_start_expected, + check_interval, service, cmd_name, cmd_line, conf, + std::move(handler), stat); } } diff --git a/agent/src/streaming_client.cc b/agent/src/streaming_client.cc index ab38cc67717..df8960d05ff 100644 --- a/agent/src/streaming_client.cc +++ b/agent/src/streaming_client.cc @@ -16,10 +16,9 @@ * For more information : contact@centreon.com */ +#include "agent_info.hh" #include "streaming_client.hh" -#include "check_exec.hh" #include "com/centreon/common/defer.hh" -#include "version.hh" using namespace com::centreon::agent; @@ -144,12 +143,7 @@ void streaming_client::_create_reactor() { // identifies to engine std::shared_ptr who_i_am = std::make_shared(); - auto infos = who_i_am->mutable_init(); - - infos->mutable_centreon_version()->set_major(CENTREON_AGENT_VERSION_MAJOR); - infos->mutable_centreon_version()->set_minor(CENTREON_AGENT_VERSION_MINOR); - infos->mutable_centreon_version()->set_patch(CENTREON_AGENT_VERSION_PATCH); - infos->set_host(_supervised_host); + fill_agent_info(_supervised_host, who_i_am->mutable_init()); _reactor->write(who_i_am); } @@ -191,7 +185,7 @@ void streaming_client::_send(const std::shared_ptr& request) { * @param request */ void streaming_client::on_incomming_request( - const std::shared_ptr& caller, + const std::shared_ptr& caller [[maybe_unused]], const std::shared_ptr& request) { // incoming request is used in main thread _io_context->post([request, sched = _sched]() { sched->update(request); }); diff --git a/agent/src/streaming_server.cc b/agent/src/streaming_server.cc index 681ce348a25..e6ad15d6717 100644 --- a/agent/src/streaming_server.cc +++ b/agent/src/streaming_server.cc @@ -16,10 +16,9 @@ * For more information : contact@centreon.com */ +#include "agent_info.hh" #include "streaming_server.hh" -#include "check_exec.hh" #include "scheduler.hh" -#include "version.hh" using namespace com::centreon::agent; @@ -88,12 +87,7 @@ void server_reactor::_start() { // identifies to engine std::shared_ptr who_i_am = std::make_shared(); - auto infos = who_i_am->mutable_init(); - - infos->mutable_centreon_version()->set_major(CENTREON_AGENT_VERSION_MAJOR); - infos->mutable_centreon_version()->set_minor(CENTREON_AGENT_VERSION_MINOR); - infos->mutable_centreon_version()->set_patch(CENTREON_AGENT_VERSION_PATCH); - infos->set_host(_supervised_host); + fill_agent_info(_supervised_host, who_i_am->mutable_init()); write(who_i_am); } diff --git a/agent/test/CMakeLists.txt b/agent/test/CMakeLists.txt index 897aea3b643..89da2f4b434 100644 --- a/agent/test/CMakeLists.txt +++ b/agent/test/CMakeLists.txt @@ -19,14 +19,20 @@ set( SRC_COMMON check_test.cc check_exec_test.cc + drive_size_test.cc + check_health_test.cc scheduler_test.cc test_main.cc ) if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") - set(SRC ${SRC_COMMON} config_test.cc) + set(SRC ${SRC_COMMON} config_test.cc check_linux_cpu_test.cc) else() - set(SRC ${SRC_COMMON}) + set(SRC ${SRC_COMMON} + check_windows_cpu_test.cc + check_windows_memory_test.cc + check_uptime_test.cc + check_windows_service_test.cc) endif() @@ -71,7 +77,7 @@ else() Boost::program_options gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts fmt::fmt - ) + pdh) endif() add_dependencies(ut_agent centreon_common centagent_lib) diff --git a/agent/test/check_exec_test.cc b/agent/test/check_exec_test.cc index b3b547cfd13..23966c702b4 100644 --- a/agent/test/check_exec_test.cc +++ b/agent/test/check_exec_test.cc @@ -17,12 +17,14 @@ */ #include +#include +#include "check.hh" 
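For context on the default_check_builder change earlier in this patch: the command line is treated as JSON first, and only when that fails (or the check type is unknown) does the agent fall back to running an external command. The following is a minimal standalone sketch of that dispatch idea, not the agent's actual API; builder_kind and choose_builder are hypothetical names and only two of the native check types are shown.

#include <rapidjson/document.h>
#include <string>
#include <string_view>

// Hypothetical classification of a command line, for illustration only.
enum class builder_kind { native_cpu, native_health, exec_fallback };

builder_kind choose_builder(const std::string& cmd_line) {
  rapidjson::Document doc;
  doc.Parse(cmd_line.c_str());
  // Not JSON, or no string "check" member: treat it as a plain executable check.
  if (doc.HasParseError() || !doc.IsObject() || !doc.HasMember("check") ||
      !doc["check"].IsString())
    return builder_kind::exec_fallback;

  std::string_view check_type = doc["check"].GetString();
  if (check_type == "cpu_percentage")
    return builder_kind::native_cpu;
  if (check_type == "health")
    return builder_kind::native_health;
  // The real builder reports an error for unknown native checks; the sketch
  // simply falls back to the external-command path.
  return builder_kind::exec_fallback;
}

A service configured with a JSON command line such as {"check":"cpu_percentage","args":{}} would therefore be handled natively, while a classic plugin command line would go through the check_exec path.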
#include "check_exec.hh" using namespace com::centreon::agent; -#ifdef _WINDOWS +#ifdef _WIN32 #define ECHO_PATH "tests\\echo.bat" #define SLEEP_PATH "tests\\sleep.bat" #define END_OF_LINE "\r\n" @@ -45,11 +47,13 @@ TEST(check_exec_test, echo) { std::mutex mut; std::condition_variable cond; std::shared_ptr check = check_exec::load( - g_io_context, spdlog::default_logger(), time_point(), serv, cmd_name, + g_io_context, spdlog::default_logger(), {}, {}, serv, cmd_name, command_line, engine_to_agent_request_ptr(), - [&](const std::shared_ptr& caller, + [&]([[maybe_unused]] const std::shared_ptr& + caller, int statuss, - const std::list& perfdata, + [[maybe_unused]] const std::list& + perfdata, const std::list& output) { { std::lock_guard l(mut); @@ -57,7 +61,8 @@ TEST(check_exec_test, echo) { outputs = output; } cond.notify_one(); - }); + }, + std::make_shared()); check->start_check(std::chrono::seconds(1)); std::unique_lock l(mut); @@ -68,30 +73,49 @@ TEST(check_exec_test, echo) { } TEST(check_exec_test, timeout) { - command_line = SLEEP_PATH " 5"; + command_line = SLEEP_PATH " 120"; int status; std::list outputs; std::condition_variable cond; std::shared_ptr check = check_exec::load( - g_io_context, spdlog::default_logger(), time_point(), serv, cmd_name, + g_io_context, spdlog::default_logger(), {}, {}, serv, cmd_name, command_line, engine_to_agent_request_ptr(), - [&](const std::shared_ptr& caller, + [&]([[maybe_unused]] const std::shared_ptr& + caller, int statuss, - const std::list& perfdata, + [[maybe_unused]] const std::list& + perfdata, const std::list& output) { status = statuss; outputs = output; cond.notify_one(); - }); + }, + std::make_shared()); check->start_check(std::chrono::seconds(1)); + int pid = check->get_pid(); + std::mutex mut; std::unique_lock l(mut); cond.wait(l); ASSERT_NE(status, 0); ASSERT_EQ(outputs.size(), 1); - ASSERT_EQ(*outputs.begin(), "Timeout at execution of " SLEEP_PATH " 5"); + ASSERT_EQ(*outputs.begin(), "Timeout at execution of " SLEEP_PATH " 120"); + ASSERT_GT(pid, 0); + std::this_thread::sleep_for(std::chrono::seconds(1)); + +#ifdef _WIN32 + auto process_handle = + OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid); + ASSERT_NE(process_handle, nullptr); + DWORD exit_code; + ASSERT_EQ(GetExitCodeProcess(process_handle, &exit_code), TRUE); + ASSERT_NE(exit_code, STILL_ACTIVE); + CloseHandle(process_handle); +#else + ASSERT_EQ(kill(pid, 0), -1); +#endif } TEST(check_exec_test, bad_command) { @@ -101,11 +125,13 @@ TEST(check_exec_test, bad_command) { std::condition_variable cond; std::mutex mut; std::shared_ptr check = check_exec::load( - g_io_context, spdlog::default_logger(), time_point(), serv, cmd_name, + g_io_context, spdlog::default_logger(), {}, {}, serv, cmd_name, command_line, engine_to_agent_request_ptr(), - [&](const std::shared_ptr& caller, + [&]([[maybe_unused]] const std::shared_ptr& + caller, int statuss, - const std::list& perfdata, + [[maybe_unused]] const std::list& + perfdata, const std::list& output) { { std::lock_guard l(mut); @@ -115,14 +141,15 @@ TEST(check_exec_test, bad_command) { SPDLOG_INFO("end of {}", command_line); std::this_thread::sleep_for(std::chrono::milliseconds(50)); cond.notify_one(); - }); + }, + std::make_shared()); check->start_check(std::chrono::seconds(1)); std::unique_lock l(mut); cond.wait(l); ASSERT_EQ(status, 3); ASSERT_EQ(outputs.size(), 1); -#ifdef _WINDOWS +#ifdef _WIN32 // message is language dependant ASSERT_GE(outputs.begin()->size(), 20); #else @@ -137,17 +164,19 @@ TEST(check_exec_test, 
recurse_not_lock) { std::condition_variable cond; unsigned cpt = 0; std::shared_ptr check = check_exec::load( - g_io_context, spdlog::default_logger(), time_point(), serv, cmd_name, + g_io_context, spdlog::default_logger(), {}, {}, serv, cmd_name, command_line, engine_to_agent_request_ptr(), [&](const std::shared_ptr& caller, int, - const std::list& perfdata, - const std::list& output) { + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& output) { if (!cpt) { ++cpt; caller->start_check(std::chrono::seconds(1)); } else cond.notify_one(); - }); + }, + std::make_shared()); check->start_check(std::chrono::seconds(1)); std::mutex mut; diff --git a/agent/test/check_health_test.cc b/agent/test/check_health_test.cc new file mode 100644 index 00000000000..339241d8ab9 --- /dev/null +++ b/agent/test/check_health_test.cc @@ -0,0 +1,339 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include +#include +#include "check.hh" +#include "com/centreon/common/rapidjson_helper.hh" + +#include "check_health.hh" +#include "config.hh" +#include "version.hh" + +extern std::shared_ptr g_io_context; + +using namespace com::centreon::agent; +using namespace std::string_literals; +using namespace com::centreon::common::literals; +using namespace std::chrono_literals; + +TEST(check_health_test, no_threshold_no_reverse) { + config::load(false); + + rapidjson::Document check_args = + R"({ "warning-interval" : "", "critical-interval" : ""})"_json; + + auto stats = std::make_shared(); + + stats->add_interval_stat("command1", 10s); + stats->add_duration_stat("command1", 20s); + stats->add_interval_stat("command2", 15s); + stats->add_duration_stat("command2", 25s); + + check_health checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + stats); + + std::string output; + std::list perfs; + e_status ret = checker.compute(&output, &perfs); + EXPECT_EQ(ret, e_status::ok); + EXPECT_EQ(output, "OK: Version: " CENTREON_AGENT_VERSION + " - Connection mode: Agent initiated - Current " + "configuration: 2 checks - Average runtime: 22s"); + EXPECT_EQ(perfs.size(), 2); + for (const auto& perf : perfs) { + EXPECT_EQ(perf.unit(), "s"); + EXPECT_TRUE(std::isnan(perf.warning_low())); + EXPECT_TRUE(std::isnan(perf.warning())); + EXPECT_TRUE(std::isnan(perf.critical_low())); + EXPECT_TRUE(std::isnan(perf.critical())); + if (perf.name() == "runtime") { + EXPECT_EQ(perf.value(), 25); + } else if (perf.name() == "interval") { + EXPECT_EQ(perf.value(), 15); + } else { + FAIL() << "Unexpected perfdata name: " << perf.name(); + } + } +} + +TEST(check_health_test, no_threshold_reverse) { + config::load(true); + + rapidjson::Document check_args = + R"({ "warning-interval" : 
"", "critical-interval" : ""})"_json; + + auto stats = std::make_shared(); + + stats->add_interval_stat("command1", 10s); + stats->add_duration_stat("command1", 20s); + stats->add_interval_stat("command2", 15s); + stats->add_duration_stat("command2", 25s); + + check_health checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + stats); + + std::string output; + std::list perfs; + e_status ret = checker.compute(&output, &perfs); + EXPECT_EQ(ret, e_status::ok); + EXPECT_EQ(output, "OK: Version: " CENTREON_AGENT_VERSION + " - Connection mode: Poller initiated - Current " + "configuration: 2 checks - Average runtime: 22s"); + EXPECT_EQ(perfs.size(), 2); + for (const auto& perf : perfs) { + EXPECT_EQ(perf.unit(), "s"); + EXPECT_TRUE(std::isnan(perf.warning_low())); + EXPECT_TRUE(std::isnan(perf.warning())); + EXPECT_TRUE(std::isnan(perf.critical_low())); + EXPECT_TRUE(std::isnan(perf.critical())); + if (perf.name() == "runtime") { + EXPECT_EQ(perf.value(), 25); + } else if (perf.name() == "interval") { + EXPECT_EQ(perf.value(), 15); + } else { + FAIL() << "Unexpected perfdata name: " << perf.name(); + } + } +} + +TEST(check_health_test, threshold_1) { + config::load(true); + + rapidjson::Document check_args = + R"({ "warning-interval" : "9", "critical-interval" : "14"})"_json; + + auto stats = std::make_shared(); + + stats->add_interval_stat("command1", 10s); + stats->add_duration_stat("command1", 20s); + stats->add_interval_stat("command2", 15s); + stats->add_duration_stat("command2", 25s); + + check_health checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + stats); + + std::string output; + std::list perfs; + e_status ret = checker.compute(&output, &perfs); + EXPECT_EQ(ret, e_status::critical); + EXPECT_EQ(output, + "CRITICAL: command2 runtime:25s interval:15s - WARNING: command1 " + "runtime:20s interval:10s - Version: " CENTREON_AGENT_VERSION + " - Connection mode: Poller initiated - Current configuration: 2 " + "checks - Average runtime: 22s"); + EXPECT_EQ(perfs.size(), 2); + for (const auto& perf : perfs) { + EXPECT_EQ(perf.unit(), "s"); + if (perf.name() == "runtime") { + EXPECT_TRUE(std::isnan(perf.warning_low())); + EXPECT_TRUE(std::isnan(perf.warning())); + EXPECT_TRUE(std::isnan(perf.critical_low())); + EXPECT_TRUE(std::isnan(perf.critical())); + EXPECT_EQ(perf.value(), 25); + } else if (perf.name() == "interval") { + EXPECT_EQ(perf.value(), 15); + EXPECT_EQ(perf.warning_low(), 0); + EXPECT_EQ(perf.warning(), 9); + EXPECT_EQ(perf.critical_low(), 0); + EXPECT_EQ(perf.critical(), 14); + } else { + FAIL() << "Unexpected perfdata name: " << perf.name(); + } + } +} + +TEST(check_health_test, threshold_2) { + config::load(true); + + rapidjson::Document check_args = + R"({ "warning-interval" : "9", "critical-interval" : "14", "warning-runtime": 19, "critical-runtime":24})"_json; + + auto stats = std::make_shared(); + + stats->add_interval_stat("command1", 10s); + stats->add_duration_stat("command1", 20s); + stats->add_interval_stat("command2", 15s); + stats->add_duration_stat("command2", 25s); + + 
check_health checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + stats); + + std::string output; + std::list perfs; + e_status ret = checker.compute(&output, &perfs); + EXPECT_EQ(ret, e_status::critical); + EXPECT_EQ(output, + "CRITICAL: command2 runtime:25s interval:15s - WARNING: command1 " + "runtime:20s interval:10s - Version: " CENTREON_AGENT_VERSION + " - Connection mode: Poller initiated - Current configuration: 2 " + "checks - Average runtime: 22s"); + EXPECT_EQ(perfs.size(), 2); + for (const auto& perf : perfs) { + EXPECT_EQ(perf.unit(), "s"); + if (perf.name() == "runtime") { + EXPECT_EQ(perf.value(), 25); + EXPECT_EQ(perf.warning_low(), 0); + EXPECT_EQ(perf.warning(), 19); + EXPECT_EQ(perf.critical_low(), 0); + EXPECT_EQ(perf.critical(), 24); + } else if (perf.name() == "interval") { + EXPECT_EQ(perf.value(), 15); + EXPECT_EQ(perf.warning_low(), 0); + EXPECT_EQ(perf.warning(), 9); + EXPECT_EQ(perf.critical_low(), 0); + EXPECT_EQ(perf.critical(), 14); + } else { + FAIL() << "Unexpected perfdata name: " << perf.name(); + } + } +} + +TEST(check_health_test, threshold_3) { + config::load(true); + + rapidjson::Document check_args = + R"({ "warning-interval" : "", "critical-interval" : "14", "warning-runtime": 19})"_json; + + auto stats = std::make_shared(); + + stats->add_interval_stat("command1", 10s); + stats->add_duration_stat("command1", 20s); + stats->add_interval_stat("command2", 15s); + stats->add_duration_stat("command2", 25s); + + check_health checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + stats); + + std::string output; + std::list perfs; + e_status ret = checker.compute(&output, &perfs); + EXPECT_EQ(ret, e_status::critical); + EXPECT_EQ(output, + "CRITICAL: command2 runtime:25s interval:15s - WARNING: command1 " + "runtime:20s interval:10s - Version: " CENTREON_AGENT_VERSION + " - Connection mode: Poller initiated - Current configuration: 2 " + "checks - Average runtime: 22s"); + EXPECT_EQ(perfs.size(), 2); + for (const auto& perf : perfs) { + EXPECT_EQ(perf.unit(), "s"); + if (perf.name() == "runtime") { + EXPECT_EQ(perf.value(), 25); + EXPECT_EQ(perf.warning_low(), 0); + EXPECT_EQ(perf.warning(), 19); + EXPECT_TRUE(std::isnan(perf.critical_low())); + EXPECT_TRUE(std::isnan(perf.critical())); + } else if (perf.name() == "interval") { + EXPECT_EQ(perf.value(), 15); + EXPECT_TRUE(std::isnan(perf.warning_low())); + EXPECT_TRUE(std::isnan(perf.warning())); + EXPECT_EQ(perf.critical_low(), 0); + EXPECT_EQ(perf.critical(), 14); + } else { + FAIL() << "Unexpected perfdata name: " << perf.name(); + } + } +} + +TEST(check_health_test, threshold_4) { + config::load(true); + + rapidjson::Document check_args = + R"({ "warning-interval" : "", "critical-interval" : "16", "warning-runtime": 19})"_json; + + auto stats = std::make_shared(); + + stats->add_interval_stat("command1", 10s); + stats->add_duration_stat("command1", 20s); + stats->add_interval_stat("command2", 15s); + stats->add_duration_stat("command2", 25s); + + check_health checker( + g_io_context, spdlog::default_logger(), {}, {}, 
"serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + stats); + + std::string output; + std::list perfs; + e_status ret = checker.compute(&output, &perfs); + EXPECT_EQ(ret, e_status::warning); + EXPECT_EQ(output, + "WARNING: command2 runtime:25s interval:15s, command1 runtime:20s " + "interval:10s - Version: " CENTREON_AGENT_VERSION + " - Connection mode: Poller initiated - Current configuration: 2 " + "checks - Average runtime: 22s"); + EXPECT_EQ(perfs.size(), 2); + for (const auto& perf : perfs) { + EXPECT_EQ(perf.unit(), "s"); + if (perf.name() == "runtime") { + EXPECT_EQ(perf.value(), 25); + EXPECT_EQ(perf.warning_low(), 0); + EXPECT_EQ(perf.warning(), 19); + EXPECT_TRUE(std::isnan(perf.critical_low())); + EXPECT_TRUE(std::isnan(perf.critical())); + } else if (perf.name() == "interval") { + EXPECT_EQ(perf.value(), 15); + EXPECT_TRUE(std::isnan(perf.warning_low())); + EXPECT_TRUE(std::isnan(perf.warning())); + EXPECT_EQ(perf.critical_low(), 0); + EXPECT_EQ(perf.critical(), 16); + } else { + FAIL() << "Unexpected perfdata name: " << perf.name(); + } + } +} diff --git a/agent/test/check_linux_cpu_test.cc b/agent/test/check_linux_cpu_test.cc new file mode 100644 index 00000000000..f407a0089c4 --- /dev/null +++ b/agent/test/check_linux_cpu_test.cc @@ -0,0 +1,700 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include + +#include "check_cpu.hh" +#include "com/centreon/common/rapidjson_helper.hh" + +extern std::shared_ptr g_io_context; + +using namespace com::centreon::agent; + +const char* proc_sample = + R"(cpu 4360186 24538 1560174 17996659 64169 0 93611 0 0 0 +cpu0 1089609 6082 396906 4497394 15269 0 11914 0 0 0 +cpu1 1082032 5818 391692 4456828 16624 0 72471 0 0 0 +cpu2 1095585 6334 386205 4524762 16543 0 1774 0 0 0 +cpu3 1092959 6304 385370 4517673 15732 0 7451 0 0 0 +intr 213853764 0 35 0 0 0 0 0 0 0 56927 0 0 134 0 0 0 48 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 29851994 30 0 408 411 0 0 0 0 0 0 0 0 0 0 0 0 0 0 43 26 529900 571944 554845 556829 19615758 7070 8 0 0 0 0 2 15 3220 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 529237135 +btime 1728880818 +processes 274444 +procs_running 2 +procs_blocked 0 +softirq 160085949 64462978 14075755 1523012 4364896 33 0 17578206 28638313 73392 29369364 +)"; + +TEST(proc_stat_file_test, read_sample) { + constexpr const char* test_file_path = "/tmp/proc_stat_test"; + + ::remove(test_file_path); + std::ofstream f(test_file_path); + f.write(proc_sample, strlen(proc_sample)); + + check_cpu_detail::proc_stat_file to_compare(test_file_path, 4); + + for (const auto& by_cpu : to_compare.get_values()) { + switch (by_cpu.first) { + case 0: + ASSERT_EQ(by_cpu.second.get_total(), 6017174); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_used(), + (6017174.0 - 4497394.0) / 6017174); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + check_cpu_detail::e_proc_stat_index::user), + 1089609.0 / 6017174); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + 
check_cpu_detail::e_proc_stat_index::nice), + 6082.0 / 6017174); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + check_cpu_detail::e_proc_stat_index::system), + 396906.0 / 6017174); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + check_cpu_detail::e_proc_stat_index::idle), + 4497394.0 / 6017174); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + check_cpu_detail::e_proc_stat_index::iowait), + 15269.0 / 6017174); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + check_cpu_detail::e_proc_stat_index::irq), + 0); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + check_cpu_detail::e_proc_stat_index::soft_irq), + 11914.0 / 6017174); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + check_cpu_detail::e_proc_stat_index::steal), + 0); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + check_cpu_detail::e_proc_stat_index::guest), + 0); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + check_cpu_detail::e_proc_stat_index::guest_nice), + 0); + break; + case 1: + ASSERT_EQ(by_cpu.second.get_total(), 6025465); + break; + case 2: + ASSERT_EQ(by_cpu.second.get_total(), 6031203); + break; + case 3: + ASSERT_EQ(by_cpu.second.get_total(), 6025489); + break; + case check_cpu_detail::average_cpu_index: + ASSERT_EQ(by_cpu.second.get_total(), 24099337); + ASSERT_DOUBLE_EQ(by_cpu.second.get_proportional_value( + check_cpu_detail::e_proc_stat_index::system), + 1560174.0 / 24099337); + break; + default: + FAIL() << "unexpected cpu:" << by_cpu.first; + break; + } + } +} + +const char* proc_sample_2 = + R"(cpu 4574560 24547 1630654 18918908 68531 0 96832 0 0 0 +cpu0 1143030 6086 414440 4726292 16461 0 14668 0 0 0 +cpu1 1135947 5820 409352 4687911 17696 0 72516 0 0 0 +cpu2 1149227 6335 404370 4754742 17697 0 2149 0 0 0 +cpu3 1146355 6305 402491 4749962 16675 0 7498 0 0 0 +intr 224918652 0 35 0 0 0 0 0 0 0 57636 0 0 134 0 0 0 48 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 30628697 30 0 408 411 0 0 0 0 0 0 0 0 0 0 0 0 0 0 43 26 564911 598184 598096 594403 20270994 8610 8 0 0 0 0 2 15 3220 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 558464714 +btime 1728880818 +processes 289981 +procs_running 1 +procs_blocked 0 +softirq 166407220 66442046 14763247 1577070 4447556 33 0 18081353 30219191 75659 30801065 +)"; + +using namespace std::string_literals; + +TEST(proc_stat_file_test, no_threshold) { + constexpr const char* test_file_path = "/tmp/proc_stat_test"; + { + ::remove(test_file_path); + std::ofstream f(test_file_path); + f.write(proc_sample, strlen(proc_sample)); + } + constexpr const char* test_file_path2 = "/tmp/proc_stat_test2"; + { + ::remove(test_file_path2); + std::ofstream f(test_file_path2); + f.write(proc_sample_2, strlen(proc_sample_2)); + } + + check_cpu_detail::proc_stat_file first_measure(test_file_path, 4); + + check_cpu_detail::proc_stat_file second_measure(test_file_path2, 4); + + auto delta = second_measure.subtract(first_measure); + + std::string output; + std::list perfs; + + rapidjson::Document check_args; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + e_status status = + checker.compute(first_measure, second_measure, &output, &perfs); + ASSERT_EQ(status, e_status::ok); + ASSERT_EQ(output, "OK: CPU(s) average usage is 24.08%"); + + ASSERT_EQ(perfs.size(), 5); + + for (const auto& perf : perfs) { + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_FALSE(perf.critical_mode()); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + ASSERT_FALSE(perf.warning_mode()); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + ASSERT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + if (perf.name() == "0#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), delta[0].get_proportional_used() * 100, 0.01); + } else if (perf.name() == "1#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), delta[1].get_proportional_used() * 100, 0.01); + } else if (perf.name() == "2#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), delta[2].get_proportional_used() * 100, 0.01); + } else if (perf.name() == "3#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), delta[3].get_proportional_used() * 100, 0.01); + } else if (perf.name() == "cpu.utilization.percentage") { + ASSERT_NEAR( + perf.value(), + delta[check_cpu_detail::average_cpu_index].get_proportional_used() * + 100, + 0.01); + } else { + FAIL() << "unexpected perfdata name:" << perf.name(); + } + } +} + +TEST(proc_stat_file_test, no_threshold_detailed) { + constexpr const char* test_file_path = "/tmp/proc_stat_test"; + { + ::remove(test_file_path); + std::ofstream f(test_file_path); + f.write(proc_sample, strlen(proc_sample)); + } + constexpr const char* test_file_path2 = "/tmp/proc_stat_test2"; + { + ::remove(test_file_path2); + std::ofstream f(test_file_path2); + f.write(proc_sample_2, 
strlen(proc_sample_2)); + } + + check_cpu_detail::proc_stat_file first_measure(test_file_path, 4); + + check_cpu_detail::proc_stat_file second_measure(test_file_path2, 4); + + auto delta = second_measure.subtract(first_measure); + + std::string output; + std::list perfs; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = R"({"cpu-detailed":"true"})"_json; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + e_status status = + checker.compute(first_measure, second_measure, &output, &perfs); + ASSERT_EQ(status, e_status::ok); + ASSERT_EQ(output, "OK: CPU(s) average usage is 24.08%"); + + ASSERT_EQ(perfs.size(), 55); + + for (const auto& perf : perfs) { + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_FALSE(perf.critical_mode()); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + ASSERT_FALSE(perf.warning_mode()); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + ASSERT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + + unsigned cpu_index = check_cpu_detail::average_cpu_index; + std::string counter_type; + if (std::isdigit(perf.name()[0])) { + cpu_index = perf.name()[0] - '0'; + counter_type = perf.name().substr(2, perf.name().find('#') - 2); + } else { + counter_type = perf.name().substr(0, perf.name().find('#')); + } + const auto& cpu_data = delta[cpu_index]; + if (counter_type == "user") { + ASSERT_NEAR(perf.value(), + (cpu_data.get_proportional_value( + check_cpu_detail::e_proc_stat_index::user) * + 100), + 0.01); + } else if (counter_type == "nice") { + ASSERT_NEAR(perf.value(), + cpu_data.get_proportional_value( + check_cpu_detail::e_proc_stat_index::nice) * + 100, + 0.01); + } else if (counter_type == "system") { + ASSERT_NEAR(perf.value(), + cpu_data.get_proportional_value( + check_cpu_detail::e_proc_stat_index::system) * + 100, + 0.01); + } else if (counter_type == "idle") { + ASSERT_NEAR(perf.value(), + cpu_data.get_proportional_value( + check_cpu_detail::e_proc_stat_index::idle) * + 100, + 0.01); + } else if (counter_type == "iowait") { + ASSERT_NEAR(perf.value(), + cpu_data.get_proportional_value( + check_cpu_detail::e_proc_stat_index::iowait) * + 100, + 0.01); + } else if (counter_type == "interrupt") { + ASSERT_NEAR(perf.value(), + cpu_data.get_proportional_value( + check_cpu_detail::e_proc_stat_index::irq) * + 100, + 0.01); + } else if (counter_type == "softirq") { + ASSERT_NEAR(perf.value(), + cpu_data.get_proportional_value( + check_cpu_detail::e_proc_stat_index::soft_irq) * + 100, + 0.01); + } else if (counter_type == "steal") { + ASSERT_NEAR(perf.value(), + cpu_data.get_proportional_value( + check_cpu_detail::e_proc_stat_index::steal) * + 100, + 0.01); + } else if (counter_type == "guest") { + ASSERT_NEAR(perf.value(), + cpu_data.get_proportional_value( + check_cpu_detail::e_proc_stat_index::guest) * + 100, + 0.01); + } else if (counter_type == "guestnice") { + ASSERT_NEAR(perf.value(), + cpu_data.get_proportional_value( + check_cpu_detail::e_proc_stat_index::guest_nice) * + 100, + 0.01); + } else if (counter_type == "used") { + ASSERT_NEAR(perf.value(), cpu_data.get_proportional_used() * 100, 0.01); + } else 
{ + FAIL() << "unexpected perfdata name:" << perf.name(); + } + } +} + +TEST(proc_stat_file_test, threshold_nodetailed) { + constexpr const char* test_file_path = "/tmp/proc_stat_test"; + { + ::remove(test_file_path); + std::ofstream f(test_file_path); + f.write(proc_sample, strlen(proc_sample)); + } + constexpr const char* test_file_path2 = "/tmp/proc_stat_test2"; + { + ::remove(test_file_path2); + std::ofstream f(test_file_path2); + f.write(proc_sample_2, strlen(proc_sample_2)); + } + + check_cpu_detail::proc_stat_file first_measure(test_file_path, 4); + + check_cpu_detail::proc_stat_file second_measure(test_file_path2, 4); + + auto delta = second_measure.subtract(first_measure); + + std::string output; + std::list perfs; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({"warning-core" : "24.1", "critical-core" : 24.4, "warning-average" : "10", "critical-average" : "20"})"_json; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + e_status status = + checker.compute(first_measure, second_measure, &output, &perfs); + ASSERT_EQ(status, e_status::critical); + ASSERT_EQ( + output, + R"(CRITICAL: CPU'0' Usage: 24.66%, User 17.58%, Nice 0.00%, System 5.77%, Idle 75.34%, IOWait 0.39%, Interrupt 0.00%, Soft Irq 0.91%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% WARNING: CPU'2' Usage: 24.18%, User 17.69%, Nice 0.00%, System 5.99%, Idle 75.82%, IOWait 0.38%, Interrupt 0.00%, Soft Irq 0.12%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% CRITICAL: CPU(s) average Usage: 24.08%, User 17.65%, Nice 0.00%, System 5.80%, Idle 75.92%, IOWait 0.36%, Interrupt 0.00%, Soft Irq 0.27%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00%)"); + + ASSERT_EQ(perfs.size(), 5); + + for (const auto& perf : perfs) { + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_FALSE(perf.critical_mode()); + ASSERT_EQ(perf.warning_low(), 0); + if (perf.name() == "cpu.utilization.percentage") { + ASSERT_NEAR(perf.warning(), 10, 0.01); + ASSERT_NEAR(perf.critical(), 20, 0.01); + } else { + ASSERT_NEAR(perf.warning(), 24.1, 0.01); + ASSERT_NEAR(perf.critical(), 24.4, 0.01); + } + ASSERT_FALSE(perf.warning_mode()); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + ASSERT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + if (perf.name() == "0#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), delta[0].get_proportional_used() * 100, 0.01); + } else if (perf.name() == "1#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), delta[1].get_proportional_used() * 100, 0.01); + } else if (perf.name() == "2#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), delta[2].get_proportional_used() * 100, 0.01); + } else if (perf.name() == "3#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), delta[3].get_proportional_used() * 100, 0.01); + } else if (perf.name() == "cpu.utilization.percentage") { + ASSERT_NEAR( + perf.value(), + delta[check_cpu_detail::average_cpu_index].get_proportional_used() * + 100, + 0.01); + } else { + FAIL() << "unexpected perfdata name:" << perf.name(); + } + } +} + +TEST(proc_stat_file_test, threshold_nodetailed2) { + constexpr const char* test_file_path = "/tmp/proc_stat_test"; + { + ::remove(test_file_path); + 
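The Usage percentages asserted in these outputs can be recomputed directly from the two /proc/stat samples: for each cpu line the total is the sum of its ten counters, and the used share is everything except idle, measured on the difference between the two snapshots. A standalone sketch of that computation (simplified parsing, not the agent's proc_stat_file class; parse_cpu_line and used_percent are hypothetical names):

#include <array>
#include <cstdint>
#include <sstream>
#include <string>

// The ten counters of a /proc/stat cpu line: user nice system idle iowait irq
// softirq steal guest guest_nice.
using cpu_counters = std::array<uint64_t, 10>;

cpu_counters parse_cpu_line(const std::string& line) {
  std::istringstream iss(line);
  std::string label;
  cpu_counters c{};
  iss >> label;  // "cpu0", "cpu1", ... or "cpu" for the machine-wide line
  for (auto& v : c)
    iss >> v;
  return c;
}

// Percentage of non-idle time between two snapshots of the same CPU.
double used_percent(const cpu_counters& first, const cpu_counters& second) {
  uint64_t total1 = 0, total2 = 0;
  for (auto v : first)
    total1 += v;
  for (auto v : second)
    total2 += v;
  const uint64_t dtotal = total2 - total1;
  const uint64_t didle = second[3] - first[3];  // index 3 is the idle counter
  return dtotal ? 100.0 * (dtotal - didle) / dtotal : 0.0;
}

Feeding it the cpu0 lines of proc_sample and proc_sample_2 gives roughly 24.66, the CPU'0' usage asserted above.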
std::ofstream f(test_file_path); + f.write(proc_sample, strlen(proc_sample)); + } + constexpr const char* test_file_path2 = "/tmp/proc_stat_test2"; + { + ::remove(test_file_path2); + std::ofstream f(test_file_path2); + f.write(proc_sample_2, strlen(proc_sample_2)); + } + + check_cpu_detail::proc_stat_file first_measure(test_file_path, 4); + + check_cpu_detail::proc_stat_file second_measure(test_file_path2, 4); + + auto delta = second_measure.subtract(first_measure); + + std::string output; + std::list perfs; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({"warning-core-iowait" : "0.36", "critical-core-iowait" : "0.39", "warning-average-iowait" : "0.3", "critical-average-iowait" : "0.4"})"_json; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + e_status status = + checker.compute(first_measure, second_measure, &output, &perfs); + ASSERT_EQ(status, e_status::critical); + ASSERT_EQ( + output, + R"(CRITICAL: CPU'0' Usage: 24.66%, User 17.58%, Nice 0.00%, System 5.77%, Idle 75.34%, IOWait 0.39%, Interrupt 0.00%, Soft Irq 0.91%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% WARNING: CPU'2' Usage: 24.18%, User 17.69%, Nice 0.00%, System 5.99%, Idle 75.82%, IOWait 0.38%, Interrupt 0.00%, Soft Irq 0.12%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% WARNING: CPU(s) average Usage: 24.08%, User 17.65%, Nice 0.00%, System 5.80%, Idle 75.92%, IOWait 0.36%, Interrupt 0.00%, Soft Irq 0.27%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00%)"); + + ASSERT_EQ(perfs.size(), 5); + + for (const auto& perf : perfs) { + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_FALSE(perf.critical_mode()); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + ASSERT_FALSE(perf.warning_mode()); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + ASSERT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + } +} + +TEST(proc_stat_file_test, threshold_detailed) { + constexpr const char* test_file_path = "/tmp/proc_stat_test"; + { + ::remove(test_file_path); + std::ofstream f(test_file_path); + f.write(proc_sample, strlen(proc_sample)); + } + constexpr const char* test_file_path2 = "/tmp/proc_stat_test2"; + { + ::remove(test_file_path2); + std::ofstream f(test_file_path2); + f.write(proc_sample_2, strlen(proc_sample_2)); + } + + check_cpu_detail::proc_stat_file first_measure(test_file_path, 4); + + check_cpu_detail::proc_stat_file second_measure(test_file_path2, 4); + + auto delta = second_measure.subtract(first_measure); + + std::string output; + std::list perfs; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({"cpu-detailed":true, "warning-core" : "24.1", "critical-core" : "24.4", "warning-average" : "10", "critical-average" : "20"})"_json; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + e_status status = + checker.compute(first_measure, 
second_measure, &output, &perfs); + ASSERT_EQ(status, e_status::critical); + ASSERT_EQ( + output, + R"(CRITICAL: CPU'0' Usage: 24.66%, User 17.58%, Nice 0.00%, System 5.77%, Idle 75.34%, IOWait 0.39%, Interrupt 0.00%, Soft Irq 0.91%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% WARNING: CPU'2' Usage: 24.18%, User 17.69%, Nice 0.00%, System 5.99%, Idle 75.82%, IOWait 0.38%, Interrupt 0.00%, Soft Irq 0.12%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% CRITICAL: CPU(s) average Usage: 24.08%, User 17.65%, Nice 0.00%, System 5.80%, Idle 75.92%, IOWait 0.36%, Interrupt 0.00%, Soft Irq 0.27%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00%)"); + + ASSERT_EQ(perfs.size(), 55); + + for (const auto& perf : perfs) { + ASSERT_FALSE(perf.critical_mode()); + if (perf.name().find("used#core.cpu.utilization.percentage") != + std::string::npos || + perf.name().find("used#cpu.utilization.percentage") != + std::string::npos) { + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_EQ(perf.warning_low(), 0); + if (!std::isdigit(perf.name()[0])) { + ASSERT_NEAR(perf.warning(), 10, 0.01); + ASSERT_NEAR(perf.critical(), 20, 0.01); + } else { + ASSERT_NEAR(perf.warning(), 24.1, 0.01); + ASSERT_NEAR(perf.critical(), 24.4, 0.01); + } + } else { + ASSERT_TRUE(std::isnan(perf.warning())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.critical_low())); + } + ASSERT_FALSE(perf.warning_mode()); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + ASSERT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + } +} + +TEST(proc_stat_file_test, threshold_detailed2) { + constexpr const char* test_file_path = "/tmp/proc_stat_test"; + { + ::remove(test_file_path); + std::ofstream f(test_file_path); + f.write(proc_sample, strlen(proc_sample)); + } + constexpr const char* test_file_path2 = "/tmp/proc_stat_test2"; + { + ::remove(test_file_path2); + std::ofstream f(test_file_path2); + f.write(proc_sample_2, strlen(proc_sample_2)); + } + + check_cpu_detail::proc_stat_file first_measure(test_file_path, 4); + + check_cpu_detail::proc_stat_file second_measure(test_file_path2, 4); + + auto delta = second_measure.subtract(first_measure); + + std::string output; + std::list perfs; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({"cpu-detailed":"true", "warning-core-iowait" : "0.36", "critical-core-iowait" : "0.39", "warning-average-iowait" : "0.3", "critical-average-iowait" : "0.4"})"_json; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + e_status status = + checker.compute(first_measure, second_measure, &output, &perfs); + ASSERT_EQ(status, e_status::critical); + ASSERT_EQ( + output, + R"(CRITICAL: CPU'0' Usage: 24.66%, User 17.58%, Nice 0.00%, System 5.77%, Idle 75.34%, IOWait 0.39%, Interrupt 0.00%, Soft Irq 0.91%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% WARNING: CPU'2' Usage: 24.18%, User 17.69%, Nice 0.00%, System 5.99%, Idle 75.82%, IOWait 0.38%, Interrupt 0.00%, Soft Irq 0.12%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% WARNING: CPU(s) average Usage: 24.08%, User 17.65%, Nice 0.00%, System 5.80%, Idle 75.92%, IOWait 0.36%, Interrupt 0.00%, Soft Irq 0.27%, Steal 0.00%, Guest 0.00%, Guest Nice 
0.00%)"); + + ASSERT_EQ(perfs.size(), 55); + + for (const auto& perf : perfs) { + ASSERT_FALSE(perf.critical_mode()); + if (perf.name().find("iowait#core.cpu.utilization.percentage") != + std::string::npos || + perf.name().find("iowait#cpu.utilization.percentage") != + std::string::npos) { + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_EQ(perf.warning_low(), 0); + if (!std::isdigit(perf.name()[0])) { + ASSERT_NEAR(perf.warning(), 0.3, 0.01); + ASSERT_NEAR(perf.critical(), 0.4, 0.01); + } else { + ASSERT_NEAR(perf.warning(), 0.36, 0.01); + ASSERT_NEAR(perf.critical(), 0.39, 0.01); + } + } else { + ASSERT_TRUE(std::isnan(perf.warning())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.critical_low())); + } + ASSERT_FALSE(perf.warning_mode()); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + ASSERT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + } +} + +TEST(proc_stat_file_test, threshold_detailed3) { + constexpr const char* test_file_path = "/tmp/proc_stat_test"; + { + ::remove(test_file_path); + std::ofstream f(test_file_path); + f.write(proc_sample, strlen(proc_sample)); + } + constexpr const char* test_file_path2 = "/tmp/proc_stat_test2"; + { + ::remove(test_file_path2); + std::ofstream f(test_file_path2); + f.write(proc_sample_2, strlen(proc_sample_2)); + } + + check_cpu_detail::proc_stat_file first_measure(test_file_path, 4); + + check_cpu_detail::proc_stat_file second_measure(test_file_path2, 4); + + auto delta = second_measure.subtract(first_measure); + + std::string output; + std::list perfs; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({"cpu-detailed":"true", "warning-core-iowait" : "0.36", "critical-core-iowait" : "0.39", "warning-average-iowait" : "", "critical-average-iowait" : ""})"_json; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + e_status status = + checker.compute(first_measure, second_measure, &output, &perfs); + EXPECT_EQ(status, e_status::critical); + EXPECT_EQ( + output, + R"(CRITICAL: CPU'0' Usage: 24.66%, User 17.58%, Nice 0.00%, System 5.77%, Idle 75.34%, IOWait 0.39%, Interrupt 0.00%, Soft Irq 0.91%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00% WARNING: CPU'2' Usage: 24.18%, User 17.69%, Nice 0.00%, System 5.99%, Idle 75.82%, IOWait 0.38%, Interrupt 0.00%, Soft Irq 0.12%, Steal 0.00%, Guest 0.00%, Guest Nice 0.00%)"); + + EXPECT_EQ(perfs.size(), 55); + + for (const auto& perf : perfs) { + EXPECT_FALSE(perf.critical_mode()); + if (perf.name().find("iowait#core.cpu.utilization.percentage") != + std::string::npos || + perf.name().find("iowait#cpu.utilization.percentage") != + std::string::npos) { + if (!std::isdigit(perf.name()[0])) { + EXPECT_TRUE(std::isnan(perf.critical_low())); + EXPECT_TRUE(std::isnan(perf.warning_low())); + EXPECT_TRUE(std::isnan(perf.warning())); + EXPECT_TRUE(std::isnan(perf.critical())); + } else { + EXPECT_EQ(perf.critical_low(), 0); + EXPECT_EQ(perf.warning_low(), 0); + EXPECT_NEAR(perf.warning(), 0.36, 0.01); + EXPECT_NEAR(perf.critical(), 0.39, 0.01); + } + } else { + EXPECT_TRUE(std::isnan(perf.warning())); + EXPECT_TRUE(std::isnan(perf.critical())); + 
EXPECT_TRUE(std::isnan(perf.warning_low())); + EXPECT_TRUE(std::isnan(perf.critical_low())); + } + ASSERT_FALSE(perf.warning_mode()); + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), 100); + EXPECT_EQ(perf.unit(), "%"); + EXPECT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + } +} diff --git a/agent/test/check_test.cc b/agent/test/check_test.cc index 1a09b0761cf..ca81bdb37e4 100644 --- a/agent/test/check_test.cc +++ b/agent/test/check_test.cc @@ -30,7 +30,9 @@ class dummy_check : public check { public: void start_check(const duration& timeout) override { - check::start_check(timeout); + if (!_start_check(timeout)) { + return; + } _command_timer.expires_from_now(_command_duration); _command_timer.async_wait([me = shared_from_this(), this, running_index = _get_running_check_index()]( @@ -53,11 +55,13 @@ class dummy_check : public check { : check(g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), serv, command_name, command_line, nullptr, - handler), + handler, + std::make_shared()), _command_duration(command_duration), _command_timer(*g_io_context) {} }; @@ -77,7 +81,8 @@ TEST(check_test, timeout) { serv, cmd_name, cmd_line, std::chrono::milliseconds(500), [&status, &output, &handler_call_cpt, &cond]( const std::shared_ptr&, unsigned statuss, - const std::list& perfdata, + [[maybe_unused]] const std::list& + perfdata, const std::list& outputs) { status = statuss; if (outputs.size() == 1) { @@ -114,7 +119,8 @@ TEST(check_test, no_timeout) { serv, cmd_name, cmd_line, std::chrono::milliseconds(100), [&status, &output, &handler_call_cpt, &cond]( const std::shared_ptr&, unsigned statuss, - const std::list& perfdata, + [[maybe_unused]] const std::list& + perfdata, const std::list& outputs) { status = statuss; if (outputs.size() == 1) { diff --git a/agent/test/check_uptime_test.cc b/agent/test/check_uptime_test.cc new file mode 100644 index 00000000000..d08756a387e --- /dev/null +++ b/agent/test/check_uptime_test.cc @@ -0,0 +1,305 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
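In the uptime tests that follow, warning and critical thresholds are given with an optional unit ('m', 'h', 'd', 'w') and are compared after conversion to seconds, while the output renders the uptime as days, hours, minutes and seconds. A small sketch of those two conversions, using the truncating integer arithmetic the expected strings imply (threshold_to_seconds and format_uptime are hypothetical names; the checker itself is fed milliseconds, the sketch works directly in seconds):

#include <cstdint>
#include <string>

// "4" with unit 'd' -> 345600 s, "96" with unit 'h' -> 345600 s, "2" with
// unit 'w' -> 1209600 s, matching the perfdata thresholds asserted below.
uint64_t threshold_to_seconds(uint64_t value, char unit) {
  switch (unit) {
    case 'w': return value * 7 * 86400;
    case 'd': return value * 86400;
    case 'h': return value * 3600;
    case 'm': return value * 60;
    default:  return value;  // plain seconds when no unit is given
  }
}

// 86400 * 5 + 3600 + 60 + 1 seconds -> "5d 1h 1m 1s"
std::string format_uptime(uint64_t seconds) {
  const uint64_t d = seconds / 86400;
  const uint64_t h = (seconds % 86400) / 3600;
  const uint64_t m = (seconds % 3600) / 60;
  const uint64_t s = seconds % 60;
  return std::to_string(d) + "d " + std::to_string(h) + "h " +
         std::to_string(m) + "m " + std::to_string(s) + "s";
}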
+ * + * For more information : contact@centreon.com + */ + +#include + +#include "com/centreon/common/rapidjson_helper.hh" + +#include "check_uptime.hh" + +extern std::shared_ptr g_io_context; + +using namespace com::centreon::agent; +using namespace std::string_literals; + +TEST(native_check_uptime, ok) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-uptime" : "345600", "critical-uptime" : "172800"})"_json; + + check_uptime checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + std::string output; + com::centreon::common::perfdata perf; + e_status status = + checker.compute((86400 * 5 + 3600 + 60 + 1) * 1000, &output, &perf); + ASSERT_EQ(status, e_status::ok); + ASSERT_EQ(output, "OK: System uptime is: 5d 1h 1m 1s"); + ASSERT_EQ(perf.name(), "uptime"); + ASSERT_EQ(perf.unit(), "s"); + ASSERT_EQ(perf.value(), 86400 * 5 + 3600 + 60 + 1); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.critical(), 172800); + ASSERT_EQ(perf.warning(), 345600); + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_EQ(perf.warning_low(), 0); +} + +TEST(native_check_uptime, ok_m) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-uptime" : "5760", "critical-uptime" : "2880", "unit": "m"})"_json; + + check_uptime checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + std::string output; + com::centreon::common::perfdata perf; + e_status status = + checker.compute((86400 * 5 + 3600 + 60 + 1) * 1000, &output, &perf); + ASSERT_EQ(status, e_status::ok); + ASSERT_EQ(output, "OK: System uptime is: 5d 1h 1m 1s"); + ASSERT_EQ(perf.name(), "uptime"); + ASSERT_EQ(perf.unit(), "s"); + ASSERT_EQ(perf.value(), 86400 * 5 + 3600 + 60 + 1); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.critical(), 172800); + ASSERT_EQ(perf.warning(), 345600); + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_EQ(perf.warning_low(), 0); +} + +TEST(native_check_uptime, ok_h) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-uptime" : "96", "critical-uptime" : "48", "unit": "h"})"_json; + + check_uptime checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + std::string output; + com::centreon::common::perfdata perf; + e_status status = + checker.compute((86400 * 5 + 3600 + 60 + 1) * 1000, &output, &perf); + ASSERT_EQ(status, e_status::ok); + ASSERT_EQ(output, "OK: System uptime is: 5d 1h 1m 1s"); + ASSERT_EQ(perf.name(), "uptime"); + ASSERT_EQ(perf.unit(), "s"); + ASSERT_EQ(perf.value(), 86400 * 5 + 3600 + 60 + 1); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.critical(), 172800); + ASSERT_EQ(perf.warning(), 345600); + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_EQ(perf.warning_low(), 0); +} + 
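+// The "ok", "ok_m", "ok_h", "ok_d" and "ok_w" cases feed the same uptime but
+// express the warning/critical thresholds in seconds, minutes, hours, days and
+// weeks respectively; every case still expects the warning()/critical()
+// perfdata in seconds (e.g. "96" with unit "h" -> 96 * 3600 = 345600 s and
+// "2" with unit "d" -> 2 * 86400 = 172800 s). A minimal sketch of that
+// normalization, using a hypothetical helper (illustrative only, not
+// check_uptime's actual code):
+//
+//   uint64_t threshold_to_seconds(uint64_t value, char unit) {
+//     switch (unit) {
+//       case 'm': return value * 60;
+//       case 'h': return value * 3600;
+//       case 'd': return value * 86400;
+//       case 'w': return value * 7 * 86400;
+//       default:  return value;  // "s" or no unit given
+//     }
+//   }
+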
+TEST(native_check_uptime, ok_d) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-uptime" : "4", "critical-uptime" : "2", "unit": "d"})"_json; + + check_uptime checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + std::string output; + com::centreon::common::perfdata perf; + e_status status = + checker.compute((86400 * 5 + 3600 + 60 + 1) * 1000, &output, &perf); + ASSERT_EQ(status, e_status::ok); + ASSERT_EQ(output, "OK: System uptime is: 5d 1h 1m 1s"); + ASSERT_EQ(perf.name(), "uptime"); + ASSERT_EQ(perf.unit(), "s"); + ASSERT_EQ(perf.value(), 86400 * 5 + 3600 + 60 + 1); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.critical(), 172800); + ASSERT_EQ(perf.warning(), 345600); + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_EQ(perf.warning_low(), 0); +} + +TEST(native_check_uptime, ok_w) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-uptime" : "2", "critical-uptime" : "1", "unit": "w"})"_json; + + check_uptime checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + std::string output; + com::centreon::common::perfdata perf; + e_status status = + checker.compute((86400 * 14 + 3600 + 60 + 1) * 1000, &output, &perf); + ASSERT_EQ(status, e_status::ok); + ASSERT_EQ(output, "OK: System uptime is: 14d 1h 1m 1s"); + ASSERT_EQ(perf.name(), "uptime"); + ASSERT_EQ(perf.unit(), "s"); + ASSERT_EQ(perf.value(), 86400 * 14 + 3600 + 60 + 1); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.critical(), 7 * 86400); + ASSERT_EQ(perf.warning(), 14 * 86400); + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_EQ(perf.warning_low(), 0); +} + +TEST(native_check_uptime, warning) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-uptime" : "4", "critical-uptime" : "2", "unit": "d"})"_json; + + check_uptime checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + std::string output; + com::centreon::common::perfdata perf; + e_status status = + checker.compute((86400 * 3 + 3600 + 1) * 1000, &output, &perf); + ASSERT_EQ(status, e_status::warning); + ASSERT_EQ(output, "WARNING: System uptime is: 3d 1h 0m 1s"); + ASSERT_EQ(perf.name(), "uptime"); + ASSERT_EQ(perf.unit(), "s"); + ASSERT_EQ(perf.value(), 86400 * 3 + 3600 + 1); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.critical(), 172800); + ASSERT_EQ(perf.warning(), 345600); + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_EQ(perf.warning_low(), 0); +} + +TEST(native_check_uptime, warning_bis) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-uptime" : "4", "critical-uptime" : "", "unit": "d"})"_json; + + check_uptime checker( + g_io_context, spdlog::default_logger(), {}, {}, 
"serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + std::string output; + com::centreon::common::perfdata perf; + e_status status = + checker.compute((86400 * 3 + 3600 + 1) * 1000, &output, &perf); + ASSERT_EQ(status, e_status::warning); + ASSERT_EQ(output, "WARNING: System uptime is: 3d 1h 0m 1s"); + ASSERT_EQ(perf.name(), "uptime"); + ASSERT_EQ(perf.unit(), "s"); + ASSERT_EQ(perf.value(), 86400 * 3 + 3600 + 1); + ASSERT_EQ(perf.min(), 0); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_EQ(perf.warning(), 345600); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_EQ(perf.warning_low(), 0); +} + +TEST(native_check_uptime, critical) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-uptime" : "4", "critical-uptime" : "2", "unit": "d"})"_json; + + check_uptime checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + std::string output; + com::centreon::common::perfdata perf; + e_status status = checker.compute((86400 + 3600 * 4) * 1000, &output, &perf); + ASSERT_EQ(status, e_status::critical); + ASSERT_EQ(output, "CRITICAL: System uptime is: 1d 4h 0m 0s"); + ASSERT_EQ(perf.name(), "uptime"); + ASSERT_EQ(perf.unit(), "s"); + ASSERT_EQ(perf.value(), 86400 + 3600 * 4); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.critical(), 172800); + ASSERT_EQ(perf.warning(), 345600); + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_EQ(perf.warning_low(), 0); +} + +TEST(native_check_uptime, critical_bis) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-uptime" : "", "critical-uptime" : "2", "unit": "d"})"_json; + + check_uptime checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + std::string output; + com::centreon::common::perfdata perf; + e_status status = checker.compute((86400 + 3600 * 4) * 1000, &output, &perf); + ASSERT_EQ(status, e_status::critical); + ASSERT_EQ(output, "CRITICAL: System uptime is: 1d 4h 0m 0s"); + ASSERT_EQ(perf.name(), "uptime"); + ASSERT_EQ(perf.unit(), "s"); + ASSERT_EQ(perf.value(), 86400 + 3600 * 4); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.critical(), 172800); + ASSERT_TRUE(std::isnan(perf.warning())); + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_TRUE(std::isnan(perf.warning_low())); +} diff --git a/agent/test/check_windows_cpu_test.cc b/agent/test/check_windows_cpu_test.cc new file mode 100644 index 00000000000..5826a585208 --- /dev/null +++ b/agent/test/check_windows_cpu_test.cc @@ -0,0 +1,510 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include "com/centreon/common/rapidjson_helper.hh" + +#include "check_cpu.hh" + +extern std::shared_ptr g_io_context; + +using namespace com::centreon::agent; +using namespace std::string_literals; + +TEST(native_check_cpu_windows, construct) { + M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION info; + info.IdleTime.QuadPart = 60; + info.KernelTime.QuadPart = 70; + info.UserTime.QuadPart = 25; + info.DpcTime.QuadPart = 1; + info.InterruptTime.QuadPart = 5; + check_cpu_detail::kernel_per_cpu_time k(info); + ASSERT_EQ(k.get_proportional_value(check_cpu_detail::e_proc_stat_index::user), + 0.25); + ASSERT_EQ( + k.get_proportional_value(check_cpu_detail::e_proc_stat_index::system), + 0.1); + ASSERT_EQ(k.get_proportional_value(check_cpu_detail::e_proc_stat_index::idle), + 0.6); + ASSERT_EQ( + k.get_proportional_value(check_cpu_detail::e_proc_stat_index::interrupt), + 0.05); + ASSERT_EQ(k.get_proportional_value(check_cpu_detail::e_proc_stat_index::dpc), + 0.01); + ASSERT_EQ(k.get_proportional_used(), 0.4); +} + +constexpr M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION info[2] = { + {0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0}}; + +TEST(native_check_cpu_windows, output_no_threshold) { + check_cpu_detail::kernel_cpu_time_snapshot first(info, info + 2); + + M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION info2[2]; + info2[0].IdleTime.QuadPart = 60; + info2[0].KernelTime.QuadPart = 70; + info2[0].UserTime.QuadPart = 25; + info2[0].DpcTime.QuadPart = 1; + info2[0].InterruptTime.QuadPart = 5; + + info2[1].IdleTime.QuadPart = 40; + info2[1].KernelTime.QuadPart = 50; + info2[1].UserTime.QuadPart = 45; + info2[1].DpcTime.QuadPart = 0; + info2[1].InterruptTime.QuadPart = 5; + + check_cpu_detail::kernel_cpu_time_snapshot second(info2, info2 + 2); + + std::string output; + std::list perfs; + + rapidjson::Document check_args; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + checker.compute(first, second, &output, &perfs); + ASSERT_EQ(output, "OK: CPU(s) average usage is 50.00%"); + + ASSERT_EQ(perfs.size(), 3); + + for (const auto& perf : perfs) { + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_FALSE(perf.critical_mode()); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + ASSERT_FALSE(perf.warning_mode()); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + ASSERT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + if (perf.name() == "0#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 40.0, 0.01); + } else if (perf.name() == "1#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 60.0, 0.01); + } else if (perf.name() == "cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 50.0, 
0.01); + } else { + FAIL() << "unexpected perfdata name:" << perf.name(); + } + } +} + +TEST(native_check_cpu_windows, output_no_threshold_detailed) { + check_cpu_detail::kernel_cpu_time_snapshot first(info, info + 2); + + M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION info2[2]; + info2[0].IdleTime.QuadPart = 60; + info2[0].KernelTime.QuadPart = 70; + info2[0].UserTime.QuadPart = 25; + info2[0].DpcTime.QuadPart = 1; + info2[0].InterruptTime.QuadPart = 5; + + info2[1].IdleTime.QuadPart = 40; + info2[1].KernelTime.QuadPart = 50; + info2[1].UserTime.QuadPart = 45; + info2[1].DpcTime.QuadPart = 0; + info2[1].InterruptTime.QuadPart = 5; + + check_cpu_detail::kernel_cpu_time_snapshot second(info2, info2 + 2); + + std::string output; + std::list perfs; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({"cpu-detailed":true, "warning-core" : "", "critical-core" : ""})"_json; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + checker.compute(first, second, &output, &perfs); + ASSERT_EQ(output, "OK: CPU(s) average usage is 50.00%"); + + ASSERT_EQ(perfs.size(), 18); + + for (const auto& perf : perfs) { + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_FALSE(perf.critical_mode()); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + ASSERT_FALSE(perf.warning_mode()); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + ASSERT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + + if (perf.name() == "0~user#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 25.0, 0.01); + } else if (perf.name() == "1~user#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 45.0, 0.01); + } else if (perf.name() == "user#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 35.0, 0.01); + } else if (perf.name() == "0~system#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 10.0, 0.01); + } else if (perf.name() == "1~system#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 10.0, 0.01); + } else if (perf.name() == "system#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 10.0, 0.01); + } else if (perf.name() == "0~idle#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 60.0, 0.01); + } else if (perf.name() == "1~idle#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 40.0, 0.01); + } else if (perf.name() == "idle#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 50.0, 0.01); + } else if (perf.name() == "0~interrupt#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 5.0, 0.01); + } else if (perf.name() == "1~interrupt#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 5.0, 0.01); + } else if (perf.name() == "interrupt#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 5.0, 0.01); + } else if (perf.name() == + "0~dpc_interrupt#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 1.0, 0.01); + } else if (perf.name() == + "1~dpc_interrupt#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 0.0, 0.01); + } else if (perf.name() == "dpc_interrupt#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 0.5, 0.01); 
+ } else if (perf.name() == "0~used#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 40.0, 0.01); + } else if (perf.name() == "1~used#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 60.0, 0.01); + } else if (perf.name() == "used#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 50.0, 0.01); + } else { + FAIL() << "unexpected perfdata name:" << perf.name(); + } + } +} + +TEST(native_check_cpu_windows, output_threshold) { + check_cpu_detail::kernel_cpu_time_snapshot first(info, info + 2); + + M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION info2[2]; + info2[0].IdleTime.QuadPart = 60; + info2[0].KernelTime.QuadPart = 70; + info2[0].UserTime.QuadPart = 25; + info2[0].DpcTime.QuadPart = 1; + info2[0].InterruptTime.QuadPart = 5; + + info2[1].IdleTime.QuadPart = 40; + info2[1].KernelTime.QuadPart = 50; + info2[1].UserTime.QuadPart = 45; + info2[1].DpcTime.QuadPart = 0; + info2[1].InterruptTime.QuadPart = 5; + + check_cpu_detail::kernel_cpu_time_snapshot second(info2, info2 + 2); + + std::string output; + std::list perfs; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({"cpu-detailed":"", "warning-core" : "39", "critical-core" : "59", "warning-average" : "49", "critical-average" : "60"})"_json; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + checker.compute(first, second, &output, &perfs); + ASSERT_EQ( + output, + "WARNING: CPU'0' Usage: 40.00%, User 25.00%, System 10.00%, Idle 60.00%, " + "Interrupt 5.00%, Dpc Interrupt 1.00% CRITICAL: CPU'1' Usage: 60.00%, " + "User 45.00%, System 10.00%, Idle 40.00%, Interrupt 5.00%, Dpc Interrupt " + "0.00% WARNING: CPU(s) average Usage: 50.00%, User 35.00%, System " + "10.00%, Idle 50.00%, Interrupt 5.00%, Dpc Interrupt 0.50%"); + + ASSERT_EQ(perfs.size(), 3); + + for (const auto& perf : perfs) { + if (perf.name() == "cpu.utilization.percentage") { + ASSERT_NEAR(perf.warning(), 49.0, 0.01); + ASSERT_NEAR(perf.critical(), 60.0, 0.01); + } else { + ASSERT_NEAR(perf.warning(), 39.0, 0.01); + ASSERT_NEAR(perf.critical(), 59.0, 0.01); + } + ASSERT_EQ(perf.warning_low(), 0); + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_FALSE(perf.critical_mode()); + ASSERT_FALSE(perf.warning_mode()); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + ASSERT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + if (perf.name() == "0#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 40.0, 0.01); + } else if (perf.name() == "1#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 60.0, 0.01); + } else if (perf.name() == "cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 50.0, 0.01); + } else { + FAIL() << "unexpected perfdata name:" << perf.name(); + } + } +} + +TEST(native_check_cpu_windows, output_threshold_detailed) { + check_cpu_detail::kernel_cpu_time_snapshot first(info, info + 2); + + M_SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION info2[2]; + info2[0].IdleTime.QuadPart = 60; + info2[0].KernelTime.QuadPart = 70; + info2[0].UserTime.QuadPart = 25; + info2[0].DpcTime.QuadPart = 1; + info2[0].InterruptTime.QuadPart = 5; + + info2[1].IdleTime.QuadPart = 40; + info2[1].KernelTime.QuadPart = 50; + info2[1].UserTime.QuadPart = 45; + 
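+  // The expectations in the construct test above imply that KernelTime already
+  // contains IdleTime and that the denominator is KernelTime + UserTime +
+  // InterruptTime, so with the samples above (DpcTime/InterruptTime just
+  // below) each core totals 100 ticks: core 0 is 40% used, core 1 is 60% used,
+  // i.e. a 50% average. The per-core (39/59) and average (49/60) thresholds in
+  // the check_args below are chosen to make core 0 WARNING, core 1 CRITICAL
+  // and the average WARNING.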
info2[1].DpcTime.QuadPart = 0; + info2[1].InterruptTime.QuadPart = 5; + + check_cpu_detail::kernel_cpu_time_snapshot second(info2, info2 + 2); + + std::string output; + std::list perfs; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({"cpu-detailed":"true", "warning-core" : 39, "critical-core" : 59, "warning-average" : "49", "critical-average" : "60", "warning-core-user": "30", "critical-core-user": "40", "warning-average-user": "31", "critical-average-user": "41" })"_json; + + check_cpu checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + checker.compute(first, second, &output, &perfs); + ASSERT_EQ( + output, + "WARNING: CPU'0' Usage: 40.00%, User 25.00%, System 10.00%, Idle 60.00%, " + "Interrupt 5.00%, Dpc Interrupt 1.00% CRITICAL: CPU'1' Usage: 60.00%, " + "User 45.00%, System 10.00%, Idle 40.00%, Interrupt 5.00%, Dpc Interrupt " + "0.00% WARNING: CPU(s) average Usage: 50.00%, User 35.00%, System " + "10.00%, Idle 50.00%, Interrupt 5.00%, Dpc Interrupt 0.50%"); + + ASSERT_EQ(perfs.size(), 18); + + for (const auto& perf : perfs) { + ASSERT_FALSE(perf.critical_mode()); + ASSERT_FALSE(perf.warning_mode()); + ASSERT_EQ(perf.min(), 0); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + ASSERT_EQ(perf.value_type(), com::centreon::common::perfdata::gauge); + + if (perf.name() == "0~user#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 25.0, 0.01); + ASSERT_NEAR(perf.warning(), 30.0, 0.01); + ASSERT_NEAR(perf.critical(), 40.0, 0.01); + ASSERT_EQ(perf.warning_low(), 0); + ASSERT_EQ(perf.critical_low(), 0); + } else if (perf.name() == "1~user#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 45.0, 0.01); + ASSERT_NEAR(perf.warning(), 30.0, 0.01); + ASSERT_NEAR(perf.critical(), 40.0, 0.01); + ASSERT_EQ(perf.warning_low(), 0); + ASSERT_EQ(perf.critical_low(), 0); + } else if (perf.name() == "user#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 35.0, 0.01); + ASSERT_NEAR(perf.warning(), 31.0, 0.01); + ASSERT_NEAR(perf.critical(), 41.0, 0.01); + ASSERT_EQ(perf.warning_low(), 0); + ASSERT_EQ(perf.critical_low(), 0); + } else if (perf.name() == "0~system#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 10.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == "1~system#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 10.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == "system#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 10.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == "0~idle#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 60.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + 
ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == "1~idle#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 40.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == "idle#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 50.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == "0~interrupt#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 5.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == "1~interrupt#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 5.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == "interrupt#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 5.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == + "0~dpc_interrupt#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 1.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + + } else if (perf.name() == + "1~dpc_interrupt#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 0.0, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == "dpc_interrupt#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 0.5, 0.01); + ASSERT_TRUE(std::isnan(perf.critical_low())); + ASSERT_TRUE(std::isnan(perf.critical())); + ASSERT_TRUE(std::isnan(perf.warning_low())); + ASSERT_TRUE(std::isnan(perf.warning())); + } else if (perf.name() == "0~used#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 40.0, 0.01); + ASSERT_NEAR(perf.warning(), 39.0, 0.01); + ASSERT_NEAR(perf.critical(), 59.0, 0.01); + ASSERT_EQ(perf.warning_low(), 0); + ASSERT_EQ(perf.critical_low(), 0); + } else if (perf.name() == "1~used#core.cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 60.0, 0.01); + ASSERT_NEAR(perf.warning(), 39.0, 0.01); + ASSERT_NEAR(perf.critical(), 59.0, 0.01); + ASSERT_EQ(perf.warning_low(), 0); + ASSERT_EQ(perf.critical_low(), 0); + } else if (perf.name() == "used#cpu.utilization.percentage") { + ASSERT_NEAR(perf.value(), 50.0, 0.01); + ASSERT_NEAR(perf.warning(), 49.0, 0.01); + ASSERT_NEAR(perf.critical(), 60.0, 0.01); + ASSERT_EQ(perf.warning_low(), 0); + ASSERT_EQ(perf.critical_low(), 0); + } else { + FAIL() << "unexpected perfdata name:" << perf.name(); + } + } +} + +TEST(native_check_cpu_windows, compare_kernel_dph) { + using namespace com::centreon::common::literals; + rapidjson::Document nt_check_args = + R"({"use-nt-query-system-information":true })"_json; + + check_cpu nt_checker( + g_io_context, 
spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, nt_check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + rapidjson::Document pdh_check_args = + R"({"use-nt-query-system-information":"false" })"_json; + + check_cpu pdh_checker( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, pdh_check_args, nullptr, + []([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) {}, + std::make_shared()); + + auto first_nt = nt_checker.get_cpu_time_snapshot(true); + auto first_pdh = pdh_checker.get_cpu_time_snapshot(true); + + std::this_thread::sleep_for(std::chrono::milliseconds(2500)); + + auto second_nt = nt_checker.get_cpu_time_snapshot(false); + auto second_pdh = pdh_checker.get_cpu_time_snapshot(false); + + auto diff_nt = second_nt->subtract(*first_nt); + auto diff_pdh = second_pdh->subtract(*first_pdh); + + ASSERT_EQ(diff_nt.size(), diff_pdh.size()); + auto nt_iter = diff_nt.begin(); + auto pdh_iter = diff_pdh.begin(); + auto nt_iter_end = diff_nt.end(); + for (; nt_iter != nt_iter_end; ++nt_iter, ++pdh_iter) { + ASSERT_NEAR(nt_iter->second.get_proportional_used(), + pdh_iter->second.get_proportional_used(), 0.1); + for (size_t j = 0; j < 5; ++j) { + ASSERT_NEAR(nt_iter->second.get_proportional_value(j), + pdh_iter->second.get_proportional_value(j), 0.1); + } + } +} \ No newline at end of file diff --git a/agent/test/check_windows_memory_test.cc b/agent/test/check_windows_memory_test.cc new file mode 100644 index 00000000000..cd5ca009d7f --- /dev/null +++ b/agent/test/check_windows_memory_test.cc @@ -0,0 +1,260 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include + +#include + +#include "com/centreon/common/rapidjson_helper.hh" + +#include "check_memory.hh" + +extern std::shared_ptr g_io_context; + +using namespace com::centreon::agent; +using namespace com::centreon::agent::native_check_detail; + +using namespace std::string_literals; + +class test_check : public check_memory { + public: + static MEMORYSTATUSEX mock; + static PERFORMANCE_INFORMATION perf_mock; + + test_check(const rapidjson::Value& args) + : check_memory( + g_io_context, + spdlog::default_logger(), + {}, + {}, + "serv"s, + "cmd_name"s, + "cmd_line"s, + args, + nullptr, + [](const std::shared_ptr& caller, + int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()) {} + + std::shared_ptr> + measure() override { + return std::make_shared(mock, perf_mock, + _output_flags); + } +}; + +MEMORYSTATUSEX test_check::mock = { + 0, + 0, + 16ull * 1024 * 1024 * 1024, // ullTotalPhys + 7ull * 1024 * 1024, // ullAvailPhys + 24ull * 1024 * 1024 * 1024, // ullTotalPageFile + 6ull * 1024 * 1024 * 1024, // ullAvailPageFile + 100ull * 1024 * 1024 * 1024, // ullTotalVirtual + 40ull * 1024 * 1024 * 1024}; // ullAvailVirtual + +PERFORMANCE_INFORMATION test_check::perf_mock = { + 0, // cb + 5 * 1024 * 1024, // CommitTotal + 15 * 1024 * 1024, // CommitLimit + 0, // CommitPeak + 4194304, // PhysicalTotal + 1792, // PhysicalAvailable + 0, // SystemCache + 0, // KernelTotal + 0, // KernelPaged + 0, // KernelNonpaged + 4096, // PageSize + 0, // HandleCount + 0, // ProcessCount + 0, // ThreadCount +}; + +const uint64_t _total_phys = test_check::mock.ullTotalPhys; +const uint64_t _available_phys = test_check::mock.ullAvailPhys; +const uint64_t _total_swap = + (test_check::perf_mock.CommitLimit - test_check::perf_mock.PhysicalTotal) * + test_check::perf_mock.PageSize; +const uint64_t _used_swap = (test_check::perf_mock.CommitTotal + + test_check::perf_mock.PhysicalAvailable - + test_check::perf_mock.PhysicalTotal) * + test_check::perf_mock.PageSize; + +const uint64_t _total_virtual = test_check::mock.ullTotalPageFile; +const uint64_t _available_virtual = test_check::mock.ullAvailPageFile; + +static void test_perfs(std::list perfs) { + ASSERT_EQ(perfs.size(), 9); + for (const auto& perf : perfs) { + ASSERT_EQ(perf.min(), 0); + if (perf.name() == "memory.usage.bytes") { + ASSERT_EQ(perf.value(), _total_phys - _available_phys); + ASSERT_EQ(perf.max(), _total_phys); + ASSERT_EQ(perf.unit(), "B"); + } else if (perf.name() == "memory.free.bytes") { + ASSERT_EQ(perf.value(), _available_phys); + ASSERT_EQ(perf.max(), _total_phys); + ASSERT_EQ(perf.unit(), "B"); + } else if (perf.name() == "memory.usage.percentage") { + ASSERT_NEAR(perf.value(), + (_total_phys - _available_phys) * 100.0 / _total_phys, 0.01); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + } else if (perf.name() == "swap.free.bytes") { + ASSERT_EQ(perf.max(), _total_swap); + ASSERT_EQ(perf.value(), _total_swap - _used_swap); + ASSERT_EQ(perf.unit(), "B"); + } else if (perf.name() == "swap.usage.bytes") { + ASSERT_EQ(perf.max(), _total_swap); + ASSERT_EQ(perf.value(), _used_swap); + ASSERT_EQ(perf.unit(), "B"); + } else if (perf.name() == "swap.usage.percentage") { + ASSERT_NEAR(perf.value(), _used_swap * 100.0 / _total_swap, 0.01); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + } else if (perf.name() == "virtual-memory.usage.bytes") { + ASSERT_EQ(perf.max(), _total_virtual); + ASSERT_EQ(perf.value(), (_total_virtual - 
_available_virtual)); + ASSERT_EQ(perf.unit(), "B"); + } else if (perf.name() == "virtual-memory.free.bytes") { + ASSERT_EQ(perf.max(), _total_virtual); + ASSERT_EQ(perf.value(), _available_virtual); + ASSERT_EQ(perf.unit(), "B"); + } else if (perf.name() == "virtual-memory.usage.percentage") { + ASSERT_EQ(perf.value(), + (_total_virtual - _available_virtual) * 100.0 / _total_virtual); + ASSERT_EQ(perf.max(), 100); + ASSERT_EQ(perf.unit(), "%"); + } else { + FAIL() << "unexpected perfdata name:" << perf.name(); + } + } +} + +TEST(native_check_memory_windows, output_no_threshold) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = R"({"swap": ""})"_json; + test_check to_check(check_args); + std::string output; + std::list perfs; + + com::centreon::agent::e_status status = + to_check.compute(*to_check.measure(), &output, &perfs); + + ASSERT_EQ(output, + "OK: Ram total: 16 GB, used (-buffers/cache): 15.99 GB (99.96%), " + "free: 7 MB (0.04%)"); + test_perfs(perfs); +} + +TEST(native_check_memory_windows, output_no_threshold2) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = R"({ "swap": true})"_json; + test_check to_check(check_args); + std::string output; + std::list perfs; + + com::centreon::agent::e_status status = + to_check.compute(*to_check.measure(), &output, &perfs); + + ASSERT_EQ(output, + "OK: Ram total: 16 GB, used (-buffers/cache): 15.99 GB (99.96%), " + "free: 7 MB (0.04%) " + "Swap total: 44 GB, used: 4 GB (9.11%), free: 39.99 GB (90.89%)"); + test_perfs(perfs); +} + +TEST(native_check_memory_windows, output_no_threshold3) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = R"({ "swap": true, "virtual": "true"})"_json; + test_check to_check(check_args); + std::string output; + std::list perfs; + + com::centreon::agent::e_status status = + to_check.compute(*to_check.measure(), &output, &perfs); + + ASSERT_EQ(output, + "OK: Ram total: 16 GB, used (-buffers/cache): 15.99 GB (99.96%), " + "free: 7 MB (0.04%) " + "Swap total: 44 GB, used: 4 GB (9.11%), free: 39.99 GB (90.89%) " + "Virtual total: 24 GB, used: 18 GB (75.00%), free: 6 GB (25.00%)"); + test_perfs(perfs); +} + +TEST(native_check_memory_windows, output_threshold) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-usage-free": "8388609", "critical-usage-prct": 99.99, "warning-virtual": "20000000000", "critical-virtual": 50000000000 })"_json; + test_check to_check(check_args); + std::string output; + std::list perfs; + + com::centreon::agent::e_status status = + to_check.compute(*to_check.measure(), &output, &perfs); + + ASSERT_EQ( + output, + "WARNING: Ram total: 16 GB, used (-buffers/cache): 15.99 GB (99.96%), " + "free: 7 MB (0.04%)"); + test_perfs(perfs); + for (const auto& perf : perfs) { + if (perf.name() == "memory.free.bytes") { + ASSERT_EQ(perf.warning_low(), 0); + ASSERT_EQ(perf.warning(), 8388609); + } else if (perf.name() == "memory.usage.percentage") { + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_NEAR(perf.critical(), 99.99, 0.01); + } else if (perf.name() == "virtual-memory.usage.bytes") { + ASSERT_EQ(perf.warning_low(), 0); + ASSERT_EQ(perf.warning(), 20000000000); + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_EQ(perf.critical(), 50000000000); + } + } +} + +TEST(native_check_memory_windows, output_threshold_2) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning-usage-prct": "1", 
"critical-usage-prct": "99.5", "warning-usage-free": "" })"_json; + test_check to_check(check_args); + std::string output; + std::list perfs; + + com::centreon::agent::e_status status = + to_check.compute(*to_check.measure(), &output, &perfs); + + ASSERT_EQ( + output, + "CRITICAL: Ram total: 16 GB, used (-buffers/cache): 15.99 GB (99.96%), " + "free: 7 MB (0.04%)"); + test_perfs(perfs); + for (const auto& perf : perfs) { + if (perf.name() == "memory.usage.percentage") { + ASSERT_EQ(perf.warning_low(), 0); + ASSERT_NEAR(perf.warning(), 1, 0.01); + } else if (perf.name() == "memory.usage.percentage") { + ASSERT_EQ(perf.critical_low(), 0); + ASSERT_NEAR(perf.critical(), 99.5, 0.01); + } + } +} diff --git a/agent/test/check_windows_service_test.cc b/agent/test/check_windows_service_test.cc new file mode 100644 index 00000000000..65facd72938 --- /dev/null +++ b/agent/test/check_windows_service_test.cc @@ -0,0 +1,860 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include "com/centreon/common/rapidjson_helper.hh" + +#include "check_service.hh" + +extern std::shared_ptr g_io_context; + +using namespace com::centreon::agent; +using namespace com::centreon::agent::native_check_detail; + +using namespace std::string_literals; + +class mock_service_enumerator : public service_enumerator { + public: + using enum_with_conf = std::pair; + + std::vector data; + + size_t max_enumerate = 512; + + bool _enumerate_services(serv_array& services, + DWORD* services_returned) override; + + bool _query_service_config( + LPCSTR service_name, + QUERY_SERVICE_CONFIGA& serv_conf, + const std::shared_ptr& logger) override; + + static enum_with_conf create_serv(const char* name, + const char* display, + DWORD state, + DWORD start_type) { + ENUM_SERVICE_STATUSA serv; + serv.lpServiceName = const_cast(name); + serv.lpDisplayName = const_cast(display); + serv.ServiceStatus.dwCurrentState = state; + serv.ServiceStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS; + serv.ServiceStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP; + serv.ServiceStatus.dwWin32ExitCode = 0; + serv.ServiceStatus.dwServiceSpecificExitCode = 0; + serv.ServiceStatus.dwCheckPoint = 0; + serv.ServiceStatus.dwWaitHint = 0; + + QUERY_SERVICE_CONFIGA serv_conf; + serv_conf.dwServiceType = SERVICE_WIN32_OWN_PROCESS; + serv_conf.dwStartType = start_type; + serv_conf.dwErrorControl = SERVICE_ERROR_NORMAL; + serv_conf.lpBinaryPathName = "C:\\path\\to\\service.exe"; + serv_conf.lpLoadOrderGroup = nullptr; + serv_conf.dwTagId = 0; + serv_conf.lpDependencies = nullptr; + serv_conf.lpServiceStartName = nullptr; + serv_conf.lpDisplayName = const_cast(display); + + return {serv, serv_conf}; + } +}; + +bool mock_service_enumerator::_enumerate_services(serv_array& services, + DWORD* services_returned) { + size_t to_return = std::min(max_enumerate, data.size() - _resume_handle); + to_return = std::min(to_return, service_array_size); + *services_returned = 
to_return; + for (unsigned i = 0; i < to_return; ++i) { + services[i] = data[i].first; + } + _resume_handle += to_return; + return true; +} + +bool mock_service_enumerator::_query_service_config( + LPCSTR service_name, + QUERY_SERVICE_CONFIGA& serv_conf, + const std::shared_ptr& logger) { + for (const auto& service : data) { + if (strcmp(service_name, service.first.lpServiceName) == 0) { + serv_conf = service.second; + return true; + } + } + return false; +} + +constexpr std::array expected_metrics = { + "services.stopped.count", "services.starting.count", + "services.stopping.count", "services.running.count", + "services.continuing.count", "services.pausing.count", + "services.paused.count"}; + +TEST(check_service, service_no_threshold_all_running) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service1", "desc serv1", + SERVICE_RUNNING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service2", "desc serv2", + SERVICE_RUNNING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service3", "desc serv3", + SERVICE_RUNNING, SERVICE_AUTO_START), + }; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = "{ }"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::ok); + + EXPECT_EQ(output, "OK: all services are running"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), expected_metrics.end(), + perf.name()), + expected_metrics.end()); + if (perf.name() == "services.running.count") { + EXPECT_EQ(perf.value(), 3.0); + } else { + EXPECT_EQ(perf.value(), 0.0); + } + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} + +TEST(check_service, service_no_threshold_one_by_state) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service_stopped", + "desc service_stopped", + SERVICE_STOPPED, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_start_pending", "desc service_start_pending", + SERVICE_START_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_stop_pending", "desc service_stop_pending", + SERVICE_STOP_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_running", + "desc service_running", + SERVICE_RUNNING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_continue_pending", "desc service_continue_pending", + SERVICE_CONTINUE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_pause_pending", " desc service_pause_pending", + SERVICE_PAUSE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_paused", + "desc service_paused", + SERVICE_PAUSED, SERVICE_AUTO_START), + }; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + 
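+  // Each check_service test swaps the check_service::_enumerator_constructor
+  // factory hook so that measure() walks the in-memory fixture above instead
+  // of the real Windows service control manager: mock_service_enumerator
+  // overrides _enumerate_services() and _query_service_config() to serve the
+  // entries stored in `data`.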
check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = "{ }"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::ok); + + EXPECT_EQ(output, + "OK: services: 1 stopped, 1 starting, 1 stopping, 1 running, " + "1 continuing, 1 pausing, 1 paused"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), expected_metrics.end(), + perf.name()), + expected_metrics.end()); + EXPECT_EQ(perf.value(), 1.0); + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} + +TEST(check_service, service_filter_exclude_all_service) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service_stopped", + "desc service_stopped", + SERVICE_STOPPED, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_start_pending", "desc service_start_pending ", + SERVICE_START_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_stop_pending", "desc service_stop_pending", + SERVICE_STOP_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_running", + "desc service_running", + SERVICE_RUNNING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_continue_pending", "desc service_continue_pending", + SERVICE_CONTINUE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_pause_pending", "desc service_pause_pending", + SERVICE_PAUSE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_paused", + "desc service_paused", + SERVICE_PAUSED, SERVICE_AUTO_START)}; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = R"({ "exclude-name": ".*" })"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::critical); + + EXPECT_EQ(output, "CRITICAL: no service found"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), expected_metrics.end(), + perf.name()), + expected_metrics.end()); + EXPECT_EQ(perf.value(), 0.0); + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} + +TEST(check_service, service_filter_allow_some_service) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service_stopped", + "desc service_stopped", + SERVICE_STOPPED, 
SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_start_pending", "desc service_start_pending", + SERVICE_START_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_stop_pending", "desc service_stop_pending", + SERVICE_STOP_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_running", + "desc service_running", + SERVICE_RUNNING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_continue_pending", "desc service_continue_pending", + SERVICE_CONTINUE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_pause_pending", "desc service_pause_pending", + SERVICE_PAUSE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_paused", + "desc service_paused", + SERVICE_PAUSED, SERVICE_AUTO_START)}; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = R"({ "filter-name": "service_s.*" })"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::ok); + + EXPECT_EQ(output, "OK: services: 1 stopped, 1 starting, 1 stopping"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), expected_metrics.end(), + perf.name()), + expected_metrics.end()); + if (perf.name() == "services.stopped.count" || + perf.name() == "services.starting.count" || + perf.name() == "services.stopping.count") { + EXPECT_EQ(perf.value(), 1.0); + } else { + EXPECT_EQ(perf.value(), 0.0); + } + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} + +TEST(check_service, service_filter_exclude_some_service) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service_stopped", + "desc service_stopped", + SERVICE_STOPPED, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_start_pending", "desc service_start_pending", + SERVICE_START_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_stop_pending", "desc service_stop_pending", + SERVICE_STOP_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_running", + "desc service_running", + SERVICE_RUNNING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_continue_pending", "desc service_continue_pending", + SERVICE_CONTINUE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_pause_pending", "desc service_pause_pending", + SERVICE_PAUSE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_paused", + "desc service_paused", + SERVICE_PAUSED, SERVICE_AUTO_START)}; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = R"({ 
"exclude-name": "service_s.*" })"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::ok); + + EXPECT_EQ(output, + "OK: services: 1 running, 1 continuing, 1 pausing, 1 paused"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), expected_metrics.end(), + perf.name()), + expected_metrics.end()); + if (perf.name() == "services.stopped.count" || + perf.name() == "services.starting.count" || + perf.name() == "services.stopping.count") { + EXPECT_EQ(perf.value(), 0.0); + } else { + EXPECT_EQ(perf.value(), 1.0); + } + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} + +TEST(check_service, service_filter_allow_some_service_warning_running) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service_stopped", + "desc service_stopped", + SERVICE_STOPPED, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_start_pending", "desc service_start_pending", + SERVICE_START_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_stop_pending", "desc service_stop_pending", + SERVICE_STOP_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_running", + "desc service_running", + SERVICE_RUNNING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_continue_pending", "desc service_continue_pending", + SERVICE_CONTINUE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_pause_pending", "desc service_pause_pending", + SERVICE_PAUSE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_paused", + "desc service_paused", + SERVICE_PAUSED, SERVICE_AUTO_START)}; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "filter-name": "service_s.*", "warning-total-running": "5", "critical-total-running": "" })"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::warning); + + EXPECT_EQ(output, "WARNING: services: 1 stopped, 1 starting, 1 stopping"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), expected_metrics.end(), + perf.name()), + expected_metrics.end()); + if (perf.name() == "services.stopped.count" || + perf.name() == "services.starting.count" || + perf.name() == "services.stopping.count") { + EXPECT_EQ(perf.value(), 1.0); + } else { + EXPECT_EQ(perf.value(), 0.0); + } + EXPECT_EQ(perf.min(), 
0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} + +TEST(check_service, service_filter_allow_some_service_warning_stopped) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service_stopped", + "desc service_stopped", + SERVICE_STOPPED, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_start_pending", "desc service_start_pending", + SERVICE_START_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_stopped2", + "desc service_stopped2", + SERVICE_STOPPED, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_running", + "desc service_running", + SERVICE_RUNNING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_continue_pending", "desc service_continue_pending", + SERVICE_CONTINUE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_pause_pending", "desc service_pause_pending", + SERVICE_PAUSE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_paused", + "desc service_paused", + SERVICE_PAUSED, SERVICE_AUTO_START), + }; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "filter-name": "service_s.*", "warning-total-stopped": 1 })"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::warning); + + EXPECT_EQ(output, "WARNING: services: 2 stopped, 1 starting"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), expected_metrics.end(), + perf.name()), + expected_metrics.end()); + if (perf.name() == "services.stopped.count") { + EXPECT_EQ(perf.value(), 2.0); + } else if (perf.name() == "services.starting.count") { + EXPECT_EQ(perf.value(), 1.0); + } else { + EXPECT_EQ(perf.value(), 0.0); + } + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} + +TEST(check_service, service_filter_allow_some_service_critical_state) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service_stopped", + "desc service_stopped", + SERVICE_STOPPED, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_start_pending", "desc service_start_pending", + SERVICE_START_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_stopping", "desc service_stopping", SERVICE_STOP_PENDING, + SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_running", + "desc service_running", + SERVICE_RUNNING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_continue_pending", "desc service_continue_pending", + SERVICE_CONTINUE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_pause_pending", "desc service_pause_pending", + SERVICE_PAUSE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_paused", + "desc 
service_paused", + SERVICE_PAUSED, SERVICE_AUTO_START)}; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "filter-name": "service_s.*", "critical-state": "stop.*" })"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::critical); + + EXPECT_EQ(output, + "CRITICAL: services: 1 stopped, 1 starting, 1 stopping " + "CRITICAL: service_stopped is stopped CRITICAL: service_stopping " + "is stopping"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), expected_metrics.end(), + perf.name()), + expected_metrics.end()); + if (perf.name() == "services.stopped.count" || + perf.name() == "services.starting.count" || + perf.name() == "services.stopping.count") { + EXPECT_EQ(perf.value(), 1.0); + } else { + EXPECT_EQ(perf.value(), 0.0); + } + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} + +TEST(check_service, service_filter_start_auto_true) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service_stopped", + "desc service_stopped", + SERVICE_STOPPED, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_start_pending", + "desc service_start_pending", + SERVICE_START_PENDING, 0), + mock_service_enumerator::create_serv( + "service_stopping", "desc service_stopping", SERVICE_STOP_PENDING, + SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_running", "desc service_running", SERVICE_RUNNING, 0), + mock_service_enumerator::create_serv( + "service_continue_pending", "desc service_continue_pending", + SERVICE_CONTINUE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_pause_pending", "desc service_pause_pending", + SERVICE_PAUSE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_paused", "desc service_paused", SERVICE_PAUSED, 0)}; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = R"({ "start-auto": true })"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::ok); + + EXPECT_EQ(output, + "OK: services: 1 stopped, 1 stopping, 1 continuing, 1 pausing"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), 
expected_metrics.end(), + perf.name()), + expected_metrics.end()); + if (perf.name() == "services.stopped.count" || + perf.name() == "services.continuing.count" || + perf.name() == "services.pausing.count" || + perf.name() == "services.stopping.count") { + EXPECT_EQ(perf.value(), 1.0); + } else { + EXPECT_EQ(perf.value(), 0.0); + } + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} + +TEST(check_service, service_filter_start_auto_false) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service_stopped", + "desc service_stopped", + SERVICE_STOPPED, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_start_pending", + "desc service_start_pending", + SERVICE_START_PENDING, 0), + mock_service_enumerator::create_serv( + "service_stopping", "desc service_stopping", SERVICE_STOP_PENDING, + SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_running", "desc service_running", SERVICE_RUNNING, 0), + mock_service_enumerator::create_serv( + "service_continue_pending", "desc service_continue_pending", + SERVICE_CONTINUE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_pause_pending", "desc service_pause_pending", + SERVICE_PAUSE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_paused", "desc service_paused", SERVICE_PAUSED, 0)}; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = R"({ "start-auto": false })"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::ok); + + EXPECT_EQ(output, "OK: services: 1 starting, 1 running, 1 paused"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), expected_metrics.end(), + perf.name()), + expected_metrics.end()); + if (perf.name() == "services.starting.count" || + perf.name() == "services.running.count" || + perf.name() == "services.paused.count") { + EXPECT_EQ(perf.value(), 1.0); + } else { + EXPECT_EQ(perf.value(), 0.0); + } + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} + +TEST(check_service, + service_filter_allow_some_service_filtered_by_display_warning_running) { + mock_service_enumerator::enum_with_conf data[] = { + mock_service_enumerator::create_serv("service_stopped", + "desc service_stopped", + SERVICE_STOPPED, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_start_pending", "desc service_start_pending", + SERVICE_START_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_stop_pending", "desc service_stop_pending", + SERVICE_STOP_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_running", + "desc service_running", + SERVICE_RUNNING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_continue_pending", "desc 
service_continue_pending", + SERVICE_CONTINUE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv( + "service_pause_pending", "desc service_pause_pending", + SERVICE_PAUSE_PENDING, SERVICE_AUTO_START), + mock_service_enumerator::create_serv("service_paused", + "desc service_paused", + SERVICE_PAUSED, SERVICE_AUTO_START)}; + + mock_service_enumerator mock; + mock.data = {std::begin(data), std::end(data)}; + + check_service::_enumerator_constructor = [&mock]() { + return std::make_unique(mock); + }; + + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "filter-display": "desc service_s.*", "warning-total-running": "5" })"_json; + + check_service test_check( + g_io_context, spdlog::default_logger(), {}, {}, "serv"s, "cmd_name"s, + "cmd_line"s, check_args, nullptr, + [](const std::shared_ptr& caller, int status, + const std::list& perfdata, + const std::list& outputs) {}, + std::make_shared()); + + auto snap = test_check.measure(); + + std::string output; + std::list perfs; + e_status status = test_check.compute(*snap, &output, &perfs); + + EXPECT_EQ(status, e_status::warning); + + EXPECT_EQ(output, "WARNING: services: 1 stopped, 1 starting, 1 stopping"); + + EXPECT_EQ(perfs.size(), 7); + + for (const com::centreon::common::perfdata& perf : perfs) { + EXPECT_NE(std::find(expected_metrics.begin(), expected_metrics.end(), + perf.name()), + expected_metrics.end()); + if (perf.name() == "services.stopped.count" || + perf.name() == "services.starting.count" || + perf.name() == "services.stopping.count") { + EXPECT_EQ(perf.value(), 1.0); + } else { + EXPECT_EQ(perf.value(), 0.0); + } + EXPECT_EQ(perf.min(), 0); + EXPECT_EQ(perf.max(), snap->get_metric(e_service_metric::total)); + } +} diff --git a/agent/test/drive_size_test.cc b/agent/test/drive_size_test.cc new file mode 100644 index 00000000000..f9757f990c0 --- /dev/null +++ b/agent/test/drive_size_test.cc @@ -0,0 +1,670 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include +#include +#include +#include +#include +#include + +#include "com/centreon/common/rapidjson_helper.hh" + +#include "drive_size.hh" + +extern std::shared_ptr g_io_context; + +using namespace com::centreon::agent; +using namespace com::centreon::agent::check_drive_size_detail; + +struct sample { + std::string_view fs; + std::string_view mount_point; + uint64_t fs_type; + uint64_t used; + uint64_t total; +}; + +std::array _samples = { + {{"udev", "/dev", + check_drive_size_detail::e_drive_fs_type::hr_storage_fixed_disk | + check_drive_size_detail::e_drive_fs_type::hr_fs_other, + 0, 6024132000}, + + {"tmpfs", "/run", + check_drive_size_detail::e_drive_fs_type::hr_storage_fixed_disk | + check_drive_size_detail::e_drive_fs_type::hr_fs_other, + 16760000, 1212868000}, + {"/dev/sda12", "/", + check_drive_size_detail::e_drive_fs_type::hr_storage_fixed_disk | + check_drive_size_detail::e_drive_fs_type::hr_fs_linux_ext4, + 136830444000, 346066920000}, + {"tmpfs", "/dev/shm", + check_drive_size_detail::e_drive_fs_type::hr_storage_fixed_disk | + check_drive_size_detail::e_drive_fs_type::hr_fs_other, + 0, 6072708000}, + {"tmpfs", "/run/lock", + check_drive_size_detail::e_drive_fs_type::hr_storage_fixed_disk | + check_drive_size_detail::e_drive_fs_type::hr_fs_other, + 4000, 5116000}, + {"tmpfs", "/sys/fs/cgroup", + check_drive_size_detail::e_drive_fs_type::hr_storage_fixed_disk | + check_drive_size_detail::e_drive_fs_type::hr_fs_other, + 0, 6072708000}, + {"/dev/sda11", "/boot/efi", + check_drive_size_detail::e_drive_fs_type::hr_storage_fixed_disk | + check_drive_size_detail::e_drive_fs_type::hr_fs_fat, + 24000, 524248000}, + {"/dev/sda5", "/data", + check_drive_size_detail::e_drive_fs_type::hr_storage_fixed_disk | + check_drive_size_detail::e_drive_fs_type::hr_fs_fat32, + 3072708000, 6072708000}, + {"tmpfs", "/run/user/1001", + check_drive_size_detail::e_drive_fs_type::hr_storage_fixed_disk | + check_drive_size_detail::e_drive_fs_type::hr_fs_other, + 100000, 1214440000}}}; + +class drive_size_test : public ::testing::Test { + public: + static std::list compute( + filter& filt, + const std::shared_ptr& logger); + + static void SetUpTestCase() { drive_size_thread::os_fs_stats = compute; } + static void TearDownTestCase() { check_drive_size::thread_kill(); } +}; + +std::list drive_size_test::compute( + filter& filt, + const std::shared_ptr&) { + std::list result; + for (const auto& s : _samples) { + if (filt.is_allowed(s.fs, s.mount_point, + static_cast(s.fs_type))) { + result.emplace_back(s.fs, s.mount_point, s.used, s.total); + } + } + return result; +} + +using namespace std::string_literals; + +TEST_F(drive_size_test, test_fs_filter1) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning" : "1000000", "critical" : 20000000, "unit": "b", + "filter-type": "^hrfsother$"})"_json; + + absl::Mutex wait_m; + std::list perfs; + std::string output; + + auto is_complete = [&]() { return !perfs.empty(); }; + + auto debug_logger = spdlog::default_logger(); + + auto checker = std::make_shared( + g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), "serv"s, "cmd_name"s, "cmd_line"s, check_args, + nullptr, + [&]([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) { + absl::MutexLock lck(&wait_m); + perfs = perfdata; + output = 
outputs.front(); + }, + std::make_shared()); + + checker->start_check(std::chrono::seconds(1)); + + absl::MutexLock lck(&wait_m); + wait_m.Await(absl::Condition(&is_complete)); + + ASSERT_EQ(output, "WARNING: /run Total: 1G Used: 0G Free: 1G"); + ASSERT_EQ(perfs.size(), 6); + + for (const auto& p : perfs) { + ASSERT_EQ(p.unit(), "B"); + ASSERT_EQ(p.min(), 0); + ASSERT_EQ(p.warning_low(), 0); + ASSERT_EQ(p.critical_low(), 0); + ASSERT_EQ(p.warning(), 1000000); + ASSERT_EQ(p.critical(), 20000000); + if (p.name() == "used_/dev") { + ASSERT_EQ(p.value(), 0); + ASSERT_EQ(p.max(), 6024132000); + } else if (p.name() == "used_/run") { + ASSERT_EQ(p.value(), 16760000); + ASSERT_EQ(p.max(), 1212868000); + } else if (p.name() == "used_/dev/shm") { + ASSERT_EQ(p.value(), 0); + ASSERT_EQ(p.max(), 6072708000); + } else if (p.name() == "used_/run/lock") { + ASSERT_EQ(p.value(), 4000); + ASSERT_EQ(p.max(), 5116000); + } else if (p.name() == "used_/sys/fs/cgroup") { + ASSERT_EQ(p.value(), 0); + ASSERT_EQ(p.max(), 6072708000); + } else if (p.name() == "used_/run/user/1001") { + ASSERT_EQ(p.value(), 100000); + ASSERT_EQ(p.max(), 1214440000); + } else { + FAIL() << "Unexpected perfdata name: " << p.name(); + } + } +} + +TEST_F(drive_size_test, test_fs_filter_percent) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning" : "1", "critical" : 5, "unit": "%", + "filter-type": "^hrfsother$"})"_json; + + absl::Mutex wait_m; + std::list perfs; + std::string output; + + auto is_complete = [&]() { return !perfs.empty(); }; + + auto debug_logger = spdlog::default_logger(); + + auto checker = std::make_shared( + g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), "serv"s, "cmd_name"s, "cmd_line"s, check_args, + nullptr, + [&]([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) { + absl::MutexLock lck(&wait_m); + perfs = perfdata; + output = outputs.front(); + }, + std::make_shared()); + + checker->start_check(std::chrono::seconds(1)); + + absl::MutexLock lck(&wait_m); + wait_m.Await(absl::Condition(&is_complete)); + + ASSERT_EQ(output, "WARNING: /run Total: 1G Used: 1.38% Free: 98.62%"); + ASSERT_EQ(perfs.size(), 6); + + for (const auto& p : perfs) { + ASSERT_EQ(p.unit(), "%"); + ASSERT_EQ(p.min(), 0); + ASSERT_EQ(p.warning_low(), 0); + ASSERT_EQ(p.critical_low(), 0); + ASSERT_EQ(p.warning(), 1); + ASSERT_EQ(p.critical(), 5); + if (p.name() == "used_/dev") { + ASSERT_EQ(p.value(), 0); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/run") { + ASSERT_NEAR(p.value(), 1.38, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/dev/shm") { + ASSERT_NEAR(p.value(), 0, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/run/lock") { + ASSERT_NEAR(p.value(), 0.08, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/sys/fs/cgroup") { + ASSERT_EQ(p.value(), 0); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/run/user/1001") { + ASSERT_NEAR(p.value(), 0, 0.01); + ASSERT_EQ(p.max(), 100); + } else { + FAIL() << "Unexpected perfdata name: " << p.name(); + } + } +} + +TEST_F(drive_size_test, test_fs_filter2) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning" : "1000000", "critical" : 20000000, "unit": "b", + "filter-type": "^(hrfsfat$|hrfsfat32)$"})"_json; + + absl::Mutex wait_m; + std::list perfs; + 
std::string output; + + auto is_complete = [&]() { return !perfs.empty(); }; + + auto debug_logger = spdlog::default_logger(); + + auto checker = std::make_shared( + g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), "serv"s, "cmd_name"s, "cmd_line"s, check_args, + nullptr, + [&]([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) { + absl::MutexLock lck(&wait_m); + perfs = perfdata; + output = outputs.front(); + }, + std::make_shared()); + + checker->start_check(std::chrono::seconds(1)); + + absl::MutexLock lck(&wait_m); + wait_m.Await(absl::Condition(&is_complete)); + + ASSERT_EQ(output, "CRITICAL: /data Total: 5G Used: 2G Free: 2G"); + ASSERT_EQ(perfs.size(), 2); + + for (const auto& p : perfs) { + ASSERT_EQ(p.unit(), "B"); + ASSERT_EQ(p.min(), 0); + ASSERT_EQ(p.warning_low(), 0); + ASSERT_EQ(p.critical_low(), 0); + ASSERT_EQ(p.warning(), 1000000); + ASSERT_EQ(p.critical(), 20000000); + if (p.name() == "used_/boot/efi") { + ASSERT_EQ(p.value(), 24000); + ASSERT_EQ(p.max(), 524248000); + } else if (p.name() == "used_/data") { + ASSERT_EQ(p.value(), 3072708000); + ASSERT_EQ(p.max(), 6072708000); + } else { + FAIL() << "Unexpected perfdata name: " << p.name(); + } + } +} + +TEST_F(drive_size_test, test_fs_filter_percent_2) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning" : "1", "critical" : "5", "unit": "%", + "filter-type": "^hrfsother$", "filter-fs": "^tmp.*$"})"_json; + + absl::Mutex wait_m; + std::list perfs; + std::string output; + + auto is_complete = [&]() { return !perfs.empty(); }; + + auto debug_logger = spdlog::default_logger(); + + auto checker = std::make_shared( + g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), "serv"s, "cmd_name"s, "cmd_line"s, check_args, + nullptr, + [&]([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) { + absl::MutexLock lck(&wait_m); + perfs = perfdata; + output = outputs.front(); + }, + std::make_shared()); + + checker->start_check(std::chrono::seconds(1)); + + absl::MutexLock lck(&wait_m); + wait_m.Await(absl::Condition(&is_complete)); + + ASSERT_EQ(output, "WARNING: /run Total: 1G Used: 1.38% Free: 98.62%"); + ASSERT_EQ(perfs.size(), 5); + + for (const auto& p : perfs) { + ASSERT_EQ(p.unit(), "%"); + ASSERT_EQ(p.min(), 0); + ASSERT_EQ(p.warning_low(), 0); + ASSERT_EQ(p.critical_low(), 0); + ASSERT_EQ(p.warning(), 1); + ASSERT_EQ(p.critical(), 5); + if (p.name() == "used_/run") { + ASSERT_NEAR(p.value(), 1.38, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/dev/shm") { + ASSERT_NEAR(p.value(), 0, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/run/lock") { + ASSERT_NEAR(p.value(), 0.08, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/sys/fs/cgroup") { + ASSERT_EQ(p.value(), 0); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/run/user/1001") { + ASSERT_NEAR(p.value(), 0, 0.01); + ASSERT_EQ(p.max(), 100); + } else { + FAIL() << "Unexpected perfdata name: " << p.name(); + } + } +} + +TEST_F(drive_size_test, test_fs_filter_percent_3) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning" : "1", "critical" : "5", "unit": "%", + "filter-type": 
"^hrfsother$", "filter-fs": "tmpfs", "filter-mountpoint":"^/run/.*$" })"_json; + + absl::Mutex wait_m; + std::list perfs; + std::string output; + + auto is_complete = [&]() { return !perfs.empty(); }; + + auto debug_logger = spdlog::default_logger(); + + auto checker = std::make_shared( + g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), "serv"s, "cmd_name"s, "cmd_line"s, check_args, + nullptr, + [&]([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) { + absl::MutexLock lck(&wait_m); + perfs = perfdata; + output = outputs.front(); + }, + std::make_shared()); + + checker->start_check(std::chrono::seconds(1)); + + absl::MutexLock lck(&wait_m); + wait_m.Await(absl::Condition(&is_complete)); + + ASSERT_EQ(output, "OK: All storages are ok"); + ASSERT_EQ(perfs.size(), 2); + + for (const auto& p : perfs) { + ASSERT_EQ(p.unit(), "%"); + ASSERT_EQ(p.min(), 0); + ASSERT_EQ(p.warning_low(), 0); + ASSERT_EQ(p.critical_low(), 0); + ASSERT_EQ(p.warning(), 1); + ASSERT_EQ(p.critical(), 5); + if (p.name() == "used_/run") { + ASSERT_NEAR(p.value(), 1.38, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/run/lock") { + ASSERT_NEAR(p.value(), 0.08, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/run/user/1001") { + ASSERT_NEAR(p.value(), 0, 0.01); + ASSERT_EQ(p.max(), 100); + } else { + FAIL() << "Unexpected perfdata name: " << p.name(); + } + } +} + +TEST_F(drive_size_test, test_fs_filter_percent_4) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning" : "1", "critical" : "5", "unit": "%", + "filter-type": "^hrfsother$", "filter-fs": "tmpfs", "filter-mountpoint":"^/run.*$", "exclude-mountpoint": ".*lock.*" })"_json; + + absl::Mutex wait_m; + std::list perfs; + std::string output; + + auto is_complete = [&]() { return !perfs.empty(); }; + + auto debug_logger = spdlog::default_logger(); + + auto checker = std::make_shared( + g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), "serv"s, "cmd_name"s, "cmd_line"s, check_args, + nullptr, + [&]([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) { + absl::MutexLock lck(&wait_m); + perfs = perfdata; + output = outputs.front(); + }, + std::make_shared()); + + checker->start_check(std::chrono::seconds(1)); + { + absl::MutexLock lck(&wait_m); + wait_m.Await(absl::Condition(&is_complete)); + + ASSERT_EQ(output, "WARNING: /run Total: 1G Used: 1.38% Free: 98.62%"); + ASSERT_EQ(perfs.size(), 2); + + for (const auto& p : perfs) { + ASSERT_EQ(p.unit(), "%"); + ASSERT_EQ(p.min(), 0); + ASSERT_EQ(p.warning_low(), 0); + ASSERT_EQ(p.critical_low(), 0); + ASSERT_EQ(p.warning(), 1); + ASSERT_EQ(p.critical(), 5); + if (p.name() == "used_/run") { + ASSERT_NEAR(p.value(), 1.38, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/run/user/1001") { + ASSERT_NEAR(p.value(), 0, 0.01); + ASSERT_EQ(p.max(), 100); + } else { + FAIL() << "Unexpected perfdata name: " << p.name(); + } + } + } + // recheck to validate filter cache + std::string output_save = output; + std::list perfs_save = perfs; + + absl::MutexLock lck(&wait_m); + wait_m.Await(absl::Condition(&is_complete)); + + ASSERT_EQ(output, output_save); + ASSERT_EQ(perfs, perfs_save); +} + 
+TEST_F(drive_size_test, test_fs_filter_percent_5) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning" : "30", "critical" : "50", "unit": "%", + "exclude-fs": "tmpfs", "exclude-mountpoint":"/dev" })"_json; + + absl::Mutex wait_m; + std::list perfs; + std::string output; + + auto is_complete = [&]() { return !perfs.empty(); }; + + auto debug_logger = spdlog::default_logger(); + + auto checker = std::make_shared( + g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), "serv"s, "cmd_name"s, "cmd_line"s, check_args, + nullptr, + [&]([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) { + absl::MutexLock lck(&wait_m); + perfs = perfdata; + output = outputs.front(); + }, + std::make_shared()); + + checker->start_check(std::chrono::seconds(1)); + + absl::MutexLock lck(&wait_m); + wait_m.Await(absl::Condition(&is_complete)); + + ASSERT_EQ(output, + "WARNING: / Total: 322G Used: 39.54% Free: 60.46% CRITICAL: /data " + "Total: 5G Used: 50.60% Free: 49.40%"); + ASSERT_EQ(perfs.size(), 3); + + for (const auto& p : perfs) { + ASSERT_EQ(p.unit(), "%"); + ASSERT_EQ(p.min(), 0); + ASSERT_EQ(p.warning_low(), 0); + ASSERT_EQ(p.critical_low(), 0); + ASSERT_EQ(p.warning(), 30); + ASSERT_EQ(p.critical(), 50); + if (p.name() == "used_/") { + ASSERT_NEAR(p.value(), 39.54, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/data") { + ASSERT_NEAR(p.value(), 50.60, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/boot/efi") { + ASSERT_NEAR(p.value(), 0.0045, 0.0001); + ASSERT_EQ(p.max(), 100); + } else { + FAIL() << "Unexpected perfdata name: " << p.name(); + } + } +} + +TEST_F(drive_size_test, test_fs_filter_percent_6) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning" : "30", "critical" : "", "unit": "%", + "exclude-fs": "tmpfs", "exclude-mountpoint":"/dev" })"_json; + + absl::Mutex wait_m; + std::list perfs; + std::string output; + + auto is_complete = [&]() { return !perfs.empty(); }; + + auto debug_logger = spdlog::default_logger(); + + auto checker = std::make_shared( + g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), "serv"s, "cmd_name"s, "cmd_line"s, check_args, + nullptr, + [&]([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) { + absl::MutexLock lck(&wait_m); + perfs = perfdata; + output = outputs.front(); + }, + std::make_shared()); + + checker->start_check(std::chrono::seconds(1)); + + absl::MutexLock lck(&wait_m); + wait_m.Await(absl::Condition(&is_complete)); + + ASSERT_EQ(output, + "WARNING: / Total: 322G Used: 39.54% Free: 60.46% WARNING: /data " + "Total: 5G Used: 50.60% Free: 49.40%"); + ASSERT_EQ(perfs.size(), 3); + + for (const auto& p : perfs) { + ASSERT_EQ(p.unit(), "%"); + ASSERT_EQ(p.min(), 0); + ASSERT_EQ(p.warning_low(), 0); + ASSERT_TRUE(std::isnan(p.critical_low())); + ASSERT_EQ(p.warning(), 30); + ASSERT_TRUE(std::isnan(p.critical())); + if (p.name() == "used_/") { + ASSERT_NEAR(p.value(), 39.54, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/data") { + ASSERT_NEAR(p.value(), 50.60, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "used_/boot/efi") { + ASSERT_NEAR(p.value(), 0.0045, 
0.0001); + ASSERT_EQ(p.max(), 100); + } else { + FAIL() << "Unexpected perfdata name: " << p.name(); + } + } +} + +TEST_F(drive_size_test, test_fs_filter_free_percent) { + using namespace com::centreon::common::literals; + rapidjson::Document check_args = + R"({ "warning" : "70", "critical" : "50", "unit": "%", "free": true, + "exclude-fs": "tmpfs", "exclude-mountpoint":"/dev" })"_json; + + absl::Mutex wait_m; + std::list perfs; + std::string output; + + auto is_complete = [&]() { return !perfs.empty(); }; + + auto debug_logger = spdlog::default_logger(); + + auto checker = std::make_shared( + g_io_context, spdlog::default_logger(), std::chrono::system_clock::now(), + std::chrono::seconds(1), "serv"s, "cmd_name"s, "cmd_line"s, check_args, + nullptr, + [&]([[maybe_unused]] const std::shared_ptr& caller, + [[maybe_unused]] int status, + [[maybe_unused]] const std::list& + perfdata, + [[maybe_unused]] const std::list& outputs) { + absl::MutexLock lck(&wait_m); + perfs = perfdata; + output = outputs.front(); + }, + std::make_shared()); + + checker->start_check(std::chrono::seconds(1)); + + absl::MutexLock lck(&wait_m); + wait_m.Await(absl::Condition(&is_complete)); + + ASSERT_EQ(output, + "WARNING: / Total: 322G Used: 39.54% Free: 60.46% CRITICAL: /data " + "Total: 5G Used: 50.60% Free: 49.40%"); + ASSERT_EQ(perfs.size(), 3); + + for (const auto& p : perfs) { + ASSERT_EQ(p.unit(), "%"); + ASSERT_EQ(p.min(), 0); + ASSERT_EQ(p.warning_low(), 0); + ASSERT_EQ(p.critical_low(), 0); + ASSERT_EQ(p.warning(), 70); + ASSERT_EQ(p.critical(), 50); + if (p.name() == "free_/") { + ASSERT_NEAR(p.value(), 60.46, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "free_/data") { + ASSERT_NEAR(p.value(), 49.40, 0.01); + ASSERT_EQ(p.max(), 100); + } else if (p.name() == "free_/boot/efi") { + ASSERT_NEAR(p.value(), 99.99, 0.01); + ASSERT_EQ(p.max(), 100); + } else { + FAIL() << "Unexpected perfdata name: " << p.name(); + } + } +} diff --git a/agent/test/scheduler_test.cc b/agent/test/scheduler_test.cc index 5af1a86f4dd..b2bd15bc81b 100644 --- a/agent/test/scheduler_test.cc +++ b/agent/test/scheduler_test.cc @@ -17,6 +17,7 @@ */ #include +#include "check.hh" #include "scheduler.hh" @@ -37,21 +38,25 @@ class tempo_check : public check { tempo_check(const std::shared_ptr& io_context, const std::shared_ptr& logger, time_point exp, + duration check_interval, const std::string& serv, const std::string& cmd_name, const std::string& cmd_line, const engine_to_agent_request_ptr& cnf, int command_exit_status, duration completion_delay, - check::completion_handler&& handler) + check::completion_handler&& handler, + const checks_statistics::pointer& stat) : check(io_context, logger, exp, + check_interval, serv, cmd_name, cmd_line, cnf, - std::move(handler)), + std::move(handler), + stat), _completion_timer(*io_context), _command_exit_status(command_exit_status), _completion_delay(completion_delay) {} @@ -62,12 +67,15 @@ class tempo_check : public check { SPDLOG_INFO("start tempo check"); check_starts.emplace_back(this, std::chrono::system_clock::now()); } - check::start_check(timeout); + if (!_start_check(timeout)) { + return; + } _completion_timer.expires_from_now(_completion_delay); _completion_timer.async_wait([me = shared_from_this(), this, check_running_index = _get_running_check_index()]( - const boost::system::error_code& err) { + [[maybe_unused]] const boost::system:: + error_code& err) { SPDLOG_TRACE("end of completion timer for serv {}", get_service()); me->on_completion( check_running_index, 
_command_exit_status, @@ -137,10 +145,12 @@ TEST_F(scheduler_test, no_config) { [](const std::shared_ptr&) {}, [](const std::shared_ptr&, const std::shared_ptr&, time_point /* start expected*/, - const std::string& /*service*/, const std::string& /*cmd_name*/, - const std::string& /*cmd_line*/, + duration /* check interval */, const std::string& /*service*/, + const std::string& /*cmd_name*/, const std::string& /*cmd_line*/, const engine_to_agent_request_ptr& /*engine to agent request*/, - check::completion_handler&&) { return std::shared_ptr(); }); + check::completion_handler&&, const checks_statistics::pointer&) { + return std::shared_ptr(); + }); std::weak_ptr weak_shed(sched); sched.reset(); @@ -168,32 +178,31 @@ static bool tempo_check_assert_pred(const time_point& after, } TEST_F(scheduler_test, correct_schedule) { + { + std::lock_guard l(tempo_check::check_starts_m); + tempo_check::check_starts.clear(); + } + std::shared_ptr sched = scheduler::load( g_io_context, spdlog::default_logger(), "my_host", create_conf(20, 10, 1, 50, 1), [](const std::shared_ptr&) {}, [](const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point start_expected, const std::string& service, - const std::string& cmd_name, const std::string& cmd_line, + time_point start_expected, duration check_interval, + const std::string& service, const std::string& cmd_name, + const std::string& cmd_line, const engine_to_agent_request_ptr& engine_to_agent_request, - check::completion_handler&& handler) { + check::completion_handler&& handler, + const checks_statistics::pointer& stat) { return std::make_shared( - io_context, logger, start_expected, service, cmd_name, cmd_line, - engine_to_agent_request, 0, std::chrono::milliseconds(50), - std::move(handler)); + io_context, logger, start_expected, check_interval, service, + cmd_name, cmd_line, engine_to_agent_request, 0, + std::chrono::milliseconds(50), std::move(handler), stat); }); - { - std::lock_guard l(tempo_check::check_starts_m); - tempo_check::check_starts.clear(); - } - std::this_thread::sleep_for(std::chrono::milliseconds(10100)); - // we have 2 * 10 = 20 checks spread over 10 second - duration expected_interval = std::chrono::milliseconds(1000); - { std::lock_guard l(tempo_check::check_starts_m); ASSERT_GE(tempo_check::check_starts.size(), 20); @@ -204,6 +213,7 @@ TEST_F(scheduler_test, correct_schedule) { first = false; } else { ASSERT_NE(previous.first, check_time.first); + // check if we have a delay of 500ms between two checks ASSERT_PRED2(tempo_check_assert_pred, check_time.second, previous.second); } @@ -253,14 +263,16 @@ TEST_F(scheduler_test, time_out) { }, [](const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point start_expected, const std::string& service, - const std::string& cmd_name, const std::string& cmd_line, + time_point start_expected, duration check_interval, + const std::string& service, const std::string& cmd_name, + const std::string& cmd_line, const engine_to_agent_request_ptr& engine_to_agent_request, - check::completion_handler&& handler) { + check::completion_handler&& handler, + const checks_statistics::pointer& stat) { return std::make_shared( - io_context, logger, start_expected, service, cmd_name, cmd_line, - engine_to_agent_request, 0, std::chrono::milliseconds(1500), - std::move(handler)); + io_context, logger, start_expected, check_interval, service, + cmd_name, cmd_line, engine_to_agent_request, 0, + std::chrono::milliseconds(1500), std::move(handler), stat); }); std::unique_lock l(m); 
export_cond.wait(l); @@ -296,7 +308,6 @@ TEST_F(scheduler_test, time_out) { TEST_F(scheduler_test, correct_output_examplar) { std::shared_ptr exported_request; std::condition_variable export_cond; - time_point now = std::chrono::system_clock::now(); std::shared_ptr sched = scheduler::load( g_io_context, spdlog::default_logger(), "my_host", create_conf(2, 1, 2, 10, 1), @@ -306,14 +317,16 @@ TEST_F(scheduler_test, correct_output_examplar) { }, [](const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point start_expected, const std::string& service, - const std::string& cmd_name, const std::string& cmd_line, + time_point start_expected, duration check_interval, + const std::string& service, const std::string& cmd_name, + const std::string& cmd_line, const engine_to_agent_request_ptr& engine_to_agent_request, - check::completion_handler&& handler) { + check::completion_handler&& handler, + const checks_statistics::pointer& stat) { return std::make_shared( - io_context, logger, start_expected, service, cmd_name, cmd_line, - engine_to_agent_request, 0, std::chrono::milliseconds(10), - std::move(handler)); + io_context, logger, start_expected, check_interval, service, + cmd_name, cmd_line, engine_to_agent_request, 0, + std::chrono::milliseconds(10), std::move(handler), stat); }); std::mutex m; std::unique_lock l(m); @@ -385,27 +398,33 @@ class concurent_check : public check { concurent_check(const std::shared_ptr& io_context, const std::shared_ptr& logger, time_point exp, + duration check_interval, const std::string& serv, const std::string& cmd_name, const std::string& cmd_line, const engine_to_agent_request_ptr& cnf, int command_exit_status, duration completion_delay, - check::completion_handler&& handler) + check::completion_handler&& handler, + const checks_statistics::pointer& stat) : check(io_context, logger, exp, + check_interval, serv, cmd_name, cmd_line, cnf, - std::move(handler)), + std::move(handler), + stat), _completion_timer(*io_context), _command_exit_status(command_exit_status), _completion_delay(completion_delay) {} void start_check(const duration& timeout) override { - check::start_check(timeout); + if (!_start_check(timeout)) { + return; + } active_checks.insert(this); if (active_checks.size() > max_active_check) { max_active_check = active_checks.size(); @@ -414,7 +433,8 @@ class concurent_check : public check { _completion_timer.async_wait([me = shared_from_this(), this, check_running_index = _get_running_check_index()]( - const boost::system::error_code& err) { + [[maybe_unused]] const boost::system:: + error_code& err) { active_checks.erase(this); checked.insert(this); SPDLOG_TRACE("end of completion timer for serv {}", get_service()); @@ -438,17 +458,23 @@ TEST_F(scheduler_test, max_concurent) { std::shared_ptr sched = scheduler::load( g_io_context, spdlog::default_logger(), "my_host", create_conf(200, 10, 1, 10, 1), - [&](const std::shared_ptr& req) {}, + [&]([[maybe_unused]] const std::shared_ptr& req) {}, [](const std::shared_ptr& io_context, const std::shared_ptr& logger, - time_point start_expected, const std::string& service, - const std::string& cmd_name, const std::string& cmd_line, + time_point start_expected, duration check_interval, + const std::string& service, const std::string& cmd_name, + const std::string& cmd_line, const engine_to_agent_request_ptr& engine_to_agent_request, - check::completion_handler&& handler) { + check::completion_handler&& handler, + const checks_statistics::pointer& stat) { return std::make_shared( - io_context, 
logger, start_expected, service, cmd_name, cmd_line, - engine_to_agent_request, 0, std::chrono::milliseconds(750), - std::move(handler)); + io_context, logger, start_expected, check_interval, service, + cmd_name, cmd_line, engine_to_agent_request, 0, + std::chrono::milliseconds(750 - + 10) /*the - 10 is for some delay in test + execution from start expected*/ + , + std::move(handler), stat); }); // to many tests to be completed in eleven second diff --git a/agent/test/test_main.cc b/agent/test/test_main.cc index 21d63bb5a22..e13a6cc5d44 100644 --- a/agent/test/test_main.cc +++ b/agent/test/test_main.cc @@ -18,18 +18,22 @@ #include +#ifdef _WIN32 +#include "ntdll.hh" +#endif + std::shared_ptr g_io_context( std::make_shared()); class CentreonEngineEnvironment : public testing::Environment { public: -#ifndef _WINDOWS void SetUp() override { +#ifndef _WIN32 setenv("TZ", ":Europe/Paris", 1); - return; - } +#else + com::centreon::agent::load_nt_dll(); #endif - + } }; /** diff --git a/bbdo/CMakeLists.txt b/bbdo/CMakeLists.txt index 5232e26135b..8a26e12e8f6 100644 --- a/bbdo/CMakeLists.txt +++ b/bbdo/CMakeLists.txt @@ -18,6 +18,7 @@ set(protobuf_files header + common rebuild_message remove_graph_message bbdo @@ -43,45 +44,12 @@ foreach(name IN LISTS protobuf_files) add_custom_target( "target_${name}" DEPENDS "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.cc" "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.h") + add_library("pb_${name}_lib" STATIC "${name}.pb.cc" + "${name}.pb.h") + add_dependencies("pb_${name}_lib" "target_${name}") + set_target_properties("pb_${name}_lib" PROPERTIES POSITION_INDEPENDENT_CODE ON) endforeach() -add_library(pb_header_lib STATIC header.pb.cc header.pb.h) -add_dependencies(pb_header_lib target_header) -set_target_properties(pb_header_lib PROPERTIES POSITION_INDEPENDENT_CODE ON) - -add_library(pb_neb_lib STATIC neb.pb.cc neb.pb.h) -add_dependencies(pb_neb_lib target_neb target_header) -set_target_properties(pb_neb_lib PROPERTIES POSITION_INDEPENDENT_CODE ON) - -add_library(pb_storage_lib STATIC storage.pb.cc storage.pb.h) -add_dependencies(pb_storage_lib target_storage target_header) -set_target_properties(pb_storage_lib PROPERTIES POSITION_INDEPENDENT_CODE ON) - -add_library(pb_bbdo_lib STATIC bbdo.pb.cc bbdo.pb.h) -add_dependencies(pb_bbdo_lib target_bbdo) -set_target_properties(pb_bbdo_lib PROPERTIES POSITION_INDEPENDENT_CODE ON) - -add_library(pb_bam_lib STATIC bam.pb.cc bam.pb.h bam_state.pb.cc bam_state.pb.h) -add_dependencies(pb_bam_lib target_bam target_bam_state target_header) -set_target_properties(pb_bam_lib PROPERTIES POSITION_INDEPENDENT_CODE ON) - -add_library(pb_extcmd_lib STATIC extcmd.pb.cc extcmd.pb.h) -add_dependencies(pb_extcmd_lib target_extcmd target_header) -set_target_properties(pb_extcmd_lib PROPERTIES POSITION_INDEPENDENT_CODE ON) - -add_library(pb_rebuild_message_lib STATIC rebuild_message.pb.cc - rebuild_message.pb.h) -add_dependencies(pb_rebuild_message_lib target_rebuild_message target_header) -set_target_properties(pb_rebuild_message_lib - PROPERTIES POSITION_INDEPENDENT_CODE ON) - -add_library(pb_remove_graph_message_lib STATIC remove_graph_message.pb.cc - remove_graph_message.pb.h) -add_dependencies(pb_remove_graph_message_lib target_remove_graph_message - target_header) -set_target_properties(pb_remove_graph_message_lib - PROPERTIES POSITION_INDEPENDENT_CODE ON) - set(otl_protobuf_files opentelemetry/proto/collector/metrics/v1/metrics_service opentelemetry/proto/metrics/v1/metrics @@ -100,7 +68,7 @@ foreach(name IN LISTS otl_protobuf_files) 
--proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto ${proto_file} VERBATIM) endforeach() - + add_library(pb_open_telemetry_lib STATIC ${CMAKE_SOURCE_DIR}/bbdo/opentelemetry/proto/collector/metrics/v1/metrics_service.pb.cc ${CMAKE_SOURCE_DIR}/bbdo/opentelemetry/proto/metrics/v1/metrics.pb.cc @@ -151,7 +119,7 @@ add_library( "storage/status.hh") set_target_properties(bbdo_storage PROPERTIES POSITION_INDEPENDENT_CODE ON) target_precompile_headers(bbdo_storage REUSE_FROM bbdo_bbdo) -add_dependencies(bbdo_storage table_max_size) +add_dependencies(bbdo_storage table_max_size pb_storage_lib) add_library( bbdo_bam STATIC "bam/ba_duration_event.cc" @@ -184,4 +152,4 @@ add_library( "bam/kpi_event.hh") set_target_properties(bbdo_bam PROPERTIES POSITION_INDEPENDENT_CODE ON) target_precompile_headers(bbdo_bam REUSE_FROM bbdo_bbdo) -add_dependencies(bbdo_bam table_max_size pb_bam_lib) +add_dependencies(bbdo_bam table_max_size pb_bam_lib pb_bam_state_lib) diff --git a/bbdo/bbdo.proto b/bbdo/bbdo.proto index ee3ffa21a07..a2d4b2a2fc1 100644 --- a/bbdo/bbdo.proto +++ b/bbdo/bbdo.proto @@ -1,23 +1,24 @@ -/* -** Copyright 2022 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ - +/** + * Copyright 2022-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ syntax = "proto3"; +import "common.proto"; + package com.centreon.broker; /* Ignore */ @@ -33,6 +34,12 @@ message Welcome { string extensions = 2; uint64 poller_id = 3; string poller_name = 4; + /* Broker name is more relevant than poller name because for example on the + * central, rrd broker, central broker and engine share the same poller name + * that is 'Central'. 
*/
+  string broker_name = 5;
+  com.centreon.common.PeerType peer_type = 6;
+  bool extended_negotiation = 7;
 }
 
 /* io::bbdo, bbdo::de_pb_ack, 50 */
@@ -44,3 +51,16 @@ message Ack {
 message Stop {
   uint64 poller_id = 1;
 }
+
+/* io::bbdo, bbdo::de_pb_engine_configuration, 52 */
+message EngineConfiguration {
+  uint64 poller_id = 1;    // Poller ID of the sender (Engine or Broker)
+  string poller_name = 2;  // Poller name of the sender
+  string broker_name = 3;  // Broker name of the sender
+  com.centreon.common.PeerType peer_type =
+      4;  // Peer type of the sender (Engine or Broker)
+  string engine_config_version =
+      5;  // Engine uses this to send the conf version
+  bool need_update =
+      6;  // Broker uses this to tell Engine if an update is needed
+}
diff --git a/bbdo/bbdo/ack.hh b/bbdo/bbdo/ack.hh
index 6a3427d57ea..d3b1bcbc660 100644
--- a/bbdo/bbdo/ack.hh
+++ b/bbdo/bbdo/ack.hh
@@ -19,10 +19,7 @@
 #ifndef CCB_BBDO_ACK_HH
 #define CCB_BBDO_ACK_HH
 
-#include "bbdo/events.hh"
 #include "com/centreon/broker/bbdo/internal.hh"
-#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
 #include "com/centreon/broker/io/events.hh"
 #include "com/centreon/broker/mapping/entry.hh"
diff --git a/bbdo/bbdo/stop.hh b/bbdo/bbdo/stop.hh
index 0074f402e6b..4578a9a7c52 100644
--- a/bbdo/bbdo/stop.hh
+++ b/bbdo/bbdo/stop.hh
@@ -19,10 +19,7 @@
 #ifndef CCB_BBDO_STOP_HH
 #define CCB_BBDO_STOP_HH
 
-#include "bbdo/events.hh"
 #include "com/centreon/broker/bbdo/internal.hh"
-#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
 #include "com/centreon/broker/io/events.hh"
 #include "com/centreon/broker/mapping/entry.hh"
diff --git a/bbdo/bbdo/version_response.hh b/bbdo/bbdo/version_response.hh
index 669be85b480..dfa9ac539f1 100644
--- a/bbdo/bbdo/version_response.hh
+++ b/bbdo/bbdo/version_response.hh
@@ -20,10 +20,7 @@
 #define CCB_BBDO_VERSION_RESPONSE_HH
 
 #include "bbdo/bbdo/bbdo_version.hh"
-#include "bbdo/events.hh"
 #include "com/centreon/broker/bbdo/internal.hh"
-#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
 #include "com/centreon/broker/io/events.hh"
 #include "com/centreon/broker/mapping/entry.hh"
diff --git a/bbdo/common.proto b/bbdo/common.proto
new file mode 100644
index 00000000000..aca90f2fe6a
--- /dev/null
+++ b/bbdo/common.proto
@@ -0,0 +1,33 @@
+/**
+* Copyright 2024 Centreon
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* For more information : contact@centreon.com
+*/
+
+syntax = "proto3";
+
+package com.centreon.common;
+
+/**
+ * @brief Type of the peer. Used in Welcome message, also in the Broker gRPC
+ * service.
+ *
+ */
+enum PeerType {
+  UNKNOWN = 0;
+  BROKER = 1;
+  ENGINE = 2;
+  MAP = 3;
+}
diff --git a/bbdo/events.hh b/bbdo/events.hh
index 9585bd26055..7db7d897269 100644
--- a/bbdo/events.hh
+++ b/bbdo/events.hh
@@ -93,7 +93,8 @@ enum data_element {
   de_remove_poller = 6,
   de_welcome = 7,
   de_pb_ack = 8,
-  de_pb_stop = 9
+  de_pb_stop = 9,
+  de_pb_engine_configuration = 10,
 };
 }
 namespace neb {
@@ -155,6 +156,7 @@ enum data_element {
   de_pb_instance_configuration = 54,
   de_pb_adaptive_service_status = 55,
   de_pb_adaptive_host_status = 56,
+  de_pb_agent_stats = 57
 };
 }  // namespace neb
 namespace storage {
diff --git a/bbdo/neb.proto b/bbdo/neb.proto
index a5c4715c630..2380428539b 100644
--- a/bbdo/neb.proto
+++ b/bbdo/neb.proto
@@ -424,7 +424,7 @@ message HostStatus {
  * @brief Message sent in BBDO 3.0.0 to update a host status partially
  * changed. For example, it is convenient for downtime changed.
  */
-/* io::neb, neb::de_pb_adaptive_host_status, 55 */
+/* io::neb, neb::de_pb_adaptive_host_status, 54 */
 message AdaptiveHostStatus {
   uint64 host_id = 1;
   optional int32 scheduled_downtime_depth = 2;
@@ -649,6 +649,7 @@ message Instance {
   int64 end_time = 7;
   int64 start_time = 8;
   string version = 9;
+  string engine_config_version = 10;
 }
 
 /* io::neb, neb::de_pb_responsive_instance, 19 */
@@ -841,3 +842,20 @@ message TagInfo {
   uint64 id = 1;
   TagType type = 2;
 }
+
+/* Ignore */
+message AgentInfo {
+  uint32 major = 1;
+  uint32 minor = 2;
+  uint32 patch = 3;
+  bool reverse = 4;
+  string os = 5;
+  string os_version = 6;
+  uint32 nb_agent = 7;
+}
+
+/* io::neb, neb::de_pb_agent_stats, 57 */
+message AgentStats {
+  int64 poller_id = 1;
+  repeated AgentInfo stats = 2;
+}
\ No newline at end of file
diff --git a/bbdo/storage/index_mapping.hh b/bbdo/storage/index_mapping.hh
index 553334b4fdb..1742104cde5 100644
--- a/bbdo/storage/index_mapping.hh
+++ b/bbdo/storage/index_mapping.hh
@@ -19,11 +19,8 @@
 #ifndef CCB_STORAGE_INDEX_MAPPING_HH
 #define CCB_STORAGE_INDEX_MAPPING_HH
 
-#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
 #include "com/centreon/broker/io/events.hh"
 #include "com/centreon/broker/mapping/entry.hh"
-#include "com/centreon/broker/timestamp.hh"
 
 namespace com::centreon::broker::storage {
diff --git a/bbdo/storage/metric.hh b/bbdo/storage/metric.hh
index 6a7d4ea9d07..7bb3a293aec 100644
--- a/bbdo/storage/metric.hh
+++ b/bbdo/storage/metric.hh
@@ -21,7 +21,6 @@
 #include "bbdo/storage.pb.h"
 #include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
 #include "com/centreon/broker/io/events.hh"
 #include "com/centreon/broker/mapping/entry.hh"
 #include "com/centreon/broker/timestamp.hh"
diff --git a/bbdo/storage/metric_mapping.hh b/bbdo/storage/metric_mapping.hh
index 690ac03c358..651186d9ded 100644
--- a/bbdo/storage/metric_mapping.hh
+++ b/bbdo/storage/metric_mapping.hh
@@ -20,7 +20,6 @@
 #define CCB_STORAGE_METRIC_MAPPING_HH
 
 #include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
 #include "com/centreon/broker/io/events.hh"
 #include "com/centreon/broker/mapping/entry.hh"
 #include "com/centreon/broker/timestamp.hh"
diff --git a/bbdo/storage/remove_graph.hh b/bbdo/storage/remove_graph.hh
index 9323013f8e4..94c4870d7e0 100644
--- a/bbdo/storage/remove_graph.hh
+++ b/bbdo/storage/remove_graph.hh
@@ -20,7 +20,6 @@
 #define CCB_STORAGE_REMOVE_GRAPH_HH
 
 #include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
 #include "com/centreon/broker/io/events.hh"
 #include
"com/centreon/broker/mapping/entry.hh" diff --git a/bbdo/storage/status.hh b/bbdo/storage/status.hh index 70883acba91..d83559a017e 100644 --- a/bbdo/storage/status.hh +++ b/bbdo/storage/status.hh @@ -21,7 +21,6 @@ #include "bbdo/storage.pb.h" #include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/timestamp.hh" diff --git a/broker/CMakeLists.txt b/broker/CMakeLists.txt index ad2373471fe..e74fdf2ab59 100644 --- a/broker/CMakeLists.txt +++ b/broker/CMakeLists.txt @@ -1,5 +1,5 @@ # -# Copyright 2009-2023 Centreon +# Copyright 2009-2024 Centreon # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of @@ -66,15 +66,20 @@ endif() if(OS_DISTRIBUTOR STREQUAL "Debian" OR OS_DISTRIBUTOR STREQUAL "Ubuntu") message(STATUS "deb based os") add_definitions("-DMYSQL_SOCKET=\"/var/run/mysqld/mysqld.sock\"") - add_definitions("-DDEFAULT_MARIADB_EXTENSION_DIR=\"/usr/lib/x86_64-linux-gnu/libmariadb3/plugin\"") + add_definitions( + "-DDEFAULT_MARIADB_EXTENSION_DIR=\"/usr/lib/x86_64-linux-gnu/libmariadb3/plugin\"" + ) elseif(OS_DISTRIBUTOR STREQUAL "CentOS" OR OS_DISTRIBUTOR STREQUAL "RedHat") message(STATUS "rpm based os") add_definitions("-DMYSQL_SOCKET=\"/var/lib/mysql/mysql.sock\"") - add_definitions("-DDEFAULT_MARIADB_EXTENSION_DIR=\"/usr/lib64/mariadb/plugin\"") + add_definitions( + "-DDEFAULT_MARIADB_EXTENSION_DIR=\"/usr/lib64/mariadb/plugin\"") else() message(STATUS "other os: ${OS_DISTRIBUTOR}") add_definitions("-DMYSQL_SOCKET=\"/tmp/mysql.sock\"") - add_definitions("-DDEFAULT_MARIADB_EXTENSION_DIR=\"/usr/lib/x86_64-linux-gnu/libmariadb3/plugin\"") + add_definitions( + "-DDEFAULT_MARIADB_EXTENSION_DIR=\"/usr/lib/x86_64-linux-gnu/libmariadb3/plugin\"" + ) endif() include_directories( @@ -99,6 +104,7 @@ add_custom_command( COMMAND ${Protobuf_PROTOC_EXECUTABLE} ARGS --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} --proto_path=${SRC_DIR} + --proto_path=${CMAKE_SOURCE_DIR}/bbdo --proto_path=${CMAKE_SOURCE_DIR}/common/src --grpc_out="${SRC_DIR}" ${SRC_DIR}/broker.proto DEPENDS ${SRC_DIR}/broker.proto @@ -106,20 +112,20 @@ add_custom_command( OUTPUT ${SRC_DIR}/broker.pb.cc ${SRC_DIR}/broker.pb.h COMMAND ${Protobuf_PROTOC_EXECUTABLE} ARGS --cpp_out="${SRC_DIR}" + --proto_path=${CMAKE_SOURCE_DIR}/bbdo --proto_path=${SRC_DIR} --proto_path=${CMAKE_SOURCE_DIR}/common/src ${SRC_DIR}/broker.proto WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_custom_target("target_broker_message" DEPENDS "${SRC_DIR}/broker.pb.cc" +add_custom_target(target_broker_message DEPENDS "${SRC_DIR}/broker.pb.cc" "${SRC_DIR}/broker.pb.h") include_directories(${SRC_DIR} ${CMAKE_SOURCE_DIR}/common/src - ${CMAKE_SOURCE_DIR}/common/inc - ${CMAKE_SOURCE_DIR}/bbdo) + ${CMAKE_SOURCE_DIR}/common/inc ${CMAKE_SOURCE_DIR}/bbdo) add_library(berpc STATIC ${SRC_DIR}/broker.grpc.pb.cc ${SRC_DIR}/broker.pb.cc ${SRC_DIR}/broker.grpc.pb.h ${SRC_DIR}/broker.pb.h) -target_link_libraries(berpc protobuf) +target_link_libraries(berpc protobuf pb_common_lib) set_target_properties(berpc PROPERTIES COMPILE_FLAGS "-fPIC") # Version. 
@@ -441,34 +447,28 @@ set(LIBROKER_SOURCES add_library(rokerbase STATIC ${LIBROKER_SOURCES}) set_target_properties(rokerbase PROPERTIES COMPILE_FLAGS "-fPIC") target_precompile_headers(rokerbase REUSE_FROM multiplexing) -add_dependencies(rokerbase berpc target_bbdo target_extcmd) +add_dependencies(rokerbase berpc target_bbdo target_extcmd pb_neb_lib) target_link_libraries( rokerbase - sql bbdo_bbdo + berpc + ctncrypto + dl pb_bbdo_lib + pb_common_lib pb_extcmd_lib pb_open_telemetry_lib - berpc - z - spdlog::spdlog - crypto - ssl pthread - dl) + spdlog::spdlog + sql + z) add_library(roker STATIC ${SRC_DIR}/config/applier/init.cc) -target_link_libraries( - roker - rokerbase - crypto - ssl - pthread - dl) +target_link_libraries(roker rokerbase crypto ssl pthread dl) # Standalone binary. add_executable(cbd ${SRC_DIR}/main.cc) -add_dependencies(cbd multiplexing centreon_common) +add_dependencies(cbd multiplexing centreon_common pb_neb_lib) # Flags needed to include all symbols in binary. target_link_libraries( @@ -511,7 +511,7 @@ add_subdirectory(core/sql) # Generator module. add_broker_module(GENERATOR OFF) add_broker_module(STATS ON) -#add_broker_module(STATS_EXPORTER OFF) +# add_broker_module(STATS_EXPORTER OFF) add_broker_module(NEB ON) add_broker_module(RRD ON) add_broker_module(UNIFIED_SQL ON) diff --git a/broker/bam/CMakeLists.txt b/broker/bam/CMakeLists.txt index 5f2f4c57bfb..8f80de38179 100644 --- a/broker/bam/CMakeLists.txt +++ b/broker/bam/CMakeLists.txt @@ -142,7 +142,7 @@ add_library( "${INC_DIR}/service_book.hh" "${INC_DIR}/service_listener.hh" "${INC_DIR}/timeperiod_map.hh") -target_link_libraries("${BAM}" bbdo_storage bbdo_bam pb_bam_lib +target_link_libraries("${BAM}" bbdo_storage bbdo_bam pb_bam_lib pb_bam_state_lib spdlog::spdlog) target_precompile_headers(${BAM} PRIVATE precomp_inc/precomp.hpp) set_target_properties("${BAM}" PROPERTIES PREFIX "") diff --git a/broker/bam/inc/com/centreon/broker/bam/factory.hh b/broker/bam/inc/com/centreon/broker/bam/factory.hh index 9d5251b780b..7a3f6b15800 100644 --- a/broker/bam/inc/com/centreon/broker/bam/factory.hh +++ b/broker/bam/inc/com/centreon/broker/bam/factory.hh @@ -1,20 +1,20 @@ -/* -** Copyright 2014-2015 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2014-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #ifndef CCB_BAM_FACTORY_HH #define CCB_BAM_FACTORY_HH @@ -37,6 +37,7 @@ class factory : public io::factory { bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; io::endpoint* new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const override; diff --git a/broker/bam/src/factory.cc b/broker/bam/src/factory.cc index e7781556bf9..bb985c92a0f 100644 --- a/broker/bam/src/factory.cc +++ b/broker/bam/src/factory.cc @@ -1,27 +1,25 @@ /** -* Copyright 2014-2016, 2021 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2014-2016, 2021-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/bam/factory.hh" #include #include "com/centreon/broker/bam/connector.hh" -#include "com/centreon/broker/config/parser.hh" -#include "com/centreon/broker/sql/database_config.hh" #include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; @@ -60,10 +58,11 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params [[maybe_unused]], bool& is_acceptor, std::shared_ptr cache) const { // Find DB parameters. - database_config db_cfg(cfg); + database_config db_cfg(cfg, global_params); // Is it a BAM or BAM-BI output ? bool is_bam_bi{absl::EqualsIgnoreCase(cfg.type, "bam_bi")}; @@ -71,8 +70,7 @@ io::endpoint* factory::new_endpoint( // External command file. std::string ext_cmd_file; if (!is_bam_bi) { - std::map::const_iterator it = - cfg.params.find("command_file"); + auto it = cfg.params.find("command_file"); if (it == cfg.params.end() || it->second.empty()) throw msg_fmt("BAM: command_file parameter not set"); ext_cmd_file = it->second; @@ -81,8 +79,7 @@ io::endpoint* factory::new_endpoint( // Storage database name. 
std::string storage_db_name; { - std::map::const_iterator it( - cfg.params.find("storage_db_name")); + auto it = cfg.params.find("storage_db_name"); if (it != cfg.params.end()) storage_db_name = it->second; } diff --git a/broker/bam/src/reporting_stream.cc b/broker/bam/src/reporting_stream.cc index 7f478546b80..4a253eea664 100644 --- a/broker/bam/src/reporting_stream.cc +++ b/broker/bam/src/reporting_stream.cc @@ -1163,7 +1163,7 @@ void reporting_stream::_process_pb_ba_event( } // remove older events for BA for (auto it = m_events.begin(); it != m_events.end();) { - if (it->first < be.start_time()) + if (it->first < static_cast(be.start_time())) it = m_events.erase(it); else break; @@ -1795,6 +1795,7 @@ void reporting_stream::_dimension_dispatch( case io::events::data_type::value: _process_dimension_ba_timeperiod_relation(data); + break; case bam::pb_dimension_ba_timeperiod_relation::static_type(): _process_pb_dimension_ba_timeperiod_relation(data); break; diff --git a/broker/bam/test/ba/kpi_ba.cc b/broker/bam/test/ba/kpi_ba.cc index 6ed5cdf1b62..83e3a5b68d5 100644 --- a/broker/bam/test/ba/kpi_ba.cc +++ b/broker/bam/test/ba/kpi_ba.cc @@ -46,7 +46,7 @@ class KpiBA : public ::testing::Test { void SetUp() override { // Initialization. _logger = log_v2::instance().get(log_v2::BAM); - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); _aply_state = std::make_unique(_logger); _state = std::make_unique(_logger); diff --git a/broker/bam/test/ba/kpi_service.cc b/broker/bam/test/ba/kpi_service.cc index 71bad246e20..5dbeca2d583 100644 --- a/broker/bam/test/ba/kpi_service.cc +++ b/broker/bam/test/ba/kpi_service.cc @@ -37,7 +37,6 @@ using namespace com::centreon::broker; - class BamBA : public ::testing::Test { protected: std::unique_ptr _aply_state; @@ -49,7 +48,7 @@ class BamBA : public ::testing::Test { void SetUp() override { // Initialization. _logger = log_v2::instance().get(log_v2::BAM); - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); _aply_state = std::make_unique(_logger); _state = std::make_unique(_logger); diff --git a/broker/bam/test/configuration/applier-boolexp.cc b/broker/bam/test/configuration/applier-boolexp.cc index a39b7322316..011e1302bdb 100644 --- a/broker/bam/test/configuration/applier-boolexp.cc +++ b/broker/bam/test/configuration/applier-boolexp.cc @@ -30,7 +30,7 @@ class ApplierBoolexp : public ::testing::Test { public: void SetUp() override { // Initialization. 
- config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); auto logger = log_v2::instance().get(log_v2::BAM); _aply_state = std::make_unique(logger); diff --git a/broker/bam/test/exp_builder/exp_builder.cc b/broker/bam/test/exp_builder/exp_builder.cc index f44f3e12db9..93fefa1cc50 100644 --- a/broker/bam/test/exp_builder/exp_builder.cc +++ b/broker/bam/test/exp_builder/exp_builder.cc @@ -35,7 +35,6 @@ using namespace com::centreon::broker; using log_v2 = com::centreon::common::log_v2::log_v2; - class BamExpBuilder : public ::testing::Test { protected: std::unique_ptr _visitor; @@ -45,7 +44,7 @@ class BamExpBuilder : public ::testing::Test { void SetUp() override { _logger = log_v2::instance().get(log_v2::BAM); try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); _logger->set_level(spdlog::level::debug); _logger->flush_on(spdlog::level::debug); } catch (std::exception const& e) { diff --git a/broker/bam/test/monitoring_stream.cc b/broker/bam/test/monitoring_stream.cc index 51fadfc5e95..129c0619f68 100644 --- a/broker/bam/test/monitoring_stream.cc +++ b/broker/bam/test/monitoring_stream.cc @@ -31,7 +31,9 @@ using namespace com::centreon::broker; using namespace com::centreon::broker::bam; class BamMonitoringStream : public testing::Test { - void SetUp() override { config::applier::init(0, "test_broker", 0); } + void SetUp() override { + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); + } void TearDown() override { config::applier::deinit(); } }; diff --git a/broker/bam/test/time/check_timeperiod.cc b/broker/bam/test/time/check_timeperiod.cc index ba58e3382fd..77d88532385 100644 --- a/broker/bam/test/time/check_timeperiod.cc +++ b/broker/bam/test/time/check_timeperiod.cc @@ -1,20 +1,20 @@ /** -* Copyright 2013-2015 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2013-2015 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #include #include "com/centreon/broker/config/applier/init.hh" @@ -141,7 +141,7 @@ static void parse_file(char const* filename, options& opt) { class BamTime : public ::testing::Test { public: void SetUp() override { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } void TearDown() override { config::applier::deinit(); } diff --git a/broker/core/inc/com/centreon/broker/bbdo/factory.hh b/broker/core/inc/com/centreon/broker/bbdo/factory.hh index ad38b720071..ba0d4194c17 100644 --- a/broker/core/inc/com/centreon/broker/bbdo/factory.hh +++ b/broker/core/inc/com/centreon/broker/bbdo/factory.hh @@ -19,7 +19,6 @@ #ifndef CCB_BBDO_FACTORY_HH #define CCB_BBDO_FACTORY_HH -#include "com/centreon/broker/io/extension.hh" #include "com/centreon/broker/io/factory.hh" namespace com::centreon::broker::bbdo { @@ -41,7 +40,9 @@ class factory : public io::factory { factory& operator=(factory const& other) = delete; bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; io::endpoint* new_endpoint( - config::endpoint& cfg, bool& is_acceptor, + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const override; }; diff --git a/broker/core/inc/com/centreon/broker/bbdo/internal.hh b/broker/core/inc/com/centreon/broker/bbdo/internal.hh index 82d46e774f9..e17c0e398e5 100644 --- a/broker/core/inc/com/centreon/broker/bbdo/internal.hh +++ b/broker/core/inc/com/centreon/broker/bbdo/internal.hh @@ -44,6 +44,10 @@ using pb_stop = com::centreon::broker::io::protobuf; +using pb_engine_configuration = com::centreon::broker::io::protobuf< + EngineConfiguration, + make_type(io::bbdo, bbdo::de_pb_engine_configuration)>; + using pb_bench = com::centreon::broker::io:: protobuf; diff --git a/broker/core/inc/com/centreon/broker/bbdo/stream.hh b/broker/core/inc/com/centreon/broker/bbdo/stream.hh index 065690617f3..5559c5070c5 100644 --- a/broker/core/inc/com/centreon/broker/bbdo/stream.hh +++ b/broker/core/inc/com/centreon/broker/bbdo/stream.hh @@ -20,6 +20,7 @@ #define CCB_BBDO_STREAM_HH #include "bbdo/bbdo/bbdo_version.hh" +#include "bbdo/common.pb.h" #include "com/centreon/broker/io/extension.hh" #include "com/centreon/broker/io/raw.hh" #include "com/centreon/broker/io/stream.hh" @@ -156,12 +157,29 @@ class stream : public io::stream { /* bbdo logger */ std::shared_ptr _logger; + void _negotiate_engine_conf(); void _write(std::shared_ptr const& d); bool _read_any(std::shared_ptr& d, time_t deadline); + void _handle_bbdo_event(const std::shared_ptr& d); + bool _wait_for_bbdo_event(uint32_t expected_type, + std::shared_ptr& d, + time_t deadline); void _send_event_stop_and_wait_for_ack(); std::string _get_extension_names(bool mandatory) const; + /* Poller Name of the peer: used since BBDO 3.0.1 */ std::string _poller_name; + /* Broker Name of the peer: used since BBDO 3.0.1 */ + std::string _broker_name; + /* ID of the peer poller: used since BBDO 3.0.1 */ uint64_t _poller_id = 0u; + /* True if the peer supports extended negotiation */ + bool _extended_negotiation = false; + /* Type of the peer: used since BBDO 3.0.1 */ + common::PeerType _peer_type = common::UNKNOWN; + /* Currently, this is a hash of the Engine configuration directory. It's + * filled when neb::pb_instance is sent to Broker. 
*/ + std::string _config_version; + io::data* unserialize(uint32_t event_type, uint32_t source_id, uint32_t destination_id, @@ -192,6 +210,8 @@ class stream : public io::stream { void acknowledge_events(uint32_t events); void send_event_acknowledgement(); std::list get_running_config(); + bool check_poller_configuration(uint64_t poller_id, + const std::string& expected_version); }; } // namespace com::centreon::broker::bbdo diff --git a/broker/core/inc/com/centreon/broker/broker_impl.hh b/broker/core/inc/com/centreon/broker/broker_impl.hh index 2b919a43dbd..5dddd534c81 100644 --- a/broker/core/inc/com/centreon/broker/broker_impl.hh +++ b/broker/core/inc/com/centreon/broker/broker_impl.hh @@ -1,5 +1,5 @@ /** - * Copyright 2020-2023 Centreon (https://www.centreon.com/) + * Copyright 2020-2024 Centreon (https://www.centreon.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ #ifndef CENTREON_BROKER_CORE_SRC_BROKERIMPL_HH_ #define CENTREON_BROKER_CORE_SRC_BROKERIMPL_HH_ +#include #include "bbdo/events.hh" #include "broker.grpc.pb.h" #include "broker/core/src/broker.pb.h" @@ -115,11 +116,22 @@ class broker_impl final : public Broker::Service { grpc::Status SetSqlManagerStats(grpc::ServerContext* context [[maybe_unused]], const SqlManagerStatsOptions* request, ::google::protobuf::Empty*) override; - ::grpc::Status GetProcessStats( + grpc::Status GetProcessStats( ::grpc::ServerContext* context, const ::google::protobuf::Empty* request, ::com::centreon::common::pb_process_stat* response) override; + grpc::Status Aes256Encrypt(grpc::ServerContext* context, + const AesMessage* request, + GenericString* response) override; + grpc::Status Aes256Decrypt(grpc::ServerContext* context, + const AesMessage* request, + GenericString* response) override; + + grpc::Status GetPeers(grpc::ServerContext* context, + const ::google::protobuf::Empty* request, + PeerList* response) override; + public: void set_broker_name(const std::string& s); }; diff --git a/broker/core/inc/com/centreon/broker/compression/factory.hh b/broker/core/inc/com/centreon/broker/compression/factory.hh index b889c4e0680..b58fc2e3693 100644 --- a/broker/core/inc/com/centreon/broker/compression/factory.hh +++ b/broker/core/inc/com/centreon/broker/compression/factory.hh @@ -1,20 +1,20 @@ -/* -** Copyright 2011-2013 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2011-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #ifndef CCB_COMPRESSION_FACTORY_HH #define CCB_COMPRESSION_FACTORY_HH @@ -39,6 +39,7 @@ class factory : public io::factory { bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; io::endpoint* new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const override; diff --git a/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh b/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh index 1e44b023ba8..3e0494eacba 100644 --- a/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh +++ b/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh @@ -58,11 +58,14 @@ class endpoint { void _discard(); processing::failover* _create_failover( config::endpoint& cfg, + const std::map& global_params, std::shared_ptr mux, std::shared_ptr endp, std::list& l); - std::shared_ptr _create_endpoint(config::endpoint& cfg, - bool& is_acceptor); + std::shared_ptr _create_endpoint( + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor); void _diff_endpoints( std::map const& current, std::list const& new_endpoints, @@ -74,7 +77,8 @@ class endpoint { endpoint& operator=(const endpoint&) = delete; endpoint(const endpoint&) = delete; - void apply(std::list const& endpoints); + void apply(std::list const& endpoints, + const std::map& global_params); iterator endpoints_begin(); iterator endpoints_end(); std::timed_mutex& endpoints_mutex(); diff --git a/broker/core/inc/com/centreon/broker/config/applier/init.hh b/broker/core/inc/com/centreon/broker/config/applier/init.hh index 8e840d96159..6178ad033f9 100644 --- a/broker/core/inc/com/centreon/broker/config/applier/init.hh +++ b/broker/core/inc/com/centreon/broker/config/applier/init.hh @@ -1,40 +1,38 @@ -/* -** Copyright 2011, 2021 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2011, 2021-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #ifndef CCB_CONFIG_APPLIER_INIT_HH_ #define CCB_CONFIG_APPLIER_INIT_HH_ #include "com/centreon/broker/config/state.hh" +#include "common.pb.h" -namespace com::centreon::broker { +namespace com::centreon::broker::config::applier { -namespace config { -namespace applier { enum applier_state { not_started, initialized, finished }; extern std::atomic mode; void deinit(); -void init(const config::state& conf); -void init(size_t n_thread, +void init(const common::PeerType peer_type, const config::state& conf); +void init(const common::PeerType peer_type, + size_t n_thread, const std::string& name, size_t event_queues_total_size); -} // namespace applier -} // namespace config -} // namespace com::centreon::broker +} // namespace com::centreon::broker::config::applier #endif /* !CCB_CONFIG_APPLIER_INIT_HH_ */ diff --git a/broker/core/inc/com/centreon/broker/config/applier/state.hh b/broker/core/inc/com/centreon/broker/config/applier/state.hh index 13be6203c44..6badc7c0a24 100644 --- a/broker/core/inc/com/centreon/broker/config/applier/state.hh +++ b/broker/core/inc/com/centreon/broker/config/applier/state.hh @@ -19,15 +19,11 @@ #ifndef CCB_CONFIG_APPLIER_STATE_HH #define CCB_CONFIG_APPLIER_STATE_HH -#include - #include "com/centreon/broker/config/applier/modules.hh" #include "com/centreon/broker/config/state.hh" +#include "common.pb.h" -namespace com::centreon::broker { - -namespace config { -namespace applier { +namespace com::centreon::broker::config::applier { /** * @class state state.hh "com/centreon/broker/config/applier/state.hh" * @brief Apply a configuration. @@ -41,26 +37,69 @@ class state { uint32_t sql_slowest_queries_count = false; }; + struct peer { + uint64_t poller_id; + std::string poller_name; + std::string broker_name; + time_t connected_since; + /* Is it a broker, an engine, a map or an unknown peer? */ + common::PeerType peer_type; + /* Does the peer support extended negotiation? */ + bool extended_negotiation; + /* Does this peer need an update concerning the engine configuration? */ + bool needs_update; + /* Is this peer ready to receive data? That is to say, negotiation and engine + * configuration have been exchanged. */ + bool ready = false; + }; + private: + const common::PeerType _peer_type; std::string _cache_dir; uint32_t _poller_id; uint32_t _rpc_port; bbdo::bbdo_version _bbdo_version; std::string _poller_name; + std::string _broker_name; size_t _pool_size; + + /* In a cbmod configuration, this string contains the directory containing + * the Engine configuration. */ + std::filesystem::path _engine_config_dir; + + /* Currently, this map contains the poller configurations known by this + * instance of Broker. It is updated during neb::instance and + * bbdo::pb_engine_configuration messages. It is also used in the unified_sql + * stream when the neb::pb_instance_configuration is handled. */ + absl::flat_hash_map _engine_configuration + ABSL_GUARDED_BY(_connected_peers_m); + + /* In a Broker configuration, this object contains the configuration cache + * directory used by php. All the pollers configurations can be found there. */ + std::filesystem::path _config_cache_dir; + + /* In a Broker configuration, this object contains the pollers configurations + * known by the Broker. These directories are copied from the + * _config_cache_dir once Broker has written them to the + * storage database. 
*/ + std::filesystem::path _pollers_config_dir; + modules _modules; static stats _stats_conf; - absl::flat_hash_map _connected_pollers; - mutable std::mutex _connected_pollers_m; + /* This map is indexed by the tuple {poller_id, poller_name, broker_name}. */ + absl::flat_hash_map, peer> + _connected_peers ABSL_GUARDED_BY(_connected_peers_m); + mutable absl::Mutex _connected_peers_m; - state(const std::shared_ptr& logger); + state(common::PeerType peer_type, + const std::shared_ptr& logger); ~state() noexcept = default; public: static state& instance(); - static void load(); + static void load(common::PeerType peer_type); static void unload(); static bool loaded(); @@ -72,17 +111,47 @@ class state { bbdo::bbdo_version get_bbdo_version() const noexcept; uint32_t poller_id() const noexcept; size_t pool_size() const noexcept; + const std::string& broker_name() const noexcept; const std::string& poller_name() const noexcept; + const std::filesystem::path& engine_config_dir() const noexcept; + void set_engine_config_dir(const std::filesystem::path& dir); + const std::filesystem::path& config_cache_dir() const noexcept; + void set_config_cache_dir(const std::filesystem::path& engine_conf_dir); + const std::filesystem::path& pollers_config_dir() const noexcept; + void set_pollers_config_dir(const std::filesystem::path& pollers_conf_dir); modules& get_modules(); - void add_poller(uint64_t poller_id, const std::string& poller_name); - void remove_poller(uint64_t poller_id); - bool has_connection_from_poller(uint64_t poller_id) const; + void add_peer(uint64_t poller_id, + const std::string& poller_name, + const std::string& broker_name, + common::PeerType peer_type, + bool extended_negotiation) + ABSL_LOCKS_EXCLUDED(_connected_peers_m); + void remove_peer(uint64_t poller_id, + const std::string& poller_name, + const std::string& broker_name) + ABSL_LOCKS_EXCLUDED(_connected_peers_m); + bool has_connection_from_poller(uint64_t poller_id) const + ABSL_LOCKS_EXCLUDED(_connected_peers_m); static stats& mut_stats_conf(); static const stats& stats_conf(); + std::vector connected_peers() const + ABSL_LOCKS_EXCLUDED(_connected_peers_m); + common::PeerType peer_type() const; + std::string get_engine_conf_from_cache(uint64_t poller_id); + void set_broker_needs_update(uint64_t poller_id, + const std::string& poller_name, + const std::string& broker_name, + common::PeerType peer_type, + bool need_update) + ABSL_LOCKS_EXCLUDED(_connected_peers_m); + void set_peers_ready() ABSL_LOCKS_EXCLUDED(_connected_peers_m); + bool broker_needs_update(uint64_t poller_id, + const std::string& poller_name, + const std::string& broker_name) const; + bool broker_needs_update() const; + void set_engine_configuration(uint64_t poller_id, const std::string& conf); + std::string engine_configuration(uint64_t poller_id) const; }; -} // namespace applier -} // namespace config - -} // namespace com::centreon::broker +} // namespace com::centreon::broker::config::applier #endif // !CCB_CONFIG_APPLIER_STATE_HH diff --git a/broker/core/inc/com/centreon/broker/config/endpoint.hh b/broker/core/inc/com/centreon/broker/config/endpoint.hh index 52914ad3de7..518742b4a56 100644 --- a/broker/core/inc/com/centreon/broker/config/endpoint.hh +++ b/broker/core/inc/com/centreon/broker/config/endpoint.hh @@ -19,7 +19,6 @@ #ifndef CCB_CONFIG_ENDPOINT_HH #define CCB_CONFIG_ENDPOINT_HH -#include #include "com/centreon/broker/multiplexing/muxer.hh" @@ -43,16 +42,6 @@ class endpoint { const io_type _type; public: - endpoint() = delete; - endpoint(io_type 
way); - endpoint(const endpoint& other); - ~endpoint() noexcept = default; - endpoint& operator=(const endpoint& other); - bool operator==(const endpoint& other) const; - bool operator!=(const endpoint& other) const; - bool operator<(const endpoint& other) const; - - io_type get_io_type() const { return _type; } time_t buffering_timeout; std::list failovers; std::string name; @@ -64,6 +53,17 @@ class endpoint { std::set write_filters; bool cache_enabled; nlohmann::json cfg; + + endpoint() = delete; + endpoint(io_type way); + endpoint(const endpoint& other); + ~endpoint() noexcept = default; + endpoint& operator=(const endpoint& other); + bool operator==(const endpoint& other) const; + bool operator!=(const endpoint& other) const; + bool operator<(const endpoint& other) const; + + io_type get_io_type() const { return _type; } }; } // namespace com::centreon::broker::config diff --git a/broker/core/inc/com/centreon/broker/config/parser.hh b/broker/core/inc/com/centreon/broker/config/parser.hh index c40f2f4eb03..b25e2c31963 100644 --- a/broker/core/inc/com/centreon/broker/config/parser.hh +++ b/broker/core/inc/com/centreon/broker/config/parser.hh @@ -21,7 +21,6 @@ #include - #include "com/centreon/broker/config/state.hh" #include "com/centreon/exceptions/msg_fmt.hh" @@ -36,7 +35,8 @@ namespace com::centreon::broker::config { class parser { void _get_generic_endpoint_configuration(const nlohmann::json& elem, endpoint& e); - void _parse_endpoint(const nlohmann::json& elem, endpoint& e, + void _parse_endpoint(const nlohmann::json& elem, + endpoint& e, std::string& module); public: @@ -79,14 +79,15 @@ class parser { parser() = default; ~parser() noexcept = default; - parser(parser const&) = delete; + parser(const parser&) = delete; parser& operator=(const parser&) = delete; state parse(const std::string& file); }; template <> absl::optional parser::check_and_read( - const nlohmann::json& elem, const std::string& key); + const nlohmann::json& elem, + const std::string& key); template <> absl::optional parser::check_and_read(const nlohmann::json& elem, diff --git a/broker/core/inc/com/centreon/broker/config/state.hh b/broker/core/inc/com/centreon/broker/config/state.hh index 9165f4229e6..acb27033036 100644 --- a/broker/core/inc/com/centreon/broker/config/state.hh +++ b/broker/core/inc/com/centreon/broker/config/state.hh @@ -19,7 +19,6 @@ #ifndef CCB_CONFIG_STATE_HH #define CCB_CONFIG_STATE_HH - #include "bbdo/bbdo/bbdo_version.hh" #include "com/centreon/broker/config/endpoint.hh" #include "common/log_v2/config.hh" @@ -53,6 +52,16 @@ class state { std::string _poller_name; size_t _pool_size; + /* The directory where the engine configuration files are stored. This + * setting only makes sense for cbmod (usual value: /etc/centreon-engine) */ + std::string _engine_config_dir; + + /* The directory where php writes the pollers configurations. */ + std::string _config_cache_dir; + + /* The directory where broker stores all the pollers configurations. 
*/ + std::string _pollers_config_dir; + common::log_v2::config _log_conf; public: @@ -99,36 +108,42 @@ class state { uint16_t rpc_port(void) const noexcept; void listen_address(const std::string& listen_address) noexcept; const std::string& listen_address() const noexcept; - void broker_name(std::string const& name); + void broker_name(const std::string& name); const std::string& broker_name() const noexcept; void event_queues_total_size(uint64_t size); uint64_t event_queues_total_size() const noexcept; void set_bbdo_version(bbdo::bbdo_version v); bbdo::bbdo_version get_bbdo_version() const noexcept; - void cache_directory(std::string const& dir); - std::string const& cache_directory() const noexcept; - void command_file(std::string const& file); - std::string const& command_file() const noexcept; - void command_protocol(std::string const& prot); - std::string const& command_protocol() const noexcept; + void cache_directory(const std::string& dir); + const std::string& cache_directory() const noexcept; + void command_file(const std::string& file); + const std::string& command_file() const noexcept; + void command_protocol(const std::string& prot); + const std::string& command_protocol() const noexcept; void clear(); void add_endpoint(endpoint&& out) noexcept; std::list const& endpoints() const noexcept; void event_queue_max_size(int val) noexcept; int event_queue_max_size() const noexcept; - std::string const& module_directory() const noexcept; - void module_directory(std::string const& dir); + const std::string& module_directory() const noexcept; + void module_directory(const std::string& dir); std::list& module_list() noexcept; void add_module(std::string module); std::list const& module_list() const noexcept; std::map& params() noexcept; - std::map const& params() const noexcept; + const std::map& params() const noexcept; void poller_id(int id) noexcept; int poller_id() const noexcept; void pool_size(int size) noexcept; int pool_size() const noexcept; - void poller_name(std::string const& name); - std::string const& poller_name() const noexcept; + void poller_name(const std::string& name); + const std::string& poller_name() const noexcept; + void set_engine_config_dir(const std::string& dir); + const std::string& engine_config_dir() const noexcept; + void set_config_cache_dir(const std::string& config_cache_dir); + const std::string& config_cache_dir() const noexcept; + void set_pollers_config_dir(const std::string& pollers_config_dir); + const std::string& pollers_config_dir() const noexcept; common::log_v2::config& mut_log_conf(); const common::log_v2::config& log_conf() const; stats_exporter_conf& mut_stats_exporter(); diff --git a/broker/core/inc/com/centreon/broker/file/factory.hh b/broker/core/inc/com/centreon/broker/file/factory.hh index e0d537374cd..ac86796a926 100644 --- a/broker/core/inc/com/centreon/broker/file/factory.hh +++ b/broker/core/inc/com/centreon/broker/file/factory.hh @@ -41,12 +41,13 @@ class factory : public io::factory { bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; io::endpoint* new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const override; }; } // namespace file -} +} // namespace com::centreon::broker #endif // !CCB_FILE_FACTORY_HH diff --git a/broker/core/inc/com/centreon/broker/file/stream.hh b/broker/core/inc/com/centreon/broker/file/stream.hh index a776f1a5e22..1249e766279 100644 --- a/broker/core/inc/com/centreon/broker/file/stream.hh +++ 
b/broker/core/inc/com/centreon/broker/file/stream.hh @@ -19,7 +19,6 @@ #ifndef CCB_FILE_STREAM_HH #define CCB_FILE_STREAM_HH -#include "broker.pb.h" #include "com/centreon/broker/file/splitter.hh" #include "com/centreon/broker/io/stream.hh" #include "com/centreon/broker/stats/center.hh" diff --git a/broker/core/inc/com/centreon/broker/instance_broadcast.hh b/broker/core/inc/com/centreon/broker/instance_broadcast.hh index c613e1a311f..2cf43e981d1 100644 --- a/broker/core/inc/com/centreon/broker/instance_broadcast.hh +++ b/broker/core/inc/com/centreon/broker/instance_broadcast.hh @@ -20,7 +20,6 @@ #define CCB_INSTANCE_BROADCAST_HH #include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" @@ -63,6 +62,6 @@ class instance_broadcast : public io::data { void _internal_copy(instance_broadcast const& other); }; -} +} // namespace com::centreon::broker #endif // !CCB_INSTANCE_BROADCAST_HH diff --git a/broker/core/inc/com/centreon/broker/io/endpoint.hh b/broker/core/inc/com/centreon/broker/io/endpoint.hh index 5117d15316d..e8ce3b4dacf 100644 --- a/broker/core/inc/com/centreon/broker/io/endpoint.hh +++ b/broker/core/inc/com/centreon/broker/io/endpoint.hh @@ -19,8 +19,6 @@ #ifndef CCB_IO_ENDPOINT_HH #define CCB_IO_ENDPOINT_HH -#include - #include "com/centreon/broker/io/stream.hh" #include "com/centreon/broker/multiplexing/muxer_filter.hh" diff --git a/broker/core/inc/com/centreon/broker/io/factory.hh b/broker/core/inc/com/centreon/broker/io/factory.hh index b6cb3648715..6489116ac55 100644 --- a/broker/core/inc/com/centreon/broker/io/factory.hh +++ b/broker/core/inc/com/centreon/broker/io/factory.hh @@ -53,11 +53,14 @@ class factory { */ virtual bool has_endpoint(config::endpoint& cfg, io::extension* ext) = 0; virtual endpoint* new_endpoint( - config::endpoint& cfg, bool& is_acceptor, + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const = 0; virtual std::shared_ptr new_stream( - std::shared_ptr substream, bool is_acceptor, + std::shared_ptr substream, + bool is_acceptor, const std::unordered_map& options); static bool direct_grpc_serialized(const config::endpoint& cfg); diff --git a/broker/core/inc/com/centreon/broker/io/raw.hh b/broker/core/inc/com/centreon/broker/io/raw.hh index d5f91c6b352..3a740d9e2a9 100644 --- a/broker/core/inc/com/centreon/broker/io/raw.hh +++ b/broker/core/inc/com/centreon/broker/io/raw.hh @@ -19,7 +19,6 @@ #ifndef CCB_IO_RAW_HH #define CCB_IO_RAW_HH -#include "bbdo/events.hh" #include "com/centreon/broker/io/data.hh" #include "com/centreon/broker/io/events.hh" diff --git a/broker/core/inc/com/centreon/broker/misc/fifo_client.hh b/broker/core/inc/com/centreon/broker/misc/fifo_client.hh index dc5f735bd08..405d204d5b9 100644 --- a/broker/core/inc/com/centreon/broker/misc/fifo_client.hh +++ b/broker/core/inc/com/centreon/broker/misc/fifo_client.hh @@ -19,7 +19,6 @@ #ifndef _CCB_MISC_FIFO_CLIENT_HH #define _CCB_MISC_FIFO_CLIENT_HH - namespace com::centreon::broker { namespace misc { @@ -56,8 +55,6 @@ class fifo_client { */ int write(const std::string& buffer) { int retval = 0; -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wimplicit-fallthrough=" switch (_step) { case step::OPEN: _fd = open(_filename.c_str(), O_WRONLY | O_NONBLOCK); @@ -70,6 +67,7 @@ class fifo_client { // No break here, we continue with write _step = step::WRITE; } + [[fallthrough]]; case step::WRITE: 
if (::write(_fd, buffer.c_str(), buffer.size()) != static_cast(buffer.size())) { @@ -81,12 +79,11 @@ class fifo_client { } else retval = 0; } -#pragma GCC diagnostic pop return retval; } }; } // namespace misc -} +} // namespace com::centreon::broker #endif /* !_CCB_MISC_FIFO_CLIENT */ diff --git a/broker/core/inc/com/centreon/broker/misc/string.hh b/broker/core/inc/com/centreon/broker/misc/string.hh index 03c234bdcaf..74f0cf919e7 100644 --- a/broker/core/inc/com/centreon/broker/misc/string.hh +++ b/broker/core/inc/com/centreon/broker/misc/string.hh @@ -35,8 +35,7 @@ inline std::string& replace(std::string& str, return (str); } -std::string& trim(std::string& str) throw(); -std::string base64_encode(std::string const& str); +std::string& trim(std::string& str) noexcept; bool is_number(const std::string& s); std::string escape(const std::string& str, size_t s); diff --git a/broker/core/inc/com/centreon/broker/persistent_file.hh b/broker/core/inc/com/centreon/broker/persistent_file.hh index 24bc81de876..1f82dbe8a62 100644 --- a/broker/core/inc/com/centreon/broker/persistent_file.hh +++ b/broker/core/inc/com/centreon/broker/persistent_file.hh @@ -20,7 +20,6 @@ #define CCB_PERSISTENT_FILE_HH #include "com/centreon/broker/file/stream.hh" -#include "com/centreon/broker/io/stream.hh" namespace com::centreon::broker { diff --git a/broker/core/inc/com/centreon/broker/processing/failover.hh b/broker/core/inc/com/centreon/broker/processing/failover.hh index 294580ba8f4..42b8b99b6e6 100644 --- a/broker/core/inc/com/centreon/broker/processing/failover.hh +++ b/broker/core/inc/com/centreon/broker/processing/failover.hh @@ -21,11 +21,9 @@ #include #include "com/centreon/broker/io/endpoint.hh" -#include "com/centreon/broker/io/stream.hh" #include "com/centreon/broker/multiplexing/muxer.hh" #include "com/centreon/broker/processing/acceptor.hh" -#include "com/centreon/broker/processing/endpoint.hh" namespace com::centreon::broker { diff --git a/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh b/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh index 82887bce2af..82eb829f886 100644 --- a/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh +++ b/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh @@ -19,9 +19,7 @@ #ifndef CCB_MULTIPLEXING_MUXER_HH #define CCB_MULTIPLEXING_MUXER_HH -#include #include -#include #include "com/centreon/broker/multiplexing/engine.hh" #include "com/centreon/broker/multiplexing/muxer_filter.hh" diff --git a/broker/core/multiplexing/src/muxer.cc b/broker/core/multiplexing/src/muxer.cc index 2c3250ea32a..218ac2d5dda 100644 --- a/broker/core/multiplexing/src/muxer.cc +++ b/broker/core/multiplexing/src/muxer.cc @@ -17,7 +17,6 @@ */ #include "com/centreon/broker/multiplexing/muxer.hh" -#include #include #include @@ -28,7 +27,6 @@ #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/misc/misc.hh" #include "com/centreon/broker/misc/string.hh" -#include "com/centreon/broker/multiplexing/engine.hh" #include "com/centreon/common/pool.hh" #include "com/centreon/common/time.hh" #include "common/log_v2/log_v2.hh" @@ -325,7 +323,8 @@ void muxer::_execute_reader_if_needed() { if (to_call) { std::vector> to_fill; to_fill.reserve(_events_size); - bool still_events_to_read = read(to_fill, _events_size); + bool still_events_to_read [[maybe_unused]] = + read(to_fill, _events_size); uint32_t written = to_call->on_events(to_fill); if (written > 0) ack_events(written); diff --git 
a/broker/core/multiplexing/test/engine/start_stop.cc b/broker/core/multiplexing/test/engine/start_stop.cc index cdeca4860f3..091350299ce 100644 --- a/broker/core/multiplexing/test/engine/start_stop.cc +++ b/broker/core/multiplexing/test/engine/start_stop.cc @@ -33,11 +33,10 @@ const std::string MSG2("foo bar baz"); const std::string MSG3("last message with qux"); const std::string MSG4("no this is the last message"); - class StartStop : public testing::Test { public: void SetUp() override { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } void TearDown() override { config::applier::deinit(); } diff --git a/broker/core/multiplexing/test/muxer/read.cc b/broker/core/multiplexing/test/muxer/read.cc index ef87d83990a..fd99f07914e 100644 --- a/broker/core/multiplexing/test/muxer/read.cc +++ b/broker/core/multiplexing/test/muxer/read.cc @@ -33,7 +33,7 @@ class MultiplexingMuxerRead : public ::testing::Test { public: void SetUp() override { try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); stats::center::load(); } catch (std::exception const& e) { (void)e; diff --git a/broker/core/multiplexing/test/publisher/read.cc b/broker/core/multiplexing/test/publisher/read.cc index 88c92e89615..eecfc6fab6a 100644 --- a/broker/core/multiplexing/test/publisher/read.cc +++ b/broker/core/multiplexing/test/publisher/read.cc @@ -26,7 +26,7 @@ using namespace com::centreon::broker; class PublisherRead : public testing::Test { public: void SetUp() override { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } void TearDown() override { config::applier::deinit(); } diff --git a/broker/core/multiplexing/test/publisher/write.cc b/broker/core/multiplexing/test/publisher/write.cc index b3bea3898b4..a4ef01ddb0c 100644 --- a/broker/core/multiplexing/test/publisher/write.cc +++ b/broker/core/multiplexing/test/publisher/write.cc @@ -34,7 +34,7 @@ const std::string MSG2("foo bar baz qux"); class PublisherWrite : public testing::Test { public: void SetUp() override { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } void TearDown() override { config::applier::deinit(); } diff --git a/broker/core/precomp_inc/precomp.hpp b/broker/core/precomp_inc/precomp.hpp index d2bcefda020..43c6bfdb80a 100644 --- a/broker/core/precomp_inc/precomp.hpp +++ b/broker/core/precomp_inc/precomp.hpp @@ -76,5 +76,6 @@ namespace asio = boost::asio; #include +#include #endif diff --git a/broker/core/sql/CMakeLists.txt b/broker/core/sql/CMakeLists.txt index 2d5601cca45..fd17d0191f1 100644 --- a/broker/core/sql/CMakeLists.txt +++ b/broker/core/sql/CMakeLists.txt @@ -1,5 +1,5 @@ # -# Copyright 2023 Centreon +# Copyright 2023-2024 Centreon # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of @@ -22,6 +22,8 @@ set(TESTS_DIR "${PROJECT_SOURCE_DIR}/core/sql/test") +include_directories("${CMAKE_SOURCE_DIR}/common/http/inc") + add_custom_command( DEPENDS ${PROJECT_SOURCE_DIR}/core/sql/src/table_max_size.py "${CMAKE_SOURCE_DIR}/resources/centreon.sql" @@ -79,9 +81,10 @@ set(SOURCES # Static libraries. 
add_library(sql STATIC ${SOURCES}) +add_dependencies(sql pb_common_lib) set_target_properties(sql PROPERTIES COMPILE_FLAGS "-fPIC") target_precompile_headers(sql PRIVATE ../precomp_inc/precomp.hpp) -target_link_libraries(sql spdlog::spdlog mariadb) +target_link_libraries(sql spdlog::spdlog ctnvault centreon_http mariadb) # if(WITH_TESTING) set( TESTS_SOURCES ${TESTS_SOURCES} # ${TESTS_DIR}/engine/start_stop.cc ${TESTS_DIR}/muxer/read.cc diff --git a/broker/core/sql/inc/com/centreon/broker/sql/database_config.hh b/broker/core/sql/inc/com/centreon/broker/sql/database_config.hh index d68df108294..8311be0f46a 100644 --- a/broker/core/sql/inc/com/centreon/broker/sql/database_config.hh +++ b/broker/core/sql/inc/com/centreon/broker/sql/database_config.hh @@ -41,6 +41,24 @@ class endpoint; * queries_per_transaction and category can share the same connection */ class database_config { + std::string _type; + std::string _host; + std::string _socket; + unsigned short _port; + std::string _user; + std::string _password; + std::string _name; + int _queries_per_transaction; + bool _check_replication; + int _connections_count; + unsigned _max_commit_delay; + unsigned _category; + // where mariadb will find extension such as caching_sha2_password.so + std::string _extension_directory; + std::shared_ptr _config_logger; + + void _internal_copy(database_config const& other); + public: enum category { SHARED = 0, DATA_BIN_LOGS = 1 }; @@ -56,9 +74,10 @@ class database_config { bool check_replication = true, int connections_count = 1, unsigned max_commit_delay = 5); - database_config(config::endpoint const& cfg); - database_config(database_config const& other); - ~database_config(); + database_config(config::endpoint const& cfg, + const std::map& global_params); + database_config(const database_config& other); + ~database_config() noexcept = default; database_config& operator=(database_config const& other); bool operator==(database_config const& other) const; bool operator!=(const database_config& other) const; @@ -95,24 +114,6 @@ class database_config { } database_config auto_commit_conf() const; - - private: - void _internal_copy(database_config const& other); - - std::string _type; - std::string _host; - std::string _socket; - unsigned short _port; - std::string _user; - std::string _password; - std::string _name; - int _queries_per_transaction; - bool _check_replication; - int _connections_count; - unsigned _max_commit_delay; - unsigned _category; - // where mariadb will find extension such as caching_sha2_password.so - std::string _extension_directory; }; std::ostream& operator<<(std::ostream& s, const database_config cfg); diff --git a/broker/core/sql/inc/com/centreon/broker/sql/mysql.hh b/broker/core/sql/inc/com/centreon/broker/sql/mysql.hh index 1f8c370e9b2..397a3c11126 100644 --- a/broker/core/sql/inc/com/centreon/broker/sql/mysql.hh +++ b/broker/core/sql/inc/com/centreon/broker/sql/mysql.hh @@ -49,8 +49,8 @@ class mysql { ~mysql(); void prepare_statement(const database::mysql_stmt_base& stmt); database::mysql_stmt prepare_query( - std::string const& query, - mysql_bind_mapping const& bind_mapping = mysql_bind_mapping()); + const std::string& query, + const mysql_bind_mapping& bind_mapping = mysql_bind_mapping()); void commit(int thread_id = -1); int run_query(std::string const& query, my_error::code ec = my_error::empty, diff --git a/broker/core/sql/inc/com/centreon/broker/sql/mysql_error.hh b/broker/core/sql/inc/com/centreon/broker/sql/mysql_error.hh index d7af4120bc1..68dd4ddb4b4 100644 --- 
a/broker/core/sql/inc/com/centreon/broker/sql/mysql_error.hh +++ b/broker/core/sql/inc/com/centreon/broker/sql/mysql_error.hh @@ -110,6 +110,11 @@ class mysql_error { delete_resources_tags = 74, clean_resources = 75, delete_poller = 76, + update_hosts_enabled = 77, + update_services_enabled = 78, + update_hosts_resources_enabled = 79, + update_services_resources_enabled = 80, + insert_update_agent_information = 81 }; static constexpr const char* msg[]{ @@ -191,7 +196,11 @@ class mysql_error { "could not delete entry in resources_tags table: ", "could not clean the resources table: ", "could not delete poller: ", - }; + "could not update the enabled flag in hosts table: ", + "could not update the enabled flag in services table: ", + "could not update the enabled flag in resources table for host: ", + "could not update the enabled flag in resources table for service: ", + "could not insert or update agent_information table: "}; mysql_error() : _active(false) {} mysql_error(mysql_error const& other) = delete; diff --git a/broker/core/sql/inc/com/centreon/broker/sql/mysql_multi_insert.hh b/broker/core/sql/inc/com/centreon/broker/sql/mysql_multi_insert.hh index 3c6dc47c825..56cd072c313 100644 --- a/broker/core/sql/inc/com/centreon/broker/sql/mysql_multi_insert.hh +++ b/broker/core/sql/inc/com/centreon/broker/sql/mysql_multi_insert.hh @@ -20,9 +20,6 @@ #define CCB_MYSQL_MULTI_INSERT_HH #include "com/centreon/broker/sql/mysql.hh" -#include "com/centreon/broker/sql/mysql_bulk_bind.hh" -#include "com/centreon/broker/sql/mysql_bulk_stmt.hh" -#include "com/centreon/broker/sql/mysql_stmt.hh" namespace com::centreon::broker { diff --git a/broker/core/sql/src/database_config.cc b/broker/core/sql/src/database_config.cc index 6adf7ff3684..4200515475b 100644 --- a/broker/core/sql/src/database_config.cc +++ b/broker/core/sql/src/database_config.cc @@ -17,13 +17,19 @@ */ #include "com/centreon/broker/sql/database_config.hh" - -#include "com/centreon/broker/config/parser.hh" +#include +#include +#include "com/centreon/broker/config/endpoint.hh" #include "com/centreon/broker/exceptions/config.hh" +#include "com/centreon/common/http/http_config.hh" +#include "com/centreon/common/http/https_connection.hh" +#include "com/centreon/common/pool.hh" #include "common/log_v2/log_v2.hh" +#include "common/vault/vault_access.hh" using namespace com::centreon::broker; using com::centreon::common::log_v2::log_v2; +using namespace com::centreon::common::http; namespace com::centreon::broker { std::ostream& operator<<(std::ostream& s, const database_config cfg) { @@ -51,7 +57,8 @@ database_config::database_config() _check_replication(true), _connections_count(1), _category(SHARED), - _extension_directory(DEFAULT_MARIADB_EXTENSION_DIR) {} + _extension_directory(DEFAULT_MARIADB_EXTENSION_DIR), + _config_logger{log_v2::instance().get(log_v2::CONFIG)} {} /** * Constructor. @@ -93,54 +100,92 @@ database_config::database_config(const std::string& type, _connections_count(connections_count), _max_commit_delay(max_commit_delay), _category(SHARED), - _extension_directory(DEFAULT_MARIADB_EXTENSION_DIR) {} + _extension_directory(DEFAULT_MARIADB_EXTENSION_DIR), + _config_logger{log_v2::instance().get(log_v2::CONFIG)} {} /** * Build a database configuration from a configuration set. * * @param[in] cfg Endpoint configuration. 
*/ -database_config::database_config(config::endpoint const& cfg) - : _extension_directory(DEFAULT_MARIADB_EXTENSION_DIR) { - std::map::const_iterator it, end; - end = cfg.params.end(); +database_config::database_config( + const config::endpoint& cfg, + const std::map& global_params) + : _extension_directory{DEFAULT_MARIADB_EXTENSION_DIR}, + _config_logger{log_v2::instance().get(log_v2::CONFIG)} { + std::string env_file; + { + auto found = global_params.find("env_file"); + if (found != global_params.end()) { + env_file = found->second; + _config_logger->debug("Env file '{}' used.", env_file); + } else { + env_file = "/usr/share/centreon/.env"; + _config_logger->debug( + "No env_file provided in Broker configuration, default one used."); + } + } + std::string vault_file; + { + auto found = global_params.find("vault_configuration"); + if (found != global_params.end()) { + vault_file = found->second; + _config_logger->debug("Vault configuration file '{}' used.", vault_file); + } else { + _config_logger->debug( + "No vault configuration file provided in Broker configuration."); + } + } + bool verify_peer = true; + { + auto found = global_params.find("verify_vault_peer"); + if (found != global_params.end()) { + if (absl::SimpleAtob(found->second, &verify_peer)) { + _config_logger->debug("Verify Vault peer {}.", + verify_peer ? "enabled" : "disabled"); + } else { + _config_logger->debug("Verification of Vault peer enabled by default."); + verify_peer = true; + } + } else { + _config_logger->debug("Verification of Vault peer enabled by default."); + } + } // db_type - it = cfg.params.find("db_type"); - if (it != end) - _type = it->second; + auto found = cfg.params.find("db_type"); + if (found != cfg.params.end()) + _type = found->second; else throw exceptions::config("no 'db_type' defined for endpoint '{}'", cfg.name); // db_host - it = cfg.params.find("db_host"); - if (it != end) - _host = it->second; + found = cfg.params.find("db_host"); + if (found != cfg.params.end()) + _host = found->second; else _host = "localhost"; // db_socket if (_host == "localhost") { - it = cfg.params.find("db_socket"); - if (it != end) - _socket = it->second; + found = cfg.params.find("db_socket"); + if (found != cfg.params.end()) + _socket = found->second; else _socket = MYSQL_SOCKET; } else _socket = ""; // db_port - it = cfg.params.find("db_port"); - auto logger_config = log_v2::instance().get(log_v2::CONFIG); - if (it != end) { + found = cfg.params.find("db_port"); + if (found != cfg.params.end()) { uint32_t port; - if (!absl::SimpleAtoi(it->second, &port)) { - logger_config->error( + if (!absl::SimpleAtoi(found->second, &port)) { + _config_logger->error( "In the database configuration, 'db_port' should be a number, " - "and " - "not '{}'", - it->second); + "and not '{}'", + found->second); _port = 0; } else _port = port; @@ -148,42 +193,54 @@ database_config::database_config(config::endpoint const& cfg) _port = 0; // db_user - it = cfg.params.find("db_user"); - if (it != end) - _user = it->second; + found = cfg.params.find("db_user"); + if (found != cfg.params.end()) + _user = found->second; // db_password - it = cfg.params.find("db_password"); - if (it != end) - _password = it->second; + found = cfg.params.find("db_password"); + if (found != cfg.params.end()) + _password = found->second; + + try { + common::vault::vault_access vault(env_file, vault_file, verify_peer, + _config_logger); + _password = vault.decrypt(_password); + _config_logger->info("Database password get from Vault configuration"); + } catch 
(const std::exception& e) { + constexpr std::string_view password_prefix("secret::hashicorp_vault::"); + std::string_view password_header(_password.data(), password_prefix.size()); + if (password_header == password_prefix) + _config_logger->error("No usable Vault configuration: {}", e.what()); + } // db_name - it = cfg.params.find("db_name"); - if (it != end) - _name = it->second; + found = cfg.params.find("db_name"); + if (found != cfg.params.end()) + _name = found->second; else throw exceptions::config("no 'db_name' defined for endpoint '{}'", cfg.name); // queries_per_transaction - it = cfg.params.find("queries_per_transaction"); - if (it != end) { - if (!absl::SimpleAtoi(it->second, &_queries_per_transaction)) { - logger_config->error( + found = cfg.params.find("queries_per_transaction"); + if (found != cfg.params.end()) { + if (!absl::SimpleAtoi(found->second, &_queries_per_transaction)) { + _config_logger->error( "queries_per_transaction is a number but must be given as a " "string. " "Unable to read the value '{}' - value 2000 taken by default.", - it->second); + found->second); _queries_per_transaction = 2000; } } else _queries_per_transaction = 2000; // check_replication - it = cfg.params.find("check_replication"); - if (it != end) { - if (!absl::SimpleAtob(it->second, &_check_replication)) { - logger_config->error( + found = cfg.params.find("check_replication"); + if (found != cfg.params.end()) { + if (!absl::SimpleAtob(found->second, &_check_replication)) { + _config_logger->error( "check_replication is a string containing a boolean. If not " "specified, it will be considered as \"true\"."); _check_replication = true; @@ -192,10 +249,10 @@ database_config::database_config(config::endpoint const& cfg) _check_replication = true; // connections_count - it = cfg.params.find("connections_count"); - if (it != end) { - if (!absl::SimpleAtoi(it->second, &_connections_count)) { - logger_config->error( + found = cfg.params.find("connections_count"); + if (found != cfg.params.end()) { + if (!absl::SimpleAtoi(found->second, &_connections_count)) { + _config_logger->error( "connections_count is a string " "containing an integer. If not " "specified, it will be considered as " @@ -204,10 +261,10 @@ database_config::database_config(config::endpoint const& cfg) } } else _connections_count = 1; - it = cfg.params.find("max_commit_delay"); - if (it != end) { - if (!absl::SimpleAtoi(it->second, &_max_commit_delay)) { - logger_config->error( + found = cfg.params.find("max_commit_delay"); + if (found != cfg.params.end()) { + if (!absl::SimpleAtoi(found->second, &_max_commit_delay)) { + _config_logger->error( "max_commit_delay is a string " "containing an integer. If not " "specified, it will be considered as " @@ -217,9 +274,9 @@ database_config::database_config(config::endpoint const& cfg) } else _max_commit_delay = 5; - it = cfg.params.find("extension_directory"); - if (it != end) { - _extension_directory = it->second; + found = cfg.params.find("extension_directory"); + if (found != cfg.params.end()) { + _extension_directory = found->second; } } @@ -228,15 +285,10 @@ database_config::database_config(config::endpoint const& cfg) * * @param[in] other Object to copy. */ -database_config::database_config(database_config const& other) { +database_config::database_config(const database_config& other) { _internal_copy(other); } -/** - * Destructor. - */ -database_config::~database_config() {} - /** * Assignment operator. * @@ -244,7 +296,7 @@ database_config::~database_config() {} * * @return This object. 
*/ -database_config& database_config::operator=(database_config const& other) { +database_config& database_config::operator=(const database_config& other) { if (this != &other) _internal_copy(other); return *this; @@ -280,8 +332,8 @@ bool database_config::operator==(database_config const& other) const { _host, other._host); else if (_socket != other._socket) logger->debug( - "database configurations do not match because of their sockets: {} " - "!= {}", + "database configurations do not match because of their sockets: " + "{} != {}", _socket, other._socket); else if (_port != other._port) logger->debug( @@ -295,8 +347,8 @@ bool database_config::operator==(database_config const& other) const { _user, other._user); else if (_password != other._password) logger->debug( - "database configurations do not match because of their passwords: " - "{} != {}", + "database configurations do not match because of their " + "passwords: {} != {}", _password, other._password); else if (_name != other._name) logger->debug( @@ -305,13 +357,13 @@ bool database_config::operator==(database_config const& other) const { _name, other._name); else if (_queries_per_transaction != other._queries_per_transaction) logger->debug( - "database configurations do not match because of their queries per " - "transactions: {} != {}", + "database configurations do not match because of their queries " + "per transactions: {} != {}", _queries_per_transaction, other._queries_per_transaction); else if (_connections_count != other._connections_count) logger->debug( - "database configurations do not match because of their connections " - "counts: {} != {}", + "database configurations do not match because of their " + "connections counts: {} != {}", _connections_count, other._connections_count); else if (_max_commit_delay != other._max_commit_delay) logger->debug( diff --git a/broker/core/sql/src/mysql.cc b/broker/core/sql/src/mysql.cc index 7e35d114642..3db489f5d15 100644 --- a/broker/core/sql/src/mysql.cc +++ b/broker/core/sql/src/mysql.cc @@ -288,8 +288,8 @@ void mysql::prepare_statement(const mysql_stmt_base& stmt) { * * @return A mysql_stmt prepared and ready to use. 
*/ -mysql_stmt mysql::prepare_query(std::string const& query, - mysql_bind_mapping const& bind_mapping) { +mysql_stmt mysql::prepare_query(const std::string& query, + const mysql_bind_mapping& bind_mapping) { mysql_stmt retval(query, bind_mapping); prepare_statement(retval); diff --git a/broker/core/sql/src/mysql_connection.cc b/broker/core/sql/src/mysql_connection.cc index 5c6d2548bba..13f5fd1be7c 100644 --- a/broker/core/sql/src/mysql_connection.cc +++ b/broker/core/sql/src/mysql_connection.cc @@ -16,6 +16,7 @@ * For more information : contact@centreon.com */ #include +#include #include "com/centreon/broker/config/applier/init.hh" #include "com/centreon/broker/misc/misc.hh" @@ -460,18 +461,26 @@ void mysql_connection::_statement(mysql_task* t) { "mysql_connection {:p}: execute statement {:x} attempt {}: {}", static_cast(this), task->statement_id, attempts, query); if (mysql_stmt_execute(stmt)) { - std::string err_msg( - fmt::format("{} errno={} {}", mysql_error::msg[task->error_code], - ::mysql_errno(_conn), ::mysql_stmt_error(stmt))); - SPDLOG_LOGGER_ERROR(_logger, - "connection fail to execute statement {:p}: {}", - static_cast(this), err_msg); - if (_server_error(::mysql_stmt_errno(stmt))) { + int32_t err_code = ::mysql_stmt_errno(stmt); + std::string err_msg(fmt::format("{} errno={} {}", + mysql_error::msg[task->error_code], + err_code, ::mysql_stmt_error(stmt))); + if (err_code == 0) { + SPDLOG_LOGGER_ERROR(_logger, + "mysql_connection: errno=0, so we simulate a " + "server error CR_SERVER_LOST"); + err_code = CR_SERVER_LOST; + } else { + SPDLOG_LOGGER_ERROR(_logger, + "connection fail to execute statement {:p}: {}", + static_cast(this), err_msg); + } + if (_server_error(err_code)) { set_error_message(err_msg); break; } - if (mysql_stmt_errno(stmt) != 1213 && - mysql_stmt_errno(stmt) != 1205) // Dead Lock error + if (err_code != ER_LOCK_DEADLOCK && + err_code != ER_LOCK_WAIT_TIMEOUT) // Dead Lock error attempts = MAX_ATTEMPTS; if (mysql_commit(_conn)) { diff --git a/broker/core/sql/src/mysql_multi_insert.cc b/broker/core/sql/src/mysql_multi_insert.cc index cafc020e386..7d375cb82cd 100644 --- a/broker/core/sql/src/mysql_multi_insert.cc +++ b/broker/core/sql/src/mysql_multi_insert.cc @@ -132,7 +132,11 @@ void bulk_or_multi::execute(mysql& connexion, my_error::code ec, int thread_id) { if (_bulk_stmt) { - if (!_bulk_bind->empty()) { + /* If the database connection is lost, we can have this issue */ + if (!_bulk_bind) { + _bulk_bind = _bulk_stmt->create_bind(); + _bulk_bind->reserve(_bulk_row); + } else if (!_bulk_bind->empty()) { _bulk_stmt->set_bind(std::move(_bulk_bind)); connexion.run_statement(*_bulk_stmt, ec, thread_id); _bulk_bind = _bulk_stmt->create_bind(); diff --git a/broker/core/src/bbdo/factory.cc b/broker/core/src/bbdo/factory.cc index aa52e70da2f..cd3b28fa793 100644 --- a/broker/core/src/bbdo/factory.cc +++ b/broker/core/src/bbdo/factory.cc @@ -43,8 +43,7 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { /* Legacy case: 'protocol' is set in the object and should be equal to "bbdo" */ bool bbdo_protocol_found = false; - std::map::const_iterator it{ - cfg.params.find("protocol")}; + auto it = cfg.params.find("protocol"); bbdo_protocol_found = (it != cfg.params.end() && it->second == "bbdo"); /* New case: with bbdo_client and bbdo_server, bbdo is automatic. 
*/ @@ -74,18 +73,16 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params [[maybe_unused]], bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache [[maybe_unused]]) const { // Return value. std::unique_ptr retval; auto logger = log_v2::instance().get(log_v2::CORE); - std::map::const_iterator it; // Coarse endpoint ? bool coarse = false; - it = cfg.params.find("coarse"); + auto it = cfg.params.find("coarse"); if (it != cfg.params.end()) { if (!absl::SimpleAtob(it->second, &coarse)) { logger->error( diff --git a/broker/core/src/bbdo/internal.cc b/broker/core/src/bbdo/internal.cc index 4278ea00ad9..6f963d527a0 100644 --- a/broker/core/src/bbdo/internal.cc +++ b/broker/core/src/bbdo/internal.cc @@ -56,6 +56,9 @@ void bbdo::load() { &bbdo::pb_stop::operations); e.register_event(make_type(io::local, local::de_pb_stop), "LocStop", &local::pb_stop::operations); + e.register_event(make_type(io::bbdo, bbdo::de_pb_engine_configuration), + "EngineConfiguration", + &bbdo::pb_engine_configuration::operations); // Register BBDO protocol. io::protocols::instance().reg("BBDO", std::make_shared(), 7, diff --git a/broker/core/src/bbdo/stream.cc b/broker/core/src/bbdo/stream.cc index f4fb8ab754f..0404213c649 100644 --- a/broker/core/src/bbdo/stream.cc +++ b/broker/core/src/bbdo/stream.cc @@ -21,21 +21,16 @@ #include #include -#include -#include - -#include "bbdo/bbdo.pb.h" #include "bbdo/bbdo/ack.hh" #include "bbdo/bbdo/stop.hh" #include "bbdo/bbdo/version_response.hh" -#include "com/centreon/broker/bbdo/internal.hh" #include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/exceptions/timeout.hh" #include "com/centreon/broker/io/protocols.hh" -#include "com/centreon/broker/io/raw.hh" #include "com/centreon/broker/misc/misc.hh" -#include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/multiplexing/publisher.hh" +#include "com/centreon/broker/neb/internal.hh" +#include "com/centreon/common/file.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -633,7 +628,9 @@ int32_t stream::stop() { /* We return the number of events handled by our stream. 
*/ int32_t retval = _acknowledged_events; _acknowledged_events = 0; - config::applier::state::instance().remove_poller(_poller_id); + if (_poller_id && !_broker_name.empty() && !_poller_name.empty()) + config::applier::state::instance().remove_peer(_poller_id, _poller_name, + _broker_name); return retval; } @@ -757,14 +754,25 @@ void stream::negotiate(stream::negotiation_type neg) { _write(welcome_packet); } else { auto welcome{std::make_shared()}; - welcome->mut_obj().mutable_version()->set_major(_bbdo_version.major_v); - welcome->mut_obj().mutable_version()->set_minor(_bbdo_version.minor_v); - welcome->mut_obj().mutable_version()->set_patch(_bbdo_version.patch); - welcome->mut_obj().set_extensions(extensions); - welcome->mut_obj().set_poller_id( - config::applier::state::instance().poller_id()); - welcome->mut_obj().set_poller_name( - config::applier::state::instance().poller_name()); + auto& obj = welcome->mut_obj(); + obj.mutable_version()->set_major(_bbdo_version.major_v); + obj.mutable_version()->set_minor(_bbdo_version.minor_v); + obj.mutable_version()->set_patch(_bbdo_version.patch); + obj.set_extensions(extensions); + obj.set_poller_id(config::applier::state::instance().poller_id()); + obj.set_poller_name(config::applier::state::instance().poller_name()); + obj.set_broker_name(config::applier::state::instance().broker_name()); + obj.set_peer_type(config::applier::state::instance().peer_type()); + /* I know I'm Engine, and I have access to the configuration. */ + if (!config::applier::state::instance().engine_config_dir().empty()) + obj.set_extended_negotiation(true); + /* I know I'm Broker, and I have access to the php cache configuration + * directory. */ + else if (!config::applier::state::instance().config_cache_dir().empty()) + obj.set_extended_negotiation(true); + /* I don't know what I am. */ + else + obj.set_extended_negotiation(false); _write(welcome); } } @@ -801,6 +809,8 @@ void stream::negotiate(stream::negotiation_type neg) { } std::string peer_extensions; + _extended_negotiation = false; + if (d->type() == version_response::static_type()) { std::shared_ptr v( std::static_pointer_cast(d)); @@ -868,14 +878,25 @@ void stream::negotiate(stream::negotiation_type neg) { /* if _negotiate, we send all the extensions we would like to have, * otherwise we only send the mandatory extensions */ auto welcome(std::make_shared()); - welcome->mut_obj().mutable_version()->set_major(_bbdo_version.major_v); - welcome->mut_obj().mutable_version()->set_minor(_bbdo_version.minor_v); - welcome->mut_obj().mutable_version()->set_patch(_bbdo_version.patch); - welcome->mut_obj().set_extensions(extensions); - welcome->mut_obj().set_poller_id( - config::applier::state::instance().poller_id()); - welcome->mut_obj().set_poller_name( - config::applier::state::instance().poller_name()); + auto& obj = welcome->mut_obj(); + obj.mutable_version()->set_major(_bbdo_version.major_v); + obj.mutable_version()->set_minor(_bbdo_version.minor_v); + obj.mutable_version()->set_patch(_bbdo_version.patch); + obj.set_extensions(extensions); + obj.set_poller_id(config::applier::state::instance().poller_id()); + obj.set_poller_name(config::applier::state::instance().poller_name()); + obj.set_broker_name(config::applier::state::instance().broker_name()); + obj.set_peer_type(config::applier::state::instance().peer_type()); + /* I know I'm Engine, and I have access to the configuration directory. 
*/ + if (!config::applier::state::instance().engine_config_dir().empty()) + obj.set_extended_negotiation(true); + /* I know I'm Broker, and I have access to the php cache configuration + * directory. */ + else if (!config::applier::state::instance().config_cache_dir().empty()) + obj.set_extended_negotiation(true); + /* I don't have access to any configuration directory. */ + else + obj.set_extended_negotiation(false); _write(welcome); _substream->flush(); @@ -883,6 +904,14 @@ void stream::negotiate(stream::negotiation_type neg) { peer_extensions = w->obj().extensions(); _poller_id = w->obj().poller_id(); _poller_name = w->obj().poller_name(); + _broker_name = w->obj().broker_name(); + + _peer_type = w->obj().peer_type(); + if (_peer_type != common::UNKNOWN) { + /* We are in the bbdo stream, _poller_id, _broker_name, + * _extended_negotiation are informations about the peer, not us. */ + _extended_negotiation = true; + } } // Negotiation. @@ -949,7 +978,12 @@ void stream::negotiate(stream::negotiation_type neg) { // Stream has now negotiated. _negotiated = true; - config::applier::state::instance().add_poller(_poller_id, _poller_name); + /* With old BBDO, we don't have poller_id nor poller name available. */ + if (_poller_id > 0 && !_broker_name.empty()) { + config::applier::state::instance().add_peer(_poller_id, _poller_name, + _broker_name, _peer_type, + _extended_negotiation); + } SPDLOG_LOGGER_TRACE(_logger, "Negotiation done."); } @@ -963,6 +997,174 @@ std::list stream::get_running_config() { return retval; } +/** + * @brief Handle a BBDO event. Events of category io::bbdo are the guardians of + * BBDO messages. These messages are used by the protocol itself and are always + * prioritized. + * + * @param d The event to handle. + */ +void stream::_handle_bbdo_event(const std::shared_ptr& d) { + switch (d->type()) { + case version_response::static_type(): { + auto version(std::static_pointer_cast(d)); + if (version->bbdo_major != _bbdo_version.major_v) { + SPDLOG_LOGGER_ERROR( + _logger, + "BBDO: peer is using protocol version {}.{}.{}, whereas we're " + "using protocol version {}.{}.{}", + version->bbdo_major, version->bbdo_minor, version->bbdo_patch, + _bbdo_version.major_v, _bbdo_version.minor_v, _bbdo_version.patch); + throw msg_fmt( + "BBDO: peer is using protocol version {}.{}.{} " + "whereas we're using protocol version {}.{}.{}", + version->bbdo_major, version->bbdo_minor, version->bbdo_patch, + _bbdo_version.major_v, _bbdo_version.minor_v, _bbdo_version.patch); + } + SPDLOG_LOGGER_INFO( + _logger, + "BBDO: peer is using protocol version {}.{}.{} , we're using " + "version " + "{}.{}.{}", + version->bbdo_major, version->bbdo_minor, version->bbdo_patch, + _bbdo_version.major_v, _bbdo_version.minor_v, _bbdo_version.patch); + + break; + } + case pb_welcome::static_type(): { + auto welcome(std::static_pointer_cast(d)); + const auto& pb_version = welcome->obj().version(); + if (pb_version.major() != _bbdo_version.major_v) { + SPDLOG_LOGGER_ERROR( + _logger, + "BBDO: peer is using protocol version {}.{}.{}, whereas we're " + "using protocol version {}.{}.{}", + pb_version.major(), pb_version.minor(), pb_version.patch(), + _bbdo_version.major_v, _bbdo_version.minor_v, _bbdo_version.patch); + throw msg_fmt( + "BBDO: peer is using protocol version {}.{}.{} " + "whereas we're using protocol version {}.{}.{}", + pb_version.major(), pb_version.minor(), pb_version.patch(), + _bbdo_version.major_v, _bbdo_version.minor_v, _bbdo_version.patch); + } + SPDLOG_LOGGER_INFO( + _logger, + "BBDO: peer 
is using protocol version {}.{}.{} , we're using " + "version " + "{}.{}.{}", + pb_version.major(), pb_version.minor(), pb_version.patch(), + _bbdo_version.major_v, _bbdo_version.minor_v, _bbdo_version.patch); + break; + } + case ack::static_type(): + SPDLOG_LOGGER_INFO( + _logger, "BBDO: received acknowledgement for {} events", + std::static_pointer_cast(d)->acknowledged_events); + acknowledge_events( + std::static_pointer_cast(d)->acknowledged_events); + break; + case pb_ack::static_type(): + SPDLOG_LOGGER_INFO(_logger, + "BBDO: received pb acknowledgement for {} events", + std::static_pointer_cast(d) + ->obj() + .acknowledged_events()); + acknowledge_events(std::static_pointer_cast(d) + ->obj() + .acknowledged_events()); + break; + case stop::static_type(): + case pb_stop::static_type(): { + SPDLOG_LOGGER_INFO( + _logger, "BBDO: received stop from peer with ID {}", + std::static_pointer_cast(d)->obj().poller_id()); + send_event_acknowledgement(); + /* Now, we send a local::pb_stop to ask unified_sql to update the + * database since the poller is going away. */ + auto loc_stop = std::make_shared(); + auto& obj = loc_stop->mut_obj(); + obj.set_poller_id( + std::static_pointer_cast(d)->obj().poller_id()); + multiplexing::publisher pblshr; + pblshr.write(loc_stop); + } break; + case pb_engine_configuration::static_type(): { + const EngineConfiguration& ec = + std::static_pointer_cast(d)->obj(); + if (config::applier::state::instance().peer_type() == common::BROKER && + _peer_type == common::ENGINE) { + SPDLOG_LOGGER_INFO( + _logger, + "BBDO: received engine configuration from Engine peer '{}'", + ec.broker_name()); + bool match = check_poller_configuration(ec.poller_id(), + ec.engine_config_version()); + auto engine_conf = std::make_shared(); + auto& obj = engine_conf->mut_obj(); + obj.set_poller_id(config::applier::state::instance().poller_id()); + obj.set_poller_name(config::applier::state::instance().poller_name()); + obj.set_broker_name(config::applier::state::instance().broker_name()); + obj.set_peer_type(common::BROKER); + if (match) { + SPDLOG_LOGGER_INFO( + _logger, "BBDO: engine configuration for '{}' is up to date", + ec.broker_name()); + obj.set_need_update(false); + } else { + SPDLOG_LOGGER_INFO(_logger, + "BBDO: engine configuration for '{}' is " + "outdated", + ec.broker_name()); + /* engine_conf has a new version, it is sent to engine. And engine + * will send its configuration to broker. */ + obj.set_need_update(true); + } + _write(engine_conf); + } + } break; + default: + break; + } +} + +/** + * @brief Wait for a BBDO event (category io::bbdo) of a specific type. While + * received events are of category io::bbdo, they are handled as usual, and when + * the expected event is received, it is returned. The expected event is not + * handled. + * + * @param expected_type The expected type of the event. + * @param d The event that was received with the expected type. + * @param deadline The deadline in seconds. + * + * @return true if the expected event was received before the deadline, false + * otherwise. + */ +bool stream::_wait_for_bbdo_event(uint32_t expected_type, + std::shared_ptr& d, + time_t deadline) { + for (;;) { + bool timed_out = !_read_any(d, deadline); + uint32_t event_id = !d ? 0 : d->type(); + if (timed_out || (event_id >> 16) != io::bbdo) + return false; + + if (event_id == expected_type) + return true; + + _handle_bbdo_event(d); + + // Control messages. 
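+ // Another bbdo control message was read: _handle_bbdo_event() has already + // consumed it, so we loop and read again until the deadline expires.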
+ SPDLOG_LOGGER_DEBUG( + _logger, + "BBDO: event with ID {} was a control message, launching recursive " + "read", + event_id); + } + + return false; +} + /** * Read data from stream. * @@ -977,100 +1179,11 @@ bool stream::read(std::shared_ptr& d, time_t deadline) { // Read event. d.reset(); - bool timed_out(!_read_any(d, deadline)); - uint32_t event_id(!d ? 0 : d->type()); - - while (!timed_out && ((event_id >> 16) == io::bbdo)) { - switch (event_id) { - case version_response::static_type(): { - auto version(std::static_pointer_cast(d)); - if (version->bbdo_major != _bbdo_version.major_v) { - SPDLOG_LOGGER_ERROR( - _logger, - "BBDO: peer is using protocol version {}.{}.{}, whereas we're " - "using protocol version {}.{}.{}", - version->bbdo_major, version->bbdo_minor, version->bbdo_patch, - _bbdo_version.major_v, _bbdo_version.minor_v, - _bbdo_version.patch); - throw msg_fmt( - "BBDO: peer is using protocol version {}.{}.{} " - "whereas we're using protocol version {}.{}.{}", - version->bbdo_major, version->bbdo_minor, version->bbdo_patch, - _bbdo_version.major_v, _bbdo_version.minor_v, - _bbdo_version.patch); - } - SPDLOG_LOGGER_INFO( - _logger, - "BBDO: peer is using protocol version {}.{}.{} , we're using " - "version " - "{}.{}.{}", - version->bbdo_major, version->bbdo_minor, version->bbdo_patch, - _bbdo_version.major_v, _bbdo_version.minor_v, _bbdo_version.patch); + bool timed_out = !_read_any(d, deadline); + uint32_t event_id = !d ? 0 : d->type(); - break; - } - case pb_welcome::static_type(): { - auto welcome(std::static_pointer_cast(d)); - const auto& pb_version = welcome->obj().version(); - if (pb_version.major() != _bbdo_version.major_v) { - SPDLOG_LOGGER_ERROR( - _logger, - "BBDO: peer is using protocol version {}.{}.{}, whereas we're " - "using protocol version {}.{}.{}", - pb_version.major(), pb_version.minor(), pb_version.patch(), - _bbdo_version.major_v, _bbdo_version.minor_v, - _bbdo_version.patch); - throw msg_fmt( - "BBDO: peer is using protocol version {}.{}.{} " - "whereas we're using protocol version {}.{}.{}", - pb_version.major(), pb_version.minor(), pb_version.patch(), - _bbdo_version.major_v, _bbdo_version.minor_v, - _bbdo_version.patch); - } - SPDLOG_LOGGER_INFO( - _logger, - "BBDO: peer is using protocol version {}.{}.{} , we're using " - "version " - "{}.{}.{}", - pb_version.major(), pb_version.minor(), pb_version.patch(), - _bbdo_version.major_v, _bbdo_version.minor_v, _bbdo_version.patch); - break; - } - case ack::static_type(): - SPDLOG_LOGGER_INFO( - _logger, "BBDO: received acknowledgement for {} events", - std::static_pointer_cast(d)->acknowledged_events); - acknowledge_events( - std::static_pointer_cast(d)->acknowledged_events); - break; - case pb_ack::static_type(): - SPDLOG_LOGGER_INFO(_logger, - "BBDO: received pb acknowledgement for {} events", - std::static_pointer_cast(d) - ->obj() - .acknowledged_events()); - acknowledge_events(std::static_pointer_cast(d) - ->obj() - .acknowledged_events()); - break; - case stop::static_type(): - case pb_stop::static_type(): { - SPDLOG_LOGGER_INFO( - _logger, "BBDO: received stop from peer with ID {}", - std::static_pointer_cast(d)->obj().poller_id()); - send_event_acknowledgement(); - /* Now, we send a local::pb_stop to ask unified_sql to update the - * database since the poller is going away. 
*/ - auto loc_stop = std::make_shared(); - auto& obj = loc_stop->mut_obj(); - obj.set_poller_id( - std::static_pointer_cast(d)->obj().poller_id()); - multiplexing::publisher pblshr; - pblshr.write(loc_stop); - } break; - default: - break; - } + while (!timed_out && (event_id >> 16) == io::bbdo) { + _handle_bbdo_event(d); // Control messages. SPDLOG_LOGGER_DEBUG( @@ -1139,11 +1252,11 @@ bool stream::_read_any(std::shared_ptr& d, time_t deadline) { uint32_t dest_id = ntohl(*reinterpret_cast(pack + 12)); uint16_t expected = misc::crc16_ccitt(pack + 2, BBDO_HEADER_SIZE - 2); - SPDLOG_LOGGER_TRACE( - _logger, - "Reading: header eventID {} sourceID {} destID {} checksum {:x} and " - "expected {:x}", - event_id, source_id, dest_id, chksum, expected); + SPDLOG_LOGGER_TRACE(_logger, + "Reading: header eventID {} sourceID {} destID {} " + "checksum {:x} and " + "expected {:x}", + event_id, source_id, dest_id, chksum, expected); if (expected != chksum) { // The packet is corrupted. @@ -1216,11 +1329,11 @@ bool stream::_read_any(std::shared_ptr& d, time_t deadline) { } /* There is no reason to have this but no one knows. */ if (_buffer.size() > 0) { - SPDLOG_LOGGER_ERROR( - _logger, - "There are still {} long BBDO packets that cannot be sent, this " - "maybe be due to a corrupted retention file.", - _buffer.size()); + SPDLOG_LOGGER_ERROR(_logger, + "There are still {} long BBDO packets that " + "cannot be sent, this " + "maybe be due to a corrupted retention file.", + _buffer.size()); /* In case of too many long events stored in memory, we purge the * oldest ones. */ while (_buffer.size() > 3) { @@ -1270,7 +1383,8 @@ bool stream::_read_any(std::shared_ptr& d, time_t deadline) { if (_buffer.size() > 1) { SPDLOG_LOGGER_ERROR( _logger, - "There are {} long BBDO packets waiting for their missing parts " + "There are {} long BBDO packets waiting for their missing " + "parts " "in memory, this may be due to a corrupted retention file.", _buffer.size()); /* In case of too many long events stored in memory, we purge the @@ -1294,12 +1408,11 @@ bool stream::_read_any(std::shared_ptr& d, time_t deadline) { /** * @brief Fill the internal _packet vector until it reaches the given size. It * may be bigger. The deadline is the limit time after that an exception is - * thrown. Even if an exception is thrown the vector may begin to be fill, it is - * just not finished, and so no data are lost. Received packets are BBDO packets - * or maybe pieces of BBDO packets, so we keep vectors as is because usually a - * vector should just represent a packet. - * In case of event serialized only by grpc stream, we store it in - * _grpc_serialized_queue + * thrown. Even if an exception is thrown the vector may begin to be fill, it + * is just not finished, and so no data are lost. Received packets are BBDO + * packets or maybe pieces of BBDO packets, so we keep vectors as is because + * usually a vector should just represent a packet. In case of event + * serialized only by grpc stream, we store it in _grpc_serialized_queue * * @param size The wanted final size * @param deadline A time_t. @@ -1386,17 +1499,98 @@ void stream::statistics(nlohmann::json& tree) const { _substream->statistics(tree); } +void stream::_negotiate_engine_conf() { + SPDLOG_LOGGER_DEBUG(_logger, + "BBDO: instance event sent to {} - supports " + "extended negotiation: {}", + _broker_name, _extended_negotiation); + /* We are an Engine since we emit an instance event and we have an + * engine config directory. 
If the Broker supports extended negotiation, + * we send also an engine configuration event. And then we'll wait for + * an answer from Broker. */ + if (_extended_negotiation && + !config::applier::state::instance().engine_config_dir().empty()) { + auto engine_conf = std::make_shared(); + auto& obj = engine_conf->mut_obj(); + obj.set_poller_id(config::applier::state::instance().poller_id()); + obj.set_poller_name(config::applier::state::instance().poller_name()); + obj.set_broker_name(config::applier::state::instance().broker_name()); + obj.set_peer_type(common::ENGINE); + + /* Time to fill the config version. */ + std::error_code ec; + _config_version = common::hash_directory( + config::applier::state::instance().engine_config_dir(), ec); + if (ec) { + _logger->error( + "BBDO: cannot access directory '{}': {}", + config::applier::state::instance().engine_config_dir().string(), + ec.message()); + } + obj.set_engine_config_version(_config_version); + _logger->info( + "BBDO: engine configuration sent to peer '{}' with version {}", + _broker_name, _config_version); + _write(engine_conf); + std::shared_ptr d; + time_t deadline = time(nullptr) + 5; + bool found = _wait_for_bbdo_event(pb_engine_configuration::static_type(), d, + deadline); + if (!found) { + _logger->warn( + "BBDO: no engine configuration received from peer '{}' as " + "response", + _broker_name); + if (d) { + _logger->info( + "BBDO: received message of type {:x} instead of " + "pb_engine_configuration", + d->type()); + } else + _logger->info("BBDO: no message received"); + } else { + _logger->debug( + "BBDO: engine configuration from peer '{}' received as expected", + _broker_name); + const EngineConfiguration& ec = + std::static_pointer_cast(d)->obj(); + + if (!ec.need_update()) { + SPDLOG_LOGGER_INFO(_logger, + "BBDO: No engine configuration update needed"); + config::applier::state::instance().set_broker_needs_update( + ec.poller_id(), ec.poller_name(), ec.broker_name(), common::BROKER, + false); + } else { + SPDLOG_LOGGER_INFO(_logger, + "BBDO: Engine configuration needs to be updated"); + config::applier::state::instance().set_broker_needs_update( + ec.poller_id(), ec.poller_name(), ec.broker_name(), common::BROKER, + true); + } + } + } else { + /* Legacy negociation */ + config::applier::state::instance().set_peers_ready(); + } +} + void stream::_write(const std::shared_ptr& d) { assert(d); + if (d->type() == neb::pb_instance::static_type()) + _negotiate_engine_conf(); + if (!_grpc_serialized || !std::dynamic_pointer_cast(d)) { - // Check if data exists. std::shared_ptr serialized(serialize(*d)); if (serialized) { SPDLOG_LOGGER_TRACE(_logger, - "BBDO: serialized event of type {} to {} bytes", + "BBDO: serialized event of type {:x} to {} bytes", d->type(), serialized->size()); _substream->write(serialized); + } else { + SPDLOG_LOGGER_ERROR(_logger, "BBDO: cannot serialize event of type {:x}", + d->type()); } } else _substream->write(d); @@ -1446,3 +1640,49 @@ void stream::send_event_acknowledgement() { _events_received_since_last_ack = 0; } } + +/** + * @brief Check if the poller configuration is up to date. 
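+ * + * The poller's directory pollers_config_dir()/<poller_id> is hashed with + * common::hash_directory() and compared with the version advertised by the + * peer; missing directories are created on the way and the computed hash is + * stored through set_engine_configuration(). A sketch of the call made from + * _handle_bbdo_event() when an EngineConfiguration event 'ec' arrives from an + * Engine peer: + * @code + * bool match = check_poller_configuration(ec.poller_id(), + * ec.engine_config_version()); + * @endcode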
+ * + * @param poller_id + * @param expected_version + * + * @return + */ +bool stream::check_poller_configuration(uint64_t poller_id, + const std::string& expected_version) { + std::error_code ec; + const std::filesystem::path& pollers_conf_dir = + config::applier::state::instance().pollers_config_dir(); + if (!std::filesystem::is_directory(pollers_conf_dir, ec)) { + if (ec) + _logger->error("Cannot access directory '{}': {}", + pollers_conf_dir.string(), ec.message()); + std::filesystem::create_directories(pollers_conf_dir, ec); + if (ec) { + _logger->error("Cannot create directory '{}': {}", + pollers_conf_dir.string(), ec.message()); + return false; + } + } + auto poller_dir = pollers_conf_dir / fmt::to_string(poller_id); + if (!std::filesystem::is_directory(poller_dir, ec)) { + if (ec) + _logger->error("Cannot access directory '{}': {}", poller_dir.string(), + ec.message()); + std::filesystem::create_directories(poller_dir, ec); + if (ec) + _logger->error("Cannot create directory '{}': {}", poller_dir.string(), + ec.message()); + return false; + } + std::string current = common::hash_directory(poller_dir, ec); + if (ec) { + _logger->error("Cannot access directory '{}': {}", poller_dir.string(), + ec.message()); + return false; + } + config::applier::state::instance().set_engine_configuration(poller_id, + current); + return current == expected_version; +} diff --git a/broker/core/src/broker.proto b/broker/core/src/broker.proto index 04a810ac8c2..94475badd37 100644 --- a/broker/core/src/broker.proto +++ b/broker/core/src/broker.proto @@ -1,7 +1,9 @@ syntax = "proto3"; import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; import "process_stat.proto"; +import "common.proto"; package com.centreon.broker; @@ -91,6 +93,28 @@ service Broker { * @return nothing. */ rpc SetLogFlushPeriod(LogFlushPeriod) returns (google.protobuf.Empty) {} + rpc Aes256Encrypt(AesMessage) returns (GenericString) {} + rpc Aes256Decrypt(AesMessage) returns (GenericString) {} + + rpc GetPeers(google.protobuf.Empty) returns (PeerList) {} +} + +message PeerList { + repeated Peer peers = 1; +} + +message Peer { + uint32 id = 1; + string poller_name = 2; + string broker_name = 3; + google.protobuf.Timestamp connected_since = 4; + common.PeerType type = 5; +} + +message AesMessage { + string app_secret = 1; + string salt = 2; + string content = 3; } message Version { diff --git a/broker/core/src/broker_impl.cc b/broker/core/src/broker_impl.cc index bff46898aaf..fd8a5709454 100644 --- a/broker/core/src/broker_impl.cc +++ b/broker/core/src/broker_impl.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020-2023 Centreon (https://www.centreon.com/) + * Copyright 2020-2024 Centreon (https://www.centreon.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,6 +18,8 @@ */ #include "com/centreon/broker/broker_impl.hh" +#include +#include #include "com/centreon/broker/config/applier/endpoint.hh" #include "com/centreon/broker/config/applier/state.hh" @@ -26,10 +28,12 @@ #include "com/centreon/broker/stats/helper.hh" #include "com/centreon/broker/version.hh" #include "com/centreon/common/process_stat.hh" +#include "common/crypto/aes256.hh" #include "common/log_v2/log_v2.hh" using namespace com::centreon::broker; using namespace com::centreon::broker::version; +using com::centreon::common::crypto::aes256; using com::centreon::common::log_v2::log_v2; /** @@ -394,3 +398,53 @@ ::grpc::Status broker_impl::GetProcessStats( } return grpc::Status::OK; } + +grpc::Status broker_impl::Aes256Encrypt(grpc::ServerContext* context + [[maybe_unused]], + const AesMessage* request, + GenericString* response) { + std::string first_key = request->app_secret(); + std::string second_key = request->salt(); + + try { + aes256 access(first_key, second_key); + std::string result = access.encrypt(request->content()); + response->set_str_arg(result); + return grpc::Status::OK; + } catch (const std::exception& e) { + return grpc::Status(grpc::INVALID_ARGUMENT, grpc::string(e.what())); + } +} + +grpc::Status broker_impl::Aes256Decrypt(grpc::ServerContext* context + [[maybe_unused]], + const AesMessage* request, + GenericString* response) { + std::string first_key = request->app_secret(); + std::string second_key = request->salt(); + + try { + aes256 access(first_key, second_key); + std::string result = access.decrypt(request->content()); + response->set_str_arg(result); + return grpc::Status::OK; + } catch (const std::exception& e) { + return grpc::Status(grpc::INVALID_ARGUMENT, grpc::string(e.what())); + } +} + +grpc::Status broker_impl::GetPeers(grpc::ServerContext* context + [[maybe_unused]], + const ::google::protobuf::Empty* request + [[maybe_unused]], + PeerList* response) { + for (auto& p : config::applier::state::instance().connected_peers()) { + auto peer = response->add_peers(); + peer->set_id(p.poller_id); + peer->set_poller_name(p.poller_name); + peer->set_broker_name(p.broker_name); + peer->mutable_connected_since()->set_seconds(p.connected_since); + peer->set_type(p.peer_type); + } + return grpc::Status::OK; +} diff --git a/broker/core/src/cache/global_cache_data.cc b/broker/core/src/cache/global_cache_data.cc index ecd8f2af7c8..87a479b52a1 100644 --- a/broker/core/src/cache/global_cache_data.cc +++ b/broker/core/src/cache/global_cache_data.cc @@ -261,7 +261,7 @@ void global_cache_data::add_host_to_group(uint64_t group, */ void global_cache_data::remove_host_from_group(uint64_t group, uint64_t host) { absl::WriterMutexLock l(&_protect); - _host_group->get<2>().erase(host_group_element{host, group}); + _host_group->get<2>().erase(host_group_element{host, group, 0}); } /** @@ -317,7 +317,8 @@ void global_cache_data::remove_service_from_group(uint64_t group, uint64_t host, uint64_t service) { absl::WriterMutexLock l(&_protect); - _service_group->get<2>().erase(service_group_element{{host, service}, group}); + _service_group->get<2>().erase( + service_group_element{{host, service}, group, 0}); } /** diff --git a/broker/core/src/compression/factory.cc b/broker/core/src/compression/factory.cc index 4d904dc09d3..4a7ff60a936 100644 --- a/broker/core/src/compression/factory.cc +++ b/broker/core/src/compression/factory.cc @@ -118,15 +118,12 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( 
config::endpoint& cfg, - bool& is_acceptor, - std::shared_ptr cache) const { - (void)is_acceptor; - (void)cache; - + const std::map& global_params [[maybe_unused]], + bool& is_acceptor [[maybe_unused]], + std::shared_ptr cache [[maybe_unused]]) const { // Get compression level. int level{-1}; - std::map::const_iterator it{ - cfg.params.find("compression_level")}; + auto it = cfg.params.find("compression_level"); if (it != cfg.params.end()) { if (!absl::SimpleAtoi(it->second, &level)) { log_v2::instance() diff --git a/broker/core/src/config/applier/endpoint.cc b/broker/core/src/config/applier/endpoint.cc index 02c456e3ae7..8b26e82cabf 100644 --- a/broker/core/src/config/applier/endpoint.cc +++ b/broker/core/src/config/applier/endpoint.cc @@ -21,15 +21,8 @@ #include #include "com/centreon/broker/config/applier/state.hh" -#include "com/centreon/broker/io/endpoint.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/io/protocols.hh" -#include "com/centreon/broker/misc/misc.hh" -#include "com/centreon/broker/multiplexing/engine.hh" -#include "com/centreon/broker/multiplexing/muxer.hh" -#include "com/centreon/broker/persistent_cache.hh" -#include "com/centreon/broker/processing/acceptor.hh" -#include "com/centreon/broker/processing/endpoint.hh" #include "com/centreon/broker/processing/failover.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -100,7 +93,8 @@ endpoint::~endpoint() { * * @param[in] endpoints Endpoints configuration objects. */ -void endpoint::apply(std::list const& endpoints) { +void endpoint::apply(std::list const& endpoints, + const std::map& global_params) { // Log messages. SPDLOG_LOGGER_INFO(_logger, "endpoint applier: loading configuration"); @@ -162,7 +156,8 @@ void endpoint::apply(std::list const& endpoints) { SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: creating endpoint {}", ep.name); bool is_acceptor; - std::shared_ptr e{_create_endpoint(ep, is_acceptor)}; + std::shared_ptr e{ + _create_endpoint(ep, global_params, is_acceptor)}; std::unique_ptr endp; /* Input or output? */ /* This is tricky, one day we will make better... I hope. 
@@ -232,7 +227,7 @@ void endpoint::apply(std::list const& endpoints) { auto mux = multiplexing::muxer::create( ep.name, multiplexing::engine::instance_ptr(), r_filter, w_filter, true); - endp.reset(_create_failover(ep, mux, e, endp_to_create)); + endp.reset(_create_failover(ep, global_params, mux, e, endp_to_create)); } { std::lock_guard lock(_endpointsm); @@ -385,6 +380,7 @@ void endpoint::unload() { */ processing::failover* endpoint::_create_failover( config::endpoint& cfg, + const std::map& global_params, std::shared_ptr mux, std::shared_ptr endp, std::list& l) { @@ -405,14 +401,15 @@ processing::failover* endpoint::_create_failover( front_failover, cfg.name); else { bool is_acceptor; - std::shared_ptr e(_create_endpoint(*it, is_acceptor)); + std::shared_ptr e( + _create_endpoint(*it, global_params, is_acceptor)); if (is_acceptor) throw msg_fmt( "endpoint applier: cannot allow acceptor '{}' as failover for " "endpoint '{}'", front_failover, cfg.name); failovr = std::shared_ptr( - _create_failover(*it, mux, e, l)); + _create_failover(*it, global_params, mux, e, l)); // Add secondary failovers for (std::list::const_iterator @@ -427,7 +424,8 @@ processing::failover* endpoint::_create_failover( "endpoint '{}'", *failover_it, cfg.name); bool is_acceptor{false}; - std::shared_ptr endp(_create_endpoint(*it, is_acceptor)); + std::shared_ptr endp( + _create_endpoint(*it, global_params, is_acceptor)); if (is_acceptor) { SPDLOG_LOGGER_ERROR( _logger, @@ -456,8 +454,10 @@ processing::failover* endpoint::_create_failover( * * @return A new endpoint. */ -std::shared_ptr endpoint::_create_endpoint(config::endpoint& cfg, - bool& is_acceptor) { +std::shared_ptr endpoint::_create_endpoint( + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor) { // Create endpoint object. std::shared_ptr endp; int level{0}; @@ -479,8 +479,9 @@ std::shared_ptr endpoint::_create_endpoint(config::endpoint& cfg, log_v2::instance().get(log_id)); } - endp = std::shared_ptr( - it->second.endpntfactry->new_endpoint(cfg, is_acceptor, cache)); + endp = + std::shared_ptr(it->second.endpntfactry->new_endpoint( + cfg, global_params, is_acceptor, cache)); SPDLOG_LOGGER_INFO(_logger, " create endpoint {} for endpoint '{}'", it->first, cfg.name); level = it->second.osi_to + 1; @@ -502,7 +503,8 @@ std::shared_ptr endpoint::_create_endpoint(config::endpoint& cfg, if ((it->second.osi_from == level) && (it->second.endpntfactry->has_endpoint(cfg, nullptr))) { std::shared_ptr current( - it->second.endpntfactry->new_endpoint(cfg, is_acceptor)); + it->second.endpntfactry->new_endpoint(cfg, global_params, + is_acceptor)); SPDLOG_LOGGER_INFO(_logger, " create endpoint {} for endpoint '{}'", it->first, cfg.name); current->from(endp); diff --git a/broker/core/src/config/applier/init.cc b/broker/core/src/config/applier/init.cc index cb99e7c2120..6efb53f1f8c 100644 --- a/broker/core/src/config/applier/init.cc +++ b/broker/core/src/config/applier/init.cc @@ -58,7 +58,8 @@ std::atomic config::applier::mode{not_started}; * @param n_thread number of threads in the pool. * @param name The broker name to give to this cbd instance. */ -void config::applier::init(size_t n_thread, +void config::applier::init(const common::PeerType peer_type, + size_t n_thread, const std::string&, size_t event_queues_total_size) { /* Load singletons. 
@@ -75,7 +76,7 @@ void config::applier::init(size_t n_thread, com::centreon::common::pool::set_pool_size(n_thread); stats::center::load(); mysql_manager::load(); - config::applier::state::load(); + config::applier::state::load(peer_type); file::disk_accessor::load(event_queues_total_size); io::protocols::load(); io::events::load(); @@ -111,6 +112,8 @@ void config::applier::deinit() { * * @param conf The configuration used to initialize the all. */ -void config::applier::init(const config::state& conf) { - init(conf.pool_size(), conf.broker_name(), conf.event_queues_total_size()); +void config::applier::init(const common::PeerType peer_type, + const config::state& conf) { + init(peer_type, conf.pool_size(), conf.broker_name(), + conf.event_queues_total_size()); } diff --git a/broker/core/src/config/applier/state.cc b/broker/core/src/config/applier/state.cc index 4af76911d21..10c2edf6cc4 100644 --- a/broker/core/src/config/applier/state.cc +++ b/broker/core/src/config/applier/state.cc @@ -17,13 +17,15 @@ */ #include "com/centreon/broker/config/applier/state.hh" +#include +#include +#include #include "com/centreon/broker/config/applier/endpoint.hh" #include "com/centreon/broker/instance_broadcast.hh" -#include "com/centreon/broker/multiplexing/engine.hh" -#include "com/centreon/broker/multiplexing/muxer.hh" #include "com/centreon/broker/vars.hh" #include "com/centreon/exceptions/msg_fmt.hh" +#include "common.pb.h" #include "common/log_v2/log_v2.hh" using namespace com::centreon::exceptions; @@ -46,8 +48,10 @@ state::stats state::_stats_conf; /** * Default constructor. */ -state::state(const std::shared_ptr& logger) - : _poller_id(0), +state::state(common::PeerType peer_type, + const std::shared_ptr& logger) + : _peer_type{peer_type}, + _poller_id(0), _rpc_port(0), _bbdo_version{2u, 0u, 0u}, _modules{logger} {} @@ -106,6 +110,7 @@ void state::apply(const com::centreon::broker::config::state& s, bool run_mux) { // Set poller instance. _poller_id = s.poller_id(); + _broker_name = s.broker_name(); _poller_name = s.poller_name(); _rpc_port = s.rpc_port(); _bbdo_version = s.get_bbdo_version(); @@ -114,11 +119,29 @@ void state::apply(const com::centreon::broker::config::state& s, bool run_mux) { _pool_size = s.pool_size(); // Set cache directory. - _cache_dir = s.cache_directory(); - if (_cache_dir.empty()) - _cache_dir.append(PREFIX_VAR); - _cache_dir.append("/"); - _cache_dir.append(s.broker_name()); + std::filesystem::path cache_dir; + if (s.cache_directory().empty()) + cache_dir = PREFIX_VAR; + else + cache_dir = s.cache_directory(); + + _cache_dir = cache_dir.string() + "/" + s.broker_name(); + + if (s.get_bbdo_version().major_v >= 3) { + // Engine configuration directory (for cbmod). + if (!s.engine_config_dir().empty()) + set_engine_config_dir(s.engine_config_dir()); + + // Configuration cache directory (for broker, from php). + set_config_cache_dir(s.config_cache_dir()); + + // Pollers configuration directory (for Broker). + // If not provided in the configuration, use a default directory. + if (!s.config_cache_dir().empty() && _pollers_config_dir.empty()) + set_pollers_config_dir(cache_dir / "pollers-configuration/"); + else + set_pollers_config_dir(s.pollers_config_dir()); + } // Apply modules configuration. _modules.apply(s.module_list(), s.module_directory(), &s); @@ -142,7 +165,7 @@ void state::apply(const com::centreon::broker::config::state& s, bool run_mux) { com::centreon::broker::config::state st{s}; // Apply input and output configuration. 
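+ // st.params() holds the free-form global options of the configuration; they + // are forwarded to the endpoint factories, whose new_endpoint() now receives + // a global_params map.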
- endpoint::instance().apply(st.endpoints()); + endpoint::instance().apply(st.endpoints(), st.params()); // Create instance broadcast event. auto ib{std::make_shared()}; @@ -188,9 +211,9 @@ state& state::instance() { /** * Load singleton. */ -void state::load() { +void state::load(common::PeerType peer_type) { if (!gl_state) - gl_state = new state(log_v2::instance().get(log_v2::CONFIG)); + gl_state = new state(peer_type, log_v2::instance().get(log_v2::CONFIG)); } /** @@ -220,6 +243,15 @@ const std::string& state::poller_name() const noexcept { return _poller_name; } +/** + * Get the broker name. + * + * @return Broker name of this Broker instance. + */ +const std::string& state::broker_name() const noexcept { + return _broker_name; +} + /** * @brief Get the thread pool size. * @@ -254,22 +286,28 @@ const config::applier::state::stats& state::stats_conf() { * @brief Add a poller to the list of connected pollers. * * @param poller_id The id of the poller (an id by host) - * @param poller_name The name of the poller + * @param broker_name The name of the poller */ -void state::add_poller(uint64_t poller_id, const std::string& poller_name) { - std::lock_guard lck(_connected_pollers_m); +void state::add_peer(uint64_t poller_id, + const std::string& poller_name, + const std::string& broker_name, + common::PeerType peer_type, + bool extended_negotiation) { + assert(poller_id && !broker_name.empty()); + absl::MutexLock lck(&_connected_peers_m); auto logger = log_v2::instance().get(log_v2::CORE); - auto found = _connected_pollers.find(poller_id); - if (found == _connected_pollers.end()) { - logger->info("Poller '{}' with id {} connected", poller_name, poller_id); - _connected_pollers[poller_id] = poller_name; + auto found = _connected_peers.find({poller_id, poller_name, broker_name}); + if (found == _connected_peers.end()) { + logger->info("Poller '{}' with id {} connected", broker_name, poller_id); } else { logger->warn( - "Poller '{}' with id {} already known as connected. Replacing it " - "with '{}'", - _connected_pollers[poller_id], poller_id, poller_name); - found->second = poller_name; + "Poller '{}' with id {} already known as connected. Replacing it.", + broker_name, poller_id); + _connected_peers.erase(found); } + _connected_peers[{poller_id, poller_name, broker_name}] = + peer{poller_id, poller_name, broker_name, time(nullptr), + peer_type, extended_negotiation, true, false}; } /** @@ -277,16 +315,22 @@ void state::add_poller(uint64_t poller_id, const std::string& poller_name) { * * @param poller_id The id of the poller to remove. 
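+ * @param poller_name The name of the poller the peer belongs to. + * @param broker_name The name of the broker peer to remove.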
*/ -void state::remove_poller(uint64_t poller_id) { - std::lock_guard lck(_connected_pollers_m); +void state::remove_peer(uint64_t poller_id, + const std::string& poller_name, + const std::string& broker_name) { + assert(poller_id && !broker_name.empty()); + absl::MutexLock lck(&_connected_peers_m); auto logger = log_v2::instance().get(log_v2::CORE); - auto found = _connected_pollers.find(poller_id); - if (found == _connected_pollers.end()) - logger->warn("There is currently no poller {} connected", poller_id); - else { - logger->info("Poller '{}' with id {} just disconnected", - _connected_pollers[poller_id], poller_id); - _connected_pollers.erase(found); + auto found = _connected_peers.find({poller_id, poller_name, broker_name}); + if (found != _connected_peers.end()) { + logger->info("Peer poller: '{}' - broker: '{}' with id {} disconnected", + poller_name, broker_name, poller_id); + _connected_peers.erase(found); + } else { + logger->warn( + "Peer poller: '{}' - broker: '{}' with id {} not found " + "in connected peers", + poller_name, broker_name, poller_id); } } @@ -296,6 +340,203 @@ * @param poller_id The poller to check. */ bool state::has_connection_from_poller(uint64_t poller_id) const { - std::lock_guard lck(_connected_pollers_m); - return _connected_pollers.contains(poller_id); + absl::MutexLock lck(&_connected_peers_m); + for (auto& p : _connected_peers) + if (p.second.poller_id == poller_id && p.second.peer_type == common::ENGINE) + return true; + return false; +} + +/** + * @brief Get the list of connected peers. + * + * @return A vector of the connected peers. + */ +std::vector state::connected_peers() const { + absl::MutexLock lck(&_connected_peers_m); + std::vector retval; + for (auto it = _connected_peers.begin(); it != _connected_peers.end(); ++it) + retval.push_back(it->second); + return retval; +} + +/** + * @brief Get the Engine configuration directory. + * + * @return The Engine configuration directory. + */ +const std::filesystem::path& state::engine_config_dir() const noexcept { + return _engine_config_dir; +} + +/** + * @brief Set the Engine configuration directory. + * + * @param dir The Engine configuration directory. + */ +void state::set_engine_config_dir(const std::filesystem::path& dir) { + _engine_config_dir = dir; +} + +/** + * @brief Get the configuration cache directory used by php to write + * pollers' configurations. + * + * @return The configuration cache directory. + */ +const std::filesystem::path& state::config_cache_dir() const noexcept { + return _config_cache_dir; +} + +/** + * @brief Set the configuration cache directory. + * + * @param config_cache_dir The configuration cache directory. + */ +void state::set_config_cache_dir( + const std::filesystem::path& config_cache_dir) { + _config_cache_dir = config_cache_dir; +} + +/** + * @brief Get the pollers configurations directory. + * + * @return The pollers configurations directory. + */ +const std::filesystem::path& state::pollers_config_dir() const noexcept { + return _pollers_config_dir; +} + +/** + * @brief Set the pollers configurations directory. + * + * @param pollers_config_dir The pollers configurations directory. + */ +void state::set_pollers_config_dir( + const std::filesystem::path& pollers_config_dir) { + _pollers_config_dir = pollers_config_dir; +} + +/** + * @brief Get the type of peer this state is defined for. + * + * @return A PeerType enum. 
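+ * (typically common::UNKNOWN, common::ENGINE or common::BROKER).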
+ */ +com::centreon::common::PeerType state::peer_type() const { + return _peer_type; +} + +/** + * @brief Specify whether a broker needs an update, and mark the broker as + * ready to receive data. + * + * @param poller_id The poller id. + * @param poller_name The poller name. + * @param broker_name The broker name. + * @param peer_type The peer type. + * @param need_update true if the broker needs an update, false otherwise. + */ +void state::set_broker_needs_update(uint64_t poller_id, + const std::string& poller_name, + const std::string& broker_name, + common::PeerType peer_type, + bool need_update) { + absl::MutexLock lck(&_connected_peers_m); + auto found = _connected_peers.find({poller_id, poller_name, broker_name}); + if (found != _connected_peers.end()) { + found->second.needs_update = need_update; + found->second.ready = true; + } else { + auto logger = log_v2::instance().get(log_v2::CORE); + logger->warn( + "Poller '{}' with id {} and type '{}' not found in connected peers", + broker_name, poller_id, + common::PeerType_descriptor()->FindValueByNumber(peer_type)->name()); + } +} + +/** + * @brief Set all the connected peers as ready to receive data (no extended + * negotiation available). + */ +void state::set_peers_ready() { + absl::MutexLock lck(&_connected_peers_m); + for (auto& p : _connected_peers) + p.second.ready = true; +} + +/** + * @brief Check if a broker needs an update. + * + * @param poller_id The poller id. + * @param poller_name The poller name. + * @param broker_name The broker name. + * + * @return true if the broker needs an update, false otherwise. + */ +bool state::broker_needs_update(uint64_t poller_id, + const std::string& poller_name, + const std::string& broker_name) const { + auto found = _connected_peers.find({poller_id, poller_name, broker_name}); + if (found != _connected_peers.end()) + return found->second.needs_update; + else + return false; +} + +/** + * @brief Wait up to 20 seconds for all Brokers to be ready and then check if + * at least one broker needs an update. + * + * @return true if at least one broker needs an update, false otherwise. + */ +bool state::broker_needs_update() const { + auto brokers_ready = [this] { + for (auto& p : _connected_peers) { + if (p.second.peer_type == common::BROKER && !p.second.ready) + return false; + } + return true; + }; + + absl::MutexLock lck(&_connected_peers_m); + // Let's wait for at most 20 seconds for all brokers to be ready. + _connected_peers_m.AwaitWithTimeout(absl::Condition(&brokers_ready), + absl::Seconds(20)); + + // Now, we can check if they need some updates. + for (auto& p : _connected_peers) { + if (p.second.peer_type == common::BROKER && p.second.needs_update) + return true; + } + return false; +} + +/** + * @brief Set the engine configuration version of the peer with the given + * poller_id to the given value. + * + * @param poller_id Poller ID concerned by the modification. + * @param version The version to set. + */ +void state::set_engine_configuration(uint64_t poller_id, + const std::string& version) { + absl::MutexLock lck(&_connected_peers_m); + _engine_configuration[poller_id] = version; +} + +/** + * @brief Get the engine configuration for a poller. On error an empty string + * is returned. + * + * @param poller_id The poller id. + * + * @return The engine configuration as a string. 
+ */ +std::string state::engine_configuration(uint64_t poller_id) const { + absl::MutexLock lck(&_connected_peers_m); + auto found = _engine_configuration.find(poller_id); + if (found != _engine_configuration.end()) + return found->second; + else + return ""; } diff --git a/broker/core/src/config/endpoint.cc b/broker/core/src/config/endpoint.cc index 05fc64535dd..7c6db250fb9 100644 --- a/broker/core/src/config/endpoint.cc +++ b/broker/core/src/config/endpoint.cc @@ -134,8 +134,8 @@ bool endpoint::operator<(const endpoint& other) const { return cfg < other.cfg; // Need to check all parameters one by one. - std::map::const_iterator it1{params.begin()}, - it2{other.params.begin()}, end1{params.end()}, end2{other.params.end()}; + auto it1 = params.begin(), it2 = other.params.begin(), end1 = params.end(), + end2 = other.params.end(); while (it1 != end1 && it2 != end2) { if (it1->first != it2->first) return it1->first < it2->first; diff --git a/broker/core/src/config/parser.cc b/broker/core/src/config/parser.cc index 298feb98abf..9a7945a53a2 100644 --- a/broker/core/src/config/parser.cc +++ b/broker/core/src/config/parser.cc @@ -22,7 +22,6 @@ #include #include -#include #include "com/centreon/broker/exceptions/deprecated.hh" #include "com/centreon/broker/misc/filesystem.hh" @@ -263,6 +262,21 @@ state parser::parse(std::string const& file) { if (!misc::filesystem::readable(retval.cache_directory())) throw msg_fmt("The cache directory '{}' is not accessible", retval.cache_directory()); + } else if (get_conf( + {it.key(), it.value()}, "cache_config_directory", retval, + &state::set_config_cache_dir, &json::is_string)) { + if (!misc::filesystem::readable(retval.config_cache_dir())) + throw msg_fmt( + "The cache configuration directory '{}' is not accessible", + retval.config_cache_dir()); + } else if (get_conf({it.key(), it.value()}, + "pollers_config_directory", retval, + &state::set_pollers_config_dir, + &json::is_string)) { + if (!misc::filesystem::readable(retval.pollers_config_dir())) + throw msg_fmt( + "The pollers configuration directory '{}' is not accessible", + retval.pollers_config_dir()); } else if (get_conf({it.key(), it.value()}, "pool_size", retval, &state::pool_size, &json::is_number, &json::get)) @@ -491,7 +505,10 @@ state parser::parse(std::string const& file) { if (it.key() == "stats") retval.add_module("15-stats.so"); - retval.params()[it.key()] = it.value().dump(); + if (it.value().is_string()) + retval.params()[it.key()] = it.value().get(); + else + retval.params()[it.key()] = it.value().dump(); } } } diff --git a/broker/core/src/config/state.cc b/broker/core/src/config/state.cc index 9cbd8c16655..5f02dd4fa79 100644 --- a/broker/core/src/config/state.cc +++ b/broker/core/src/config/state.cc @@ -369,7 +369,7 @@ std::map& state::params() noexcept { * * @return Additional parameters list. */ -std::map const& state::params() const noexcept { +const std::map& state::params() const noexcept { return _params; } @@ -472,3 +472,57 @@ const state::stats_exporter_conf& state::get_stats_exporter() const { state::stats_exporter_conf& state::mut_stats_exporter() { return _stats_exporter_conf; } + +/** + * @brief Set the directory containing the Engine configuration. + * + * @param engine_config_dir The directory containing the Engine configuration. + */ +void state::set_engine_config_dir(const std::string& dir) { + _engine_config_dir = dir; +} + +/** + * @brief Get the directory containing the Engine configuration. + * + * @return The directory containing the Engine configuration. 
+ */ +const std::string& state::engine_config_dir() const noexcept { + return _engine_config_dir; +} + +/** + * @brief Set the directory containing the cache configuration of the pollers. + * + * @param config_cache_dir The directory name + */ +void state::set_config_cache_dir(const std::string& config_cache_dir) { + _config_cache_dir = config_cache_dir; +} + +/** + * @brief Get the directory containing the cache configuration of the pollers. + * + * @return The directory name + */ +const std::string& state::config_cache_dir() const noexcept { + return _config_cache_dir; +} + +/** + * @brief Set the directory containing the pollers configurations. + * + * @param pollers_config_dir The directory name + */ +void state::set_pollers_config_dir(const std::string& pollers_config_dir) { + _pollers_config_dir = pollers_config_dir; +} + +/** + * @brief Get the directory containing the pollers configurations. + * + * @return The directory name + */ +const std::string& state::pollers_config_dir() const noexcept { + return _pollers_config_dir; +} diff --git a/broker/core/src/file/factory.cc b/broker/core/src/file/factory.cc index 7fcf70f0982..496c2f67932 100644 --- a/broker/core/src/file/factory.cc +++ b/broker/core/src/file/factory.cc @@ -1,25 +1,24 @@ /** -* Copyright 2011-2013,2016-2017 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2011-2013,2016-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/file/factory.hh" #include "com/centreon/broker/file/opener.hh" -#include "com/centreon/broker/io/extension.hh" #include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; @@ -56,20 +55,15 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params [[maybe_unused]], bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache [[maybe_unused]]) const { // Find path to the file. 
std::string filename; { - std::map::const_iterator it{ - cfg.params.find("path")}; + auto it = cfg.params.find("path"); if (it == cfg.params.end()) - throw msg_fmt( - "file: no 'path' defined for file " - "endpoint '{}'", - cfg.name); + throw msg_fmt("file: no 'path' defined for file endpoint '{}'", cfg.name); filename = it->second; } diff --git a/broker/core/src/io/events.cc b/broker/core/src/io/events.cc index 8f318195b1c..755e81e3137 100644 --- a/broker/core/src/io/events.cc +++ b/broker/core/src/io/events.cc @@ -233,6 +233,9 @@ events::events() { &bbdo::pb_stop::operations); register_event(bbdo::pb_bench::static_type(), "Bench", &bbdo::pb_bench::operations); + register_event(bbdo::pb_engine_configuration::static_type(), + "EngineConfiguration", + &bbdo::pb_engine_configuration::operations); // Register BBDO protocol. io::protocols::instance().reg("BBDO", std::make_shared(), 7, diff --git a/broker/core/src/main.cc b/broker/core/src/main.cc index a279dcf2c26..26f5eee5251 100644 --- a/broker/core/src/main.cc +++ b/broker/core/src/main.cc @@ -21,15 +21,12 @@ #include #include #include -#include #include #include #include -#include #include -#include -#include #include +#include "bbdo/common.pb.h" #include @@ -39,6 +36,7 @@ #include namespace asio = boost::asio; +using namespace com::centreon; // with this define boost::interprocess doesn't need Boost.DataTime #define BOOST_DATE_TIME_NO_LIB 1 @@ -60,14 +58,14 @@ namespace asio = boost::asio; #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" +using log_v2 = common::log_v2::log_v2; + using namespace com::centreon::broker; using namespace com::centreon::exceptions; std::shared_ptr g_io_context = std::make_shared(); -using log_v2 = com::centreon::common::log_v2::log_v2; - // Main config file. static std::vector gl_mainconfigfiles; static config::state gl_state; @@ -286,7 +284,7 @@ int main(int argc, char* argv[]) { if (n_thread > 0 && n_thread < 100) conf.pool_size(n_thread); - config::applier::init(conf); + config::applier::init(common::BROKER, conf); // Apply resulting configuration totally or partially. 
config::applier::state::instance().apply(conf, !check); diff --git a/broker/core/src/misc/diagnostic.cc b/broker/core/src/misc/diagnostic.cc index 21f394ff1d1..b16038a46a3 100644 --- a/broker/core/src/misc/diagnostic.cc +++ b/broker/core/src/misc/diagnostic.cc @@ -21,9 +21,7 @@ #include #include #include "com/centreon/broker/config/parser.hh" -#include "com/centreon/broker/misc/filesystem.hh" #include "com/centreon/broker/misc/misc.hh" -#include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" using namespace com::centreon::exceptions; diff --git a/broker/core/src/misc/string.cc b/broker/core/src/misc/string.cc index 354669c0fcc..b2077c4a202 100644 --- a/broker/core/src/misc/string.cc +++ b/broker/core/src/misc/string.cc @@ -21,8 +21,6 @@ #include -#include - using namespace com::centreon::broker::misc; static char const* whitespaces(" \t\r\n"); @@ -46,29 +44,6 @@ std::string& string::trim(std::string& str) noexcept { return str; } -std::string string::base64_encode(const std::string& str) { - static const std::string b = - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; - std::string retval; - retval.reserve((str.size() / 3 + (str.size() % 3 > 0)) * 4); - - int val = 0, valb = -6; - for (unsigned char c : str) { - val = (val << 8) + c; - valb += 8; - while (valb >= 0) { - retval.push_back(b[(val >> valb) & 0x3F]); - valb -= 6; - } - } - if (valb > -6) - retval.push_back(b[((val << 8) >> (valb + 8)) & 0x3F]); - while (retval.size() % 4) - retval.push_back('='); - - return retval; -} - bool string::is_number(const std::string& s) { return !s.empty() && std::find_if(s.begin(), s.end(), [](char c) { return !std::isdigit(c); diff --git a/broker/core/test/bbdo/output.cc b/broker/core/test/bbdo/output.cc index d0831d3a51d..228437c8fa6 100644 --- a/broker/core/test/bbdo/output.cc +++ b/broker/core/test/bbdo/output.cc @@ -75,7 +75,7 @@ class OutputTest : public ::testing::Test { _logger = log_v2::instance().get(log_v2::CORE); io::data::broker_id = 0; try { - config::applier::init(0, "broker_test", 0); + config::applier::init(com::centreon::common::BROKER, 0, "broker_test", 0); } catch (std::exception const& e) { (void)e; } diff --git a/broker/core/test/compression/stream/read.cc b/broker/core/test/compression/stream/read.cc index 4481aa6f53a..f8c740dc1bc 100644 --- a/broker/core/test/compression/stream/read.cc +++ b/broker/core/test/compression/stream/read.cc @@ -30,7 +30,7 @@ class CompressionStreamRead : public ::testing::Test { public: void SetUp() override { try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (const std::exception& e) { (void)e; } diff --git a/broker/core/test/compression/stream/write.cc b/broker/core/test/compression/stream/write.cc index ffa2624c1e8..f91184d6bc7 100644 --- a/broker/core/test/compression/stream/write.cc +++ b/broker/core/test/compression/stream/write.cc @@ -31,7 +31,7 @@ class CompressionStreamWrite : public ::testing::Test { public: void SetUp() override { try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (const std::exception& e) { (void)e; } diff --git a/broker/core/test/config/init.cc b/broker/core/test/config/init.cc index f6cac87c8d2..b7272ddd6b8 100644 --- a/broker/core/test/config/init.cc +++ b/broker/core/test/config/init.cc @@ -28,6 +28,6 @@ using namespace com::centreon::broker; */ TEST(init, init) { // First object. 
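Note on the recurring change in this and the following test hunks: config::applier::init() and config::applier::state::load() now take the peer type as their first argument, and every Broker call site in this PR passes com::centreon::common::BROKER. A minimal sketch of the updated call shape, assuming the header path and the surrounding fixture boilerplate (only the argument order is taken from the hunks themselves, the meaning of the trailing arguments is unchanged from the old calls):

#include "com/centreon/broker/config/applier/init.hh"  // assumed header path

// Mirrors the patched SetUp() bodies: the peer type comes first, followed by
// the same three arguments the tests already passed (here 0, "test_broker", 0).
void test_set_up() {
  try {
    com::centreon::broker::config::applier::init(
        com::centreon::common::BROKER, 0, "test_broker", 0);
  } catch (const std::exception& e) {
    (void)e;  // the fixtures deliberately ignore double-initialization errors
  }
}
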
- config::applier::init(0, "test", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test", 0); ASSERT_NO_THROW(config::applier::deinit()); } diff --git a/broker/core/test/file/stream/max_size.cc b/broker/core/test/file/stream/max_size.cc index b3cee0ebe90..6f8e437f773 100644 --- a/broker/core/test/file/stream/max_size.cc +++ b/broker/core/test/file/stream/max_size.cc @@ -40,7 +40,7 @@ int main(int argc, char* argv[]) { QCoreApplication app(argc, argv); // Initialization. - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); // Generate file name. QString filename[4]; diff --git a/broker/core/test/file/stream/mixed.cc b/broker/core/test/file/stream/mixed.cc index 62b36563fd0..de8b6dea497 100644 --- a/broker/core/test/file/stream/mixed.cc +++ b/broker/core/test/file/stream/mixed.cc @@ -102,7 +102,7 @@ int main(int argc, char* argv[]) { QCoreApplication app(argc, argv); // Initialization. - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); // Generate file name. QString filename(QDir::tempPath()); diff --git a/broker/core/test/file/stream/read.cc b/broker/core/test/file/stream/read.cc index 3d06836f38e..cd493d459a1 100644 --- a/broker/core/test/file/stream/read.cc +++ b/broker/core/test/file/stream/read.cc @@ -42,7 +42,7 @@ int main(int argc, char* argv[]) { QCoreApplication app(argc, argv); // Initialization. - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); // Generate file name. QString filename(QDir::tempPath()); diff --git a/broker/core/test/file/stream/write.cc b/broker/core/test/file/stream/write.cc index 3b71a3e49be..cb9e1d1bdee 100644 --- a/broker/core/test/file/stream/write.cc +++ b/broker/core/test/file/stream/write.cc @@ -40,7 +40,7 @@ int main(int argc, char* argv[]) { QCoreApplication app(argc, argv); // Initialization. - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); // Generate file name. 
std::string filename(misc::temp_path()); diff --git a/broker/core/test/main.cc b/broker/core/test/main.cc index 9d819272489..ea389a1373c 100644 --- a/broker/core/test/main.cc +++ b/broker/core/test/main.cc @@ -32,7 +32,8 @@ std::shared_ptr g_io_context = class CentreonBrokerEnvironment : public testing::Environment { public: void SetUp() override { - com::centreon::broker::config::applier::state::load(); + com::centreon::broker::config::applier::state::load( + com::centreon::common::BROKER); com::centreon::broker::io::protocols::load(); com::centreon::broker::io::events::load(); } diff --git a/broker/core/test/misc/string.cc b/broker/core/test/misc/string.cc index cf18b6edf3f..db270361556 100644 --- a/broker/core/test/misc/string.cc +++ b/broker/core/test/misc/string.cc @@ -49,14 +49,6 @@ TEST(StringSplit, ManyPart) { ASSERT_EQ(lst, res); } -TEST(StringBase64, Encode) { - ASSERT_EQ(string::base64_encode("A first little attempt."), - "QSBmaXJzdCBsaXR0bGUgYXR0ZW1wdC4="); - ASSERT_EQ(string::base64_encode("A"), "QQ=="); - ASSERT_EQ(string::base64_encode("AB"), "QUI="); - ASSERT_EQ(string::base64_encode("ABC"), "QUJD"); -} - TEST(escape, simple) { ASSERT_EQ("Hello", string::escape("Hello", 10)); ASSERT_EQ("Hello", string::escape("Hello", 5)); diff --git a/broker/core/test/modules/module.cc b/broker/core/test/modules/module.cc index ac011d7aa3c..ff32623e767 100644 --- a/broker/core/test/modules/module.cc +++ b/broker/core/test/modules/module.cc @@ -32,7 +32,7 @@ class Modules : public testing::Test { public: void SetUp() override { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); _logger = log_v2::instance().get(log_v2::CORE); } diff --git a/broker/core/test/mysql/mysql.cc b/broker/core/test/mysql/mysql.cc index bfba10674d2..365fcaf2c56 100644 --- a/broker/core/test/mysql/mysql.cc +++ b/broker/core/test/mysql/mysql.cc @@ -54,7 +54,7 @@ class DatabaseStorageTest : public ::testing::Test { public: void SetUp() override { try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (std::exception const& e) { (void)e; } @@ -2064,7 +2064,8 @@ TEST_F(DatabaseStorageTest, MySqlMultiInsert) { unsigned data_index; for (data_index = 0; data_index < 1000000; ++data_index) { - row to_insert = {.name = fmt::format("name_{}", data_index), + row to_insert = {.id = 0, + .name = fmt::format("name_{}", data_index), .value = double(data_index) / 10, .t = char(data_index), .e = std::string(1, 'a' + data_index % 3), diff --git a/broker/core/test/processing/acceptor.cc b/broker/core/test/processing/acceptor.cc index 5479d4ca87e..c3497ecdb1d 100644 --- a/broker/core/test/processing/acceptor.cc +++ b/broker/core/test/processing/acceptor.cc @@ -33,7 +33,7 @@ class ProcessingTest : public ::testing::Test { public: void SetUp() override { try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (std::exception const& e) { (void)e; } diff --git a/broker/core/test/processing/feeder.cc b/broker/core/test/processing/feeder.cc index 0f9a33fef5e..9be3f3dc468 100644 --- a/broker/core/test/processing/feeder.cc +++ b/broker/core/test/processing/feeder.cc @@ -46,7 +46,7 @@ class TestFeeder : public ::testing::Test { public: void SetUp() override { stats::center::load(); - config::applier::state::load(); + config::applier::state::load(com::centreon::common::BROKER); file::disk_accessor::load(10000); 
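misc::string::base64_encode and its unit test are removed in the hunks above because the helper now lives in the shared common library: a later hunk of this PR has the graphite stream include common/crypto/base64.hh and call base64_encode() from com::centreon::common::crypto. A usage sketch under the assumption that the new helper keeps the same std::string-in, std::string-out shape as the one deleted here:

#include <string>

#include "common/crypto/base64.hh"  // path as included by broker/graphite/src/stream.cc

// Builds the same header the graphite stream assembles: "user:password",
// base64-encoded, behind "Authorization: Basic ".
std::string basic_auth_header(const std::string& user,
                              const std::string& password) {
  using com::centreon::common::crypto::base64_encode;
  return "Authorization: Basic " + base64_encode(user + ":" + password) + "\n";
}
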
multiplexing::engine::load(); io::protocols::load(); diff --git a/broker/deps.go b/broker/deps.go deleted file mode 100644 index cd7a37a177d..00000000000 --- a/broker/deps.go +++ /dev/null @@ -1,122 +0,0 @@ -package main - -import ( - "bufio" - "fmt" - "log" - "os" - "path/filepath" - "regexp" - "strings" - ) - -// MaxDepth Max depth in search tree -const MaxDepth = 3 - -func findIncludes(file string, treated *[]string, depth int) { - var myList []string - file1 := filepath.Clean(file) - //file1, err := filepath.EvalSymLinks(file1) - //if err != nil { - // fmt.Println("Error: " + err.Error()) - // os.Exit(1) - //} - f, err := os.Open(file1) - if err != nil { - file1 = strings.TrimPrefix(file, "inc/") - for _, pref := range []string{ - "/usr/local/include/", - "rrd/inc/", - "generator/inc/", - "graphite/inc/", - "tls/inc/", - "lua/inc/", - "redis/inc/", - "neb/inc/", - "tcp/inc/", - "bam/inc/", - "core/inc/", - "watchdog/inc/", - "stats/inc/", - "notification/inc/", - "../bbdo/", - "dumper/inc/", - "storage/inc/", - "unified_sql/inc/", - "influxdb/inc/", - "sql/inc/" } { - f, err = os.Open(pref + file1) - if err == nil { - file1 = pref + file1 - *treated = append(*treated, file1) - break - } - } - } else { - *treated = append(*treated, file1) - } - defer f.Close() - - depth++ - if depth > MaxDepth { - return - } - - scanner := bufio.NewScanner(f) - r, _ := regexp.Compile("^#\\s*include\\s*([<\"])(.*)[>\"]") - for scanner.Scan() { - line := scanner.Text() - match := r.FindStringSubmatch(line) - if len(match) > 0 { - /* match[0] is the global match, match[1] is '<' or '"' and match[2] is the file to include */ - if match[1] == "\"" { - fmt.Printf(" \"%s\" -> \"%s\";\n", file, match[2]) - myList = append(myList, match[2]) - } else { - fmt.Printf(" \"%s\" -> \"%s\";\n", file, match[2]) - } - } - } - if err := scanner.Err(); err != nil { - log.Print(file, " --- ", err) - } - - for _, file2 := range myList { - found := false - for _, ff := range *treated { - if ff == file2 { - found = true - break - } - } - if !found { - findIncludes(file2, treated, depth) - } - } -} - -func main() { - args := os.Args[1:] - var fileList []string - - if len(args) == 0 { - for _, searchDir := range []string{"src", "inc"} { - filepath.Walk(searchDir, func(path string, f os.FileInfo, err error) error { - if strings.HasSuffix(path, ".cc") || strings.HasSuffix(path, ".hh") { - fileList = append(fileList, path) - } - return nil - }) - } - } else { - fileList = append(fileList, args[0]) - } - - fmt.Println("digraph deps {") - - var treated []string - for _, file := range fileList { - findIncludes(file, &treated, 0) - } - fmt.Println("}") -} diff --git a/broker/deps.sh b/broker/deps.sh deleted file mode 100755 index df88cbda0c6..00000000000 --- a/broker/deps.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -go run deps.go "$*" > /tmp/deps.dot -dot -Tpng /tmp/deps.dot -o deps.png -if [ -x /usr/bin/eog ] ; then - eog deps.png& -elif [ -x /usr/bin/lximage-qt ] ; then - lximage-qt deps.png& -fi - diff --git a/broker/generator/inc/com/centreon/broker/generator/factory.hh b/broker/generator/inc/com/centreon/broker/generator/factory.hh index 56d4ae7735c..ddc886d88ce 100644 --- a/broker/generator/inc/com/centreon/broker/generator/factory.hh +++ b/broker/generator/inc/com/centreon/broker/generator/factory.hh @@ -1,29 +1,28 @@ -/* -** Copyright 2017 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. 
-** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2017-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #ifndef CCB_GENERATOR_FACTORY_HH #define CCB_GENERATOR_FACTORY_HH #include "com/centreon/broker/io/factory.hh" -namespace com::centreon::broker { +namespace com::centreon::broker::generator { -namespace generator { /** * @class factory factory.hh "com/centreon/broker/generator/factory.hh" * @brief Generator streams factory. @@ -33,17 +32,17 @@ namespace generator { class factory : public io::factory { public: factory() = default; - factory(factory const& other) = delete; + factory(factory const&) = delete; ~factory() = default; factory& operator=(factory const& other) = delete; - bool has_endpoint(config::endpoint& cfg); - io::endpoint* new_endpoint(config::endpoint& cfg, - bool& is_acceptor, - std::shared_ptr cache = - std::shared_ptr()) const; + bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; + io::endpoint* new_endpoint( + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor, + std::shared_ptr cache = + std::shared_ptr()) const override; }; -} // namespace generator - -} +} // namespace com::centreon::broker::generator #endif // !CCB_GENERATOR_FACTORY_HH diff --git a/broker/generator/src/factory.cc b/broker/generator/src/factory.cc index 569e848dd09..35632de794d 100644 --- a/broker/generator/src/factory.cc +++ b/broker/generator/src/factory.cc @@ -1,20 +1,20 @@ /** -* Copyright 2017 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2017-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/generator/factory.hh" #include @@ -23,12 +23,6 @@ using namespace com::centreon::broker; using namespace com::centreon::broker::generator; -/************************************** - * * - * Public Methods * - * * - **************************************/ - /** * Check if a configuration match the generator streams. * @@ -36,9 +30,9 @@ using namespace com::centreon::broker::generator; * * @return True if configuration matches any of the generator streams. */ -bool factory::has_endpoint(config::endpoint& cfg) { - return ((cfg.type == "generator_receiver") || - (cfg.type == "generator_sender")); +bool factory::has_endpoint(config::endpoint& cfg, + io::extension* ext [[maybe_unused]]) { + return cfg.type == "generator_receiver" || cfg.type == "generator_sender"; } /** @@ -52,10 +46,9 @@ bool factory::has_endpoint(config::endpoint& cfg) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache [[maybe_unused]]) const { // Generate opener. std::auto_ptr s; if (cfg.type == "generator_receiver") diff --git a/broker/graphite/inc/com/centreon/broker/graphite/factory.hh b/broker/graphite/inc/com/centreon/broker/graphite/factory.hh index 07505b3dc83..5a26ffb2f35 100644 --- a/broker/graphite/inc/com/centreon/broker/graphite/factory.hh +++ b/broker/graphite/inc/com/centreon/broker/graphite/factory.hh @@ -39,6 +39,7 @@ class factory : public io::factory { bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; io::endpoint* new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache) const override; }; diff --git a/broker/graphite/precomp_inc/precomp.hpp b/broker/graphite/precomp_inc/precomp.hpp index 74865157a86..ec881b8a2f1 100644 --- a/broker/graphite/precomp_inc/precomp.hpp +++ b/broker/graphite/precomp_inc/precomp.hpp @@ -1,20 +1,20 @@ -/* -** Copyright 2022 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2022-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #ifndef CC_CORE_PRECOMP_HH #define CC_CORE_PRECOMP_HH @@ -27,6 +27,7 @@ #include #include +#include #include #include diff --git a/broker/graphite/src/factory.cc b/broker/graphite/src/factory.cc index f1939972b12..08dce53c8a7 100644 --- a/broker/graphite/src/factory.cc +++ b/broker/graphite/src/factory.cc @@ -1,24 +1,23 @@ /** -* Copyright 2011-2017 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2011-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/graphite/factory.hh" #include -#include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/graphite/connector.hh" #include "com/centreon/exceptions/msg_fmt.hh" @@ -36,7 +35,7 @@ using namespace com::centreon::exceptions; */ static std::string find_param(config::endpoint const& cfg, std::string const& key) { - std::map::const_iterator it{cfg.params.find(key)}; + auto it = cfg.params.find(key); if (cfg.params.end() == it) throw msg_fmt("graphite: no '{}' defined for endpoint '{}'", key, cfg.name); return it->second; @@ -54,7 +53,7 @@ static std::string find_param(config::endpoint const& cfg, static std::string get_string_param(config::endpoint const& cfg, std::string const& key, std::string const& def) { - std::map::const_iterator it(cfg.params.find(key)); + auto it = cfg.params.find(key); if (cfg.params.end() == it) return def; else @@ -73,7 +72,7 @@ static std::string get_string_param(config::endpoint const& cfg, static uint32_t get_uint_param(config::endpoint const& cfg, std::string const& key, uint32_t def) { - std::map::const_iterator it(cfg.params.find(key)); + auto it = cfg.params.find(key); uint32_t retval = 0; if (cfg.params.end() == it) return def; @@ -116,6 +115,7 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params [[maybe_unused]], bool& is_acceptor, std::shared_ptr cache) const { std::string db_host(find_param(cfg, "db_host")); diff --git a/broker/graphite/src/stream.cc b/broker/graphite/src/stream.cc index 7e1d22afacf..7717258fd25 100644 --- a/broker/graphite/src/stream.cc +++ b/broker/graphite/src/stream.cc @@ -24,6 +24,7 @@ #include "com/centreon/broker/multiplexing/engine.hh" #include "com/centreon/broker/multiplexing/publisher.hh" #include "com/centreon/exceptions/msg_fmt.hh" +#include "common/crypto/base64.hh" #include "common/log_v2/log_v2.hh" using namespace asio; @@ -31,6 +32,7 @@ using namespace com::centreon::exceptions; using namespace com::centreon::broker; using namespace com::centreon::broker::graphite; using log_v2 = com::centreon::common::log_v2::log_v2; +using namespace com::centreon::common::crypto; /** * Constructor. 
@@ -69,7 +71,7 @@ stream::stream(std::string const& metric_naming, auth.append(":").append(_db_password); _auth_query.append("Authorization: Basic ") - .append(misc::string::base64_encode(auth)) + .append(base64_encode(auth)) .append("\n"); _query.append(_auth_query); } diff --git a/broker/graphite/test/factory.cc b/broker/graphite/test/factory.cc index 01b10a33639..4119d212b8b 100644 --- a/broker/graphite/test/factory.cc +++ b/broker/graphite/test/factory.cc @@ -42,17 +42,17 @@ TEST(graphiteFactory, MissingParams) { std::shared_ptr cache; bool is_acceptor; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["db_host"] = "host"; - ASSERT_NO_THROW(delete fact.new_endpoint(cfg, is_acceptor, cache)); + ASSERT_NO_THROW(delete fact.new_endpoint(cfg, {}, is_acceptor, cache)); cfg.params["db_port"] = "toto"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), std::exception); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), std::exception); cfg.params["db_port"] = "1234"; - ASSERT_NO_THROW(delete fact.new_endpoint(cfg, is_acceptor, cache)); + ASSERT_NO_THROW(delete fact.new_endpoint(cfg, {}, is_acceptor, cache)); cfg.params["queries_per_transaction"] = "toto"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), std::exception); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), std::exception); cfg.params["queries_per_transaction"] = "1234"; - ASSERT_NO_THROW(delete fact.new_endpoint(cfg, is_acceptor, cache)); + ASSERT_NO_THROW(delete fact.new_endpoint(cfg, {}, is_acceptor, cache)); cfg.params["metric_naming"] = "centreon.metrics.$METRICID$"; - ASSERT_NO_THROW(delete fact.new_endpoint(cfg, is_acceptor, cache)); + ASSERT_NO_THROW(delete fact.new_endpoint(cfg, {}, is_acceptor, cache)); } diff --git a/broker/grpc/CMakeLists.txt b/broker/grpc/CMakeLists.txt index 023c192501a..f76d29c8722 100644 --- a/broker/grpc/CMakeLists.txt +++ b/broker/grpc/CMakeLists.txt @@ -50,9 +50,11 @@ target_link_libraries( centreon_grpc "-Wl,--whole-archive" pb_neb_lib + pb_common_lib pb_storage_lib pb_bbdo_lib pb_bam_lib + pb_bam_state_lib pb_extcmd_lib pb_open_telemetry_lib pb_rebuild_message_lib diff --git a/broker/grpc/generate_proto.py b/broker/grpc/generate_proto.py index 4c158efcf60..e84f0e010ee 100755 --- a/broker/grpc/generate_proto.py +++ b/broker/grpc/generate_proto.py @@ -112,7 +112,7 @@ class received_protobuf : public io::protobuf { T& mut_obj() override { return *((*_received).*_mutable_access)(); } - void set_obj(T&& obj) override { + void set_obj([[maybe_unused]] T&& obj) override { throw com::centreon::exceptions::msg_fmt("unauthorized usage {}", static_cast(typeid(*this).name())); } @@ -228,7 +228,7 @@ class received_protobuf : public io::protobuf { print( f"generate_proto.py : Error: Message {{ {m.group(1)} }} has no protobuf id or missing the comment /* Ignore */ : file :{file}:{line_counter}", file=sys.stderr) print( - f"Error Add /* Ignore */ or a protobuf id as example: /*io::bam, bam::de_pb_services_book_state*/", file=sys.stderr) + f"Error Add /* Ignore */ or a protobuf id as example: /*io::bam, bam::de_pb_services_book_state, XXX */", file=sys.stderr) exit(1) if len(messages) > 0: diff --git a/broker/grpc/inc/com/centreon/broker/grpc/acceptor.hh b/broker/grpc/inc/com/centreon/broker/grpc/acceptor.hh index 53700596cfc..1b91d54f419 100644 --- a/broker/grpc/inc/com/centreon/broker/grpc/acceptor.hh +++ b/broker/grpc/inc/com/centreon/broker/grpc/acceptor.hh @@ 
-22,6 +22,7 @@ #include "com/centreon/broker/io/endpoint.hh" #include "com/centreon/common/grpc/grpc_server.hh" #include "grpc_config.hh" +#include "grpc_stream.grpc.pb.h" namespace com::centreon::broker::grpc { diff --git a/broker/grpc/inc/com/centreon/broker/grpc/factory.hh b/broker/grpc/inc/com/centreon/broker/grpc/factory.hh index b0e2bcddff0..e523f81b113 100644 --- a/broker/grpc/inc/com/centreon/broker/grpc/factory.hh +++ b/broker/grpc/inc/com/centreon/broker/grpc/factory.hh @@ -44,6 +44,7 @@ class factory : public io::factory { io::extension* ext) override; io::endpoint* new_endpoint( com::centreon::broker::config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const override; diff --git a/broker/grpc/inc/com/centreon/broker/grpc/stream.hh b/broker/grpc/inc/com/centreon/broker/grpc/stream.hh index d6fa2b17e97..79a1dd38d35 100644 --- a/broker/grpc/inc/com/centreon/broker/grpc/stream.hh +++ b/broker/grpc/inc/com/centreon/broker/grpc/stream.hh @@ -79,7 +79,12 @@ template class stream : public io::stream, public bireactor_class, public std::enable_shared_from_this> { - static std::set> _instances; + /** + * @brief we store reactor instances in this container until OnDone is called + * by grpc layers. We allocate this container and never free this because + * threads terminate in unknown order. + */ + static std::set>* _instances; static std::mutex _instances_m; using read_queue = std::queue; diff --git a/broker/grpc/src/factory.cc b/broker/grpc/src/factory.cc index edb576739fe..686444eabb8 100644 --- a/broker/grpc/src/factory.cc +++ b/broker/grpc/src/factory.cc @@ -23,7 +23,6 @@ #include "com/centreon/broker/grpc/factory.hh" -#include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/grpc/acceptor.hh" #include "com/centreon/broker/grpc/connector.hh" #include "com/centreon/exceptions/msg_fmt.hh" @@ -84,18 +83,15 @@ static std::string read_file(const std::string& path) { */ io::endpoint* factory::new_endpoint( com::centreon::broker::config::endpoint& cfg, + const std::map& global_params [[maybe_unused]], bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache [[maybe_unused]]) const { if (cfg.type == "bbdo_server" || cfg.type == "bbdo_client") return _new_endpoint_bbdo_cs(cfg, is_acceptor); - std::map::const_iterator it; - // Find host (if exists). std::string host; - it = cfg.params.find("host"); + auto it = cfg.params.find("host"); if (it != cfg.params.end()) host = it->second; if (!host.empty() && @@ -274,11 +270,9 @@ io::endpoint* factory::new_endpoint( io::endpoint* factory::_new_endpoint_bbdo_cs( com::centreon::broker::config::endpoint& cfg, bool& is_acceptor) const { - std::map::const_iterator it; - // Find host (if exists). 
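The grpc stream change above turns the static set of live reactor instances into a heap-allocated set that is intentionally never freed: streams are inserted into it when registered and only erased from it in OnDone(), and, per the new comment, the container itself must survive until the very end because the threads that still reference it terminate in an unspecified order at shutdown. A simplified sketch of that intentionally leaked, mutex-guarded registry (the class and member names below are stand-ins, not the real stream template):

#include <memory>
#include <mutex>
#include <set>

class stream_like : public std::enable_shared_from_this<stream_like> {
  // Heap-allocated and never deleted: destroying it during static
  // destruction could race with callback threads that still use it.
  static std::set<std::shared_ptr<stream_like>>* _instances;
  static std::mutex _instances_m;

 public:
  // Called when a stream is created; keeps the reactor alive until OnDone().
  static void register_stream(const std::shared_ptr<stream_like>& s) {
    std::lock_guard<std::mutex> l(_instances_m);
    _instances->insert(s);
  }

  // Stand-in for the gRPC OnDone() callback: drop the registry's reference.
  void on_done() {
    std::lock_guard<std::mutex> l(_instances_m);
    _instances->erase(shared_from_this());
  }
};

std::set<std::shared_ptr<stream_like>>* stream_like::_instances =
    new std::set<std::shared_ptr<stream_like>>;
std::mutex stream_like::_instances_m;

Leaking the container trades a one-off allocation for immunity from static-destruction-order races, which is the rationale the new comment in stream.hh gives for never freeing it.
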
std::string host; - it = cfg.params.find("host"); + auto it = cfg.params.find("host"); if (it != cfg.params.end()) host = it->second; if (!host.empty() && diff --git a/broker/grpc/src/stream.cc b/broker/grpc/src/stream.cc index 282ae57582b..6ab028ac28b 100644 --- a/broker/grpc/src/stream.cc +++ b/broker/grpc/src/stream.cc @@ -104,8 +104,9 @@ const std::string com::centreon::broker::grpc::authorization_header( * @tparam bireactor_class */ template -std::set>> - stream::_instances; +std::set>>* + stream::_instances = + new std::set>>; template std::mutex stream::_instances_m; @@ -149,7 +150,7 @@ template void stream::register_stream( const std::shared_ptr>& strm) { std::lock_guard l(_instances_m); - _instances.insert(strm); + _instances->insert(strm); } /** @@ -375,7 +376,8 @@ void stream::OnDone() { std::lock_guard l(_instances_m); SPDLOG_LOGGER_DEBUG(logger, "{:p} server::OnDone()", static_cast(me.get())); - _instances.erase(std::static_pointer_cast>(me)); + _instances->erase( + std::static_pointer_cast>(me)); }); } @@ -402,7 +404,8 @@ void stream::OnDone(const ::grpc::Status& status) { SPDLOG_LOGGER_DEBUG(logger, "{:p} client::OnDone({}) {}", static_cast(me.get()), status.error_message(), status.error_details()); - _instances.erase(std::static_pointer_cast>(me)); + _instances->erase( + std::static_pointer_cast>(me)); }); } diff --git a/broker/grpc/test/factory_test.cc b/broker/grpc/test/factory_test.cc index a6ea4cf27c4..667dd71d66d 100644 --- a/broker/grpc/test/factory_test.cc +++ b/broker/grpc/test/factory_test.cc @@ -17,8 +17,6 @@ * */ -#include "grpc_stream.grpc.pb.h" - #include "grpc_test_include.hh" using system_clock = std::chrono::system_clock; @@ -49,7 +47,7 @@ TEST(grpc_factory, Exception) { bool is_acceptor; std::shared_ptr cache; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); } TEST(grpc_factory, Acceptor) { @@ -60,7 +58,7 @@ TEST(grpc_factory, Acceptor) { cfg.type = "grpc"; cfg.params["port"] = "4343"; - io::endpoint* endp = fact.new_endpoint(cfg, is_acceptor, cache); + io::endpoint* endp = fact.new_endpoint(cfg, {}, is_acceptor, cache); ASSERT_TRUE(is_acceptor); ASSERT_TRUE(endp->is_acceptor()); @@ -77,7 +75,7 @@ TEST(grpc_factory, BadPort) { cfg.type = "grpc"; cfg.params["port"] = "a4a343"; cfg.params["host"] = "10.12.13.22"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); } TEST(grpc_factory, BadHost) { @@ -89,10 +87,10 @@ TEST(grpc_factory, BadHost) { cfg.type = "grpc"; cfg.params["port"] = "4343"; cfg.params["host"] = " 10.12.13.22"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["host"] = "10.12.13.22 "; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); } TEST(grpc_factory, Connector) { @@ -107,7 +105,7 @@ TEST(grpc_factory, Connector) { std::unique_ptr f{new com::centreon::broker::grpc::factory}; ASSERT_TRUE(f->has_endpoint(cfg, nullptr)); std::unique_ptr endp{ - fact.new_endpoint(cfg, is_acceptor, cache)}; + fact.new_endpoint(cfg, {}, is_acceptor, cache)}; ASSERT_FALSE(is_acceptor); ASSERT_TRUE(endp->is_connector()); diff --git a/broker/http_tsdb/CMakeLists.txt b/broker/http_tsdb/CMakeLists.txt index 0303f80e592..b2efc757c0e 100644 --- a/broker/http_tsdb/CMakeLists.txt +++ 
b/broker/http_tsdb/CMakeLists.txt @@ -43,6 +43,7 @@ set(HEADERS add_library(http_tsdb STATIC ${SOURCES} ${HEADERS}) add_dependencies(http_tsdb pb_neb_lib + pb_common_lib pb_header_lib pb_storage_lib ) diff --git a/broker/http_tsdb/src/factory.cc b/broker/http_tsdb/src/factory.cc index d456c708776..962261a71c5 100644 --- a/broker/http_tsdb/src/factory.cc +++ b/broker/http_tsdb/src/factory.cc @@ -19,7 +19,6 @@ #include "com/centreon/broker/http_tsdb/factory.hh" -#include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/http_tsdb/column.hh" #include "com/centreon/exceptions/msg_fmt.hh" @@ -42,7 +41,7 @@ factory::factory(const std::string& name, */ std::string factory::find_param(config::endpoint const& cfg, std::string const& key) const { - std::map::const_iterator it{cfg.params.find(key)}; + auto it = cfg.params.find(key); if (cfg.params.end() == it) throw msg_fmt("{}: no '{}' defined for endpoint '{}'", _name, key, cfg.name); @@ -143,8 +142,7 @@ void factory::create_conf(const config::endpoint& cfg, std::string addr(find_param(cfg, "db_host")); std::string target = "/write"; - std::map::const_iterator it{ - cfg.params.find("http_target")}; + auto it = cfg.params.find("http_target"); if (it != cfg.params.end()) { target = it->second; } diff --git a/broker/http_tsdb/src/stream.cc b/broker/http_tsdb/src/stream.cc index 103fe09f242..7803d48377a 100644 --- a/broker/http_tsdb/src/stream.cc +++ b/broker/http_tsdb/src/stream.cc @@ -23,7 +23,6 @@ #include "com/centreon/broker/cache/global_cache.hh" #include "com/centreon/broker/exceptions/shutdown.hh" #include "com/centreon/broker/http_tsdb/internal.hh" -#include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::broker; using namespace com::centreon::exceptions; @@ -343,7 +342,8 @@ static time_point _epoch = system_clock::from_time_t(0); void stream::send_handler(const boost::beast::error_code& err, const std::string& detail, const request::pointer& request, - const common::http::response_ptr& response) { + const common::http::response_ptr& response + [[maybe_unused]]) { auto actu_stat_avg = [&]() -> void { if (request->get_connect_time() > _epoch && request->get_send_time() > _epoch) { diff --git a/broker/http_tsdb/test/factory_test.cc b/broker/http_tsdb/test/factory_test.cc index f9c6b60f789..bfacc10f6d3 100644 --- a/broker/http_tsdb/test/factory_test.cc +++ b/broker/http_tsdb/test/factory_test.cc @@ -43,10 +43,11 @@ class factory_test : public http_tsdb::factory { : http_tsdb::factory(name, g_io_context) {} io::endpoint* new_endpoint( - config::endpoint& cfg, - bool& is_acceptor, - std::shared_ptr cache = - std::shared_ptr()) const override { + config::endpoint& cfg [[maybe_unused]], + const std::map& global_params [[maybe_unused]], + bool& is_acceptor [[maybe_unused]], + std::shared_ptr cache + [[maybe_unused]] = std::shared_ptr()) const override { return nullptr; } void create_conf(const config::endpoint& cfg, diff --git a/broker/http_tsdb/test/stream_test.cc b/broker/http_tsdb/test/stream_test.cc index b7aec6f5d36..e90dd0570c8 100644 --- a/broker/http_tsdb/test/stream_test.cc +++ b/broker/http_tsdb/test/stream_test.cc @@ -73,9 +73,13 @@ class request_test : public http_tsdb::request { SPDLOG_LOGGER_TRACE(_logger, "delete request {}", _request_id); } - void add_metric(const storage::pb_metric& metric) override { ++_nb_metric; } + void add_metric(const storage::pb_metric& metric [[maybe_unused]]) override { + ++_nb_metric; + } - void add_status(const storage::pb_status& status) override { ++_nb_status; } + 
void add_status(const storage::pb_status& status [[maybe_unused]]) override { + ++_nb_status; + } unsigned get_request_id() const { return _request_id; } @@ -171,11 +175,14 @@ class connection_send_bagot : public http::connection_base { } } - void _on_accept(http::connect_callback_type&& callback) override{}; + void _on_accept(http::connect_callback_type&& callback + [[maybe_unused]]) override{}; - void answer(const http::response_ptr& response, - http::answer_callback_type&& callback) override {} - void receive_request(http::request_callback_type&& callback) override {} + void answer(const http::response_ptr& response [[maybe_unused]], + http::answer_callback_type&& callback [[maybe_unused]]) override { + } + void receive_request(http::request_callback_type&& callback + [[maybe_unused]]) override {} asio::ip::tcp::socket& get_socket() { return _not_used; } }; diff --git a/broker/influxdb/inc/com/centreon/broker/influxdb/connector.hh b/broker/influxdb/inc/com/centreon/broker/influxdb/connector.hh index 906cf3132d4..e81793b9b77 100644 --- a/broker/influxdb/inc/com/centreon/broker/influxdb/connector.hh +++ b/broker/influxdb/inc/com/centreon/broker/influxdb/connector.hh @@ -21,7 +21,6 @@ #include "com/centreon/broker/influxdb/column.hh" #include "com/centreon/broker/io/endpoint.hh" -#include "com/centreon/broker/sql/database_config.hh" namespace com::centreon::broker::influxdb { diff --git a/broker/influxdb/inc/com/centreon/broker/influxdb/factory.hh b/broker/influxdb/inc/com/centreon/broker/influxdb/factory.hh index 8b751bfe496..6c6cfb45b74 100644 --- a/broker/influxdb/inc/com/centreon/broker/influxdb/factory.hh +++ b/broker/influxdb/inc/com/centreon/broker/influxdb/factory.hh @@ -38,6 +38,7 @@ class factory : public io::factory { bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; io::endpoint* new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache) const override; }; diff --git a/broker/influxdb/inc/com/centreon/broker/influxdb/influxdb.hh b/broker/influxdb/inc/com/centreon/broker/influxdb/influxdb.hh index e70eaafd9cd..d96cb99617a 100644 --- a/broker/influxdb/inc/com/centreon/broker/influxdb/influxdb.hh +++ b/broker/influxdb/inc/com/centreon/broker/influxdb/influxdb.hh @@ -19,11 +19,8 @@ #ifndef CCB_INFLUXDB_INFLUXDB_HH #define CCB_INFLUXDB_INFLUXDB_HH -#include "bbdo/storage/metric.hh" -#include "com/centreon/broker/influxdb/column.hh" #include "com/centreon/broker/influxdb/influxdb.hh" #include "com/centreon/broker/influxdb/line_protocol_query.hh" -#include "com/centreon/broker/influxdb/macro_cache.hh" namespace com::centreon::broker::influxdb { /** diff --git a/broker/influxdb/inc/com/centreon/broker/influxdb/stream.hh b/broker/influxdb/inc/com/centreon/broker/influxdb/stream.hh index 4a96ccae577..919cd82150d 100644 --- a/broker/influxdb/inc/com/centreon/broker/influxdb/stream.hh +++ b/broker/influxdb/inc/com/centreon/broker/influxdb/stream.hh @@ -19,12 +19,7 @@ #ifndef CCB_INFLUXDB_STREAM_HH #define CCB_INFLUXDB_STREAM_HH -#include "com/centreon/broker/influxdb/column.hh" #include "com/centreon/broker/influxdb/influxdb.hh" -#include "com/centreon/broker/influxdb/macro_cache.hh" -#include "com/centreon/broker/io/stream.hh" - -#include "com/centreon/broker/persistent_cache.hh" namespace com::centreon::broker { diff --git a/broker/influxdb/src/factory.cc b/broker/influxdb/src/factory.cc index 4dc0f2ec576..f410c1de781 100644 --- a/broker/influxdb/src/factory.cc +++ b/broker/influxdb/src/factory.cc @@ -1,27 +1,25 @@ 
/** -* Copyright 2011-2017 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2011-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/influxdb/factory.hh" #include #include -#include "com/centreon/broker/config/parser.hh" -#include "com/centreon/broker/influxdb/column.hh" #include "com/centreon/broker/influxdb/connector.hh" #include "com/centreon/exceptions/msg_fmt.hh" @@ -40,7 +38,7 @@ using namespace com::centreon::exceptions; */ static std::string find_param(config::endpoint const& cfg, std::string const& key) { - std::map::const_iterator it{cfg.params.find(key)}; + auto it = cfg.params.find(key); if (cfg.params.end() == it) throw msg_fmt("influxdb: no '{}' defined for endpoint '{}'", key, cfg.name); return it->second; @@ -75,6 +73,7 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params [[maybe_unused]], bool& is_acceptor, std::shared_ptr cache) const { std::string user(find_param(cfg, "db_user")); @@ -85,8 +84,7 @@ io::endpoint* factory::new_endpoint( unsigned short port(0); { std::stringstream ss; - std::map::const_iterator it{ - cfg.params.find("db_port")}; + auto it = cfg.params.find("db_port"); if (it == cfg.params.end()) port = 8086; else { @@ -101,8 +99,7 @@ io::endpoint* factory::new_endpoint( uint32_t queries_per_transaction; { - std::map::const_iterator it{ - cfg.params.find("queries_per_transaction")}; + auto it = cfg.params.find("queries_per_transaction"); if (it != cfg.params.end()) { if (!absl::SimpleAtoi(it->second, &queries_per_transaction)) { throw msg_fmt( diff --git a/broker/influxdb/src/stream.cc b/broker/influxdb/src/stream.cc index ee0ba081fc9..940b512edfd 100644 --- a/broker/influxdb/src/stream.cc +++ b/broker/influxdb/src/stream.cc @@ -17,10 +17,7 @@ */ #include "com/centreon/broker/influxdb/stream.hh" -#include "bbdo/storage/metric.hh" #include "com/centreon/broker/exceptions/shutdown.hh" -#include "com/centreon/broker/influxdb/influxdb.hh" -#include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/multiplexing/engine.hh" #include "com/centreon/broker/multiplexing/publisher.hh" #include "common/log_v2/log_v2.hh" diff --git a/broker/influxdb/test/factory.cc b/broker/influxdb/test/factory.cc index 010855d32ca..84fb968ecdb 100644 --- 
a/broker/influxdb/test/factory.cc +++ b/broker/influxdb/test/factory.cc @@ -44,25 +44,25 @@ TEST(InfluxDBFactory, MissingParams) { std::shared_ptr cache; bool is_acceptor; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["db_user"] = "admin"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["db_password"] = "pass"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["db_host"] = "host"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["db_name"] = "centreon"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["db_port"] = "centreon"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["db_port"] = "4242"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["queries_per_transaction"] = "centreon"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["queries_per_transaction"] = "100"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["status_timeseries"] = "host_status"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); } TEST(InfluxDBFactory, StatusException) { @@ -85,7 +85,7 @@ TEST(InfluxDBFactory, StatusException) { conf["status_column"] = nullptr; cfg.cfg = conf; std::unique_ptr ep; - ASSERT_NO_THROW(ep.reset(fact.new_endpoint(cfg, is_acceptor, cache))); + ASSERT_NO_THROW(ep.reset(fact.new_endpoint(cfg, {}, is_acceptor, cache))); json js1 = json::object({{"name", json{nullptr}}, {"value", json{nullptr}}, @@ -93,7 +93,7 @@ TEST(InfluxDBFactory, StatusException) { {"type", json{nullptr}}}); conf["status_column"] = js1; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json js2 = json::object({{"name", "host"}, {"value", json{nullptr}}, @@ -101,7 +101,7 @@ TEST(InfluxDBFactory, StatusException) { {"type", json{nullptr}}}); conf["status_column"] = js2; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json js3 = json::object({{"name", "host"}, {"value", "val"}, @@ -109,7 +109,7 @@ TEST(InfluxDBFactory, StatusException) { {"type", json{nullptr}}}); conf["status_column"] = js3; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json js4 = json::object({{"name", "host"}, {"value", "val"}, @@ -117,7 +117,7 @@ TEST(InfluxDBFactory, StatusException) { {"type", json{nullptr}}}); conf["status_column"] = js4; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), 
msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json js5 = json::object({{"name", "host"}, {"value", "val"}, @@ -125,7 +125,7 @@ TEST(InfluxDBFactory, StatusException) { {"type", "bad"}}); conf["status_column"] = js5; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json js6 = json::object({{"name", "host"}, {"value", "val"}, @@ -133,13 +133,13 @@ TEST(InfluxDBFactory, StatusException) { {"type", "number"}}); conf["status_column"] = js6; cfg.cfg = conf; - ASSERT_NO_THROW(delete fact.new_endpoint(cfg, is_acceptor, cache)); + ASSERT_NO_THROW(delete fact.new_endpoint(cfg, {}, is_acceptor, cache)); json js7 = json::object( {{"name", ""}, {"value", "val"}, {"is_tag", "true"}, {"type", "number"}}); conf["status_column"] = js7; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json array = json::array(); array.push_back(js6); @@ -148,7 +148,7 @@ TEST(InfluxDBFactory, StatusException) { array.push_back(js6); conf["status_column"] = array; cfg.cfg = conf; - ASSERT_NO_THROW(delete fact.new_endpoint(cfg, is_acceptor, cache)); + ASSERT_NO_THROW(delete fact.new_endpoint(cfg, {}, is_acceptor, cache)); } TEST(InfluxDBFactory, MetricException) { @@ -171,7 +171,7 @@ TEST(InfluxDBFactory, MetricException) { conf["metric_column"] = nullptr; cfg.cfg = conf; std::unique_ptr ep; - ASSERT_NO_THROW(ep.reset(fact.new_endpoint(cfg, is_acceptor, cache))); + ASSERT_NO_THROW(ep.reset(fact.new_endpoint(cfg, {}, is_acceptor, cache))); json js1 = json::object({{"name", json{nullptr}}, {"value", json{nullptr}}, @@ -179,7 +179,7 @@ TEST(InfluxDBFactory, MetricException) { {"type", json{nullptr}}}); conf["metrics_column"] = js1; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json js2 = json::object({{"name", "host"}, {"value", json{nullptr}}, @@ -187,7 +187,7 @@ TEST(InfluxDBFactory, MetricException) { {"type", json{nullptr}}}); conf["metrics_column"] = js2; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json js3 = json::object({{"name", "host"}, {"value", "val"}, @@ -195,7 +195,7 @@ TEST(InfluxDBFactory, MetricException) { {"type", json{nullptr}}}); conf["metrics_column"] = js3; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json js4 = json::object({{"name", "host"}, {"value", "val"}, @@ -203,7 +203,7 @@ TEST(InfluxDBFactory, MetricException) { {"type", json{nullptr}}}); conf["metrics_column"] = js4; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json js5 = json::object({{"name", "host"}, {"value", "val"}, @@ -211,7 +211,7 @@ TEST(InfluxDBFactory, MetricException) { {"type", "bad"}}); conf["metrics_column"] = js5; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json js6 = json::object({{"name", "host"}, {"value", "val"}, @@ -219,7 +219,7 @@ TEST(InfluxDBFactory, MetricException) { {"type", "number"}}); conf["metrics_column"] = js6; 
cfg.cfg = conf; - ASSERT_NO_THROW(delete fact.new_endpoint(cfg, is_acceptor, cache)); + ASSERT_NO_THROW(delete fact.new_endpoint(cfg, {}, is_acceptor, cache)); json js7 = json::object({{"name", ""}, {"value", "val"}, @@ -227,7 +227,7 @@ TEST(InfluxDBFactory, MetricException) { {"type", "number"}}); conf["metrics_column"] = js7; cfg.cfg = conf; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); json array = json::array(); array.push_back(js6); @@ -236,5 +236,5 @@ TEST(InfluxDBFactory, MetricException) { array.push_back(js6); conf["metrics_column"] = array; cfg.cfg = conf; - ASSERT_NO_THROW(delete fact.new_endpoint(cfg, is_acceptor, cache)); + ASSERT_NO_THROW(delete fact.new_endpoint(cfg, {}, is_acceptor, cache)); } diff --git a/broker/lua/CMakeLists.txt b/broker/lua/CMakeLists.txt index 52cfcedd95a..644b57529de 100644 --- a/broker/lua/CMakeLists.txt +++ b/broker/lua/CMakeLists.txt @@ -59,9 +59,11 @@ add_library("${LUA}" SHARED ) add_dependencies(${LUA} pb_neb_lib + pb_common_lib pb_header_lib pb_storage_lib pb_bam_lib + pb_bam_state_lib pb_open_telemetry_lib) target_link_libraries("${LUA}" ${LUA_LIBRARIES} crypto ssl bbdo_storage bbdo_bam spdlog::spdlog pb_storage_lib) diff --git a/broker/lua/inc/com/centreon/broker/lua/factory.hh b/broker/lua/inc/com/centreon/broker/lua/factory.hh index 0214a417a92..4e075fb3e66 100644 --- a/broker/lua/inc/com/centreon/broker/lua/factory.hh +++ b/broker/lua/inc/com/centreon/broker/lua/factory.hh @@ -38,6 +38,7 @@ class factory : public io::factory { bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; io::endpoint* new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const override; diff --git a/broker/lua/src/factory.cc b/broker/lua/src/factory.cc index d2de4d39553..1f453934485 100644 --- a/broker/lua/src/factory.cc +++ b/broker/lua/src/factory.cc @@ -1,20 +1,20 @@ /** -* Copyright 2017-2022 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2017-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/lua/factory.hh" #include @@ -37,7 +37,7 @@ using namespace nlohmann; */ static std::string find_param(config::endpoint const& cfg, std::string const& key) { - std::map::const_iterator it{cfg.params.find(key)}; + auto it = cfg.params.find(key); if (cfg.params.end() == it) throw msg_fmt("lua: no '{}' defined for endpoint '{}'", key, cfg.name); return it->second; @@ -72,6 +72,7 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params [[maybe_unused]], bool& is_acceptor, std::shared_ptr cache) const { std::map conf_map; diff --git a/broker/lua/src/macro_cache.cc b/broker/lua/src/macro_cache.cc index ceafc912a6e..70c7e172695 100644 --- a/broker/lua/src/macro_cache.cc +++ b/broker/lua/src/macro_cache.cc @@ -1014,6 +1014,7 @@ void macro_cache::_process_service(std::shared_ptr const& data) { switch (output.size()) { case 2: current_service.set_long_output(std::string(output[1])); + [[fallthrough]]; case 1: current_service.set_output(std::string(output[0])); break; diff --git a/broker/lua/test/lua.cc b/broker/lua/test/lua.cc index eef66569bdf..1e22ce0821c 100644 --- a/broker/lua/test/lua.cc +++ b/broker/lua/test/lua.cc @@ -53,7 +53,7 @@ class LuaTest : public ::testing::Test { _logger = log_v2::instance().get(log_v2::LUA); try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (std::exception const& e) { (void)e; } diff --git a/broker/neb/CMakeLists.txt b/broker/neb/CMakeLists.txt index 8fd3e496809..fbaa1754f80 100644 --- a/broker/neb/CMakeLists.txt +++ b/broker/neb/CMakeLists.txt @@ -90,12 +90,14 @@ add_dependencies( table_max_size target_neb pb_neb_lib + pb_common_lib pb_header_lib) -target_link_libraries(nebbase +target_link_libraries(nebbase -L${PROTOBUF_LIB_DIR} - protobuf - pb_neb_lib + protobuf + pb_neb_lib + pb_common_lib pb_header_lib pb_open_telemetry_lib) diff --git a/broker/neb/inc/com/centreon/broker/neb/acknowledgement.hh b/broker/neb/inc/com/centreon/broker/neb/acknowledgement.hh index 2049c3cdd41..0d7c0af013f 100644 --- a/broker/neb/inc/com/centreon/broker/neb/acknowledgement.hh +++ b/broker/neb/inc/com/centreon/broker/neb/acknowledgement.hh @@ -19,12 +19,9 @@ #ifndef CCB_NEB_ACKNOWLEDGEMENT_HH #define CCB_NEB_ACKNOWLEDGEMENT_HH -#include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" -#include "com/centreon/broker/timestamp.hh" namespace com::centreon::broker { @@ -76,6 +73,6 @@ class acknowledgement : public io::data { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_ACKNOWLEDGEMENT_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/callbacks.hh b/broker/neb/inc/com/centreon/broker/neb/callbacks.hh index f9ee10a1cb1..d411463a499 100644 --- a/broker/neb/inc/com/centreon/broker/neb/callbacks.hh +++ b/broker/neb/inc/com/centreon/broker/neb/callbacks.hh @@ -70,6 +70,8 @@ int callback_pb_bench(int callback_type, void* data); int callback_otl_metrics(int callback_type, void* data); +int callback_agent_stats(int callback_type, void* data); + void unregister_callbacks(); } // namespace neb diff --git a/broker/neb/inc/com/centreon/broker/neb/check.hh b/broker/neb/inc/com/centreon/broker/neb/check.hh index 
435f8eb4456..03792d5308b 100644 --- a/broker/neb/inc/com/centreon/broker/neb/check.hh +++ b/broker/neb/inc/com/centreon/broker/neb/check.hh @@ -19,7 +19,7 @@ #ifndef CCB_NEB_CHECK_HH #define CCB_NEB_CHECK_HH -#include "com/centreon/broker/io/data.hh" +//#include "com/centreon/broker/io/data.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/timestamp.hh" @@ -56,6 +56,6 @@ class check : public io::data { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_CHECK_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/custom_variable.hh b/broker/neb/inc/com/centreon/broker/neb/custom_variable.hh index e71ab2b152b..7ffac45742b 100644 --- a/broker/neb/inc/com/centreon/broker/neb/custom_variable.hh +++ b/broker/neb/inc/com/centreon/broker/neb/custom_variable.hh @@ -19,11 +19,7 @@ #ifndef CCB_NEB_CUSTOM_VARIABLE_HH #define CCB_NEB_CUSTOM_VARIABLE_HH -#include "com/centreon/broker/io/event_info.hh" -#include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/custom_variable_status.hh" -#include "com/centreon/broker/neb/internal.hh" namespace com::centreon::broker { diff --git a/broker/neb/inc/com/centreon/broker/neb/engcmd/factory.hh b/broker/neb/inc/com/centreon/broker/neb/engcmd/factory.hh index 257dba34deb..9a506a88412 100644 --- a/broker/neb/inc/com/centreon/broker/neb/engcmd/factory.hh +++ b/broker/neb/inc/com/centreon/broker/neb/engcmd/factory.hh @@ -39,14 +39,16 @@ class factory : public io::factory { ~factory() = default; factory& operator=(factory const& other) = delete; bool has_endpoint(config::endpoint& cfg); - io::endpoint* new_endpoint(config::endpoint& cfg, - bool& is_acceptor, - std::shared_ptr cache = - std::shared_ptr()) const; + io::endpoint* new_endpoint( + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor, + std::shared_ptr cache = + std::shared_ptr()) const; }; } // namespace engcmd } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_ENGCMD_FACTORY_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/events.hh b/broker/neb/inc/com/centreon/broker/neb/events.hh index d364d16fc23..5593fc6b0df 100644 --- a/broker/neb/inc/com/centreon/broker/neb/events.hh +++ b/broker/neb/inc/com/centreon/broker/neb/events.hh @@ -22,14 +22,12 @@ #include "com/centreon/broker/neb/acknowledgement.hh" #include "com/centreon/broker/neb/comment.hh" #include "com/centreon/broker/neb/custom_variable.hh" -#include "com/centreon/broker/neb/custom_variable_status.hh" #include "com/centreon/broker/neb/downtime.hh" #include "com/centreon/broker/neb/host.hh" #include "com/centreon/broker/neb/host_check.hh" #include "com/centreon/broker/neb/host_group.hh" #include "com/centreon/broker/neb/host_group_member.hh" #include "com/centreon/broker/neb/host_parent.hh" -#include "com/centreon/broker/neb/host_status.hh" #include "com/centreon/broker/neb/instance.hh" #include "com/centreon/broker/neb/instance_configuration.hh" #include "com/centreon/broker/neb/instance_status.hh" @@ -39,6 +37,5 @@ #include "com/centreon/broker/neb/service_check.hh" #include "com/centreon/broker/neb/service_group.hh" #include "com/centreon/broker/neb/service_group_member.hh" -#include "com/centreon/broker/neb/service_status.hh" #endif // CCB_NEB_EVENTS_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/host.hh b/broker/neb/inc/com/centreon/broker/neb/host.hh index c8c2180a4e9..624ac4a8c9f 100644 --- 
a/broker/neb/inc/com/centreon/broker/neb/host.hh +++ b/broker/neb/inc/com/centreon/broker/neb/host.hh @@ -19,12 +19,8 @@ #ifndef CCB_NEB_HOST_HH #define CCB_NEB_HOST_HH -#include "com/centreon/broker/io/event_info.hh" -#include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/host_service.hh" #include "com/centreon/broker/neb/host_status.hh" -#include "com/centreon/broker/neb/internal.hh" namespace com::centreon::broker { diff --git a/broker/neb/inc/com/centreon/broker/neb/host_group.hh b/broker/neb/inc/com/centreon/broker/neb/host_group.hh index f2eb5527299..62a002340a5 100644 --- a/broker/neb/inc/com/centreon/broker/neb/host_group.hh +++ b/broker/neb/inc/com/centreon/broker/neb/host_group.hh @@ -19,7 +19,6 @@ #ifndef CCB_NEB_HOST_GROUP_HH #define CCB_NEB_HOST_GROUP_HH -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/group.hh" diff --git a/broker/neb/inc/com/centreon/broker/neb/host_group_member.hh b/broker/neb/inc/com/centreon/broker/neb/host_group_member.hh index 4b0df7caaf0..bbf3276d0bb 100644 --- a/broker/neb/inc/com/centreon/broker/neb/host_group_member.hh +++ b/broker/neb/inc/com/centreon/broker/neb/host_group_member.hh @@ -19,7 +19,6 @@ #ifndef CCB_NEB_HOST_GROUP_MEMBER_HH #define CCB_NEB_HOST_GROUP_MEMBER_HH -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/group_member.hh" diff --git a/broker/neb/inc/com/centreon/broker/neb/host_parent.hh b/broker/neb/inc/com/centreon/broker/neb/host_parent.hh index 6a80aa518a7..9c2749d4346 100644 --- a/broker/neb/inc/com/centreon/broker/neb/host_parent.hh +++ b/broker/neb/inc/com/centreon/broker/neb/host_parent.hh @@ -19,8 +19,6 @@ #ifndef CCB_NEB_HOST_PARENT_HH #define CCB_NEB_HOST_PARENT_HH -#include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" diff --git a/broker/neb/inc/com/centreon/broker/neb/host_status.hh b/broker/neb/inc/com/centreon/broker/neb/host_status.hh index c7f504aefc3..137933d00af 100644 --- a/broker/neb/inc/com/centreon/broker/neb/host_status.hh +++ b/broker/neb/inc/com/centreon/broker/neb/host_status.hh @@ -19,12 +19,10 @@ #ifndef CCB_NEB_HOST_STATUS_HH #define CCB_NEB_HOST_STATUS_HH -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/host_service_status.hh" #include "com/centreon/broker/neb/internal.hh" -#include "com/centreon/broker/timestamp.hh" namespace com::centreon::broker { @@ -58,6 +56,6 @@ class host_status : public host_service_status { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_HOST_STATUS_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/instance.hh b/broker/neb/inc/com/centreon/broker/neb/instance.hh index 5bd2f4d45b4..fab2b805b79 100644 --- a/broker/neb/inc/com/centreon/broker/neb/instance.hh +++ b/broker/neb/inc/com/centreon/broker/neb/instance.hh @@ -19,12 +19,9 @@ #ifndef CCB_NEB_INSTANCE_HH #define CCB_NEB_INSTANCE_HH -#include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" 
#include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" -#include "com/centreon/broker/timestamp.hh" namespace com::centreon::broker { diff --git a/broker/neb/inc/com/centreon/broker/neb/instance_status.hh b/broker/neb/inc/com/centreon/broker/neb/instance_status.hh index badf7b9ffaa..13d499cdd14 100644 --- a/broker/neb/inc/com/centreon/broker/neb/instance_status.hh +++ b/broker/neb/inc/com/centreon/broker/neb/instance_status.hh @@ -19,7 +19,6 @@ #ifndef CCB_EVENTS_INSTANCE_STATUS_HH #define CCB_EVENTS_INSTANCE_STATUS_HH -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" diff --git a/broker/neb/inc/com/centreon/broker/neb/internal.hh b/broker/neb/inc/com/centreon/broker/neb/internal.hh index be3ea2e973e..d57d1131eb0 100644 --- a/broker/neb/inc/com/centreon/broker/neb/internal.hh +++ b/broker/neb/inc/com/centreon/broker/neb/internal.hh @@ -33,9 +33,6 @@ namespace neb { // Forward declaration. class acknowledgement; -// Configuration file. -extern std::string gl_configuration_file; - // Sender object. extern multiplexing::publisher gl_publisher; @@ -129,6 +126,9 @@ using pb_otl_metrics = io::protobuf< opentelemetry::proto::collector::metrics::v1::ExportMetricsServiceRequest, make_type(io::storage, storage::de_pb_otl_metrics)>; +using pb_agent_stats = + io::protobuf; + } // namespace neb } // namespace com::centreon::broker diff --git a/broker/neb/inc/com/centreon/broker/neb/log_entry.hh b/broker/neb/inc/com/centreon/broker/neb/log_entry.hh index 81468d4b231..940497c6b93 100644 --- a/broker/neb/inc/com/centreon/broker/neb/log_entry.hh +++ b/broker/neb/inc/com/centreon/broker/neb/log_entry.hh @@ -19,12 +19,9 @@ #ifndef CCB_NEB_LOG_ENTRY_HH #define CCB_NEB_LOG_ENTRY_HH -#include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" -#include "com/centreon/broker/timestamp.hh" namespace com::centreon::broker { @@ -88,6 +85,6 @@ class log_entry : public io::data { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_LOG_ENTRY_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/node_events_factory.hh b/broker/neb/inc/com/centreon/broker/neb/node_events_factory.hh index 810ee16c338..38c7a9a5eb1 100644 --- a/broker/neb/inc/com/centreon/broker/neb/node_events_factory.hh +++ b/broker/neb/inc/com/centreon/broker/neb/node_events_factory.hh @@ -39,13 +39,15 @@ class node_events_factory : public io::factory { node_events_factory& operator=(node_events_factory const& other); io::factory* clone() const; bool has_endpoint(config::endpoint& cfg); - io::endpoint* new_endpoint(config::endpoint& cfg, - bool& is_acceptor, - std::shared_ptr cache = - std::shared_ptr()) const; + io::endpoint* new_endpoint( + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor, + std::shared_ptr cache = + std::shared_ptr()) const; }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_NODE_EVENTS_FACTORY_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/responsive_instance.hh b/broker/neb/inc/com/centreon/broker/neb/responsive_instance.hh index b3ca3c19a73..47dbb521e37 100644 --- a/broker/neb/inc/com/centreon/broker/neb/responsive_instance.hh +++ b/broker/neb/inc/com/centreon/broker/neb/responsive_instance.hh 
@@ -19,7 +19,6 @@ #ifndef CCB_NEB_RESPONSIVE_INSTANCE_HH #define CCB_NEB_RESPONSIVE_INSTANCE_HH -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" diff --git a/broker/neb/inc/com/centreon/broker/neb/service.hh b/broker/neb/inc/com/centreon/broker/neb/service.hh index 04d988a507b..3321e5cd732 100644 --- a/broker/neb/inc/com/centreon/broker/neb/service.hh +++ b/broker/neb/inc/com/centreon/broker/neb/service.hh @@ -1,29 +1,25 @@ -/* -** Copyright 2009-2013,2015 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2009-2013,2015-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #ifndef CCB_NEB_SERVICE_HH #define CCB_NEB_SERVICE_HH -#include "com/centreon/broker/io/event_info.hh" -#include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/host_service.hh" -#include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/service_status.hh" namespace com::centreon::broker { @@ -71,6 +67,6 @@ class service : public host_service, public service_status { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_SERVICE_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/service_group.hh b/broker/neb/inc/com/centreon/broker/neb/service_group.hh index a626709421f..6f8272863c9 100644 --- a/broker/neb/inc/com/centreon/broker/neb/service_group.hh +++ b/broker/neb/inc/com/centreon/broker/neb/service_group.hh @@ -19,7 +19,6 @@ #ifndef CCB_NEB_SERVICE_GROUP_HH #define CCB_NEB_SERVICE_GROUP_HH -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/group.hh" diff --git a/broker/neb/inc/com/centreon/broker/neb/service_group_member.hh b/broker/neb/inc/com/centreon/broker/neb/service_group_member.hh index ca8be8c2d7a..312a3761676 100644 --- a/broker/neb/inc/com/centreon/broker/neb/service_group_member.hh +++ b/broker/neb/inc/com/centreon/broker/neb/service_group_member.hh @@ -19,7 +19,6 @@ #ifndef CCB_NEB_SERVICE_GROUP_MEMBER_HH #define CCB_NEB_SERVICE_GROUP_MEMBER_HH -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/group_member.hh" diff --git a/broker/neb/inc/com/centreon/broker/neb/service_status.hh b/broker/neb/inc/com/centreon/broker/neb/service_status.hh index a2d635f5a34..80c9f956b1b 100644 --- a/broker/neb/inc/com/centreon/broker/neb/service_status.hh +++ b/broker/neb/inc/com/centreon/broker/neb/service_status.hh @@ -19,7 +19,6 @@ #ifndef CCB_NEB_SERVICE_STATUS_HH #define CCB_NEB_SERVICE_STATUS_HH -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/host_service_status.hh" diff --git a/broker/neb/inc/com/centreon/io/directory_entry.hh b/broker/neb/inc/com/centreon/io/directory_entry.hh deleted file mode 100644 index 1437c46c332..00000000000 --- a/broker/neb/inc/com/centreon/io/directory_entry.hh +++ /dev/null @@ -1,59 +0,0 @@ -/* -** Copyright 2012-2013 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. 
-** -** For more information : contact@centreon.com -*/ - -#ifndef CC_IO_DIRECTORY_ENTRY_HH -#define CC_IO_DIRECTORY_ENTRY_HH - -#include "com/centreon/handle.hh" -#include "com/centreon/io/file_entry.hh" - -namespace com::centreon { - -namespace io { -/** - * @class directory_entry directory_entry.hh - *"com/centreon/io/directory_entry.hh" - * @brief Wrapper of libc's directory_entryectory. - * - * Wrap standard directory_entryectory objects. - */ -class directory_entry { - public: - directory_entry(char const* path = NULL); - directory_entry(std::string const& path); - directory_entry(directory_entry const& right); - directory_entry& operator=(directory_entry const& right); - bool operator==(directory_entry const& right) const throw(); - bool operator!=(directory_entry const& right) const throw(); - ~directory_entry() throw(); - static std::string current_path(); - file_entry const& entry() const throw(); - std::list const& entry_list(std::string const& filter = ""); - - private: - void _internal_copy(directory_entry const& right); - static int _nmatch(char const* str, char const* pattern); - - file_entry _entry; - std::list _entry_lst; -}; -} // namespace io - -} - -#endif // !CC_IO_DIRECTORY_ENTRY_HH diff --git a/broker/neb/inc/com/centreon/io/file_entry.hh b/broker/neb/inc/com/centreon/io/file_entry.hh deleted file mode 100644 index 3daa97804b1..00000000000 --- a/broker/neb/inc/com/centreon/io/file_entry.hh +++ /dev/null @@ -1,70 +0,0 @@ -/* -** Copyright 2012-2013 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ - -#ifndef CC_IO_FILE_ENTRY_HH -#define CC_IO_FILE_ENTRY_HH - -#include -#include -#include "com/centreon/handle.hh" - -#ifdef _WIN32 -#define stat _stat -#endif // _WIN32 - -namespace com::centreon { - -namespace io { -/** - * @class file_entry file_entry.hh "com/centreon/io/file_entry.hh" - * @brief Wrapper of stat information. - * - * Wrap standard stat information. 
- */ -class file_entry { - public: - file_entry(char const* path = NULL); - file_entry(std::string const& path); - file_entry(file_entry const& right); - ~file_entry() throw(); - file_entry& operator=(file_entry const& right); - bool operator==(file_entry const& right) const throw(); - bool operator!=(file_entry const& right) const throw(); - std::string base_name() const; - std::string directory_name() const; - std::string file_name() const; - bool is_directory() const throw(); - bool is_link() const throw(); - bool is_regular() const throw(); - std::string const& path() const throw(); - void path(char const* path); - void path(std::string const& path); - void refresh(); - unsigned long long size() const throw(); - - private: - void _internal_copy(file_entry const& right); - - std::string _path; - struct stat _sbuf; -}; -} // namespace io - -} - -#endif // !CC_IO_FILE_ENTRY_HH diff --git a/broker/neb/src/broker.cc b/broker/neb/src/broker.cc index c69613aacc8..f75dba58b76 100644 --- a/broker/neb/src/broker.cc +++ b/broker/neb/src/broker.cc @@ -167,8 +167,7 @@ void broker_module_init(void const* arg) { &neb::pb_host_status::operations, "hosts"); e.register_event(make_type(io::neb, neb::de_pb_adaptive_host_status), "AdaptiveHostStatus", - &neb::pb_adaptive_host_status::operations, - "hosts"); + &neb::pb_adaptive_host_status::operations, "hosts"); e.register_event(make_type(io::neb, neb::de_pb_severity), "Severity", &neb::pb_severity::operations, "severities"); @@ -228,6 +227,8 @@ void broker_module_init(void const* arg) { &neb::pb_instance_configuration::operations, "no_table"); e.register_event(neb::pb_otl_metrics::static_type(), "OTLMetrics", &neb::pb_otl_metrics::operations, "otl_metrics"); + e.register_event(neb::pb_agent_stats::static_type(), "AgentStats", + &neb::pb_agent_stats::operations, "agent_information"); } } } diff --git a/broker/neb/src/callbacks.cc b/broker/neb/src/callbacks.cc index a40071ceaa2..deeabb59047 100644 --- a/broker/neb/src/callbacks.cc +++ b/broker/neb/src/callbacks.cc @@ -27,13 +27,16 @@ #include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/neb/events.hh" #include "com/centreon/broker/neb/initial.hh" +#include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/set_log_data.hh" +#include "com/centreon/common/file.hh" #include "com/centreon/common/time.hh" #include "com/centreon/common/utf8.hh" #include "com/centreon/engine/anomalydetection.hh" #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/events/loop.hh" #include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/nebcallbacks.hh" #include "com/centreon/engine/nebstructs.hh" #include "com/centreon/engine/severity.hh" @@ -111,7 +114,8 @@ static struct { {NEBCALLBACK_GROUP_DATA, &neb::callback_group}, {NEBCALLBACK_GROUP_MEMBER_DATA, &neb::callback_group_member}, {NEBCALLBACK_RELATION_DATA, &neb::callback_relation}, - {NEBCALLBACK_BENCH_DATA, &neb::callback_pb_bench}}; + {NEBCALLBACK_BENCH_DATA, &neb::callback_pb_bench}, + {NEBCALLBACK_AGENT_STATS, &neb::callback_agent_stats}}; static struct { uint32_t macro; @@ -123,7 +127,8 @@ static struct { {NEBCALLBACK_GROUP_DATA, &neb::callback_pb_group}, {NEBCALLBACK_GROUP_MEMBER_DATA, &neb::callback_pb_group_member}, {NEBCALLBACK_RELATION_DATA, &neb::callback_pb_relation}, - {NEBCALLBACK_BENCH_DATA, &neb::callback_pb_bench}}; + {NEBCALLBACK_BENCH_DATA, &neb::callback_pb_bench}, + {NEBCALLBACK_AGENT_STATS, &neb::callback_agent_stats}}; // Registered callbacks. 
std::list> neb::gl_registered_callbacks; @@ -2410,6 +2415,21 @@ int neb::callback_pb_process(int callback_type, void* data) { inst.set_pid(getpid()); inst.set_version(get_program_version()); + /* Here we are Engine. The idea is to know if broker is able to handle the + * evoluated negotiation. The goal is to send the hash of the configuration + * directory to the broker. */ + auto& engine_config = config::applier::state::instance().engine_config_dir(); + std::error_code ec; + if (!engine_config.empty() && std::filesystem::exists(engine_config, ec)) { + inst.set_engine_config_version(common::hash_directory( + config::applier::state::instance().engine_config_dir(), ec)); + } + if (ec) { + SPDLOG_LOGGER_ERROR( + neb_logger, "callbacks: error while hashing engine configuration: {}", + ec.message()); + } + // Check process event type. process_data = static_cast(data); if (NEBTYPE_PROCESS_EVENTLOOPSTART == process_data->type) { @@ -2614,12 +2634,8 @@ int neb::callback_relation(int callback_type, void* data) { if (relation->hst && relation->dep_hst && !relation->svc && !relation->dep_svc) { // Find host IDs. - int host_id; - int parent_id; - { - host_id = engine::get_host_id(relation->dep_hst->name()); - parent_id = engine::get_host_id(relation->hst->name()); - } + int host_id = relation->dep_hst->host_id(); + int parent_id = relation->hst->host_id(); if (host_id && parent_id) { // Generate parent event. auto new_host_parent{std::make_shared()}; @@ -2670,10 +2686,8 @@ int neb::callback_pb_relation(int callback_type [[maybe_unused]], void* data) { if (relation->hst && relation->dep_hst && !relation->svc && !relation->dep_svc) { // Find host IDs. - int host_id; - int parent_id; - host_id = engine::get_host_id(relation->dep_hst->name()); - parent_id = engine::get_host_id(relation->hst->name()); + int host_id = relation->dep_hst->host_id(); + int parent_id = relation->hst->host_id(); if (host_id && parent_id) { // Generate parent event. auto new_host_parent{std::make_shared()}; @@ -3429,10 +3443,11 @@ int32_t neb::callback_pb_service_status(int callback_type [[maybe_unused]], static_cast(data); const engine::service* es = static_cast(ds->object_ptr); neb_logger->debug( - "callbacks: pb_service_status ({},{}) status {}, attributes {}, type {}", + "callbacks: pb_service_status ({},{}) status {}, attributes {}, type {}, " + "last check {}", es->host_id(), es->service_id(), static_cast(es->get_current_state()), ds->attributes, - static_cast(es->get_check_type())); + static_cast(es->get_check_type()), es->get_last_check()); auto handle_acknowledgement = [](uint16_t state, auto& r) { neb_logger->debug("Looking for acknowledgement on service ({}:{})", @@ -3782,6 +3797,30 @@ int neb::callback_otl_metrics(int, void* data) { return 0; } +int neb::callback_agent_stats(int, void* data) { + nebstruct_agent_stats_data* ds = + static_cast(data); + + auto to_send = std::make_shared(); + + to_send->mut_obj().set_poller_id( + config::applier::state::instance().poller_id()); + + for (const auto& cumul_data : *ds->data) { + AgentInfo* to_fill = to_send->mut_obj().add_stats(); + to_fill->set_major(cumul_data.major); + to_fill->set_minor(cumul_data.minor); + to_fill->set_patch(cumul_data.patch); + to_fill->set_reverse(cumul_data.reverse); + to_fill->set_os(cumul_data.os); + to_fill->set_os_version(cumul_data.os_version); + to_fill->set_nb_agent(cumul_data.nb_agent); + } + + gl_publisher.write(to_send); + return 0; +} + /** * Unregister callbacks. 
*/ diff --git a/broker/neb/src/initial.cc b/broker/neb/src/initial.cc index 4fc23eb0626..704157301fc 100644 --- a/broker/neb/src/initial.cc +++ b/broker/neb/src/initial.cc @@ -288,19 +288,15 @@ static void send_host_parents_list(neb_sender sender = neb::callback_relation) { try { // Loop through all hosts. - for (host_map::iterator it{com::centreon::engine::host::hosts.begin()}, - end{com::centreon::engine::host::hosts.end()}; - it != end; ++it) { + for (const auto& [_, sptr_host] : com::centreon::engine::host::hosts) { // Loop through all parents. - for (host_map_unsafe::iterator pit{it->second->parent_hosts.begin()}, - pend{it->second->parent_hosts.end()}; - pit != pend; ++pit) { + for (const auto& [_, sptr_host_parent] : sptr_host->parent_hosts) { // Fill callback struct. nebstruct_relation_data nsrd; memset(&nsrd, 0, sizeof(nsrd)); nsrd.type = NEBTYPE_PARENT_ADD; - nsrd.hst = pit->second; - nsrd.dep_hst = it->second.get(); + nsrd.hst = sptr_host_parent.get(); + nsrd.dep_hst = sptr_host.get(); // Callback. sender(NEBTYPE_PARENT_ADD, &nsrd); @@ -434,12 +430,6 @@ static void send_pb_instance_configuration() { neb::gl_publisher.write(ic); } -/************************************** - * * - * Global Functions * - * * - **************************************/ - /** * Send initial configuration to the global publisher. */ @@ -457,25 +447,24 @@ void neb::send_initial_configuration() { send_instance_configuration(); } -/************************************** - * * - * Global Functions * - * * - **************************************/ - /** * Send initial configuration to the global publisher. */ void neb::send_initial_pb_configuration() { - SPDLOG_LOGGER_INFO(neb_logger, "init: send poller pb conf"); - send_severity_list(); - send_tag_list(); - send_pb_host_list(); - send_pb_service_list(); - send_pb_custom_variables_list(); - send_pb_downtimes_list(); - send_pb_host_parents_list(); - send_pb_host_group_list(); - send_pb_service_group_list(); +// if (config::applier::state::instance().broker_needs_update()) { + SPDLOG_LOGGER_INFO(neb_logger, "init: sending poller configuration"); + send_severity_list(); + send_tag_list(); + send_pb_host_list(); + send_pb_service_list(); + send_pb_custom_variables_list(); + send_pb_downtimes_list(); + send_pb_host_parents_list(); + send_pb_host_group_list(); + send_pb_service_group_list(); +// } else { +// SPDLOG_LOGGER_INFO(neb_logger, +// "init: No need to send poller configuration"); +// } send_pb_instance_configuration(); } diff --git a/broker/neb/src/internal.cc b/broker/neb/src/internal.cc index 8de6fb606af..1f000b64fbb 100644 --- a/broker/neb/src/internal.cc +++ b/broker/neb/src/internal.cc @@ -1,33 +1,24 @@ /** -* Copyright 2011,2015 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2011,2015, 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/neb/internal.hh" using namespace com::centreon::broker; -/************************************** - * * - * Global Objects * - * * - **************************************/ - -// Configuration file name. -std::string neb::gl_configuration_file; - // Sender object. multiplexing::publisher neb::gl_publisher; diff --git a/broker/neb/src/neb.cc b/broker/neb/src/neb.cc index 8dde234fa4a..51b43fc9c5c 100644 --- a/broker/neb/src/neb.cc +++ b/broker/neb/src/neb.cc @@ -16,6 +16,8 @@ * For more information : contact@centreon.com */ +#include +#include #include #include @@ -25,12 +27,13 @@ #include "com/centreon/broker/neb/callbacks.hh" #include "com/centreon/broker/neb/instance_configuration.hh" #include "com/centreon/engine/nebcallbacks.hh" -#include "com/centreon/exceptions/msg_fmt.hh" +#include "common.pb.h" #include "common/log_v2/log_v2.hh" using namespace com::centreon::broker; using namespace com::centreon::exceptions; using com::centreon::common::log_v2::log_v2; +namespace po = boost::program_options; // Specify the event broker API version. NEB_API_VERSION(CURRENT_NEB_API_VERSION) @@ -87,7 +90,7 @@ int nebmodule_deinit(int flags, int reason) { * * @return 0 on success, any other value on failure. */ -int nebmodule_init(int flags, char const* args, void* handle) { +int nebmodule_init(int flags, const char* args, void* handle) { neb_logger = log_v2::instance().get(log_v2::NEB); try { @@ -120,25 +123,49 @@ int nebmodule_init(int flags, char const* args, void* handle) { try { // Set configuration file. - if (args) { - char const* config_file("config_file="); - size_t config_file_size(strlen(config_file)); - if (!strncmp(args, config_file, config_file_size)) - args += config_file_size; - neb::gl_configuration_file = args; - } else + if (!args) throw msg_fmt("main: no configuration file provided"); + // Declare the supported options. + po::options_description desc("Allowed options"); + desc.add_options() // list of options + ("config_file,c", po::value(), + "set the module JSON configuration file") // 1st option + ("engine_conf_dir,e", po::value(), + "set the Engine configuration directory"); // 2nd option + po::positional_options_description pos; + // The first positional argument is interpreted as config_file, this is + // useful because currently the wui configure cbmod like this. + pos.add("config_file", 1); + std::vector av = po::split_unix(args); + po::variables_map vm; + po::store(po::command_line_parser(av).options(desc).positional(pos).run(), + vm); + po::notify(vm); + + std::string configuration_file; + if (vm.count("config_file")) + configuration_file = vm["config_file"].as(); + else + throw msg_fmt("main: no configuration file provided"); + + std::string engine_conf_dir; + + if (vm.count("engine_conf_dir")) + engine_conf_dir = vm["engine_conf_dir"].as(); + // Try configuration parsing. 
com::centreon::broker::config::parser p; - com::centreon::broker::config::state s{ - p.parse(neb::gl_configuration_file)}; + com::centreon::broker::config::state s{p.parse(configuration_file)}; + + s.set_engine_config_dir(engine_conf_dir); // Initialization. /* This is a little hack to avoid to replace the log file set by * centengine */ s.mut_log_conf().allow_only_atomic_changes(true); - com::centreon::broker::config::applier::init(s); + com::centreon::broker::config::applier::init( + com::centreon::common::ENGINE, s); try { log_v2::instance().apply(s.log_conf()); } catch (const std::exception& e) { diff --git a/broker/rrd/inc/com/centreon/broker/rrd/factory.hh b/broker/rrd/inc/com/centreon/broker/rrd/factory.hh index 8fcc5afe351..68dc8474a5d 100644 --- a/broker/rrd/inc/com/centreon/broker/rrd/factory.hh +++ b/broker/rrd/inc/com/centreon/broker/rrd/factory.hh @@ -38,10 +38,12 @@ class factory : public io::factory { factory(factory const& other) = delete; factory& operator=(factory const& other) = delete; bool has_endpoint(config::endpoint& cfg, io::extension* ext); - io::endpoint* new_endpoint(config::endpoint& cfg, - bool& is_acceptor, - std::shared_ptr cache = - std::shared_ptr()) const; + io::endpoint* new_endpoint( + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor, + std::shared_ptr cache = + std::shared_ptr()) const; }; } // namespace rrd diff --git a/broker/rrd/src/factory.cc b/broker/rrd/src/factory.cc index 2242c5218c7..e1834776286 100644 --- a/broker/rrd/src/factory.cc +++ b/broker/rrd/src/factory.cc @@ -18,7 +18,6 @@ #include "com/centreon/broker/rrd/factory.hh" -#include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/rrd/connector.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -40,7 +39,7 @@ static std::string find_param(config::endpoint const& cfg, std::string const& key, bool thrw = true, std::string const& def = "") { - std::map::const_iterator it{cfg.params.find(key)}; + auto it = cfg.params.find(key); if (cfg.params.end() == it) { if (thrw) throw msg_fmt( @@ -77,10 +76,10 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params + [[maybe_unused]], bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache [[maybe_unused]]) const { auto logger = log_v2::instance().get(log_v2::RRD); // Local socket path. @@ -95,8 +94,7 @@ io::endpoint* factory::new_endpoint( // Get rrd creator cache size. uint32_t cache_size = 16; { - std::map::const_iterator it{ - cfg.params.find("cache_size")}; + auto it = cfg.params.find("cache_size"); if (it != cfg.params.end() && !absl::SimpleAtoi(it->second, &cache_size)) { throw msg_fmt("RRD: bad port defined for endpoint '{}'", cfg.name); } @@ -124,8 +122,7 @@ io::endpoint* factory::new_endpoint( // Should status be written ? bool write_status; { - std::map::const_iterator it{ - cfg.params.find("write_status")}; + auto it = cfg.params.find("write_status"); if (it != cfg.params.end()) { if (!absl::SimpleAtob(it->second, &write_status)) { log_v2::instance() @@ -150,8 +147,7 @@ io::endpoint* factory::new_endpoint( // Ignore update errors (2.4.0-compatible behavior). 
bool ignore_update_errors; { - std::map::const_iterator it{ - cfg.params.find("ignore_update_errors")}; + auto it = cfg.params.find("ignore_update_errors"); if (it != cfg.params.end()) { if (!absl::SimpleAtob(it->second, &ignore_update_errors)) { logger->error( diff --git a/broker/rrd/test/factory.cc b/broker/rrd/test/factory.cc index 2bd311b5cc9..5356e07355f 100644 --- a/broker/rrd/test/factory.cc +++ b/broker/rrd/test/factory.cc @@ -45,7 +45,7 @@ TEST(RRDFactory, Exception) { bool is_acceptor; std::shared_ptr cache; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); } TEST(RRDFactory, Simple) { @@ -55,26 +55,26 @@ TEST(RRDFactory, Simple) { bool is_acceptor; std::shared_ptr cache; - ASSERT_THROW(fact->new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact->new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["path"] = "/tmp/"; - ASSERT_THROW(fact->new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact->new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["port"] = "/tmp/test"; - ASSERT_THROW(fact->new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact->new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["port"] = "4242"; - ASSERT_THROW(fact->new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact->new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["cache_size"] = "dsasd"; - ASSERT_THROW(fact->new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact->new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["cache_size"] = "50"; - ASSERT_THROW(fact->new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact->new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["metrics_path"] = "toto"; - ASSERT_THROW(fact->new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact->new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["status_path"] = "toto"; - ASSERT_NO_THROW(delete fact->new_endpoint(cfg, is_acceptor, cache)); + ASSERT_NO_THROW(delete fact->new_endpoint(cfg, {}, is_acceptor, cache)); cfg.params["write_metrics"] = "false"; cfg.params["write_status"] = "false"; cfg.params["ignore_update_errors"] = "false"; cfg.params["path"] = ""; - ASSERT_NO_THROW(delete fact->new_endpoint(cfg, is_acceptor, cache)); + ASSERT_NO_THROW(delete fact->new_endpoint(cfg, {}, is_acceptor, cache)); } TEST(RRDFactory, Output) { @@ -92,8 +92,8 @@ TEST(RRDFactory, Output) { cfg.params["write_status"] = "false"; cfg.params["ignore_update_errors"] = "false"; cfg.params["path"] = ""; - rrd::connector* con{ - static_cast(fact.new_endpoint(cfg, is_acceptor, cache))}; + rrd::connector* con{static_cast( + fact.new_endpoint(cfg, {}, is_acceptor, cache))}; auto out = con->open(); diff --git a/broker/rrd/test/rrd.cc b/broker/rrd/test/rrd.cc index f9dd96646bf..a3b409d0a23 100644 --- a/broker/rrd/test/rrd.cc +++ b/broker/rrd/test/rrd.cc @@ -33,7 +33,7 @@ class Rrd : public ::testing::Test { public: void SetUp() override { try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (std::exception const& e) { (void)e; } diff --git a/broker/simu/CMakeLists.txt b/broker/simu/CMakeLists.txt index 4dbedc26cc1..4aa12f0cc81 100644 --- a/broker/simu/CMakeLists.txt +++ b/broker/simu/CMakeLists.txt @@ -46,9 +46,11 @@ add_library("${SIMU}" SHARED ) add_dependencies(${SIMU} pb_neb_lib + pb_common_lib pb_header_lib 
pb_storage_lib pb_bam_lib + pb_bam_state_lib pb_open_telemetry_lib) target_link_libraries("${SIMU}" ${LUA_LIBRARIES} spdlog::spdlog) diff --git a/broker/simu/inc/com/centreon/broker/simu/factory.hh b/broker/simu/inc/com/centreon/broker/simu/factory.hh index 6d4176e53b7..445600c0a90 100644 --- a/broker/simu/inc/com/centreon/broker/simu/factory.hh +++ b/broker/simu/inc/com/centreon/broker/simu/factory.hh @@ -39,6 +39,7 @@ class factory : public io::factory { bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; io::endpoint* new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const override; diff --git a/broker/simu/src/factory.cc b/broker/simu/src/factory.cc index ef35cc8dd32..8fac144e572 100644 --- a/broker/simu/src/factory.cc +++ b/broker/simu/src/factory.cc @@ -1,25 +1,24 @@ /** -* Copyright 2017-2022 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2017-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/simu/factory.hh" #include #include -#include "com/centreon/broker/misc/variant.hh" #include "com/centreon/broker/simu/connector.hh" #include "com/centreon/exceptions/msg_fmt.hh" @@ -38,7 +37,7 @@ using namespace nlohmann; */ static std::string find_param(config::endpoint const& cfg, std::string const& key) { - std::map::const_iterator it{cfg.params.find(key)}; + auto it = cfg.params.find(key); if (cfg.params.end() == it) throw msg_fmt("lua: no '{}' defined for endpoint '{}'", key, cfg.name); return it->second; @@ -71,10 +70,11 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { * * @return New endpoint. 
*/ -io::endpoint* factory::new_endpoint(config::endpoint& cfg, - bool& is_acceptor, - std::shared_ptr cache - __attribute__((unused))) const { +io::endpoint* factory::new_endpoint( + config::endpoint& cfg, + const std::map& global_params [[maybe_unused]], + bool& is_acceptor, + std::shared_ptr cache __attribute__((unused))) const { std::map conf_map; std::string err; diff --git a/broker/simu/test/simu.cc b/broker/simu/test/simu.cc index 59ad7f8ccfa..2401951020f 100644 --- a/broker/simu/test/simu.cc +++ b/broker/simu/test/simu.cc @@ -40,7 +40,7 @@ class SimuGenericTest : public ::testing::Test { void SetUp() override { _logger = log_v2::instance().get(log_v2::LUA); try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (std::exception const& e) { (void)e; } diff --git a/broker/sql/CMakeLists.txt b/broker/sql/CMakeLists.txt index 00789ede713..e90649709d6 100644 --- a/broker/sql/CMakeLists.txt +++ b/broker/sql/CMakeLists.txt @@ -43,6 +43,7 @@ add_library("${SQL}" SHARED ) add_dependencies(${SQL} pb_neb_lib + pb_common_lib pb_header_lib pb_open_telemetry_lib ) diff --git a/broker/sql/inc/com/centreon/broker/sql/factory.hh b/broker/sql/inc/com/centreon/broker/sql/factory.hh index dbeb248411d..f54e0ec6f88 100644 --- a/broker/sql/inc/com/centreon/broker/sql/factory.hh +++ b/broker/sql/inc/com/centreon/broker/sql/factory.hh @@ -38,10 +38,12 @@ class factory : public io::factory { ~factory() = default; factory& operator=(factory const& other) = delete; bool has_endpoint(config::endpoint& cfg, io::extension* ext); - io::endpoint* new_endpoint(config::endpoint& cfg, - bool& is_acceptor, - std::shared_ptr cache = - std::shared_ptr()) const; + io::endpoint* new_endpoint( + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor, + std::shared_ptr cache = + std::shared_ptr()) const; }; } // namespace sql diff --git a/broker/sql/src/factory.cc b/broker/sql/src/factory.cc index de930a65a37..1b07b8a2318 100644 --- a/broker/sql/src/factory.cc +++ b/broker/sql/src/factory.cc @@ -20,7 +20,6 @@ #include -#include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/sql/connector.hh" #include "common/log_v2/log_v2.hh" @@ -53,18 +52,16 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache [[maybe_unused]]) const { // Database configuration. - database_config dbcfg(cfg); + database_config dbcfg(cfg, global_params); // Cleanup check interval. uint32_t cleanup_check_interval = 0; { - std::map::const_iterator it{ - cfg.params.find("cleanup_check_interval")}; + auto it = cfg.params.find("cleanup_check_interval"); if (it != cfg.params.end() && !absl::SimpleAtoi(it->second, &cleanup_check_interval)) { log_v2::instance() @@ -78,8 +75,7 @@ io::endpoint* factory::new_endpoint( bool enable_cmd_cache = false; { - std::map::const_iterator it( - cfg.params.find("enable_command_cache")); + auto it = cfg.params.find("enable_command_cache"); if (it != cfg.params.end() && !absl::SimpleAtob(it->second, &enable_cmd_cache)) { log_v2::instance() @@ -101,8 +97,7 @@ io::endpoint* factory::new_endpoint( // By default, 5 minutes. 
uint32_t instance_timeout(5 * 60); { - std::map::const_iterator it( - cfg.params.find("instance_timeout")); + auto it = cfg.params.find("instance_timeout"); if (it != cfg.params.end() && !absl::SimpleAtoi(it->second, &instance_timeout)) { log_v2::instance() @@ -117,8 +112,7 @@ io::endpoint* factory::new_endpoint( // Use state events ? bool wse = false; { - std::map::const_iterator it( - cfg.params.find("with_state_events")); + auto it = cfg.params.find("with_state_events"); if (it != cfg.params.end()) { if (!absl::SimpleAtob(it->second, &wse)) { log_v2::instance() diff --git a/broker/sql/src/main.cc b/broker/sql/src/main.cc index 7885bb8532f..ddf310b3c1d 100644 --- a/broker/sql/src/main.cc +++ b/broker/sql/src/main.cc @@ -18,7 +18,6 @@ #include "com/centreon/broker/io/protocols.hh" #include "com/centreon/broker/sql/factory.hh" -#include "com/centreon/broker/sql/stream.hh" #include "common/log_v2/log_v2.hh" using namespace com::centreon::broker; diff --git a/broker/stats/CMakeLists.txt b/broker/stats/CMakeLists.txt index d5cc721408d..ac04ef05bf0 100644 --- a/broker/stats/CMakeLists.txt +++ b/broker/stats/CMakeLists.txt @@ -19,9 +19,9 @@ set(INC_DIR "${PROJECT_SOURCE_DIR}/stats/inc/com/centreon/broker/stats") set(SRC_DIR "${PROJECT_SOURCE_DIR}/stats/src") set(TEST_DIR "${PROJECT_SOURCE_DIR}/stats/test") -include_directories("${PROJECT_SOURCE_DIR}/stats/inc") -include_directories("${PROJECT_SOURCE_DIR}/dumper/inc") -include_directories("${PROJECT_SOURCE_DIR}/neb/inc") +include_directories("${PROJECT_SOURCE_DIR}/stats/inc" + "${PROJECT_SOURCE_DIR}/dumper/inc" + "${PROJECT_SOURCE_DIR}/neb/inc") # Stats module. set(STATS "15-stats") @@ -41,6 +41,7 @@ add_library( ${INC_DIR}/parser.hh ${INC_DIR}/worker.hh ${INC_DIR}/worker_pool.hh) +add_dependencies("${STATS}" target_broker_message) set_target_properties("${STATS}" PROPERTIES PREFIX "") target_link_libraries("${STATS}" spdlog::spdlog) target_precompile_headers(${STATS} PRIVATE precomp_inc/precomp.hpp) diff --git a/broker/stats/src/main.cc b/broker/stats/src/main.cc index 58048da4383..2a7f5974126 100644 --- a/broker/stats/src/main.cc +++ b/broker/stats/src/main.cc @@ -62,8 +62,7 @@ void broker_module_init(void const* arg) { // Check that stats are enabled. config::state const& base_cfg(*static_cast(arg)); bool loaded(false); - std::map::const_iterator it( - base_cfg.params().find("stats")); + auto it = base_cfg.params().find("stats"); if (it != base_cfg.params().end()) { try { // Parse configuration. 
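For readers following the recurring signature change in the hunks above, here is a minimal standalone sketch (not Centreon code) of the parameter-lookup pattern these factories now rely on: endpoint-local parameters are read with "auto it = cfg.params.find(...)", and the new global_params argument supplies broker-wide values, as in database_config(cfg, global_params). The std::string key/value types of the map and the fallback-to-global behaviour shown here are assumptions inferred from the surrounding call sites; the original template arguments are not visible in this extract.

    // Standalone illustration only -- compiles on its own, does not use the Centreon API.
    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    using params = std::map<std::string, std::string>;

    // Look a key up in the endpoint configuration first, then fall back to the
    // global parameters (assumed behaviour, for illustration).
    std::optional<std::string> find_param(const params& endpoint_params,
                                          const params& global_params,
                                          const std::string& key) {
      auto it = endpoint_params.find(key);  // 'auto it = ...' replaces the old
      if (it != endpoint_params.end())      // spelled-out const_iterator form
        return it->second;
      it = global_params.find(key);
      if (it != global_params.end())
        return it->second;
      return std::nullopt;
    }

    int main() {
      params endpoint{{"db_name", "centreon"}};
      params global{{"db_host", "localhost"}, {"db_name", "ignored"}};
      std::cout << *find_param(endpoint, global, "db_name") << '\n';  // centreon
      std::cout << *find_param(endpoint, global, "db_host") << '\n';  // localhost
      return 0;
    }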
diff --git a/broker/stats/test/stats.cc b/broker/stats/test/stats.cc index 3bf822aa645..dd740adcb14 100644 --- a/broker/stats/test/stats.cc +++ b/broker/stats/test/stats.cc @@ -18,38 +18,29 @@ */ #include #include -#include #include -#include #include "com/centreon/broker/config/applier/endpoint.hh" #include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/exceptions/shutdown.hh" #include "com/centreon/broker/file/disk_accessor.hh" #include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/io/factory.hh" #include "com/centreon/broker/io/protocols.hh" #include "com/centreon/broker/io/stream.hh" #include "com/centreon/broker/misc/misc.hh" -#include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/multiplexing/engine.hh" -#include "com/centreon/broker/multiplexing/muxer_filter.hh" #include "com/centreon/broker/sql/mysql_manager.hh" #include "com/centreon/broker/stats/builder.hh" -#include "com/centreon/broker/stats/center.hh" -#include "com/centreon/common/pool.hh" -#include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; using namespace com::centreon::broker; - class StatsTest : public ::testing::Test { public: void SetUp() override { stats::center::load(); mysql_manager::load(); - config::applier::state::load(); + config::applier::state::load(com::centreon::common::BROKER); file::disk_accessor::load(10000); multiplexing::engine::load(); io::protocols::load(); @@ -159,10 +150,11 @@ class fact : public io::factory { } io::endpoint* new_endpoint( - config::endpoint& cfg __attribute__((__unused__)), + config::endpoint& cfg [[maybe_unused]], + const std::map& global_params [[maybe_unused]], bool& is_acceptor, - __attribute__((__unused__)) std::shared_ptr cache = - std::shared_ptr()) const override { + std::shared_ptr cache + [[maybe_unused]] = std::shared_ptr()) const override { endp* p{new endp()}; is_acceptor = true; return p; @@ -250,7 +242,7 @@ TEST_F(StatsTest, BuilderWithEndpoints) { io::protocols::instance().reg("CentreonRetention", test, 1, 7); io::protocols::instance().reg("CentreonSecondaryFailover1", test, 1, 7); io::protocols::instance().reg("CentreonSecondaryFailover2", test, 1, 7); - config::applier::endpoint::instance().apply(s.endpoints()); + config::applier::endpoint::instance().apply(s.endpoints(), {}); // Remove temporary file. 
::remove(config_file.c_str()); diff --git a/broker/storage/CMakeLists.txt b/broker/storage/CMakeLists.txt index 1a7cc17974d..5ba7eaa101b 100644 --- a/broker/storage/CMakeLists.txt +++ b/broker/storage/CMakeLists.txt @@ -41,6 +41,7 @@ set(CONFLICTMGR PARENT_SCOPE) add_dependencies(conflictmgr pb_neb_lib + pb_common_lib pb_header_lib pb_storage_lib pb_open_telemetry_lib diff --git a/broker/storage/inc/com/centreon/broker/storage/factory.hh b/broker/storage/inc/com/centreon/broker/storage/factory.hh index 2dd9f4161b3..ccd646d55df 100644 --- a/broker/storage/inc/com/centreon/broker/storage/factory.hh +++ b/broker/storage/inc/com/centreon/broker/storage/factory.hh @@ -37,10 +37,12 @@ class factory : public io::factory { ~factory() = default; factory& operator=(factory const&) = delete; bool has_endpoint(config::endpoint& cfg, io::extension* ext); - io::endpoint* new_endpoint(config::endpoint& cfg, - bool& is_acceptor, - std::shared_ptr cache = - std::shared_ptr()) const; + io::endpoint* new_endpoint( + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor, + std::shared_ptr cache = + std::shared_ptr()) const; }; } // namespace storage diff --git a/broker/storage/src/factory.cc b/broker/storage/src/factory.cc index ee2a4dd79ba..29a027e9655 100644 --- a/broker/storage/src/factory.cc +++ b/broker/storage/src/factory.cc @@ -1,5 +1,5 @@ /** - * Copyright 2011-2015,2017 Centreon + * Copyright 2011-2015,2017-2024 Centreon * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,7 +22,6 @@ #include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/storage/connector.hh" -#include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" using namespace com::centreon::exceptions; @@ -40,21 +39,12 @@ using log_v2 = com::centreon::common::log_v2::log_v2; */ static std::string const& find_param(config::endpoint const& cfg, std::string const& key) { - std::map::const_iterator it{cfg.params.find(key)}; + auto it = cfg.params.find(key); if (cfg.params.end() == it) - throw msg_fmt( - "storage: no '{}" - "' defined for endpoint '{}'", - key, cfg.name); + throw msg_fmt("storage: no '{}' defined for endpoint '{}'", key, cfg.name); return it->second; } -/************************************** - * * - * Public Methods * - * * - **************************************/ - /** * Check if a configuration match the storage layer. * @@ -80,10 +70,9 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache [[maybe_unused]]) const { // Find RRD length. uint32_t rrd_length; if (!absl::SimpleAtoi(find_param(cfg, "length"), &rrd_length)) { @@ -96,10 +85,9 @@ io::endpoint* factory::new_endpoint( } // Find interval length if set. - uint32_t interval_length{0}; + uint32_t interval_length = 0; { - std::map::const_iterator it{ - cfg.params.find("interval")}; + auto it = cfg.params.find("interval"); if (it != cfg.params.end()) { if (!absl::SimpleAtoi(it->second, &interval_length)) { interval_length = 60; @@ -107,8 +95,8 @@ io::endpoint* factory::new_endpoint( .get(log_v2::CORE) ->error( "storage: the interval field should contain a string " - "containing a " - "number. We use the default value in replacement 60."); + "containing a number. 
We use the default value in replacement " + "60."); } } if (!interval_length) @@ -116,13 +104,12 @@ io::endpoint* factory::new_endpoint( } // Find storage DB parameters. - database_config dbcfg(cfg); + database_config dbcfg(cfg, global_params); // Store or not in data_bin. bool store_in_data_bin{true}; { - std::map::const_iterator it{ - cfg.params.find("store_in_data_bin")}; + auto it = cfg.params.find("store_in_data_bin"); if (it != cfg.params.end()) { if (!absl::SimpleAtob(it->second, &store_in_data_bin)) { log_v2::instance() diff --git a/broker/storage/test/conflict_manager.cc b/broker/storage/test/conflict_manager.cc index 80df27225ff..2784e20b6fa 100644 --- a/broker/storage/test/conflict_manager.cc +++ b/broker/storage/test/conflict_manager.cc @@ -38,7 +38,7 @@ class ConflictManagerTest : public ::testing::Test { public: void SetUp() override { try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (std::exception const& e) { (void)e; } diff --git a/broker/storage/test/connector.cc b/broker/storage/test/connector.cc index 958c2468018..f690ba21dd5 100644 --- a/broker/storage/test/connector.cc +++ b/broker/storage/test/connector.cc @@ -36,16 +36,16 @@ TEST(StorageFactory, Factory) { storage::factory factory; - ASSERT_THROW(factory.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(factory.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["length"] = "42"; - ASSERT_THROW(factory.new_endpoint(cfg, is_acceptor, cache), + ASSERT_THROW(factory.new_endpoint(cfg, {}, is_acceptor, cache), exceptions::config); cfg.params["db_type"] = "mysql"; cfg.params["db_name"] = "centreon"; ASSERT_FALSE(factory.has_endpoint(cfg, nullptr)); cfg.type = "storage"; storage::connector* endp = static_cast( - factory.new_endpoint(cfg, is_acceptor, cache)); + factory.new_endpoint(cfg, {}, is_acceptor, cache)); storage::connector con; con.connect_to(dbcfg, 60, 300, true); @@ -66,9 +66,9 @@ TEST(StorageFactory, FactoryWithFullConf) { storage::factory factory; - ASSERT_THROW(factory.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(factory.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["length"] = "42"; - ASSERT_THROW(factory.new_endpoint(cfg, is_acceptor, cache), + ASSERT_THROW(factory.new_endpoint(cfg, {}, is_acceptor, cache), exceptions::config); cfg.params["db_type"] = "mysql"; cfg.params["db_name"] = "centreon"; @@ -78,7 +78,7 @@ TEST(StorageFactory, FactoryWithFullConf) { ASSERT_FALSE(factory.has_endpoint(cfg, nullptr)); cfg.type = "storage"; storage::connector* endp = static_cast( - factory.new_endpoint(cfg, is_acceptor, cache)); + factory.new_endpoint(cfg, {}, is_acceptor, cache)); storage::connector con; con.connect_to(dbcfg, 43, 44, false); diff --git a/broker/storage/test/perfdata.cc b/broker/storage/test/perfdata.cc index 9d4fbf0d83e..91fcc82ef4f 100644 --- a/broker/storage/test/perfdata.cc +++ b/broker/storage/test/perfdata.cc @@ -196,7 +196,9 @@ TEST(StoragePerfdata, DefaultCtor) { class StorageParserParsePerfdata : public testing::Test { public: - void SetUp() override { config::applier::init(0, "test_broker", 0); } + void SetUp() override { + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); + } void TearDown() override { config::applier::deinit(); }; }; diff --git a/broker/storage/test/status-entry.cc b/broker/storage/test/status-entry.cc index 49ab813d8d6..69122e5a71b 100644 --- a/broker/storage/test/status-entry.cc +++ 
b/broker/storage/test/status-entry.cc @@ -72,7 +72,7 @@ class StatusEntryTest : public ::testing::Test { void SetUp() override { io::data::broker_id = 0; try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (std::exception const& e) { (void)e; } diff --git a/broker/tcp/inc/com/centreon/broker/tcp/factory.hh b/broker/tcp/inc/com/centreon/broker/tcp/factory.hh index 5df442f411b..ed424566885 100644 --- a/broker/tcp/inc/com/centreon/broker/tcp/factory.hh +++ b/broker/tcp/inc/com/centreon/broker/tcp/factory.hh @@ -44,6 +44,7 @@ class factory : public io::factory { io::extension* ext) override; io::endpoint* new_endpoint( com::centreon::broker::config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const override; diff --git a/broker/tcp/src/factory.cc b/broker/tcp/src/factory.cc index edcd0959c28..19d44c9a0cf 100644 --- a/broker/tcp/src/factory.cc +++ b/broker/tcp/src/factory.cc @@ -21,10 +21,8 @@ #include -#include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/tcp/acceptor.hh" #include "com/centreon/broker/tcp/connector.hh" -#include "com/centreon/broker/tcp/tcp_async.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -73,19 +71,17 @@ bool factory::has_endpoint(com::centreon::broker::config::endpoint& cfg, */ io::endpoint* factory::new_endpoint( com::centreon::broker::config::endpoint& cfg, + const std::map& global_params [[maybe_unused]], bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache [[maybe_unused]]) const { auto logger = log_v2::instance().get(log_v2::TCP); if (cfg.type == "bbdo_server" || cfg.type == "bbdo_client") return _new_endpoint_bbdo_cs(cfg, is_acceptor); // Find host (if exists). - std::map::const_iterator it; std::string host; - it = cfg.params.find("host"); + auto it = cfg.params.find("host"); if (it != cfg.params.end()) host = it->second; if (!host.empty() && @@ -193,13 +189,11 @@ io::endpoint* factory::new_endpoint( io::endpoint* factory::_new_endpoint_bbdo_cs( com::centreon::broker::config::endpoint& cfg, bool& is_acceptor) const { - std::map::const_iterator it; - auto logger = log_v2::instance().get(log_v2::TCP); // Find host (if exists). 
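// The change in this hunk (and the matching ones in the tls factory further
// down) replaces the verbose iterator declarations with
// `auto it = cfg.params.find(...)` while keeping the "parameter is optional"
// semantics. A minimal, self-contained sketch of that lookup pattern, using a
// plain std::map and std::optional instead of the broker's config::endpoint
// type (both simplifications are assumptions, not code from this patch):
#include <map>
#include <optional>
#include <string>

using param_map = std::map<std::string, std::string>;

// Return the parameter value when present, std::nullopt otherwise.
static std::optional<std::string> find_optional_param(const param_map& params,
                                                      const std::string& key) {
  auto it = params.find(key);
  if (it == params.end())
    return std::nullopt;
  return it->second;
}

// Usage mirroring the optional "host" lookup just below:
//   std::string host = find_optional_param(cfg.params, "host").value_or("");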
std::string host; - it = cfg.params.find("host"); + auto it = cfg.params.find("host"); if (it != cfg.params.end()) host = it->second; if (!host.empty() && diff --git a/broker/tcp/test/factory.cc b/broker/tcp/test/factory.cc index 05feac147a8..bbad1580351 100644 --- a/broker/tcp/test/factory.cc +++ b/broker/tcp/test/factory.cc @@ -47,7 +47,7 @@ TEST(TcpFactory, Exception) { bool is_acceptor; std::shared_ptr cache; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); } TEST(TcpFactory, Acceptor) { @@ -59,7 +59,7 @@ TEST(TcpFactory, Acceptor) { cfg.params["port"] = "4343"; cfg.params["socket_write_timeout"] = "10"; cfg.params["socket_read_timeout"] = "10"; - io::endpoint* endp = fact.new_endpoint(cfg, is_acceptor, cache); + io::endpoint* endp = fact.new_endpoint(cfg, {}, is_acceptor, cache); ASSERT_TRUE(is_acceptor); ASSERT_TRUE(endp->is_acceptor()); @@ -75,7 +75,7 @@ TEST(TcpFactory, BadPort) { cfg.params["port"] = "a4a343"; cfg.params["host"] = "10.12.13.22"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); } TEST(TcpFactory, BadHost) { @@ -86,10 +86,10 @@ TEST(TcpFactory, BadHost) { cfg.params["port"] = "4343"; cfg.params["host"] = " 10.12.13.22"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["host"] = "10.12.13.22 "; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); } TEST(TcpFactory, Connector) { @@ -104,7 +104,7 @@ TEST(TcpFactory, Connector) { std::unique_ptr f{new tcp::factory}; ASSERT_TRUE(f->has_endpoint(cfg, nullptr)); std::unique_ptr endp{ - fact.new_endpoint(cfg, is_acceptor, cache)}; + fact.new_endpoint(cfg, {}, is_acceptor, cache)}; ASSERT_FALSE(is_acceptor); ASSERT_TRUE(endp->is_connector()); diff --git a/broker/test/bench_bbdo.cc b/broker/test/bench_bbdo.cc deleted file mode 100644 index ab2d1d8a667..00000000000 --- a/broker/test/bench_bbdo.cc +++ /dev/null @@ -1,126 +0,0 @@ -/** -* Copyright 2014-2015 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ - -#include -#include -#include -#include -#include -#include -#include "com/centreon/broker/bbdo/stream.hh" -#include "com/centreon/broker/compression/stream.hh" -#include "com/centreon/broker/config/applier/init.hh" -#include "com/centreon/broker/neb/events.hh" -#include "test/bench_stream.hh" - -#define TEST_TIME 30 - -using namespace com::centreon::broker; - -/** - * Mesure time performance. - */ -static void send_events(io::stream* s) { - // Compute time limit. - timespec limit; - clock_gettime(CLOCK_REALTIME, &limit); - limit.tv_sec += TEST_TIME; - timespec ts; - - // Loop. - do { - // Send event. - std::shared_ptr ss(new neb::service_status); - s->write(ss); - - // Compute current time. 
- clock_gettime(CLOCK_REALTIME, &ts); - } while ((ts.tv_sec < limit.tv_sec) || - ((ts.tv_sec == limit.tv_sec) && (ts.tv_nsec < limit.tv_nsec))); - - // Forced commit (might be needed by the compression). - s->write(std::shared_ptr()); - - return; -} - -/** - * Benchmark two streams. - * - * @param[in] bbdos BBDO stream. - * @param[in] bbdob BBDO benchmark. - */ -static void benchmark_stream(bbdo::stream& bbdos, bench_stream& bbdob) { - // Benchmark BBDO stream. - std::cout << " BBDO\n"; - send_events(&bbdos); - std::cout << " - events " << bbdob.get_write_events() << "\n" - << " - size " << bbdob.get_write_size() << "\n"; - return; -} - -/** - * Compare BBDO performance to NDO. - * - * @param[in] argc Argument count. - * @param[in] argv Argument values. - * - * @return EXIT_SUCCESS. - */ -int main(int argc, char* argv[]) { - // Initialization. - QCoreApplication app(argc, argv); - config::applier::init(0, "test_broker", 0); - - // #1 Default streams. - std::cout << "Bench #1 (default streams)\n"; - { - bbdo::stream bbdos; - std::shared_ptr bbdob(new bench_stream); - bbdos.set_substream(bbdob); - benchmark_stream(bbdos, *bbdob); - } - - // #2 Default compression. - std::cout << "\nBench #2 (default compression)\n"; - { - bbdo::stream bbdos; - std::shared_ptr bbdoc(new compression::stream); - std::shared_ptr bbdob(new bench_stream); - bbdos.set_substream(bbdoc); - bbdoc->set_substream(bbdob); - benchmark_stream(bbdos, *bbdob); - } - - // #3 Optimized compression. - std::cout << "\nBench #3 (optimized compression)\n"; - { - bbdo::stream bbdos; - std::shared_ptr bbdoc( - new compression::stream(9, 1000000)); - std::shared_ptr bbdob(new bench_stream); - bbdos.set_substream(bbdoc); - bbdoc->set_substream(bbdob); - benchmark_stream(bbdos, *bbdob); - } - - // Cleanup. - config::applier::deinit(); - - return (EXIT_SUCCESS); -} diff --git a/broker/test/helgrind.cc b/broker/test/helgrind.cc deleted file mode 100644 index 4c35d846561..00000000000 --- a/broker/test/helgrind.cc +++ /dev/null @@ -1,125 +0,0 @@ -/** -* Copyright 2014-2015 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ - -#include -#include -#include -#include "com/centreon/broker/exceptions/msg.hh" -#include "test/cbd.hh" -#include "test/config.hh" -#include "test/engine.hh" -#include "test/generate.hh" -#include "test/misc.hh" -#include "test/vars.hh" - -static char const* valgrind_arg = "--tool=helgrind"; -static char const* db_name = "CENTREON_BROKER_TEST_HELGRIND"; -static uint32_t service_number = 5; -static uint32_t host_number = 5; -static uint32_t command_number = 10; - -using namespace com::centreon::broker; - -/** - * @brief Check that helgrind does not return a warning. - * - * We launch centreon broker into a the valgrind tool called 'helgrind'. - * This tool is used to detect various multi-threading defects, like - * potential deadlock and un-locked access of the same memory by two threads. - * - * @return EXIT_SUCCESS on success. - */ -int main() { - // Error flag. 
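// The helgrind check being removed here wraps cbd in
// `valgrind --tool=helgrind` through QProcess and fails as soon as anything
// shows up on stderr. A minimal sketch of that "stderr must stay empty" idea
// without Qt, using POSIX popen() and the coreutils `timeout` command to stop
// the daemon after a while; the valgrind -q flag, the binary path and the
// configuration path are assumptions for illustration, not values taken from
// this patch:
#include <array>
#include <cstdio>
#include <iostream>
#include <string>

// Run a shell command and return everything it writes to stderr
// (a non-empty string is also returned when popen() itself fails).
static std::string capture_stderr(const std::string& cmd) {
  std::string out;
  std::array<char, 256> buf;
  // Send stderr to the pipe and discard stdout.
  FILE* p = popen((cmd + " 2>&1 1>/dev/null").c_str(), "r");
  if (!p)
    return "popen failed";
  while (fgets(buf.data(), buf.size(), p))
    out += buf.data();
  pclose(p);
  return out;
}

int main() {
  // -q keeps valgrind silent unless helgrind reports an actual error.
  std::string warnings = capture_stderr(
      "timeout 30 valgrind -q --tool=helgrind /usr/sbin/cbd "
      "/etc/centreon-broker/central-broker.json");
  if (!warnings.empty()) {
    std::cerr << "got helgrind warnings:\n" << warnings << '\n';
    return 1;
  }
  return 0;
}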
- bool error(true); - - // Variables that need cleaning. - std::list hosts; - std::list services; - std::list commands; - std::string engine_config_path(tmpnam(NULL)); - engine monitoring; - QProcess broker; - test_file cbmod_cfg; - test_file broker_cfg; - test_db db; - - try { - // Prepare database. - db.open(db_name, NULL, true); - generate_commands(commands, command_number); - generate_hosts(hosts, host_number); - generate_services(services, hosts, service_number); - - // Launch broker. - broker_cfg.set_template("/test/cfg/helgrind_1.xml.in"); - QStringList args; - args.push_back(valgrind_arg); - args.push_back(CBD_PATH); - args.push_back(broker_cfg.generate().c_str()); - broker.start("valgrind", args); - if (!broker.waitForStarted()) - throw(exceptions::msg() - << "couldn't start valgrind: " << broker.errorString()); - - // Generate configuration. - cbmod_cfg.set_template(PROJECT_SOURCE_DIR "/test/cfg/helgrind_1.xml.in"); - std::string additional_config; - { - std::ostringstream oss; - oss << "broker_module=" << CBMOD_PATH << " " << cbmod_cfg.generate() - << "\n"; - additional_config = oss.str(); - } - config_write(engine_config_path.c_str(), additional_config.c_str(), &hosts, - &services, &commands); - - // Start monitoring. - std::string engine_config_file(engine_config_path); - engine_config_file.append("/nagios.cfg"); - monitoring.set_config_file(engine_config_file); - monitoring.start(); - - // Wait. - sleep_for(30); - - // Check for warning. - QByteArray stderr = broker.readAllStandardError(); - - if (!stderr.isEmpty()) - throw(exceptions::msg() << "got helgrind warnings: " << QString(stderr)); - } catch (std::exception const& e) { - std::cerr << e.what() << std::endl; - error = true; - } catch (...) { - std::cerr << "unknown exception" << std::endl; - error = true; - } - - // Cleanup. - monitoring.stop(); - broker.kill(); - broker.waitForFinished(); - sleep_for(3); - config_remove(engine_config_path.c_str()); - free_hosts(hosts); - free_services(services); - free_commands(commands); - - return (error ? -1 : 0); -} diff --git a/broker/test/influxdb.cc b/broker/test/influxdb.cc deleted file mode 100644 index 6dfb1127162..00000000000 --- a/broker/test/influxdb.cc +++ /dev/null @@ -1,184 +0,0 @@ -/** -* Copyright 2014-2015 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-* -* For more information : contact@centreon.com -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "com/centreon/broker/exceptions/msg.hh" -#include "test/cbd.hh" -#include "test/config.hh" -#include "test/engine.hh" -#include "test/engine_extcmd.hh" -#include "test/generate.hh" -#include "test/misc.hh" -#include "test/vars.hh" - -using namespace com::centreon::broker; - -#define STORAGE_DB_NAME "broker_influxdb_centreon_storage" -#define COMMAND_FILE "broker_influxdb_command_file" - -#define INFLUXDB_DB_NAME "influxdb_unittest" -#define INFLUXDB_DB_PORT "6421" -#define INFLUXDB_DB_PORT_S 6421 -#define INFLUXDB_DB_PASSWORD "influxdb_password" -#define INFLUXDB_DB_HOST "localhost" -#define INFLUXDB_DB_USER "influxdb_user" - -static const char* expected_result = - "POST /write?u=" INFLUXDB_DB_USER "&p=" INFLUXDB_DB_PASSWORD - " HTTP/1.0\n" - "Content-Length: 442\n\n" - "{\"database\":\"" INFLUXDB_DB_NAME - "\",\"points\":[" - "{\"name\":\"status\",\"tags\":{\"status_id\":1 " - "},\"timestamp\":$timestamp$,\"fields\":{\"value\":0,\"hostid\":\"1\"," - "\"host\":\"1\",\"serviceid\":\"1\",\"service\":\"1\",\"instanceid\":" - "\"42\",\"instance\":\"MyBroker\" } }," - "{\"name\":\"influxdb_test\",\"tags\":{\"metric_id\":1 " - "},\"timestamp\":$timestamp$,\"fields\":{\"value\":0.8,\"metric\":" - "\"influxdb_test\",\"hostid\":\"1\",\"host\":\"1\",\"serviceid\":\"1\"," - "\"service\":\"1\",\"instanceid\":\"42\",\"instance\":\"MyBroker\" } } ]}"; - -/** - * Check that the influxdb works. - * - * @return EXIT_SUCCESS on success. - */ -int main() { - // Error flag. - bool error(true); - - // Variables that need cleaning. - std::list hosts; - std::list services; - test_db db; - engine_extcmd commander; - engine monitoring; - std::string engine_config_path(tmpnam(NULL)); - - try { - // Open the socket - QTcpServer server; - if (!server.listen(QHostAddress::Any, INFLUXDB_DB_PORT_S)) - throw exceptions::msg() << "couldn't listen to " << INFLUXDB_DB_PORT_S; - - // Prepare database. - db.open(STORAGE_DB_NAME); - - // Create the config influxdb xml file. - test_file file; - file.set_template(PROJECT_SOURCE_DIR "/test/cfg/influxdb.xml.in"); - file.set("MYSQL_DB_NAME", STORAGE_DB_NAME); - file.set("INFLUXDB_DB_NAME", INFLUXDB_DB_NAME); - file.set("INFLUXDB_DB_PORT", INFLUXDB_DB_PORT); - file.set("INFLUXDB_DB_PASSWORD", INFLUXDB_DB_PASSWORD); - file.set("INFLUXDB_DB_HOST", INFLUXDB_DB_HOST); - file.set("INFLUXDB_DB_USER", INFLUXDB_DB_USER); - std::string config_file = file.generate(); - - // Prepare monitoring engine configuration parameters. - generate_hosts(hosts, 1); - generate_services(services, hosts, 1); - services.back().accept_passive_service_checks = 1; - services.back().checks_enabled = 0; - services.back().max_attempts = 1; - commander.set_file(tmpnam(NULL)); - std::string additional_config; - { - std::ostringstream oss; - oss << commander.get_engine_config() << "broker_module=" << CBMOD_PATH - << " " << config_file << "\n"; - additional_config = oss.str(); - } - config_write(engine_config_path.c_str(), additional_config.c_str(), &hosts, - &services); - - std::string engine_config_file(engine_config_path); - engine_config_file.append("/nagios.cfg"); - monitoring.set_config_file(engine_config_file); - monitoring.start(); - - sleep_for(3); - time_t first_timestamp_possible = std::time(NULL); - commander.execute( - "PROCESS_SERVICE_CHECK_RESULT;1;1;0;Submitted by unit test | " - "influxdb_test=0.80"); - - // Wait twice for an incoming connection. 
The first connection is - // the connection used by influxdb module to check if the server exist, - // the second one is the data. - if (!server.waitForNewConnection(8000 * MONITORING_ENGINE_INTERVAL_LENGTH)) - throw exceptions::msg() - << "no incoming connection to " << INFLUXDB_DB_PORT_S; - QTcpSocket* s = server.nextPendingConnection(); - if (!s) - throw exceptions::msg() - << "no incoming connection to " << INFLUXDB_DB_PORT_S; - delete s; - if (!server.waitForNewConnection(8000 * MONITORING_ENGINE_INTERVAL_LENGTH)) - throw exceptions::msg() - << "no incoming connection to " << INFLUXDB_DB_PORT_S; - s = server.nextPendingConnection(); - if (!s) - throw exceptions::msg() - << "no incoming connection to " << INFLUXDB_DB_PORT_S; - QByteArray array; - while (s->isOpen() && s->waitForReadyRead()) - array.append(s->readAll()); - QString data(array); - - time_t last_timestamp_possible = std::time(NULL); - bool got = false; - // Check the data got for everything is okay. - for (; first_timestamp_possible <= last_timestamp_possible; - ++first_timestamp_possible) { - QString expected = expected_result; - expected.replace("$timestamp$", - QString::number(first_timestamp_possible)); - if (expected == data) { - got = true; - break; - } - } - if (!got) - throw(exceptions::msg() << "incorrect influxdb data: got: " << data - << "\nexpected: " << expected_result); - delete s; - - // Success. - error = false; - } catch (std::exception const& e) { - std::cerr << e.what() << std::endl; - } catch (...) { - std::cerr << "unknown exception" << std::endl; - } - - // Cleanup. - monitoring.stop(); - free_hosts(hosts); - free_services(services); - config_remove(engine_config_path.c_str()); - - return (error ? EXIT_FAILURE : EXIT_SUCCESS); -} diff --git a/broker/test/test_util/src/tcp_relais.cc b/broker/test/test_util/src/tcp_relais.cc index 7673f8d9a1d..64c6611d471 100644 --- a/broker/test/test_util/src/tcp_relais.cc +++ b/broker/test/test_util/src/tcp_relais.cc @@ -205,7 +205,7 @@ void incomming_outgoing::on_recv(const boost::system::error_code& err, } void incomming_outgoing::on_sent(const boost::system::error_code& err, - tcp::socket& sock) { + tcp::socket& sock [[maybe_unused]]) { if (err) { on_error(); return; diff --git a/broker/tls/inc/com/centreon/broker/tls/factory.hh b/broker/tls/inc/com/centreon/broker/tls/factory.hh index 8763f90e735..7aa8a60d8dc 100644 --- a/broker/tls/inc/com/centreon/broker/tls/factory.hh +++ b/broker/tls/inc/com/centreon/broker/tls/factory.hh @@ -39,6 +39,7 @@ class factory : public io::factory { bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; io::endpoint* new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const override; diff --git a/broker/tls/src/factory.cc b/broker/tls/src/factory.cc index 67b4ed29352..21674b0aba3 100644 --- a/broker/tls/src/factory.cc +++ b/broker/tls/src/factory.cc @@ -19,7 +19,6 @@ #include "com/centreon/broker/tls/factory.hh" #include -#include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/tls/acceptor.hh" #include "com/centreon/broker/tls/connector.hh" #include "common/log_v2/log_v2.hh" @@ -42,12 +41,12 @@ using log_v2 = com::centreon::common::log_v2::log_v2; */ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { bool has_tls; - std::map::iterator it; bool legacy; auto logger = log_v2::instance().get(log_v2::TLS); if (ext) { + std::map::iterator it; if (direct_grpc_serialized(cfg)) { return false; } @@ -129,10 
+128,9 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params [[maybe_unused]], bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache [[maybe_unused]]) const { auto logger = log_v2::instance().get(log_v2::TLS); // Find TLS parameters (optional). @@ -143,8 +141,7 @@ io::endpoint* factory::new_endpoint( std::string tls_hostname; { // Is TLS enabled ? - std::map::const_iterator it{ - cfg.params.find("tls")}; + auto it = cfg.params.find("tls"); if (it != cfg.params.end()) { if (!absl::SimpleAtob(it->second, &tls)) { logger->error( diff --git a/broker/tls/test/read.cc b/broker/tls/test/read.cc index 4c3bcf6967a..13c5761b412 100644 --- a/broker/tls/test/read.cc +++ b/broker/tls/test/read.cc @@ -42,7 +42,7 @@ class TlsStreamRead : public ::testing::Test { public: void SetUp() override { try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (const std::exception& e) { (void)e; } diff --git a/broker/tls2/inc/com/centreon/broker/tls2/factory.hh b/broker/tls2/inc/com/centreon/broker/tls2/factory.hh index 846558d7cd0..96805e05ce7 100644 --- a/broker/tls2/inc/com/centreon/broker/tls2/factory.hh +++ b/broker/tls2/inc/com/centreon/broker/tls2/factory.hh @@ -39,6 +39,7 @@ class factory : public io::factory { bool has_endpoint(config::endpoint& cfg, io::extension* ext) override; io::endpoint* new_endpoint( config::endpoint& cfg, + const absl::flat_hash_map& global_params, bool& is_acceptor, std::shared_ptr cache = std::shared_ptr()) const override; @@ -49,6 +50,6 @@ class factory : public io::factory { }; } // namespace tls2 -} +} // namespace com::centreon::broker #endif // !CCB_TLS_FACTORY_HH diff --git a/broker/tls2/src/factory.cc b/broker/tls2/src/factory.cc index d58a5085d0c..a9f9ecc70a7 100644 --- a/broker/tls2/src/factory.cc +++ b/broker/tls2/src/factory.cc @@ -1,20 +1,20 @@ /** -* Copyright 2013 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2013 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/tls2/factory.hh" @@ -90,12 +90,11 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const absl::flat_hash_map& global_params, bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache[[maybe_unused]]) const { // Find TLS parameters (optional). - bool tls2{false}; + bool tls2 = false; std::string private_key; std::string public_cert; std::string ca_cert; diff --git a/broker/tls2/test/read.cc b/broker/tls2/test/read.cc index 4c3bcf6967a..13c5761b412 100644 --- a/broker/tls2/test/read.cc +++ b/broker/tls2/test/read.cc @@ -42,7 +42,7 @@ class TlsStreamRead : public ::testing::Test { public: void SetUp() override { try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (const std::exception& e) { (void)e; } diff --git a/broker/unified_sql/CMakeLists.txt b/broker/unified_sql/CMakeLists.txt index 47ff4ee4e21..383aa053a5f 100644 --- a/broker/unified_sql/CMakeLists.txt +++ b/broker/unified_sql/CMakeLists.txt @@ -67,6 +67,7 @@ target_link_libraries( pb_rebuild_message_lib pb_remove_graph_message_lib spdlog::spdlog + engine_conf absl::any absl::log absl::base absl::bits) # Testing. diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/bulk_bind.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/bulk_bind.hh index 41d8c2eb9a7..005c9af1e79 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/bulk_bind.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/bulk_bind.hh @@ -20,8 +20,7 @@ #include "com/centreon/broker/sql/mysql_bulk_stmt.hh" -namespace com::centreon::broker { -namespace unified_sql { +namespace com::centreon::broker::unified_sql { /** * @class bulk_bind "com/centreon/broker/unified_sql/bulk_bind.hh" * @brief Container used for a multiline statement bind. 
It is threadsafe and @@ -60,9 +59,10 @@ class bulk_bind { const uint32_t _interval; const uint32_t _max_size; database::mysql_bulk_stmt& _stmt; - mutable std::mutex _queue_m; - std::vector> _bind; - std::vector _next_time; + mutable absl::Mutex _queue_m; + std::vector> _bind + ABSL_GUARDED_BY(_queue_m); + std::vector _next_time ABSL_GUARDED_BY(_queue_m); std::shared_ptr _logger; public: @@ -72,17 +72,17 @@ class bulk_bind { database::mysql_bulk_stmt& stmt, const std::shared_ptr& logger); bulk_bind(const bulk_bind&) = delete; - std::unique_ptr& bind(int32_t conn); - void apply_to_stmt(int32_t conn); - bool ready(int32_t conn); - std::size_t size(int32_t conn = -1) const; - std::time_t next_time() const; - std::size_t connections_count() const; - void init_from_stmt(int32_t conn); - void lock(); - void unlock(); + std::unique_ptr& bind(int32_t conn) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(_queue_m); + void apply_to_stmt(int32_t conn) ABSL_LOCKS_EXCLUDED(_queue_m); + bool ready(int32_t conn) ABSL_LOCKS_EXCLUDED(_queue_m); + std::size_t size(int32_t conn = -1) const ABSL_LOCKS_EXCLUDED(_queue_m); + std::time_t next_time() const ABSL_LOCKS_EXCLUDED(_queue_m); + std::size_t connections_count() const ABSL_LOCKS_EXCLUDED(_queue_m); + void init_from_stmt(int32_t conn) ABSL_EXCLUSIVE_LOCKS_REQUIRED(_queue_m); + void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(_queue_m); + void unlock() ABSL_UNLOCK_FUNCTION(_queue_m); }; -} // namespace unified_sql -} // namespace com::centreon::broker +} // namespace com::centreon::broker::unified_sql #endif /* !CCB_UNIFIED_SQL_BULK_BIND_HH */ diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/factory.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/factory.hh index d678cc5aedd..55f792f1dce 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/factory.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/factory.hh @@ -1,20 +1,20 @@ -/* -** Copyright 2011-2013 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2011-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #ifndef CCB_UNIFIED_SQL_FACTORY_HH #define CCB_UNIFIED_SQL_FACTORY_HH @@ -37,13 +37,15 @@ class factory : public io::factory { ~factory() = default; factory& operator=(factory const&) = delete; bool has_endpoint(config::endpoint& cfg, io::extension* ext); - io::endpoint* new_endpoint(config::endpoint& cfg, - bool& is_acceptor, - std::shared_ptr cache = - std::shared_ptr()) const; + io::endpoint* new_endpoint( + config::endpoint& cfg, + const std::map& global_params, + bool& is_acceptor, + std::shared_ptr cache = + std::shared_ptr()) const; }; } // namespace unified_sql -} +} // namespace com::centreon::broker #endif // !CCB_UNIFIED_SQL_FACTORY_HH diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh index 4705374db1d..da795899052 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh @@ -21,7 +21,6 @@ #include -#include "com/centreon/broker/sql/database_config.hh" #include "com/centreon/broker/sql/mysql.hh" #include "com/centreon/common/pool.hh" diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh index f2ecb8136b0..079064e1406 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh @@ -363,6 +363,26 @@ class stream : public io::stream { std::unique_ptr _sscr_update; std::unique_ptr _sscr_bind; + /* Statement and binding to enable hosts in the hosts table. One value is + * set at index 0 that is the host ID. */ + std::unique_ptr _eh_update; + std::unique_ptr _eh_bind; + + /* Statement and binding to enable hosts in the resources table. One value + * is set at index 0 that is the host ID. */ + std::unique_ptr _ehr_update; + std::unique_ptr _ehr_bind; + + /* Statement and binding to enable services in the services table. One value + * is set at index 0 that is the service ID. */ + std::unique_ptr _es_update; + std::unique_ptr _es_bind; + + /* Statement and binding to enable services in the resources table. One value + * is set at index 0 that is the service ID. 
*/ + std::unique_ptr _esr_update; + std::unique_ptr _esr_bind; + database::mysql_stmt _severity_insert; database::mysql_stmt _severity_update; database::mysql_stmt _tag_insert_update; @@ -388,6 +408,8 @@ class stream : public io::stream { database::mysql_stmt _index_data_query; database::mysql_stmt _metrics_insert; + database::mysql_stmt _agent_information_insert_update; + void _update_hosts_and_services_of_unresponsive_instances(); void _update_hosts_and_services_of_instance(uint32_t id, bool responsive); void _update_timestamp(uint32_t instance_id); @@ -434,6 +456,7 @@ class stream : public io::stream { void _process_pb_host(const std::shared_ptr& d); uint64_t _process_pb_host_in_resources(const Host& h, int32_t conn); + void _process_pb_instance_configuration(const std::shared_ptr& d); void _process_pb_host_status(const std::shared_ptr& d); void _process_pb_adaptive_host_status(const std::shared_ptr& d); void _process_pb_adaptive_host(const std::shared_ptr& d); @@ -446,7 +469,7 @@ class stream : public io::stream { void _process_tag(const std::shared_ptr& d); void _process_pb_log(const std::shared_ptr& d); void _process_pb_responsive_instance(const std::shared_ptr& d); - + void _process_agent_stats(const std::shared_ptr& d); void _unified_sql_process_service_status(const std::shared_ptr& d); void _check_and_update_index_cache(const Service& ss); diff --git a/broker/unified_sql/src/bulk_bind.cc b/broker/unified_sql/src/bulk_bind.cc index d61eecbf891..b53cf8e8a07 100644 --- a/broker/unified_sql/src/bulk_bind.cc +++ b/broker/unified_sql/src/bulk_bind.cc @@ -52,7 +52,7 @@ bulk_bind::bulk_bind(const size_t connections_count, * @return a boolean true if ready. */ bool bulk_bind::ready(int32_t conn) { - std::lock_guard lck(_queue_m); + absl::MutexLock lck(&_queue_m); auto* b = _bind[conn].get(); if (!b) return false; @@ -89,7 +89,7 @@ bool bulk_bind::ready(int32_t conn) { * @return a size. */ size_t bulk_bind::size(int32_t conn) const { - std::lock_guard lck(_queue_m); + absl::MutexLock lck(&_queue_m); if (conn == -1) { size_t retval = 0; for (auto& b : _bind) { @@ -110,7 +110,7 @@ size_t bulk_bind::size(int32_t conn) const { * @return a timestamp. */ std::time_t bulk_bind::next_time() const { - std::lock_guard lck(_queue_m); + absl::MutexLock lck(&_queue_m); auto it = std::min_element(_next_time.begin(), _next_time.end()); return *it; } @@ -122,7 +122,7 @@ std::time_t bulk_bind::next_time() const { * @param conn The connection to choose the bind. */ void bulk_bind::apply_to_stmt(int32_t conn) { - std::lock_guard lck(_queue_m); + absl::MutexLock lck(&_queue_m); _stmt.set_bind(std::move(_bind[conn])); _next_time[conn] = std::time(nullptr) + _interval; } @@ -143,7 +143,7 @@ void bulk_bind::init_from_stmt(int32_t conn) { * @return A size_t. 
*/ std::size_t bulk_bind::connections_count() const { - std::lock_guard lck(_queue_m); + absl::MutexLock lck(&_queue_m); return _bind.size(); } @@ -160,9 +160,9 @@ std::unique_ptr& bulk_bind::bind(int32_t conn) { } void bulk_bind::lock() { - _queue_m.lock(); + _queue_m.Lock(); } void bulk_bind::unlock() { - _queue_m.unlock(); + _queue_m.Unlock(); } diff --git a/broker/unified_sql/src/factory.cc b/broker/unified_sql/src/factory.cc index 4c9ef804ac5..9f9cbc9c839 100644 --- a/broker/unified_sql/src/factory.cc +++ b/broker/unified_sql/src/factory.cc @@ -19,10 +19,7 @@ #include "com/centreon/broker/unified_sql/factory.hh" #include -#include -#include -#include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/unified_sql/connector.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -40,14 +37,12 @@ using com::centreon::common::log_v2::log_v2; * * @return Property value. */ -static std::string const& find_param(config::endpoint const& cfg, +static const std::string& find_param(const config::endpoint& cfg, std::string const& key) { - std::map::const_iterator it{cfg.params.find(key)}; + auto it = cfg.params.find(key); if (cfg.params.end() == it) - throw msg_fmt( - "unified_sql: no '{}" - "' defined for endpoint '{}'", - key, cfg.name); + throw msg_fmt("unified_sql: no '{}' defined for endpoint '{}'", key, + cfg.name); return it->second; } @@ -76,10 +71,9 @@ bool factory::has_endpoint(config::endpoint& cfg, io::extension* ext) { */ io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, - std::shared_ptr cache) const { - (void)cache; - + std::shared_ptr cache [[maybe_unused]]) const { auto logger = log_v2::instance().get(log_v2::SQL); // Find RRD length. uint32_t rrd_length; @@ -94,8 +88,7 @@ io::endpoint* factory::new_endpoint( // Find interval length if set. uint32_t interval_length{0}; { - std::map::const_iterator it{ - cfg.params.find("interval")}; + auto it = cfg.params.find("interval"); if (it != cfg.params.end()) { if (!absl::SimpleAtoi(it->second, &interval_length)) { interval_length = 60; @@ -109,13 +102,12 @@ io::endpoint* factory::new_endpoint( } // Find unified_sql DB parameters. - database_config dbcfg(cfg); + database_config dbcfg(cfg, global_params); // Store or not in data_bin. bool store_in_data_bin(true); { - std::map::const_iterator it{ - cfg.params.find("store_in_data_bin")}; + auto it = cfg.params.find("store_in_data_bin"); if (it != cfg.params.end()) { if (!absl::SimpleAtob(it->second, &store_in_data_bin)) { logger->error( @@ -130,8 +122,7 @@ io::endpoint* factory::new_endpoint( // Store or not in resources. bool store_in_resources{true}; { - std::map::const_iterator it{ - cfg.params.find("store_in_resources")}; + auto it = cfg.params.find("store_in_resources"); if (it != cfg.params.end()) { if (!absl::SimpleAtob(it->second, &store_in_resources)) { logger->error( @@ -146,8 +137,7 @@ io::endpoint* factory::new_endpoint( // Store or not in hosts_services. 
bool store_in_hosts_services{true}; { - std::map::const_iterator it{ - cfg.params.find("store_in_hosts_services")}; + auto it = cfg.params.find("store_in_hosts_services"); if (it != cfg.params.end()) { if (!absl::SimpleAtob(it->second, &store_in_hosts_services)) { logger->error( diff --git a/broker/unified_sql/src/stream.cc b/broker/unified_sql/src/stream.cc index 9547b43c742..15660a814f8 100644 --- a/broker/unified_sql/src/stream.cc +++ b/broker/unified_sql/src/stream.cc @@ -18,25 +18,12 @@ #include "com/centreon/broker/unified_sql/stream.hh" #include -#include -#include -#include -#include -#include "bbdo/events.hh" -#include "bbdo/remove_graph_message.pb.h" #include "bbdo/storage/index_mapping.hh" #include "com/centreon/broker/cache/global_cache.hh" -#include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/exceptions/shutdown.hh" -#include "com/centreon/broker/multiplexing/publisher.hh" #include "com/centreon/broker/neb/events.hh" -#include "com/centreon/broker/sql/mysql_bulk_stmt.hh" -#include "com/centreon/broker/sql/mysql_result.hh" -#include "com/centreon/broker/stats/center.hh" #include "com/centreon/broker/unified_sql/internal.hh" -#include "com/centreon/common/perfdata.hh" -#include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" using namespace com::centreon::exceptions; @@ -118,10 +105,10 @@ constexpr void (stream::*const stream::neb_processing_table[])( &stream::_process_pb_service_group, &stream::_process_pb_service_group_member, &stream::_process_pb_host_parent, - nullptr, // pb_instance_configuration + &stream::_process_pb_instance_configuration, &stream::_process_pb_adaptive_service_status, &stream::_process_pb_adaptive_host_status, -}; + &stream::_process_agent_stats}; constexpr size_t neb_processing_table_size = sizeof(stream::neb_processing_table) / diff --git a/broker/unified_sql/src/stream_sql.cc b/broker/unified_sql/src/stream_sql.cc index 0e24d255439..63b5e512767 100644 --- a/broker/unified_sql/src/stream_sql.cc +++ b/broker/unified_sql/src/stream_sql.cc @@ -16,7 +16,10 @@ * For more information : contact@centreon.com */ -#include "bbdo/neb.pb.h" +#include +#include +#include +#include #include "bbdo/storage/index_mapping.hh" #include "com/centreon/broker/cache/global_cache.hh" #include "com/centreon/broker/misc/string.hh" @@ -25,9 +28,11 @@ #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/broker/unified_sql/internal.hh" #include "com/centreon/broker/unified_sql/stream.hh" +#include "com/centreon/common/file.hh" #include "com/centreon/common/utf8.hh" #include "com/centreon/engine/host.hh" -#include "com/centreon/engine/service.hh" +#include "common/engine_conf/parser.hh" +#include "common/engine_conf/state_helper.hh" using namespace com::centreon::broker; using namespace com::centreon::broker::database; @@ -291,6 +296,10 @@ void stream::_update_hosts_and_services_of_instance(uint32_t id, id); _mysql.run_query(query, database::mysql_error::restore_instances, conn); _add_action(conn, actions::services); + query = fmt::format( + "UPDATE agent_information SET enabled = 1 WHERE poller_id={}", id); + _mysql.run_query(query, database::mysql_error::restore_instances, conn); + _add_action(conn, actions::services); } else { query = fmt::format( "UPDATE instances SET outdated=TRUE WHERE instance_id={}", id); @@ -305,6 +314,10 @@ void stream::_update_hosts_and_services_of_instance(uint32_t id, id); _mysql.run_query(query, database::mysql_error::restore_instances, conn); _add_action(conn, 
actions::hosts); + query = fmt::format( + "UPDATE agent_information SET enabled = 0 WHERE poller_id={}", id); + _mysql.run_query(query, database::mysql_error::restore_instances, conn); + _add_action(conn, actions::services); } auto bbdo = config::applier::state::instance().get_bbdo_version(); SPDLOG_LOGGER_TRACE( @@ -1495,7 +1508,7 @@ void stream::_process_pb_host_group_member(const std::shared_ptr& d) { } std::string query = fmt::format( - "DELETE FROM hosts_hostgroup WHERE host_id={} and hostgroup_id = {}", + "DELETE FROM hosts_hostgroups WHERE host_id={} and hostgroup_id = {}", hgm.host_id(), hgm.hostgroup_id()); _mysql.run_query(query, database::mysql_error::delete_host_group_member, @@ -1640,7 +1653,7 @@ void stream::_process_pb_host_parent(const std::shared_ptr& d) { hp.parent_id(), hp.child_id()); // Prepare queries. - if (!_host_parent_insert.prepared()) { + if (!_pb_host_parent_insert.prepared()) { query_preparator::event_pb_unique unique{ {3, "child_id", io::protobuf_base::invalid_on_zero, 0}, {4, "parent_id", io::protobuf_base::invalid_on_zero, 0}}; @@ -1682,12 +1695,302 @@ void stream::_process_pb_host_parent(const std::shared_ptr& d) { } } +/** + * @brief Process a Protobuf instance configuration. This event is sent once all + * the configuration is sent by Engine. Even if Engine doesn't need to send it, + * this event is sent so Broker can handle some configurations if needed. + * + * @param d Uncasted Protobuf instance configuration. + */ +void stream::_process_pb_instance_configuration( + const std::shared_ptr& d) { + /* The configuration has just been updated, so we can get the configuration + * from the php cache directory and copy it into the broker cache engine + * configuration directory. */ + std::shared_ptr ic = + std::static_pointer_cast(d); + auto obj = ic->obj(); + SPDLOG_LOGGER_INFO( + _logger_sql, + "unified_sql: processing Pb instance configuration (poller {})", + obj.poller_id()); + std::string current_version = + config::applier::state::instance().engine_configuration(obj.poller_id()); + /* The instance configuration message is only interesting with extended + * negociation. 
*/ + if (!current_version.empty() && + !config::applier::state::instance().config_cache_dir().empty()) { + std::filesystem::path poller_dir = + config::applier::state::instance().pollers_config_dir() / + fmt::to_string(obj.poller_id()); + std::filesystem::path cache_dir = + config::applier::state::instance().config_cache_dir() / + fmt::to_string(obj.poller_id()); + if (!std::filesystem::exists(cache_dir)) { + _logger_sql->error( + "unified_sql: The cache directory that should contain the engine " + "configuration does not exist: '{}'", + cache_dir.string()); + return; + } + std::error_code ec; + std::string new_version = common::hash_directory(cache_dir, ec); + if (ec) { + _logger_sql->error( + "unified_sql: Error while hashing the cache directory '{}': {}", + cache_dir.string(), ec.message()); + } + if (new_version != current_version) { + _logger_sql->debug( + "unified_sql: New engine configuration, broker directories updated"); + std::filesystem::path pollers_dir = + config::applier::state::instance().pollers_config_dir(); + if (!std::filesystem::exists(pollers_dir)) { + _logger_sql->trace( + "unified_sql: Broker poller directory '{}' does not exist, " + "creating " + "it", + cache_dir.string()); + std::filesystem::create_directories(cache_dir); + } + if (!std::filesystem::is_empty(poller_dir)) { + _logger_sql->trace( + "unified_sql: Broker poller directory '{}' is not empty, cleaning " + "it", + poller_dir.string()); + std::filesystem::remove_all(poller_dir); + } + std::filesystem::copy(cache_dir, poller_dir, + std::filesystem::copy_options::recursive); + config::applier::state::instance().set_engine_configuration( + obj.poller_id(), new_version); + _logger_sql->info("SQL: Poller {} configuration updated in '{}'", + obj.poller_id(), poller_dir.string()); + } else { + _logger_sql->debug( + "unified_sql: Engine configuration already known by Broker"); + } + + if (_is_valid_poller(obj.poller_id())) { + engine::configuration::State state; + engine::configuration::state_helper state_hlp(&state); + engine::configuration::error_cnt err; + engine::configuration::parser p; + try { + p.parse(poller_dir / "centengine.cfg", &state, err); + state_hlp.expand(err); + + if (_store_in_hosts_services) { + if (!_eh_update) { + if (_bulk_prepared_statement) { + auto eh = std::make_unique( + "UPDATE hosts SET enabled = 1 WHERE host_id = ?"); + _mysql.prepare_statement(*eh); + _eh_bind = std::make_unique( + _dbcfg.get_connections_count(), dt_queue_timer_duration, + _max_pending_queries, *eh, _logger_sql); + _eh_update = std::move(eh); + } else { + _eh_update = std::make_unique( + "UPDATE hosts SET enabled = 1 WHERE host_id = ?"); + _mysql.prepare_statement(*_eh_update); + } + } + + if (!_es_update) { + if (_bulk_prepared_statement) { + auto es = std::make_unique( + "UPDATE services SET enabled=1 WHERE host_id=? AND " + "service_id=?"); + _mysql.prepare_statement(*es); + _es_bind = std::make_unique( + _dbcfg.get_connections_count(), dt_queue_timer_duration, + _max_pending_queries, *es, _logger_sql); + _es_update = std::move(es); + } else { + _es_update = std::make_unique( + "UPDATE services SET enabled=1 WHERE host_id=? 
AND " + "service_id=?"); + _mysql.prepare_statement(*_es_update); + } + } + + if (_bulk_prepared_statement) { + for (const auto& h : state.hosts()) { + if (!_eh_bind->bind(0)) + _eh_bind->init_from_stmt(0); + auto* b = _eh_bind->bind(0).get(); + b->set_value_as_u64(0, h.host_id()); + b->next_row(); + } + SPDLOG_LOGGER_TRACE( + _logger_sql, + "Check if some statements are ready, eh_bind connections " + "count = {}", + _eh_bind->connections_count()); + if (_eh_bind->size(0) > 0) { + SPDLOG_LOGGER_DEBUG(_logger_sql, + "Enabling {} hosts in hosts table", + _eh_bind->size(0)); + // Setting the good bind to the stmt + _eh_bind->apply_to_stmt(0); + // Executing the stmt + _mysql.run_statement( + *_eh_update, database::mysql_error::update_hosts_enabled, 0); + } + for (const auto& s : state.services()) { + if (!_es_bind->bind(0)) + _es_bind->init_from_stmt(0); + auto* b = _es_bind->bind(0).get(); + b->set_value_as_u64(0, s.host_id()); + b->set_value_as_u64(1, s.service_id()); + b->next_row(); + } + SPDLOG_LOGGER_TRACE( + _logger_sql, + "Check if some statements are ready, es_bind connections " + "count = {}", + _es_bind->connections_count()); + if (_es_bind->size(0) > 0) { + SPDLOG_LOGGER_DEBUG(_logger_sql, + "Enabling {} services in services table", + _es_bind->size(0)); + // Setting the good bind to the stmt + _es_bind->apply_to_stmt(0); + // Executing the stmt + _mysql.run_statement( + *_es_update, database::mysql_error::update_services_enabled, + 0); + } + } else { + for (const auto& h : state.hosts()) { + _eh_update->bind_value_as_u64(0, h.host_id()); + _mysql.run_statement( + *_eh_update, database::mysql_error::update_hosts_enabled, 0); + } + for (const auto& s : state.services()) { + _es_update->bind_value_as_u64(0, s.host_id()); + _es_update->bind_value_as_u64(1, s.service_id()); + _mysql.run_statement( + *_es_update, database::mysql_error::update_services_enabled, + 0); + } + } + } + if (_store_in_resources) { + if (!_ehr_update) { + std::string query = + "UPDATE resources SET enabled=1 WHERE id=? AND parent_id=0"; + if (_bulk_prepared_statement) { + auto ehr = std::make_unique(query); + _mysql.prepare_statement(*ehr); + _ehr_bind = std::make_unique( + _dbcfg.get_connections_count(), dt_queue_timer_duration, + _max_pending_queries, *ehr, _logger_sql); + _ehr_update = std::move(ehr); + } else { + _ehr_update = std::make_unique(query); + _mysql.prepare_statement(*_ehr_update); + } + } + if (!_esr_update) { + std::string query = + "UPDATE resources SET enabled=1 WHERE parent_id=? 
AND id=?"; + if (_bulk_prepared_statement) { + auto esr = std::make_unique(query); + _mysql.prepare_statement(*esr); + _esr_bind = std::make_unique( + _dbcfg.get_connections_count(), dt_queue_timer_duration, + _max_pending_queries, *esr, _logger_sql); + _esr_update = std::move(esr); + } else { + _esr_update = std::make_unique(query); + _mysql.prepare_statement(*_esr_update); + } + } + + if (_bulk_prepared_statement) { + for (const auto& h : state.hosts()) { + if (!_ehr_bind->bind(0)) + _ehr_bind->init_from_stmt(0); + auto* b = _ehr_bind->bind(0).get(); + _logger_sql->debug("Enabling host_id: {}", h.host_id()); + b->set_value_as_u64(0, h.host_id()); + b->next_row(); + } + SPDLOG_LOGGER_TRACE( + _logger_sql, + "Check if some statements are ready, ehr_bind connections " + "count = {}", + _ehr_bind->connections_count()); + + if (_ehr_bind->size(0) > 0) { + SPDLOG_LOGGER_DEBUG(_logger_sql, + "Enabling {} hosts in resources table", + _ehr_bind->size(0)); + // Setting the good bind to the stmt + _ehr_bind->apply_to_stmt(0); + // Executing the stmt + _mysql.run_statement( + *_ehr_update, + database::mysql_error::update_hosts_resources_enabled, 0); + } + for (const auto& s : state.services()) { + if (!_esr_bind->bind(0)) + _esr_bind->init_from_stmt(0); + _logger_sql->debug("Enabling service ({}:{})", s.host_id(), + s.service_id()); + auto* b = _esr_bind->bind(0).get(); + b->set_value_as_u64(0, s.host_id()); + b->set_value_as_u64(1, s.service_id()); + b->next_row(); + } + SPDLOG_LOGGER_TRACE( + _logger_sql, + "Check if some statements are ready, esr_bind connections " + "count = {}", + _esr_bind->connections_count()); + if (_esr_bind->size(0) > 0) { + SPDLOG_LOGGER_DEBUG(_logger_sql, + "Enabling {} services in resources table", + _esr_bind->size(0)); + // Setting the good bind to the stmt + _esr_bind->apply_to_stmt(0); + // Executing the stmt + _mysql.run_statement( + *_esr_update, + database::mysql_error::update_services_resources_enabled, 0); + } + } else { + for (const auto& h : state.hosts()) { + _ehr_update->bind_value_as_u64(0, h.host_id()); + _mysql.run_statement( + *_ehr_update, + database::mysql_error::update_hosts_resources_enabled, 0); + } + for (const auto& s : state.services()) { + _esr_update->bind_value_as_u64(0, s.host_id()); + _esr_update->bind_value_as_u64(1, s.service_id()); + _mysql.run_statement( + *_esr_update, + database::mysql_error::update_services_resources_enabled, 0); + } + } + } + } catch (const std::exception& e) { + _logger_sql->error( + "unified_sql: error while parsing poller {} Engine configuration: " + "{}", + obj.poller_id(), e.what()); + } + } + } +} + /** * Process a host status event. * * @param[in] e Uncasted host status. - * - * @return The number of events that can be acknowledged. 
*/ void stream::_process_host_status(const std::shared_ptr& d) { if (!_store_in_hosts_services) @@ -4878,6 +5181,50 @@ void stream::_process_tag(const std::shared_ptr& d) { } } +void stream::_process_agent_stats(const std::shared_ptr& d) { + SPDLOG_LOGGER_INFO(_logger_sql, "unified_sql: processing agent stats"); + std::shared_ptr as{ + std::static_pointer_cast(d)}; + + std::string json_infos; + + const AgentStats& stats = as->obj(); + + using namespace rapidjson; + Document doc(rapidjson::kArrayType); + + for (const AgentInfo& info : stats.stats()) { + rapidjson::Value stat(rapidjson::kObjectType); + stat.AddMember("agent_major", info.major(), doc.GetAllocator()); + stat.AddMember("agent_minor", info.minor(), doc.GetAllocator()); + stat.AddMember("agent_patch", info.patch(), doc.GetAllocator()); + stat.AddMember("reverse", info.reverse(), doc.GetAllocator()); + stat.AddMember("os", StringRef(info.os().c_str()), doc.GetAllocator()); + stat.AddMember("os_version", StringRef(info.os_version().c_str()), + doc.GetAllocator()); + stat.AddMember("nb_agent", info.nb_agent(), doc.GetAllocator()); + doc.PushBack(stat, doc.GetAllocator()); + } + StringBuffer out_buff; + Writer writer(out_buff); + doc.Accept(writer); + + if (!_agent_information_insert_update.prepared()) { + _agent_information_insert_update = _mysql.prepare_query( + "INSERT INTO agent_information (poller_id, enabled, infos) VALUES " + "(?,?,?) ON DUPLICATE KEY UPDATE enabled=VALUES(enabled), " + "infos=VALUES(infos)"); + } + int32_t conn = _mysql.choose_connection_by_instance(stats.poller_id()); + + _agent_information_insert_update.bind_value_as_u32(0, stats.poller_id()); + _agent_information_insert_update.bind_value_as_bool(1, true); + _agent_information_insert_update.bind_value_as_str(2, out_buff.GetString()); + _mysql.run_statement(_agent_information_insert_update, + database::mysql_error::insert_update_agent_information, + conn); +} + /** * Process a responsive instance event. 
* diff --git a/broker/unified_sql/src/stream_storage.cc b/broker/unified_sql/src/stream_storage.cc index c502953f953..5fe8618fbc6 100644 --- a/broker/unified_sql/src/stream_storage.cc +++ b/broker/unified_sql/src/stream_storage.cc @@ -17,11 +17,8 @@ */ #include -#include #include -#include -#include #include #include "bbdo/storage/index_mapping.hh" @@ -31,15 +28,12 @@ #include "bbdo/storage/status.hh" #include "com/centreon/broker/cache/global_cache.hh" #include "com/centreon/broker/misc/misc.hh" -#include "com/centreon/broker/misc/shared_mutex.hh" #include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/neb/events.hh" #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/broker/unified_sql/internal.hh" #include "com/centreon/broker/unified_sql/stream.hh" -#include "com/centreon/common/perfdata.hh" #include "com/centreon/common/utf8.hh" -#include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; using namespace com::centreon::broker; diff --git a/broker/unified_sql/test/conflict_manager.cc b/broker/unified_sql/test/conflict_manager.cc index 3f2b99647bf..b4addb12f57 100644 --- a/broker/unified_sql/test/conflict_manager.cc +++ b/broker/unified_sql/test/conflict_manager.cc @@ -36,7 +36,7 @@ class USConflictManagerTest : public ::testing::Test { public: void SetUp() override { try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (std::exception const& e) { (void)e; } diff --git a/broker/unified_sql/test/connector.cc b/broker/unified_sql/test/connector.cc index 23a3cdcef9b..8d638e6a43a 100644 --- a/broker/unified_sql/test/connector.cc +++ b/broker/unified_sql/test/connector.cc @@ -37,16 +37,16 @@ TEST(UnifiedSqlFactory, Factory) { unified_sql::factory factory; - ASSERT_THROW(factory.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(factory.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["length"] = "42"; - ASSERT_THROW(factory.new_endpoint(cfg, is_acceptor, cache), + ASSERT_THROW(factory.new_endpoint(cfg, {}, is_acceptor, cache), exceptions::config); cfg.params["db_type"] = "mysql"; cfg.params["db_name"] = "centreon"; ASSERT_FALSE(factory.has_endpoint(cfg, nullptr)); cfg.type = "unified_sql"; unified_sql::connector* endp = static_cast( - factory.new_endpoint(cfg, is_acceptor, cache)); + factory.new_endpoint(cfg, {}, is_acceptor, cache)); unified_sql::connector con; con.connect_to(dbcfg, 60, 300, 80, 250, true); @@ -68,9 +68,9 @@ TEST(UnifiedSqlFactory, FactoryWithFullConf) { unified_sql::factory factory; - ASSERT_THROW(factory.new_endpoint(cfg, is_acceptor, cache), msg_fmt); + ASSERT_THROW(factory.new_endpoint(cfg, {}, is_acceptor, cache), msg_fmt); cfg.params["length"] = "42"; - ASSERT_THROW(factory.new_endpoint(cfg, is_acceptor, cache), + ASSERT_THROW(factory.new_endpoint(cfg, {}, is_acceptor, cache), exceptions::config); cfg.params["db_type"] = "mysql"; cfg.params["db_name"] = "centreon"; @@ -80,7 +80,7 @@ TEST(UnifiedSqlFactory, FactoryWithFullConf) { ASSERT_FALSE(factory.has_endpoint(cfg, nullptr)); cfg.type = "unified_sql"; unified_sql::connector* endp = static_cast( - factory.new_endpoint(cfg, is_acceptor, cache)); + factory.new_endpoint(cfg, {}, is_acceptor, cache)); unified_sql::connector con; con.connect_to(dbcfg, 42, 43, 44, 45, false); diff --git a/broker/unified_sql/test/perfdata.cc b/broker/unified_sql/test/perfdata.cc index a19d3689dca..24815c1ec74 100644 --- a/broker/unified_sql/test/perfdata.cc 
+++ b/broker/unified_sql/test/perfdata.cc @@ -196,7 +196,9 @@ TEST(UnifiedSqlPerfdata, DefaultCtor) { class UnifiedSqlParserParsePerfdata : public testing::Test { public: - void SetUp() override { config::applier::init(0, "test_broker", 0); } + void SetUp() override { + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); + } void TearDown() override { config::applier::deinit(); }; }; diff --git a/broker/unified_sql/test/rebuild_message.cc b/broker/unified_sql/test/rebuild_message.cc index 0da54265b78..07686545327 100644 --- a/broker/unified_sql/test/rebuild_message.cc +++ b/broker/unified_sql/test/rebuild_message.cc @@ -79,7 +79,7 @@ class UnifiedSqlRebuild2Test : public ::testing::Test { _logger = log_v2::instance().get(log_v2::SQL); io::data::broker_id = 0; try { - config::applier::init(0, "broker_test", 0); + config::applier::init(com::centreon::common::BROKER, 0, "broker_test", 0); } catch (std::exception const& e) { (void)e; } diff --git a/broker/unified_sql/test/status-entry.cc b/broker/unified_sql/test/status-entry.cc index 36f53e0d18f..221e4aa593b 100644 --- a/broker/unified_sql/test/status-entry.cc +++ b/broker/unified_sql/test/status-entry.cc @@ -72,7 +72,7 @@ class UnifiedSqlEntryTest : public ::testing::Test { void SetUp() override { io::data::broker_id = 0; try { - config::applier::init(0, "test_broker", 0); + config::applier::init(com::centreon::common::BROKER, 0, "test_broker", 0); } catch (std::exception const& e) { (void)e; } diff --git a/broker/victoria_metrics/inc/com/centreon/broker/victoria_metrics/factory.hh b/broker/victoria_metrics/inc/com/centreon/broker/victoria_metrics/factory.hh index 5e5a6a082d9..3fbfaef5c2c 100644 --- a/broker/victoria_metrics/inc/com/centreon/broker/victoria_metrics/factory.hh +++ b/broker/victoria_metrics/inc/com/centreon/broker/victoria_metrics/factory.hh @@ -41,6 +41,7 @@ class factory : public http_tsdb::factory { factory& operator=(factory const& other) = delete; io::endpoint* new_endpoint( config::endpoint& cfg, + const std::map& global_params, bool& is_acceptor, std::shared_ptr cache) const override; }; diff --git a/broker/victoria_metrics/src/factory.cc b/broker/victoria_metrics/src/factory.cc index 321e088a168..492195cdd04 100644 --- a/broker/victoria_metrics/src/factory.cc +++ b/broker/victoria_metrics/src/factory.cc @@ -20,7 +20,6 @@ #include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/victoria_metrics/connector.hh" #include "com/centreon/common/pool.hh" -#include "com/centreon/exceptions/msg_fmt.hh" using namespace nlohmann; using namespace com::centreon::broker; @@ -55,8 +54,10 @@ factory::factory() io::endpoint* factory::new_endpoint( config::endpoint& cfg, + const std::map& global_params + [[maybe_unused]], bool& is_acceptor, - std::shared_ptr ) const { + std::shared_ptr) const { is_acceptor = false; std::shared_ptr conf( diff --git a/broker/victoria_metrics/src/request.cc b/broker/victoria_metrics/src/request.cc index 431ad991dd7..2d994fe17c3 100644 --- a/broker/victoria_metrics/src/request.cc +++ b/broker/victoria_metrics/src/request.cc @@ -93,7 +93,7 @@ void request::add_metric(const storage::pb_metric& metric) { void request::add_status(const storage::pb_status& status) { const Status status_obj = status.obj(); - if (status_obj.state() < 0 || status_obj.state() > 2) { + if (status_obj.state() > 2) { if (status_obj.state() != 3) { // we don't write unknown but it's not an error SPDLOG_LOGGER_ERROR(_logger, "unknown state: {}", status_obj.state()); diff --git 
a/broker/victoria_metrics/src/stream.cc b/broker/victoria_metrics/src/stream.cc index 7ed5b55096f..3a3446dae33 100644 --- a/broker/victoria_metrics/src/stream.cc +++ b/broker/victoria_metrics/src/stream.cc @@ -20,12 +20,13 @@ #include "com/centreon/broker/victoria_metrics/stream.hh" #include "bbdo/storage/metric.hh" #include "bbdo/storage/status.hh" -#include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/victoria_metrics/request.hh" +#include "common/crypto/base64.hh" #include "common/log_v2/log_v2.hh" using namespace com::centreon::broker; using namespace com::centreon::broker::victoria_metrics; +using namespace com::centreon::common::crypto; using log_v2 = com::centreon::common::log_v2::log_v2; const std::string stream::allowed_macros = @@ -60,8 +61,7 @@ stream::stream(const std::shared_ptr& io_context, 20); _authorization = "Basic "; - _authorization += - misc::string::base64_encode(conf->get_user() + ':' + conf->get_pwd()); + _authorization += base64_encode(conf->get_user() + ':' + conf->get_pwd()); } std::shared_ptr stream::load( diff --git a/broker/victoria_metrics/test/factory_test.cc b/broker/victoria_metrics/test/factory_test.cc index e114faff650..2a5b81eb229 100644 --- a/broker/victoria_metrics/test/factory_test.cc +++ b/broker/victoria_metrics/test/factory_test.cc @@ -43,19 +43,19 @@ TEST_F(VictoriaMetricsFactory, MissingParams) { bool is_acceptor; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, nullptr), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, nullptr), msg_fmt); cfg.params["db_user"] = "admin"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, nullptr), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, nullptr), msg_fmt); cfg.params["db_password"] = "pass"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, nullptr), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, nullptr), msg_fmt); cfg.params["db_host"] = "host"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, nullptr), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, nullptr), msg_fmt); cfg.params["db_name"] = "centreon"; - ASSERT_THROW(fact.new_endpoint(cfg, is_acceptor, nullptr), msg_fmt); + ASSERT_THROW(fact.new_endpoint(cfg, {}, is_acceptor, nullptr), msg_fmt); cfg.params["db_host"] = "localhost"; - ASSERT_NO_THROW(fact.new_endpoint(cfg, is_acceptor, nullptr)); + ASSERT_NO_THROW(fact.new_endpoint(cfg, {}, is_acceptor, nullptr)); cfg.params["db_host"] = "127.0.0.1"; - ASSERT_NO_THROW(fact.new_endpoint(cfg, is_acceptor, nullptr)); + ASSERT_NO_THROW(fact.new_endpoint(cfg, {}, is_acceptor, nullptr)); ASSERT_FALSE(is_acceptor); } @@ -82,7 +82,7 @@ TEST_F(VictoriaMetricsFactory, ParseParameter) { bool is_acceptor; victoria_metrics::connector* conn = static_cast( - fact.new_endpoint(cfg, is_acceptor, nullptr)); + fact.new_endpoint(cfg, {}, is_acceptor, nullptr)); ASSERT_FALSE(is_acceptor); const http_tsdb::http_tsdb_config& conf = *conn->get_conf(); diff --git a/broker/victoria_metrics/test/stream_test.cc b/broker/victoria_metrics/test/stream_test.cc index d04b13cfb0f..0c96f320a76 100644 --- a/broker/victoria_metrics/test/stream_test.cc +++ b/broker/victoria_metrics/test/stream_test.cc @@ -45,7 +45,7 @@ extern std::shared_ptr g_io_context; class victoria_stream_test : public ::testing::Test { public: static void SetUpTestSuite() { - config::applier::state::load(); + config::applier::state::load(com::centreon::common::BROKER); file::disk_accessor::load(1000); } static void TearDownTestSuite() {} diff --git a/ccc/CMakeLists.txt 
b/ccc/CMakeLists.txt index 3eb37f85b8f..40773c5ffc2 100644 --- a/ccc/CMakeLists.txt +++ b/ccc/CMakeLists.txt @@ -42,7 +42,7 @@ endif() include_directories( ${CMAKE_SOURCE_DIR}/broker/core/src ${CMAKE_SOURCE_DIR}/engine/enginerpc - ${CMAKE_SOURCE_DIR}/common/src) + ${CMAKE_SOURCE_DIR}/common/src ${CMAKE_SOURCE_DIR}/bbdo) set(ccc_files main.cc client.cc) if(WITH_ASAN) diff --git a/ccc/client.cc b/ccc/client.cc index 5c2ea11087c..a00f42a5fc8 100644 --- a/ccc/client.cc +++ b/ccc/client.cc @@ -1,5 +1,5 @@ /** - * Copyright 2024 Centreon + * Copyright 2024-2024 Centreon * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/ccc/client.hh b/ccc/client.hh index b11f2096a53..0cca74a1b5b 100644 --- a/ccc/client.hh +++ b/ccc/client.hh @@ -1,20 +1,20 @@ -/* -** Copyright 2024 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2022-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #ifndef _CCC_CLIENT_HH #define _CCC_CLIENT_HH diff --git a/clib/inc/com/centreon/io/directory_entry.hh b/clib/inc/com/centreon/io/directory_entry.hh deleted file mode 100644 index 9b91231a258..00000000000 --- a/clib/inc/com/centreon/io/directory_entry.hh +++ /dev/null @@ -1,61 +0,0 @@ -/* -** Copyright 2012-2013 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ - -#ifndef CC_IO_DIRECTORY_ENTRY_HH -#define CC_IO_DIRECTORY_ENTRY_HH - -#include -#include -#include "com/centreon/handle.hh" -#include "com/centreon/io/file_entry.hh" - -namespace com::centreon { - -namespace io { -/** - * @class directory_entry directory_entry.hh - *"com/centreon/io/directory_entry.hh" - * @brief Wrapper of libc's directory_entryectory. - * - * Wrap standard directory_entryectory objects. 
- */ -class directory_entry { - public: - directory_entry(char const* path = NULL); - directory_entry(std::string const& path); - directory_entry(directory_entry const& right); - directory_entry& operator=(directory_entry const& right); - bool operator==(directory_entry const& right) const throw(); - bool operator!=(directory_entry const& right) const throw(); - ~directory_entry() throw(); - static std::string current_path(); - file_entry const& entry() const throw(); - std::list const& entry_list(std::string const& filter = ""); - - private: - void _internal_copy(directory_entry const& right); - static int _nmatch(char const* str, char const* pattern); - - file_entry _entry; - std::list _entry_lst; -}; -} // namespace io - -} - -#endif // !CC_IO_DIRECTORY_ENTRY_HH diff --git a/clib/inc/com/centreon/io/file_entry.hh b/clib/inc/com/centreon/io/file_entry.hh deleted file mode 100644 index daaa7ff98d5..00000000000 --- a/clib/inc/com/centreon/io/file_entry.hh +++ /dev/null @@ -1,71 +0,0 @@ -/* -** Copyright 2012-2013 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ - -#ifndef CC_IO_FILE_ENTRY_HH -#define CC_IO_FILE_ENTRY_HH - -#include -#include -#include -#include "com/centreon/handle.hh" - -#ifdef _WIN32 -#define stat _stat -#endif // _WIN32 - -namespace com::centreon { - -namespace io { -/** - * @class file_entry file_entry.hh "com/centreon/io/file_entry.hh" - * @brief Wrapper of stat information. - * - * Wrap standard stat information. - */ -class file_entry { - public: - file_entry(char const* path = NULL); - file_entry(std::string const& path); - file_entry(file_entry const& right); - ~file_entry() throw(); - file_entry& operator=(file_entry const& right); - bool operator==(file_entry const& right) const throw(); - bool operator!=(file_entry const& right) const throw(); - std::string base_name() const; - std::string directory_name() const; - std::string file_name() const; - bool is_directory() const throw(); - bool is_link() const throw(); - bool is_regular() const throw(); - std::string const& path() const throw(); - void path(char const* path); - void path(std::string const& path); - void refresh(); - unsigned long long size() const throw(); - - private: - void _internal_copy(file_entry const& right); - - std::string _path; - struct stat _sbuf; -}; -} // namespace io - -} - -#endif // !CC_IO_FILE_ENTRY_HH diff --git a/clib/inc/com/centreon/io/file_stream.hh b/clib/inc/com/centreon/io/file_stream.hh index 7fcf536d28f..d45a8f9525f 100644 --- a/clib/inc/com/centreon/io/file_stream.hh +++ b/clib/inc/com/centreon/io/file_stream.hh @@ -1,20 +1,20 @@ -/* -** Copyright 2012-2013 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. 
-** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2012-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #ifndef CC_IO_FILE_STREAM_HH #define CC_IO_FILE_STREAM_HH @@ -23,9 +23,8 @@ #include #include "com/centreon/handle.hh" -namespace com::centreon { +namespace com::centreon::io { -namespace io { /** * @class file_stream file_stream.hh "com/centreon/io/file_stream.hh" * @brief Wrapper of libc's FILE streams. @@ -35,7 +34,7 @@ namespace io { class file_stream : public handle { public: file_stream(FILE* stream = NULL, bool auto_close = false); - ~file_stream() throw(); + ~file_stream() noexcept; void close(); static void copy(char const* src, char const* dst); static void copy(std::string const& src, std::string const& dst); @@ -62,8 +61,6 @@ class file_stream : public handle { bool _auto_close; FILE* _stream; }; -} // namespace io - -} +} // namespace com::centreon::io #endif // !CC_IO_FILE_STREAM_HH diff --git a/clib/src/handle_action.cc b/clib/src/handle_action.cc index 6da1d97f8b7..9bc3ef39006 100644 --- a/clib/src/handle_action.cc +++ b/clib/src/handle_action.cc @@ -1,24 +1,22 @@ /** -* Copyright 2011-2013 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2011-2013 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #include "com/centreon/handle_action.hh" -#include -#include "com/centreon/handle.hh" #include "com/centreon/handle_listener.hh" using namespace com::centreon; diff --git a/clib/src/io/CMakeLists.txt b/clib/src/io/CMakeLists.txt index 461e95084fa..bbb754453e4 100644 --- a/clib/src/io/CMakeLists.txt +++ b/clib/src/io/CMakeLists.txt @@ -1,41 +1,31 @@ -## -## Copyright 2011-2013 Centreon -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -## See the License for the specific language governing permissions and -## limitations under the License. -## -## For more information : contact@centreon.com -## +# +# Copyright 2011-2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# # Set directories. set(SRC_DIR "${SRC_DIR}/io") set(INC_DIR "${INC_DIR}/io") # Set sources. -set( - SOURCES - ${SOURCES} - "${SRC_DIR}/directory_entry.cc" - "${SRC_DIR}/file_entry.cc" - "${SRC_DIR}/file_stream.cc" - PARENT_SCOPE -) +set(SOURCES + ${SOURCES} ${SRC_DIR}/file_stream.cc + PARENT_SCOPE) # Set headers. -set( - HEADERS - ${HEADERS} - "${INC_DIR}/directory_entry.hh" - "${INC_DIR}/file_entry.hh" - "${INC_DIR}/file_stream.hh" - PARENT_SCOPE -) +set(HEADERS + ${HEADERS} ${INC_DIR}/file_stream.hh + PARENT_SCOPE) diff --git a/clib/src/io/directory_entry.cc b/clib/src/io/directory_entry.cc deleted file mode 100644 index b8aac418e45..00000000000 --- a/clib/src/io/directory_entry.cc +++ /dev/null @@ -1,178 +0,0 @@ -/** - * Copyright 2012-2013 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ - -#include "com/centreon/io/directory_entry.hh" -#include -#include -#include -#include -#include -#include "com/centreon/exceptions/msg_fmt.hh" - -using namespace com::centreon::io; -using com::centreon::exceptions::msg_fmt; - -/** - * Constructor. - * - * @param[in] path The directory path. - */ -directory_entry::directory_entry(char const* path) : _entry(path) {} - -/** - * Constructor. - * - * @param[in] path The directory path. 
- */ -directory_entry::directory_entry(std::string const& path) : _entry(path) {} - -/** - * Copy constructor. - * - * @param[in] right The object to copy. - */ -directory_entry::directory_entry(directory_entry const& right) { - _internal_copy(right); -} - -/** - * Copy operator. - * - * @param[in] right The object to copy. - * - * @return This object. - */ -directory_entry& directory_entry::operator=(directory_entry const& right) { - _internal_copy(right); - return *this; -} - -/** - * Equal operator. - * - * @param[in] right The object to compare. - * - * @return True if is the same object, owtherwise false. - */ -bool directory_entry::operator==(directory_entry const& right) const noexcept { - return _entry == right._entry; -} - -/** - * Not equal operator. - * - * @param[in] right The object to compare. - * - * @return True if is not the same object, owtherwise false. - */ -bool directory_entry::operator!=(directory_entry const& right) const noexcept { - return !operator==(right); -} - -/** - * Destructor. - */ -directory_entry::~directory_entry() noexcept {} - -/** - * Get the current directory path. - * - * @return The current directory path. - */ -std::string directory_entry::current_path() { - char* buffer(getcwd(NULL, 0)); - if (!buffer) - throw msg_fmt("current path failed"); - std::string path(buffer); - free(buffer); - return path; -} - -/** - * Get the directory information. - * - * @return The directory entry. - */ -file_entry const& directory_entry::entry() const noexcept { - return _entry; -} - -/** - * Get the list of all entry into a directory. - * - * @param[in] filter An optional filter. - * - * @return The file entry list. - */ -std::list const& directory_entry::entry_list( - std::string const& filter) { - _entry_lst.clear(); - char const* filter_ptr(filter.empty() ? NULL : filter.c_str()); - - DIR* dir(opendir(_entry.path().c_str())); - if (!dir) { - char const* msg(strerror(errno)); - throw msg_fmt("open directory failed: {}", msg); - } - - dirent entry; - dirent* result; - while (true) { - if (readdir_r(dir, &entry, &result)) { - closedir(dir); - throw msg_fmt("parse directory failed"); - } - if (!result) - break; - if (!filter_ptr || _nmatch(entry.d_name, filter_ptr)) - _entry_lst.push_back(file_entry(_entry.path() + "/" + entry.d_name)); - } - closedir(dir); - - return _entry_lst; -} - -/** - * Internal copy. - * - * @param[in] right The object to copy. - */ -void directory_entry::_internal_copy(directory_entry const& right) { - if (this != &right) { - _entry = right._entry; - _entry_lst = right._entry_lst; - } -} - -/** - * Check if a string match a pattern. - * - * @param[in] str The string to check. - * @param[in] pattern The partter to match. - * - * @return 1 on success, otherwiswe 0. - */ -int directory_entry::_nmatch(char const* str, char const* pattern) { - if (!*str && !*pattern) - return 1; - if (*str == *pattern) - return _nmatch(str + 1, pattern + 1); - return (*pattern == '*' ? (*str ? _nmatch(str + 1, pattern) : 0) + - _nmatch(str, pattern + 1) - : 0); -} diff --git a/clib/src/io/file_entry.cc b/clib/src/io/file_entry.cc deleted file mode 100644 index 3a3f3a0ac04..00000000000 --- a/clib/src/io/file_entry.cc +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Copyright 2012-2013 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ - -#include "com/centreon/io/file_entry.hh" -#include -#include -#include -#include -#include "com/centreon/exceptions/msg_fmt.hh" - -using namespace com::centreon::io; -using com::centreon::exceptions::msg_fmt; - -/** - * Constructor. - * - * @param[in] path The file path. - */ -file_entry::file_entry(char const* path) : _path(path ? path : "") { - refresh(); -} - -/** - * Constructor overload. - */ -file_entry::file_entry(std::string const& path) : _path(path) { - refresh(); -} - -/** - * Copy constructor. - * - * @param[in] right The object to copy. - */ -file_entry::file_entry(file_entry const& right) { - _internal_copy(right); -} - -/** - * Destructor. - */ -file_entry::~file_entry() noexcept {} - -/** - * Copy operator. - * - * @param[in] right The object to copy. - * - * @return This object. - */ -file_entry& file_entry::operator=(file_entry const& right) { - _internal_copy(right); - return *this; -} - -/** - * Equal operator. - * - * @param[in] right The object to compare. - * - * @return True if is the same object, otherwise false. - */ -bool file_entry::operator==(file_entry const& right) const noexcept { - return (_sbuf.st_dev == right._sbuf.st_dev && - _sbuf.st_ino == right._sbuf.st_ino); -} - -/** - * Not equal operator. - * - * @param[in] right The object to compare. - * - * @return True if is not the same object, otherwise false. - */ -bool file_entry::operator!=(file_entry const& right) const noexcept { - return !operator==(right); -} - -/** - * Get the file name without extention. - * - * @return The file name without extention. - */ -std::string file_entry::base_name() const { - std::string name; - name = file_name(); - size_t pos(name.find_last_of('.')); - if (pos != 0 && pos != std::string::npos) - name.erase(pos); - return name; -} - -/** - * Get the directory path. - * - * @return The directory path of this file. - */ -std::string file_entry::directory_name() const { - std::string retval{dirname(const_cast(_path.c_str()))}; - return retval; -} - -/** - * Get the file name without extention. - * - * @return The file name without extention. - */ -std::string file_entry::file_name() const { - std::string retval{basename(const_cast(_path.c_str()))}; - return retval; -} - -/** - * Check if this file is a directory. - * - * @return True if this file is a directory, otherwise false. - */ -bool file_entry::is_directory() const noexcept { - return (_sbuf.st_mode & S_IFMT) == S_IFDIR; -} - -/** - * Check if this file is a symbolic link. - * - * @return True if this file is a symbolic link, otherwise false. - */ -bool file_entry::is_link() const noexcept { - return (_sbuf.st_mode & S_IFMT) == S_IFLNK; -} - -/** - * Check if this file is regular. - * - * @return True if this file is regular, otherwise false. - */ -bool file_entry::is_regular() const noexcept { - return (_sbuf.st_mode & S_IFMT) == S_IFREG; -} - -/** - * Get the file path. - * - * @return The path. - */ -std::string const& file_entry::path() const noexcept { - return _path; -} - -/** - * Set the file entry path. 
- * - * @param[in] path The file entry path. - */ -void file_entry::path(char const* path) { - _path = path ? path : ""; - refresh(); -} - -/** - * Set the file entry path. - * - * @param[in] path The file entry path. - */ -void file_entry::path(std::string const& path) { - _path = path; - refresh(); -} - -/** - * Reload file information. - */ -void file_entry::refresh() { - if (_path.empty()) - memset(&_sbuf, 0, sizeof(_sbuf)); - else if (stat(_path.c_str(), &_sbuf)) { - char const* msg(strerror(errno)); - throw msg_fmt("get file information failed: {}", msg); - } -} - -/** - * Get the file size. - * - * @return The file size. - */ -unsigned long long file_entry::size() const noexcept { - return _sbuf.st_size; -} - -/** - * Internal copy. - * - * @param[in] right The object to copy. - */ -void file_entry::_internal_copy(file_entry const& right) { - if (this != &right) { - _path = right._path; - memcpy(&_sbuf, &right._sbuf, sizeof(_sbuf)); - } -} diff --git a/clib/src/io/file_stream.cc b/clib/src/io/file_stream.cc index 16b65db4dcf..2dca329beed 100644 --- a/clib/src/io/file_stream.cc +++ b/clib/src/io/file_stream.cc @@ -29,12 +29,6 @@ using namespace com::centreon::io; using com::centreon::exceptions::msg_fmt; -/************************************** - * * - * Public Methods * - * * - **************************************/ - /** * Constructor. * @@ -48,7 +42,7 @@ file_stream::file_stream(FILE* stream, bool auto_close) /** * Destructor. */ -file_stream::~file_stream() throw() { +file_stream::~file_stream() noexcept { close(); } @@ -291,7 +285,7 @@ unsigned long file_stream::size() { * @return Temporary name. */ char* file_stream::temp_path() { - char* ret(::tmpnam(static_cast(NULL))); + char* ret = ::tmpnam(static_cast(nullptr)); if (!ret) throw msg_fmt("could not generate temporary file name"); return ret; diff --git a/clib/test/io.cc b/clib/test/io.cc index eb93c0b3dda..12f6378908d 100644 --- a/clib/test/io.cc +++ b/clib/test/io.cc @@ -19,156 +19,29 @@ #include #include #include "com/centreon/exceptions/msg_fmt.hh" -#include "com/centreon/io/directory_entry.hh" #include "com/centreon/io/file_stream.hh" -unsigned int const DATA_SIZE = 42; - using namespace com::centreon; -static void create_fake_file(std::string const& path) { - if (!io::file_stream::exists(path)) { - io::file_stream fs; - fs.open(path, "w"); - fs.close(); - } -} - -TEST(ClibIO, DirEntryCopy) { - io::directory_entry e1("."); - io::directory_entry e2(e1); - ASSERT_EQ(e1, e2); - - io::directory_entry e3 = e1; - ASSERT_EQ(e1, e3); -} - -TEST(ClibIO, DirCtorDefault) { - io::directory_entry entry(NULL); - ASSERT_TRUE(entry.entry().path().empty()); - io::directory_entry entry2("."); - ASSERT_FALSE(entry2.entry().path().empty()); -} - -TEST(ClibIO, DirCurrentPath) { - std::string path(io::directory_entry::current_path()); - ASSERT_FALSE(path.empty()); -} - -TEST(ClibIO, DirFilter) { - io::directory_entry entry("."); - std::list lst_all(entry.entry_list()); - std::list lst_point(entry.entry_list(".*")); - std::list lst_de(entry.entry_list("io_directory_entry*")); - - ASSERT_GE(lst_all.size(), lst_point.size() + lst_de.size()); - ASSERT_GE(lst_point.size(), 2u); - ASSERT_LT(lst_de.size(), 2u); -} - -TEST(ClibIO, FileEntryCopy) { - io::file_entry e1("/tmp"); - io::file_entry e2(e1); - ASSERT_EQ(e1, e2); - - io::file_entry e3 = e1; - ASSERT_EQ(e1, e3); -} - -TEST(ClibIO, FileEntryCtorDefault) { - { - io::file_entry entry(NULL); - ASSERT_TRUE(entry.path().empty()); - } - - { - io::file_entry entry("/tmp"); - 
ASSERT_FALSE(entry.path().empty()); - } -} - -TEST(ClibIO, FileEntryPathInfo) { - std::string p1("/tmp/test.ext"); - std::string p2("/tmp/.test"); - std::string p3("/tmp/test"); - - create_fake_file(p1); - create_fake_file(p2); - create_fake_file(p3); - - io::file_entry e1(p1); - ASSERT_EQ(e1.base_name(), "test"); - ASSERT_EQ(e1.file_name(), "test.ext"); - ASSERT_EQ(e1.directory_name(), "/tmp"); - - io::file_entry e2(p2); - ASSERT_EQ(e2.base_name(), ".test"); - ASSERT_EQ(e2.file_name(), ".test"); - ASSERT_EQ(e2.directory_name(), "/tmp"); - - io::file_entry e3(p3); - ASSERT_EQ(e3.base_name(), "test"); - ASSERT_EQ(e3.file_name(), "test"); - ASSERT_EQ(e3.directory_name(), "/tmp"); - - io::file_stream::remove(p1); - io::file_stream::remove(p2); - io::file_stream::remove(p3); -} - -TEST(ClibIO, FileEntryPermission) { - io::file_entry entry("/tmp"); - ASSERT_TRUE(entry.is_directory()); - ASSERT_FALSE(entry.is_link()); - ASSERT_FALSE(entry.is_regular()); -} - -TEST(ClibIO, FileEntrySize) { - std::string temp(io::file_stream::temp_path()); - { - io::file_stream fs; - fs.open(temp, "w"); - fs.close(); - } - - io::file_entry entry(temp); - ASSERT_FALSE(entry.size()); - - { - std::string data(DATA_SIZE, ' '); - io::file_stream fs; - fs.open(temp, "w"); - fs.write(data.c_str(), data.size()); - fs.close(); - } - - ASSERT_FALSE(entry.size()); - entry.refresh(); - ASSERT_EQ(entry.size(), DATA_SIZE); - - if (io::file_stream::exists(temp)) - io::file_stream::remove(temp); -} - TEST(ClibIO, FileStreamCreateExistsRemove) { - char* path(io::file_stream::temp_path()); + std::string path(io::file_stream::temp_path()); // Remove old file. - io::file_stream::remove(path); + io::file_stream::remove(path.c_str()); // File must not exists. - ASSERT_FALSE(io::file_stream::exists(path)); + ASSERT_FALSE(io::file_stream::exists(path.c_str())); // Create file. { io::file_stream fs; - fs.open(path, "w"); + fs.open(path.c_str(), "w"); fs.close(); } - ASSERT_TRUE(io::file_stream::exists(path)); - ASSERT_TRUE(io::file_stream::remove(path)); - ASSERT_FALSE(io::file_stream::exists(path)); + ASSERT_TRUE(io::file_stream::exists(path.c_str())); + ASSERT_TRUE(io::file_stream::remove(path.c_str())); + ASSERT_FALSE(io::file_stream::exists(path.c_str())); } TEST(ClibIO, FileStreamCtorDefault) { @@ -177,11 +50,11 @@ TEST(ClibIO, FileStreamCtorDefault) { } TEST(ClibIO, FileStreamRead) { - char const* tmp_file_name(io::file_stream::temp_path()); + std::string tmp_file_name(io::file_stream::temp_path()); // Open temporary file. io::file_stream tmp_file_stream; - tmp_file_stream.open(tmp_file_name, "w"); + tmp_file_stream.open(tmp_file_name.c_str(), "w"); // Return value. int retval(0); @@ -202,7 +75,7 @@ TEST(ClibIO, FileStreamRead) { // Real read. char buffer[1024]; tmp_file_stream.close(); - tmp_file_stream.open(tmp_file_name, "r"); + tmp_file_stream.open(tmp_file_name.c_str(), "r"); ASSERT_FALSE(tmp_file_stream.read(buffer, sizeof(buffer)) == 0); } ASSERT_EQ(retval, 0); @@ -246,11 +119,11 @@ TEST(ClibIO, FileStreamRename) { TEST(ClibIO, FileStreamWrite) { // Generate temporary file name. - char const* tmp_file_name(io::file_stream::temp_path()); + std::string tmp_file_name(io::file_stream::temp_path()); // Open temporary file. io::file_stream tmp_file_stream; - tmp_file_stream.open(tmp_file_name, "w"); + tmp_file_stream.open(tmp_file_name.c_str(), "w"); // NULL write. 
ASSERT_THROW(tmp_file_stream.write(NULL, 1), exceptions::msg_fmt); diff --git a/clib/test/logging.cc b/clib/test/logging.cc index 79928d5c63a..a5d4c25b62c 100644 --- a/clib/test/logging.cc +++ b/clib/test/logging.cc @@ -1,20 +1,20 @@ /** -* Copyright 2011-2020 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2011-2020 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include #include @@ -343,7 +343,7 @@ TEST(ClibLogging, EngineWithThread) { TEST(ClibLogging, FileLog) { static char msg[] = "Centreon Clib test"; - char* tmp(com::centreon::io::file_stream::temp_path()); + std::string tmp(com::centreon::io::file_stream::temp_path()); { file f(tmp, false, false, none, false); f.log(1, 0, msg, sizeof(msg)); @@ -352,7 +352,7 @@ TEST(ClibLogging, FileLog) { { FILE* out(NULL); - ASSERT_TRUE((out = fopen(tmp, "w"))); + ASSERT_TRUE((out = fopen(tmp.c_str(), "w"))); file f(out, false, false, none, false); f.log(1, 0, msg, sizeof(msg)); } @@ -362,7 +362,7 @@ TEST(ClibLogging, FileLog) { TEST(ClibLogging, FileLogMultiline) { static unsigned int const nb_line(1024); - char* tmpfile(com::centreon::io::file_stream::temp_path()); + std::string tmpfile(com::centreon::io::file_stream::temp_path()); std::ostringstream tmp; std::ostringstream tmpref; diff --git a/clib/test/main.cc b/clib/test/main.cc index cda4e636f7c..65a35903883 100644 --- a/clib/test/main.cc +++ b/clib/test/main.cc @@ -18,7 +18,7 @@ */ #include -int main(int argc, char* argv[], char** env) { +int main(int argc, char* argv[]) { // GTest initialization. testing::InitGoogleTest(&argc, argv); diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 5ec51626812..ca7bff67b83 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -60,19 +60,20 @@ if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") ${SRC_DIR}/process_stat.pb.cc ${SRC_DIR}/process_stat.grpc.pb.cc ${SRC_DIR}/rapidjson_helper.cc - ${SRC_DIR}/utf8.cc) + ${SRC_DIR}/utf8.cc + ${SRC_DIR}/file.cc) else() # we need not many things to just compile centreon-monitoring-agent # (centagent) - set(SOURCES + set(SOURCES ${SRC_DIR}/perfdata.cc ${SRC_DIR}/rapidjson_helper.cc ${SRC_DIR}/utf8.cc) endif() # Include directories. 
-include_directories("${INCLUDE_DIR}" ${HTTP_INCLUDE_DIR} ${VCPKG_INCLUDE_DIR} - ${PROCESS_INCLUDE_DIR}) +include_directories(${INCLUDE_DIR} ${HTTP_INCLUDE_DIR} ${VCPKG_INCLUDE_DIR} + ${PROCESS_INCLUDE_DIR}) add_definitions(-DSPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_TRACE) add_library(centreon_common STATIC ${SOURCES}) @@ -82,6 +83,8 @@ target_precompile_headers(centreon_common PRIVATE precomp_inc/precomp.hh) if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") add_subdirectory(http) + add_subdirectory(vault) + add_subdirectory(crypto) endif() add_subdirectory(grpc) diff --git a/common/crypto/CMakeLists.txt b/common/crypto/CMakeLists.txt new file mode 100644 index 00000000000..30cec929aad --- /dev/null +++ b/common/crypto/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) + +add_library( + ctncrypto STATIC + # Sources. + aes256.cc base64.cc) + +set_property(TARGET ctncrypto PROPERTY POSITION_INDEPENDENT_CODE ON) +target_link_libraries(ctncrypto PRIVATE OpenSSL::Crypto OpenSSL::SSL) diff --git a/common/crypto/aes256.cc b/common/crypto/aes256.cc new file mode 100644 index 00000000000..ea2a2ea50ef --- /dev/null +++ b/common/crypto/aes256.cc @@ -0,0 +1,204 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ +#include +#include +#include +#include +#include +#include + +#include "aes256.hh" +#include "base64.hh" +#include "com/centreon/exceptions/msg_fmt.hh" + +namespace com::centreon::common::crypto { + +/** + * @brief The aes256 constructor. This class is used to encrypt and decrypt + * data using the AES256 algorithm. Two keys are provided to the constructor. + * The first key is used to encrypt the data and the second key is used to + * generate a hash of the encrypted data. This hash is used to check The + * integrity of the data. + * + * @param first_key The first key used to encrypt the data. + * @param second_key The second key used to generate the hash of the encrypted. 
+ */ +aes256::aes256(const std::string& first_key, const std::string& second_key) + : _first_key{base64_decode(first_key)}, + _second_key(base64_decode(second_key)) { + if (_first_key.size() != 32) + throw exceptions::msg_fmt( + "the key for aes256 must have a size of 256 bits and not {}", + _first_key.size() * 8); + assert(!_second_key.empty()); +} + +/** + * @brief Encrypt the input string using the AES256 algorithm. + * + * @param input The string to encrypt. + * + * @return The encrypted string. + */ +std::string aes256::encrypt(const std::string& input) { + const int iv_length = EVP_CIPHER_iv_length(EVP_aes_256_cbc()); + + uint32_t crypted_size = + input.size() + EVP_CIPHER_block_size(EVP_aes_256_cbc()); + + std::string result; + /* Here is the result just before the base64 encoding. It is composed of: + * * the iv + * * the hash of size 64 + * * the crypted vector + * We reserve result to be suffisantly big to contain them. crypted_size + * is an estimated size, always bigger than the real result. + */ + result.resize(iv_length + 64 + crypted_size); + + std::string_view iv(result.data(), iv_length); + std::string_view hmac(result.data() + iv_length, 64); + std::string_view crypted_vector(result.data() + iv_length + 64, crypted_size); + + RAND_bytes((unsigned char*)iv.data(), iv_length); + + EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new(); + if (!EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, + (unsigned char*)_first_key.data(), + (unsigned char*)iv.data())) { + EVP_CIPHER_CTX_free(ctx); + throw exceptions::msg_fmt("Encryption initialization failed"); + } + + int output_length; + if (!EVP_EncryptUpdate(ctx, (unsigned char*)crypted_vector.data(), + &output_length, (unsigned char*)input.data(), + input.size())) { + EVP_CIPHER_CTX_free(ctx); + throw exceptions::msg_fmt("Encryption failed"); + } + int len = output_length; + + if (!EVP_EncryptFinal_ex( + ctx, (unsigned char*)crypted_vector.data() + output_length, &len)) { + uint64_t err = ERR_get_error(); + absl::FixedArray mess(0); + ERR_error_string_n(err, mess.data(), 1023); + EVP_CIPHER_CTX_free(ctx); + throw exceptions::msg_fmt("Encryption finalization failed: {}", + mess.data()); + } + assert(output_length + len <= (int)crypted_size); + crypted_size = output_length + len; + result.resize(iv_length + 64 + crypted_size); + crypted_vector = crypted_vector.substr(0, crypted_size); + EVP_CIPHER_CTX_free(ctx); + + // iv is already at the beginning of result. + // We put the hmac at the second place + uint32_t output_len; + if (!HMAC(EVP_sha3_512(), _second_key.data(), _second_key.length(), + (unsigned char*)crypted_vector.data(), crypted_vector.size(), + (unsigned char*)hmac.data(), &output_len)) + throw exceptions::msg_fmt( + "Error during the message authentication code computation"); + assert(output_len == 64); + + return base64_encode(result); +} + +/** + * @brief Decrypt the input string using the AES256 algorithm. + * + * @param input The string to decrypt. + * + * @return The decrypted string. 
+ */ +std::string aes256::decrypt(const std::string& input) { + std::string mix = base64_decode(input); + + const int iv_length = EVP_CIPHER_iv_length(EVP_aes_256_cbc()); + if (iv_length <= 0) { + throw exceptions::msg_fmt("Error when retrieving the cipher length"); + } + + if (mix.size() <= static_cast(iv_length) + 64) + throw exceptions::msg_fmt("The content is not AES256 encrypted"); + + std::string_view iv(mix.data(), iv_length); + std::string_view hash(mix.data() + iv_length, 64); + std::string_view encrypted_first_part(mix.data() + iv_length + 64, + mix.size() - iv_length - 64); + + EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new(); + int len = 0; + int plaintext_len = 0; + + std::string data; + data.resize(encrypted_first_part.size() + + EVP_CIPHER_block_size(EVP_aes_256_cbc())); + + if (!EVP_DecryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, + (unsigned char*)_first_key.data(), + (unsigned char*)iv.data())) { + EVP_CIPHER_CTX_free(ctx); + throw exceptions::msg_fmt("Decryption initialization failed"); + } + + if (!EVP_DecryptUpdate(ctx, (unsigned char*)data.data(), &len, + (unsigned char*)encrypted_first_part.data(), + encrypted_first_part.size())) { + EVP_CIPHER_CTX_free(ctx); + throw exceptions::msg_fmt("Decryption failed"); + } + plaintext_len = len; + + if (!EVP_DecryptFinal_ex(ctx, (unsigned char*)data.data() + len, &len)) { + uint64_t err = ERR_get_error(); + absl::FixedArray mess(0); + ERR_error_string_n(err, mess.data(), 1023); + EVP_CIPHER_CTX_free(ctx); + throw exceptions::msg_fmt("Decryption finalization failed: {}", + mess.data()); + } + plaintext_len += len; + + data.resize(plaintext_len); + EVP_CIPHER_CTX_free(ctx); + + if (!data.empty()) { + std::string second_encrypted_new; + second_encrypted_new.resize(SHA512_DIGEST_LENGTH); + uint32_t second_encrypted_length; + if (!HMAC(EVP_sha3_512(), _second_key.data(), _second_key.length(), + (unsigned char*)encrypted_first_part.data(), + encrypted_first_part.length(), + (unsigned char*)second_encrypted_new.data(), + &second_encrypted_length)) + throw exceptions::msg_fmt( + "Error during the message authentication code computation"); + + assert(second_encrypted_length == 64); + if (hash == second_encrypted_new) + return data; + } + + return std::string(); +} + +} // namespace com::centreon::common::crypto diff --git a/common/crypto/aes256.hh b/common/crypto/aes256.hh new file mode 100644 index 00000000000..585974c2553 --- /dev/null +++ b/common/crypto/aes256.hh @@ -0,0 +1,41 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CCC_VAULT_AES256_HH +#define CCC_VAULT_AES256_HH +#include + +namespace com::centreon::common::crypto { + +class aes256 { + const std::string _first_key; + const std::string _second_key; + + std::string _app_secret(); + + public: + aes256(const std::string& first_key, const std::string& second_key); + aes256(const aes256&) = delete; + aes256& operator=(const aes256&) = delete; + std::string decrypt(const std::string& input); + std::string encrypt(const std::string& input); + void set_env_file(const std::string& env_file); +}; +} // namespace com::centreon::common::crypto + +#endif /* !CCC_VAULT_AES256_HH */ diff --git a/common/crypto/base64.cc b/common/crypto/base64.cc new file mode 100644 index 00000000000..d615ea0bf7e --- /dev/null +++ b/common/crypto/base64.cc @@ -0,0 +1,101 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ +#include "base64.hh" +#include +#include "com/centreon/exceptions/msg_fmt.hh" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::common::crypto { +/** + * @brief Encode a string to base64. + * + * @param str The string to encode. + * + * @return The base64 encoding string. + */ +std::string base64_encode(const std::string& str) { + const constexpr std::string_view b( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"); + std::string retval; + retval.reserve((str.size() / 3 + (str.size() % 3 > 0)) * 4); + + int val = 0, valb = -6; + for (unsigned char c : str) { + val = (val << 8) + c; + valb += 8; + while (valb >= 0) { + retval.push_back(b[(val >> valb) & 0x3f]); + valb -= 6; + } + } + if (valb > -6) + retval.push_back(b[((val << 8) >> (valb + 8)) & 0x3f]); + while (retval.size() % 4) + retval.push_back('='); + + return retval; +} + +/** + * @brief Decode a base64 string. The result is stored into a string. + * + * @param ascdata A string base64 encoded. + * + * @return The decoded string. + */ +std::string base64_decode(const std::string& ascdata) { + const constexpr std::array reverse_table{ + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 64, 64, 64, + 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64, + 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64}; + + std::string retval; + + const std::string::const_iterator last = ascdata.end(); + int bits_collected = 0; + unsigned int accumulator = 0; + + for (std::string::const_iterator i = ascdata.begin(); i != last; ++i) { + const int c = *i; + if (std::isspace(c) || c == '=') { + // Skip whitespace and padding. Be liberal in what you accept. 
+ continue; + } + if (c > 127 || c < 0 || reverse_table[c] > 63) { + throw msg_fmt( + "This string '{}' contains characters not legal in a base64 encoded " + "string.", + ascdata); + } + accumulator = (accumulator << 6) | reverse_table[c]; + bits_collected += 6; + if (bits_collected >= 8) { + bits_collected -= 8; + retval += static_cast((accumulator >> bits_collected) & 0xffu); + } + } + + return retval; +} +} // namespace com::centreon::common::crypto diff --git a/common/crypto/base64.hh b/common/crypto/base64.hh new file mode 100644 index 00000000000..175dde05bf2 --- /dev/null +++ b/common/crypto/base64.hh @@ -0,0 +1,29 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ +#ifndef CCC_CRYPTO_BASE64_HH +#define CCC_CRYPTO_BASE64_HH +#include + +namespace com::centreon::common::crypto { + +std::string base64_encode(const std::string& str); +std::string base64_decode(const std::string& ascdata); + +} // namespace com::centreon::common::crypto + +#endif /* !CCC_CRYPTO_BASE64_HH */ diff --git a/common/doc/common-doc.md b/common/doc/common-doc.md index 9edf0d08e52..39e77a8e5df 100644 --- a/common/doc/common-doc.md +++ b/common/doc/common-doc.md @@ -5,6 +5,7 @@ * [Pool](#Pool) * [Grpc](#Grpc) * [Process](#Process) +* [Vault](#Vault) * [Engine configuration](#Engineconfiguration) @@ -14,7 +15,7 @@ After a fork, only caller thread is activated in child process, so we mustn't jo ## Grpc -The goal of the two classes provided, grpc_server_base and grpc_client_base is to create server or channel in order to use it with grpc generated services such as exchange in broker grpc module. +The goal of the two classes provided, grpc_server_base and grpc_client_base is to create server or channel in order to use it with grpc generated services such as exchange in broker grpc module. * `grpc_server_base` creates a ::grpc::server object. You can register service with third constructor parameter builder_option. * `grpc_client_base` creates a ::grpc::channel that can be used to create a stub. @@ -61,9 +62,9 @@ All is asynchronous, child process end of life is notified to on_process_end met You have 4 constructors that allow user to pass executable arguments in four different ways. On of them accept a string command line with exe and arguments -In order to use this, you have to inherit from this class +To be able to use all the possibilities of the process class, you have to inherit from it. -An example of usage: +Here is an example of an inheritence: ```c++ class process_wait : public process { std::condition_variable _cond; @@ -114,11 +115,94 @@ class process_wait : public process { _cond.wait(l); } }; +``` + +The `process` class can be used alone to execute a program directly in an asynchron way. + +Here is another practical example: + +``` +#include "com/centreon/common/process/process.hh" + +void do_stuff() { + /* process must be a shared_ptr. 
*/ + auto p = std::make_shared>(g_io_context, _logger, + "/usr/bin/perl " HTTP_TEST_DIR "/vault-server.pl"); + + /* Here the process is started. */ + p->start_process(false); + + /* Now, while the process is running, we can do our work. */ + my_function_that_exchanges_with_process(); + my_other_function_that_doesnt_work_with_process(); + + /* When all the stuff is done, we can stop p */ + p->kill(); +} +``` + +## Vault + +Broker can work with Hashicorp Vault to get its database passwords. +We have a library designed for that in `common/vault`. + +To use it, we need two files: +* the JSON vault file that contains all the fields needed to access the Vault. +* an env file that contains the APP_SECRET salt used to encrypt the Vault access secrets. + +The Vault file is of the form: +``` +{ + "name": "my_vault", + "url": "localhost", + "port": 4443, + "root_path": "john-doe", + "secret_id": "clb0EZHfRypwmDSi61gZivsNj+VnSHUdAGgcD5bjaYiYIwZjb9NKrY+j/x/sUIivYDQWC3hm8J8L2qlk1tP9RkVrFMndR5fK+bTKJrTlc97NJzwxgvkzgNKXqfehmo6IOlcHMzun8/SoObYQW+bFJgTSeOkXlfIUYDJuBXv7FDU=", + "role_id": "S4t2tU2MgOgXESsIZfcw3LJlJiLd17OiEnjPCNSsLnJ81i7Rvr+sgrHP8EnWR+r6QT0c/XHH0XOoPd09RGyv06dBRmfmArvBz8itfVeFTGIbzsZltliua2NfcMT7A1W3VFRq9OpM29rOtrgmGxArAiFgXPGymDPLXmgIjMNz+K4=", + "salt": "U2FsdA==" +} +``` + +The `secret_id` and the `role_id` are used for the authentication to the Vault. They are AES256 encrypted +in this file so not directly usable. +The `salt` is used during the AES256 encryption, `url` and `port` are the access to the vault. + +If we have these two files and a spdlog::logger, it is pretty simple to access the vault. + +Let's suppose we have a path in the vault and we want to get the corresponding password, let's say +``` +std::string path = "secret::hashicorp_vault::johndoe/data/configuration/broker/08cb1f88-fc16-4d77-b27c-a97b2d5a1597::central-broker-master-unified-sql_db_password"; +``` +We can use the following code to get the password: + +``` +std::string env_file("/tmp/env_file"); +std::string vault_file("/tmp/vault_file"); +bool verify_peer = true; +std::shared_ptr logger = my_logger(); +common::vault::vault_access vault(env_file, vault_file, verify_peer, logger); +std::string password = vault.decrypt(path); +``` + +In case of error, an exception is thrown with the error message, so to also catch the +message we can write something like this: + +``` +std::string env_file("/tmp/env_file"); +std::string vault_file("/tmp/vault_file"); +bool verify_peer = true; +std::shared_ptr logger = my_logger(); +try { + common::vault::vault_access vault(env_file, vault_file, verify_peer, logger); + std::string password = vault.decrypt(path); +} catch (const std::exception& e) { + logger->error("Error with the vault: {}", e.what()); +} ``` ### Asio bug work around -There is an issue in io_context::notify_fork. Internally, ctx.notify_fork calls epoll_reactor::notify_fork which locks registered_descriptors_mutex_. An issue occurs when registered_descriptors_mutex_ is locked by another thread at fork timepoint. +There is an issue in io_context::notify_fork. Internally, ctx.notify_fork calls epoll_reactor::notify_fork which locks registered_descriptors_mutex_. An issue occurs when registered_descriptors_mutex_ is locked by another thread at fork timepoint. In such a case, child process starts with registered_descriptors_mutex_ already locked and both child and parent process will hang. 
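Editor's note: the two sketches below are not part of the patch; they are illustrative only, and every name not present in the diff (such as `spawn_child`) is a placeholder.

The asio work-around paragraph above describes the hazard around `ctx.notify_fork`. As a reference point, here is a minimal sketch of the standard Asio fork-notification sequence it refers to; the practical consequence of the paragraph is that no other thread should be able to touch the reactor while this sequence runs.

```c++
#include <sys/types.h>
#include <unistd.h>

#include <boost/asio.hpp>

// Hypothetical helper (not from the repository) showing the usual
// prepare / child / parent notification order around fork().
pid_t spawn_child(boost::asio::io_context& ctx) {
  // This call takes the epoll_reactor's internal mutex mentioned above,
  // so it must not race with other threads using the reactor.
  ctx.notify_fork(boost::asio::execution_context::fork_prepare);

  pid_t pid = fork();
  if (pid == 0) {
    // Child: re-arm the reactor for the child side before using ctx.
    ctx.notify_fork(boost::asio::execution_context::fork_child);
    // ... child work with ctx ...
    _exit(0);
  }

  // Parent: re-arm the reactor for the parent side.
  ctx.notify_fork(boost::asio::execution_context::fork_parent);
  return pid;
}
```

The crypto helpers introduced earlier in this patch (`common/crypto/base64.hh` and `common/crypto/aes256.hh`) are only visible here through their headers, so a small usage sketch may help; the include path for `aes256.hh` and the key material are assumptions, but the signatures match the added sources (the first AES key must decode to 32 bytes and both keys are passed base64-encoded).

```c++
#include <cassert>
#include <iostream>
#include <string>

#include "common/crypto/aes256.hh"
#include "common/crypto/base64.hh"

using namespace com::centreon::common::crypto;

int main() {
  // base64 round-trip, as used by the victoria_metrics stream for the
  // Basic authorization header.
  std::string encoded = base64_encode("user:password");
  assert(base64_decode(encoded) == "user:password");

  // aes256 takes base64-encoded keys; the first one must decode to 32 bytes.
  aes256 crypto(base64_encode(std::string(32, 'A')),  // placeholder key
                base64_encode("hmac key"));           // placeholder key
  std::string secret = crypto.encrypt("db_password");
  std::cout << crypto.decrypt(secret) << "\n";  // prints "db_password"
  return 0;
}
```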
## Engine configuration diff --git a/common/engine_conf/CMakeLists.txt b/common/engine_conf/CMakeLists.txt index d8c2b3b3bdc..82a545f1e78 100644 --- a/common/engine_conf/CMakeLists.txt +++ b/common/engine_conf/CMakeLists.txt @@ -90,4 +90,5 @@ target_link_libraries( absl::base absl::bits -L${PROTOBUF_LIB_DIR} + centreon_common protobuf) diff --git a/common/engine_conf/anomalydetection_helper.cc b/common/engine_conf/anomalydetection_helper.cc index 1917ecbae77..6d9979ec1a6 100644 --- a/common/engine_conf/anomalydetection_helper.cc +++ b/common/engine_conf/anomalydetection_helper.cc @@ -19,7 +19,6 @@ #include "common/engine_conf/anomalydetection_helper.hh" #include "com/centreon/exceptions/msg_fmt.hh" -#include "common/engine_conf/state.pb.h" using com::centreon::exceptions::msg_fmt; @@ -57,7 +56,7 @@ anomalydetection_helper::anomalydetection_helper(Anomalydetection* obj) * @param value The value corresponding to the key */ bool anomalydetection_helper::hook(std::string_view key, - const std::string_view& value) { + std::string_view value) { Anomalydetection* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ @@ -91,20 +90,6 @@ bool anomalydetection_helper::hook(std::string_view key, } obj->set_flap_detection_options(options); return true; - } else if (key == "initial_state") { - ServiceStatus initial_state; - if (value == "o" || value == "ok") - initial_state = ServiceStatus::state_ok; - else if (value == "w" || value == "warning") - initial_state = ServiceStatus::state_warning; - else if (value == "u" || value == "unknown") - initial_state = ServiceStatus::state_unknown; - else if (value == "c" || value == "critical") - initial_state = ServiceStatus::state_critical; - else - return false; - obj->set_initial_state(initial_state); - return true; } else if (key == "notification_options") { uint16_t options(action_svc_none); if (fill_service_notification_options(&options, value)) { @@ -249,7 +234,6 @@ void anomalydetection_helper::_init() { action_svc_unknown | action_svc_critical); obj->set_freshness_threshold(0); obj->set_high_flap_threshold(0); - obj->set_initial_state(ServiceStatus::state_ok); obj->set_is_volatile(false); obj->set_low_flap_threshold(0); obj->set_max_check_attempts(3); diff --git a/common/engine_conf/anomalydetection_helper.hh b/common/engine_conf/anomalydetection_helper.hh index 9b8698ba728..f1ad8d2b096 100644 --- a/common/engine_conf/anomalydetection_helper.hh +++ b/common/engine_conf/anomalydetection_helper.hh @@ -38,7 +38,7 @@ class anomalydetection_helper : public message_helper { ~anomalydetection_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; bool insert_customvariable(std::string_view key, std::string_view value) override; diff --git a/common/engine_conf/contact_helper.cc b/common/engine_conf/contact_helper.cc index dd48ee548b4..c900ff41bc3 100644 --- a/common/engine_conf/contact_helper.cc +++ b/common/engine_conf/contact_helper.cc @@ -46,16 +46,26 @@ contact_helper::contact_helper(Contact* obj) * @param key The key to parse. 
* @param value The value corresponding to the key */ -bool contact_helper::hook(std::string_view key, const std::string_view& value) { +bool contact_helper::hook(std::string_view key, std::string_view value) { Contact* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ key = validate_key(key); - - if (key == "host_notification_options") { + if (key == "contact_name") { + obj->set_contact_name(std::string(value)); + set_changed(obj->descriptor()->FindFieldByName("contact_name")->index()); + if (obj->alias().empty()) { + obj->set_alias(obj->contact_name()); + set_changed(obj->descriptor()->FindFieldByName("alias")->index()); + } + return true; + } else if (key == "host_notification_options") { uint16_t options = action_hst_none; if (fill_host_notification_options(&options, value)) { obj->set_host_notification_options(options); + set_changed(obj->descriptor() + ->FindFieldByName("host_notification_options") + ->index()); return true; } else return false; @@ -63,6 +73,9 @@ bool contact_helper::hook(std::string_view key, const std::string_view& value) { uint16_t options = action_svc_none; if (fill_service_notification_options(&options, value)) { obj->set_service_notification_options(options); + set_changed(obj->descriptor() + ->FindFieldByName("service_notification_options") + ->index()); return true; } else return false; @@ -75,6 +88,10 @@ bool contact_helper::hook(std::string_view key, const std::string_view& value) { } else if (key == "service_notification_commands") { fill_string_group(obj->mutable_service_notification_commands(), value); return true; + } else if (key.compare(0, 7, "address") == 0) { + obj->add_address(value.data(), value.size()); + set_changed(obj->descriptor()->FindFieldByName("address")->index()); + return true; } return false; } @@ -140,4 +157,32 @@ bool contact_helper::insert_customvariable(std::string_view key, new_cv->set_value(value.data(), value.size()); return true; } + +/** + * @brief Expand the Contact object. + * + * @param s The configuration::State object. + * @param err An error counter. + */ +void contact_helper::expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& + m_contactgroups) { + // Browse all contacts. + for (auto& c : *s.mutable_contacts()) { + // Browse current contact's groups. + for (auto& cg : *c.mutable_contactgroups()->mutable_data()) { + // Find contact group. 
+ auto found_cg = m_contactgroups.find(cg); + if (found_cg == m_contactgroups.end()) { + err.config_errors++; + throw msg_fmt( + "Could not add contact '{}' to non-existing contact group '{}'", + c.contact_name(), cg); + } + fill_string_group(found_cg->second->mutable_members(), c.contact_name()); + } + } +} } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/contact_helper.hh b/common/engine_conf/contact_helper.hh index 4c887d75381..da2b7a0dbd3 100644 --- a/common/engine_conf/contact_helper.hh +++ b/common/engine_conf/contact_helper.hh @@ -37,10 +37,15 @@ class contact_helper : public message_helper { ~contact_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; bool insert_customvariable(std::string_view key, std::string_view value) override; + static void expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& + m_contactgroups); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/contactgroup_helper.cc b/common/engine_conf/contactgroup_helper.cc index d5f2bc7848b..92dff7d5771 100644 --- a/common/engine_conf/contactgroup_helper.cc +++ b/common/engine_conf/contactgroup_helper.cc @@ -44,8 +44,7 @@ contactgroup_helper::contactgroup_helper(Contactgroup* obj) * @param key The key to parse. * @param value The value corresponding to the key */ -bool contactgroup_helper::hook(std::string_view key, - const std::string_view& value) { +bool contactgroup_helper::hook(std::string_view key, std::string_view value) { Contactgroup* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ @@ -81,4 +80,72 @@ void contactgroup_helper::_init() { Contactgroup* obj = static_cast(mut_obj()); obj->mutable_obj()->set_register_(true); } + +/** + * @brief Expand the contactgroups. + * + * @param s The configuration state to expand. + * @param err The error count object to update in case of errors. + */ +void contactgroup_helper::expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& + m_contactgroups) { + absl::flat_hash_set resolved; + + for (auto& cg : *s.mutable_contactgroups()) + _resolve_members(s, cg, resolved, err, m_contactgroups); +} + +/** + * @brief Resolves the members of a contact group by recursively processing its + * member groups. + * + * This function ensures that all members of a contact group, including those in + * nested groups, are resolved and added to the contact group's member list. It + * also handles errors when a non-existing contact group member is encountered. + * + * @param s The current configuration state. + * @param obj The contact group object whose members are to be resolved. + * @param resolved A set of already resolved contact group names to avoid + * circular dependencies. + * @param err A structure to count configuration errors. + * @param m_contactgroups A map of contact group names to their corresponding + * contact group objects. + * + * @throws msg_fmt If a non-existing contact group member is encountered. 
+ */ +void contactgroup_helper::_resolve_members( + configuration::State& s, + configuration::Contactgroup& obj, + absl::flat_hash_set& resolved, + configuration::error_cnt& err, + absl::flat_hash_map& + m_contactgroups) { + if (resolved.contains(obj.contactgroup_name())) + return; + + resolved.emplace(obj.contactgroup_name()); + if (!obj.contactgroup_members().data().empty()) { + for (auto& cg_name : obj.contactgroup_members().data()) { + auto it = m_contactgroups.find(cg_name); + + if (it == m_contactgroups.end()) { + err.config_errors++; + throw msg_fmt( + "Error: Could not add non-existing contact group member '{}' to " + "contactgroup '{}'", + cg_name, obj.contactgroup_name()); + } + + Contactgroup& inner_cg = *it->second; + _resolve_members(s, inner_cg, resolved, err, m_contactgroups); + for (auto& c_name : inner_cg.members().data()) + fill_string_group(obj.mutable_members(), c_name); + } + obj.mutable_contactgroup_members()->clear_data(); + } +} + } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/contactgroup_helper.hh b/common/engine_conf/contactgroup_helper.hh index a97a055d950..527afc3aeba 100644 --- a/common/engine_conf/contactgroup_helper.hh +++ b/common/engine_conf/contactgroup_helper.hh @@ -31,13 +31,25 @@ namespace com::centreon::engine::configuration { */ class contactgroup_helper : public message_helper { void _init(); + static void _resolve_members( + configuration::State& s, + configuration::Contactgroup& obj, + absl::flat_hash_set& resolved, + configuration::error_cnt& err, + absl::flat_hash_map& + m_contactgroups); public: contactgroup_helper(Contactgroup* obj); ~contactgroup_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; + static void expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& + m_contactgroups); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/host_helper.cc b/common/engine_conf/host_helper.cc index b0fc2a5fd14..340d66afe05 100644 --- a/common/engine_conf/host_helper.cc +++ b/common/engine_conf/host_helper.cc @@ -17,6 +17,7 @@ * */ #include "common/engine_conf/host_helper.hh" +#include #include "com/centreon/exceptions/msg_fmt.hh" @@ -61,7 +62,7 @@ host_helper::host_helper(Host* obj) * @param key The key to parse. * @param value The value corresponding to the key */ -bool host_helper::hook(std::string_view key, const std::string_view& value) { +bool host_helper::hook(std::string_view key, std::string_view value) { Host* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... 
*/ @@ -70,7 +71,7 @@ bool host_helper::hook(std::string_view key, const std::string_view& value) { obj->set_host_name(std::string(value)); set_changed(obj->descriptor()->FindFieldByName("host_name")->index()); if (obj->alias().empty()) { - obj->set_alias(std::string(value)); + obj->set_alias(obj->host_name()); set_changed(obj->descriptor()->FindFieldByName("alias")->index()); } return true; @@ -87,6 +88,8 @@ bool host_helper::hook(std::string_view key, const std::string_view& value) { uint16_t options = action_hst_none; if (fill_host_notification_options(&options, value)) { obj->set_notification_options(options); + set_changed( + obj->descriptor()->FindFieldByName("notification_options")->index()); return true; } else return false; @@ -141,6 +144,97 @@ bool host_helper::hook(std::string_view key, const std::string_view& value) { } } return ret; + } else if (key == "coords_3d") { + std::vector coords_list{absl::StrSplit(value, ',')}; + + if (coords_list.size() != 3) + return false; + + double value; + if (absl::SimpleAtod(coords_list[0], &value)) + obj->mutable_coords_3d()->set_x(value); + else + return false; + + if (absl::SimpleAtod(coords_list[1], &value)) + obj->mutable_coords_3d()->set_y(value); + else + return false; + + if (absl::SimpleAtod(coords_list[2], &value)) + obj->mutable_coords_3d()->set_z(value); + else + return false; + + set_changed(obj->descriptor()->FindFieldByName("coords_3d")->index()); + + return true; + } else if (key == "coords_2d") { + std::vector coords_list{absl::StrSplit(value, ',')}; + + if (coords_list.size() != 2) + return false; + + double value; + if (absl::SimpleAtod(coords_list[0], &value)) + obj->mutable_coords_2d()->set_x(value); + else + return false; + + if (absl::SimpleAtod(coords_list[1], &value)) + obj->mutable_coords_2d()->set_y(value); + else + return false; + + set_changed(obj->descriptor()->FindFieldByName("coords_2d")->index()); + + return true; + } else if (key == "stalking_options") { + uint8_t options(action_hst_none); + auto values = absl::StrSplit(value, ','); + for (auto it = values.begin(); it != values.end(); ++it) { + std::string_view v = absl::StripAsciiWhitespace(*it); + if (v == "o" || v == "up") + options |= action_hst_up; + else if (v == "d" || v == "down") + options |= action_hst_down; + else if (v == "u" || v == "unreachable") + options |= action_hst_unreachable; + else if (v == "n" || v == "none") + options = action_hst_none; + else if (v == "a" || v == "all") + options = action_hst_up | action_hst_down | action_hst_unreachable; + else + return false; + } + obj->set_stalking_options(options); + set_changed( + obj->descriptor()->FindFieldByName("stalking_options")->index()); + + return true; + } else if (key == "flap_detection_options") { + uint8_t options(action_hst_none); + auto values = absl::StrSplit(value, ','); + for (auto& val : values) { + auto v = absl::StripAsciiWhitespace(val); + if (v == "o" || v == "up") + options |= action_hst_up; + else if (v == "d" || v == "down") + options |= action_hst_down; + else if (v == "u" || v == "unreachable") + options |= action_hst_unreachable; + else if (v == "n" || v == "none") + options = action_hst_none; + else if (v == "a" || v == "all") + options = action_hst_up | action_hst_down | action_hst_unreachable; + else + return false; + } + obj->set_flap_detection_options(options); + set_changed( + obj->descriptor()->FindFieldByName("flap_detection_options")->index()); + + return true; } return false; } @@ -183,7 +277,6 @@ void host_helper::_init() { action_hst_unreachable); 
obj->set_freshness_threshold(0); obj->set_high_flap_threshold(0); - obj->set_initial_state(HostStatus::state_up); obj->set_low_flap_threshold(0); obj->set_max_check_attempts(3); obj->set_notifications_enabled(true); @@ -230,4 +323,31 @@ bool host_helper::insert_customvariable(std::string_view key, new_cv->set_value(value.data(), value.size()); return true; } + +/** + * @brief Expand the hosts. + * + * @param s The configuration state to expand. + * @param err The error count object to update in case of errors. + */ +void host_helper::expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& hgs) { + // Browse all hosts. + for (auto& host_cfg : *s.mutable_hosts()) { + for (auto& grp : host_cfg.hostgroups().data()) { + auto it = hgs.find(grp); + if (it != hgs.end()) { + fill_string_group(it->second->mutable_members(), host_cfg.host_name()); + } else { + err.config_errors++; + throw msg_fmt( + "Could not add host '{}' to non-existing host group '{}'\n", + host_cfg.host_name(), grp); + } + } + } +} + } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/host_helper.hh b/common/engine_conf/host_helper.hh index 63e8a010b98..4d6022ea702 100644 --- a/common/engine_conf/host_helper.hh +++ b/common/engine_conf/host_helper.hh @@ -37,10 +37,14 @@ class host_helper : public message_helper { ~host_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; bool insert_customvariable(std::string_view key, std::string_view value) override; + static void expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& hgs); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/hostdependency_helper.cc b/common/engine_conf/hostdependency_helper.cc index 6dbd419f4d6..296cdc5c0bf 100644 --- a/common/engine_conf/hostdependency_helper.cc +++ b/common/engine_conf/hostdependency_helper.cc @@ -19,7 +19,6 @@ #include "common/engine_conf/hostdependency_helper.hh" #include "com/centreon/exceptions/msg_fmt.hh" -#include "common/engine_conf/state.pb.h" using com::centreon::exceptions::msg_fmt; @@ -76,8 +75,7 @@ hostdependency_helper::hostdependency_helper(Hostdependency* obj) * @param key The key to parse. * @param value The value corresponding to the key */ -bool hostdependency_helper::hook(std::string_view key, - const std::string_view& value) { +bool hostdependency_helper::hook(std::string_view key, std::string_view value) { Hostdependency* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ @@ -163,4 +161,80 @@ void hostdependency_helper::_init() { obj->set_inherits_parent(false); obj->set_notification_failure_options(action_hd_none); } + +/** + * @brief Expand the hostdependencies. + * + * @param s The configuration state to expand. + * @param err The error count object to update in case of errors. 
+ */ +void hostdependency_helper::expand( + State& s, + error_cnt& err, + absl::flat_hash_map& m_hostgroups) { + std::list > lst; + + for (int i = s.hostdependencies_size() - 1; i >= 0; --i) { + auto* hd_conf = s.mutable_hostdependencies(i); + if (hd_conf->hosts().data().size() > 1 || + !hd_conf->hostgroups().data().empty() || + hd_conf->dependent_hosts().data().size() > 1 || + !hd_conf->dependent_hostgroups().data().empty() || + hd_conf->dependency_type() == unknown) { + for (auto& hg_name : hd_conf->dependent_hostgroups().data()) { + auto found = m_hostgroups.find(hg_name); + if (found != m_hostgroups.end()) { + auto& hg_conf = *found->second; + for (auto& h : hg_conf.members().data()) + fill_string_group(hd_conf->mutable_dependent_hosts(), h); + } else { + err.config_errors++; + throw msg_fmt("Host dependency dependent hostgroup '{}' not found", + hg_name); + } + } + for (auto& hg_name : hd_conf->hostgroups().data()) { + auto found = m_hostgroups.find(hg_name); + if (found != m_hostgroups.end()) { + auto& hg_conf = *found->second; + for (auto& h : hg_conf.members().data()) + fill_string_group(hd_conf->mutable_hosts(), h); + } else { + err.config_errors++; + throw msg_fmt("Host dependency hostgroup '{}' not found", hg_name); + } + } + for (auto& h : hd_conf->hosts().data()) { + for (auto& h_dep : hd_conf->dependent_hosts().data()) { + for (int ii = 1; ii <= 2; ii++) { + if (hd_conf->dependency_type() == DependencyKind::unknown || + static_cast(hd_conf->dependency_type()) == ii) { + lst.emplace_back(std::make_unique()); + auto& new_hd = lst.back(); + new_hd->set_dependency_period(hd_conf->dependency_period()); + new_hd->set_inherits_parent(hd_conf->inherits_parent()); + fill_string_group(new_hd->mutable_hosts(), h); + fill_string_group(new_hd->mutable_dependent_hosts(), h_dep); + if (ii == 2) { + new_hd->set_dependency_type( + DependencyKind::execution_dependency); + new_hd->set_execution_failure_options( + hd_conf->execution_failure_options()); + } else { + new_hd->set_dependency_type( + DependencyKind::notification_dependency); + new_hd->set_notification_failure_options( + hd_conf->notification_failure_options()); + } + } + } + } + } + s.mutable_hostdependencies()->DeleteSubrange(i, 1); + } + } + for (auto& hd : lst) + s.mutable_hostdependencies()->AddAllocated(hd.release()); +} + } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/hostdependency_helper.hh b/common/engine_conf/hostdependency_helper.hh index 36a45dab338..4a25efa05ac 100644 --- a/common/engine_conf/hostdependency_helper.hh +++ b/common/engine_conf/hostdependency_helper.hh @@ -39,7 +39,12 @@ class hostdependency_helper : public message_helper { ~hostdependency_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; + static void expand( + State& s, + error_cnt& err, + absl::flat_hash_map& + m_hostgroups); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/hostescalation_helper.cc b/common/engine_conf/hostescalation_helper.cc index 5d6e9c69df6..9a575f85f55 100644 --- a/common/engine_conf/hostescalation_helper.cc +++ b/common/engine_conf/hostescalation_helper.cc @@ -19,7 +19,6 @@ #include "common/engine_conf/hostescalation_helper.hh" #include "com/centreon/exceptions/msg_fmt.hh" -#include "common/engine_conf/state.pb.h" using com::centreon::exceptions::msg_fmt; @@ -67,8 +66,7 @@ 
hostescalation_helper::hostescalation_helper(Hostescalation* obj) * @param key The key to parse. * @param value The value corresponding to the key */ -bool hostescalation_helper::hook(std::string_view key, - const std::string_view& value) { +bool hostescalation_helper::hook(std::string_view key, std::string_view value) { Hostescalation* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ @@ -93,6 +91,8 @@ bool hostescalation_helper::hook(std::string_view key, return false; } obj->set_escalation_options(options); + set_changed( + obj->descriptor()->FindFieldByName("escalation_options")->index()); return true; } else if (key == "contactgroups") { fill_string_group(obj->mutable_contactgroups(), value); @@ -135,4 +135,46 @@ void hostescalation_helper::_init() { obj->set_last_notification(-2); obj->set_notification_interval(0); } + +/** + * @brief Expand the hostescalations. + * + * @param s The configuration state to expand. + * @param err The error count object to update in case of errors. + */ +void hostescalation_helper::expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& m_hostgroups) { + std::list > resolved; + for (auto& he : *s.mutable_hostescalations()) { + if (he.hostgroups().data().size() > 0) { + absl::flat_hash_set host_names; + for (auto& hname : he.hosts().data()) + host_names.emplace(hname); + for (auto& hg_name : he.hostgroups().data()) { + auto found_hg = m_hostgroups.find(hg_name); + if (found_hg != m_hostgroups.end()) { + for (auto& h : found_hg->second->members().data()) + host_names.emplace(h); + } else { + err.config_errors++; + throw msg_fmt("Could not expand non-existing host group '{}'", + hg_name); + } + } + he.mutable_hostgroups()->clear_data(); + he.mutable_hosts()->clear_data(); + for (auto& n : host_names) { + resolved.emplace_back(std::make_unique()); + auto& e = resolved.back(); + e->CopyFrom(he); + fill_string_group(e->mutable_hosts(), n); + } + } + } + s.clear_hostescalations(); + for (auto& e : resolved) + s.mutable_hostescalations()->AddAllocated(e.release()); +} } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/hostescalation_helper.hh b/common/engine_conf/hostescalation_helper.hh index 18f7751b66f..088240a9c14 100644 --- a/common/engine_conf/hostescalation_helper.hh +++ b/common/engine_conf/hostescalation_helper.hh @@ -39,7 +39,12 @@ class hostescalation_helper : public message_helper { ~hostescalation_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; + static void expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& + m_hostgroups); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/hostgroup_helper.cc b/common/engine_conf/hostgroup_helper.cc index 8d994ab6432..73319019cc0 100644 --- a/common/engine_conf/hostgroup_helper.cc +++ b/common/engine_conf/hostgroup_helper.cc @@ -44,8 +44,7 @@ hostgroup_helper::hostgroup_helper(Hostgroup* obj) * @param key The key to parse. 
* @param value The value corresponding to the key */ -bool hostgroup_helper::hook(std::string_view key, - const std::string_view& value) { +bool hostgroup_helper::hook(std::string_view key, std::string_view value) { Hostgroup* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ diff --git a/common/engine_conf/hostgroup_helper.hh b/common/engine_conf/hostgroup_helper.hh index 93fb0d243ec..1c877d71b1c 100644 --- a/common/engine_conf/hostgroup_helper.hh +++ b/common/engine_conf/hostgroup_helper.hh @@ -37,7 +37,7 @@ class hostgroup_helper : public message_helper { ~hostgroup_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/message_helper.cc b/common/engine_conf/message_helper.cc index 5c575221ca2..91424cf5612 100644 --- a/common/engine_conf/message_helper.cc +++ b/common/engine_conf/message_helper.cc @@ -18,8 +18,6 @@ */ #include "common/engine_conf/message_helper.hh" -#include - using ::google::protobuf::Descriptor; using ::google::protobuf::FieldDescriptor; using ::google::protobuf::Reflection; @@ -431,7 +429,10 @@ bool message_helper::set(const std::string_view& key, set_changed(f->index()); return true; } + } else { + assert(22 == 23); } + break; case FieldDescriptor::TYPE_ENUM: { auto* v = f->enum_type()->FindValueByName( std::string(value.data(), value.size())); diff --git a/common/engine_conf/message_helper.hh b/common/engine_conf/message_helper.hh index 2ac92d48b89..74eb8652054 100644 --- a/common/engine_conf/message_helper.hh +++ b/common/engine_conf/message_helper.hh @@ -20,6 +20,7 @@ #ifndef CCE_CONFIGURATION_MESSAGE_HELPER_HH #define CCE_CONFIGURATION_MESSAGE_HELPER_HH #include +#include #include #include "common/engine_conf/state.pb.h" @@ -172,7 +173,7 @@ class message_helper { * @return True on success. */ virtual bool hook(std::string_view key [[maybe_unused]], - const std::string_view& value [[maybe_unused]]) { + std::string_view value [[maybe_unused]]) { return false; } virtual void check_validity(error_cnt& err [[maybe_unused]]) const {} @@ -251,6 +252,8 @@ class message_helper { retval = std::make_unique( static_cast(other)); break; + default: + break; } retval->_obj = obj; return retval; diff --git a/common/engine_conf/parser.cc b/common/engine_conf/parser.cc index 4948d0c7bee..4660126b037 100644 --- a/common/engine_conf/parser.cc +++ b/common/engine_conf/parser.cc @@ -17,13 +17,12 @@ * */ #include "parser.hh" -#include +#include "anomalydetection_helper.hh" +#include "com/centreon/common/file.hh" #include "com/centreon/exceptions/msg_fmt.hh" +#include "command_helper.hh" #include "common/engine_conf/state.pb.h" #include "common/log_v2/log_v2.hh" - -#include "anomalydetection_helper.hh" -#include "command_helper.hh" #include "connector_helper.hh" #include "contact_helper.hh" #include "contactgroup_helper.hh" @@ -51,28 +50,6 @@ using ::google::protobuf::FieldDescriptor; using ::google::protobuf::Message; using ::google::protobuf::Reflection; -/** - * @brief Reads the content of a text file and returns it in an std::string. - * - * @param file_path The file to read. - * - * @return The content as an std::string. 
- */ -static std::string read_file_content(const std::filesystem::path& file_path) { - std::ifstream in(file_path, std::ios::in); - std::string retval; - if (in) { - in.seekg(0, std::ios::end); - retval.resize(in.tellg()); - in.seekg(0, std::ios::beg); - in.read(&retval[0], retval.size()); - in.close(); - } else - throw msg_fmt("Parsing of resource file failed: can't open file '{}': {}", - file_path.string(), strerror(errno)); - return retval; -} - /** * Default constructor. * @@ -160,7 +137,7 @@ void parser::_parse_global_configuration(const std::string& path, State* pb_config) { _logger->info("Reading main configuration file '{}'.", path); - std::string content = read_file_content(path); + std::string content = common::read_file_content(path); pb_config->set_cfg_main(path); _current_line = 0; @@ -210,7 +187,7 @@ void parser::_parse_object_definitions(const std::string& path, State* pb_config) { _logger->info("Processing object config file '{}'", path); - std::string content = read_file_content(path); + std::string content = common::read_file_content(path); auto tab{absl::StrSplit(content, '\n')}; std::string ll; @@ -464,7 +441,7 @@ void parser::_parse_object_definitions(const std::string& path, void parser::_parse_resource_file(const std::string& path, State* pb_config) { _logger->info("Reading resource file '{}'", path); - std::string content = read_file_content(path); + std::string content = common::read_file_content(path); auto tab{absl::StrSplit(content, '\n')}; int current_line = 1; @@ -526,6 +503,14 @@ void parser::_resolve_template(State* pb_config, error_cnt& err) { _resolve_template(_pb_helper[&he], _pb_templates[message_helper::hostescalation]); + for (Hostgroup& hg : *pb_config->mutable_hostgroups()) + _resolve_template(_pb_helper[&hg], + _pb_templates[message_helper::hostgroup]); + + for (Servicegroup& sg : *pb_config->mutable_servicegroups()) + _resolve_template(_pb_helper[&sg], + _pb_templates[message_helper::servicegroup]); + for (const Command& c : pb_config->commands()) _pb_helper.at(&c)->check_validity(err); @@ -778,6 +763,28 @@ void parser::_merge(std::unique_ptr& msg_helper, lst->add_data(v); } else if (lst->data().empty()) *lst->mutable_data() = orig_lst->data(); + } else if (d && d->name() == "PairStringSet") { + PairStringSet* orig_pair = + static_cast(refl->MutableMessage(tmpl, f)); + PairStringSet* pair = + static_cast(refl->MutableMessage(msg, f)); + if (pair->additive()) { + for (auto& v : orig_pair->data()) { + bool found = false; + for (auto& s : *pair->mutable_data()) { + if (s.first() == v.first() && s.second() == v.second()) { + found = true; + break; + } + } + if (!found) + pair->add_data()->CopyFrom(v); + } + } else if (pair->data().empty()) + *pair->mutable_data() = orig_pair->data(); + } else { + refl->MutableMessage(msg, f)->CopyFrom( + refl->GetMessage(*tmpl, f)); } msg_helper->set_changed(f->index()); } break; diff --git a/common/engine_conf/parser.hh b/common/engine_conf/parser.hh index 35a3656e90f..c5245363495 100644 --- a/common/engine_conf/parser.hh +++ b/common/engine_conf/parser.hh @@ -19,9 +19,9 @@ #ifndef CCE_CONFIGURATION_PARSER_HH #define CCE_CONFIGURATION_PARSER_HH +#include #include "common/engine_conf/message_helper.hh" #include "state_helper.hh" -// #include "host.hh" namespace com::centreon::engine::configuration { @@ -62,17 +62,9 @@ class parser { */ pb_map_helper _pb_helper; - void _merge(std::unique_ptr& msg_helper, Message* tmpl); - void _cleanup(State* pb_config); - - public: - parser(); - parser(const parser&) = delete; - 
parser& operator=(const parser&) = delete; - ~parser() noexcept = default; - void parse(const std::string& path, State* config, error_cnt& err); + unsigned int _current_line; + std::string _current_path; - private: /** * Apply parse method into list. * @@ -94,9 +86,15 @@ class parser { void _resolve_template(State* pb_config, error_cnt& err); void _resolve_template(std::unique_ptr& msg_helper, const pb_map_object& tmpls); + void _merge(std::unique_ptr& msg_helper, Message* tmpl); + void _cleanup(State* pb_config); - unsigned int _current_line; - std::string _current_path; + public: + parser(); + parser(const parser&) = delete; + parser& operator=(const parser&) = delete; + ~parser() noexcept = default; + void parse(const std::string& path, State* config, error_cnt& err); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/service_helper.cc b/common/engine_conf/service_helper.cc index dc8ef517612..87dc006dacd 100644 --- a/common/engine_conf/service_helper.cc +++ b/common/engine_conf/service_helper.cc @@ -57,7 +57,7 @@ service_helper::service_helper(Service* obj) * @param key The key to parse. * @param value The value corresponding to the key */ -bool service_helper::hook(std::string_view key, const std::string_view& value) { +bool service_helper::hook(std::string_view key, std::string_view value) { Service* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ @@ -90,25 +90,17 @@ bool service_helper::hook(std::string_view key, const std::string_view& value) { return false; } obj->set_flap_detection_options(options); - return true; - } else if (key == "initial_state") { - ServiceStatus initial_state; - if (value == "o" || value == "ok") - initial_state = ServiceStatus::state_ok; - else if (value == "w" || value == "warning") - initial_state = ServiceStatus::state_warning; - else if (value == "u" || value == "unknown") - initial_state = ServiceStatus::state_unknown; - else if (value == "c" || value == "critical") - initial_state = ServiceStatus::state_critical; - else - return false; - obj->set_initial_state(initial_state); + set_changed(Service::descriptor() + ->FindFieldByName("flap_detection_options") + ->index()); return true; } else if (key == "notification_options") { uint16_t options(action_svc_none); if (fill_service_notification_options(&options, value)) { obj->set_notification_options(options); + set_changed(Service::descriptor() + ->FindFieldByName("notification_options") + ->index()); return true; } else return false; @@ -136,6 +128,8 @@ bool service_helper::hook(std::string_view key, const std::string_view& value) { else return false; } + set_changed( + Service::descriptor()->FindFieldByName("stalking_options")->index()); obj->set_stalking_options(options); return true; } else if (key == "category_tags") { @@ -235,7 +229,6 @@ void service_helper::_init() { action_svc_unknown | action_svc_critical); obj->set_freshness_threshold(0); obj->set_high_flap_threshold(0); - obj->set_initial_state(ServiceStatus::state_ok); obj->set_is_volatile(false); obj->set_low_flap_threshold(0); obj->set_max_check_attempts(3); @@ -283,4 +276,71 @@ bool service_helper::insert_customvariable(std::string_view key, new_cv->set_value(value.data(), value.size()); return true; } + +/** + * @brief Expand the Service object. + * + * @param s The configuration state to expand. + * @param err The error count object to update in case of errors. 
+ */ +void service_helper::expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map m_host, + absl::flat_hash_map sgs) { + // Browse all services. + for (auto& service_cfg : *s.mutable_services()) { + // Browse service groups. + for (auto& sg_name : service_cfg.servicegroups().data()) { + // Find service group. + auto found = sgs.find(sg_name); + if (found == sgs.end()) { + err.config_errors++; + throw msg_fmt( + "Could not add service '{}' of host '{}' to non-existing service " + "group '{}'", + service_cfg.service_description(), service_cfg.host_name(), + sg_name); + } + + // Add service to service members + fill_pair_string_group(found->second->mutable_members(), + service_cfg.host_name(), + service_cfg.service_description()); + } + + if (!service_cfg.host_id() || service_cfg.contacts().data().empty() || + service_cfg.contactgroups().data().empty() || + service_cfg.notification_interval() == 0 || + service_cfg.notification_period().empty() || + service_cfg.timezone().empty()) { + // Find host. + auto it = m_host.find(service_cfg.host_name()); + if (it == m_host.end()) { + err.config_errors++; + throw msg_fmt( + "Could not inherit special variables for service '{}': host '{}' " + "does not exist", + service_cfg.service_description(), service_cfg.host_name()); + } + + // Inherits variables. + if (!service_cfg.host_id()) + service_cfg.set_host_id(it->second.host_id()); + if (service_cfg.contacts().data().empty() && + service_cfg.contactgroups().data().empty()) { + service_cfg.mutable_contacts()->CopyFrom(it->second.contacts()); + service_cfg.mutable_contactgroups()->CopyFrom( + it->second.contactgroups()); + } + if (service_cfg.notification_interval() == 0) + service_cfg.set_notification_interval( + it->second.notification_interval()); + if (service_cfg.notification_period().empty()) + service_cfg.set_notification_period(it->second.notification_period()); + if (service_cfg.timezone().empty()) + service_cfg.set_timezone(it->second.timezone()); + } + } +} } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/service_helper.hh b/common/engine_conf/service_helper.hh index 3104c0b2601..94de9bdfa19 100644 --- a/common/engine_conf/service_helper.hh +++ b/common/engine_conf/service_helper.hh @@ -33,10 +33,15 @@ class service_helper : public message_helper { ~service_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; bool insert_customvariable(std::string_view key, std::string_view value) override; + static void expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map m_host, + absl::flat_hash_map sgs); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/servicedependency_helper.cc b/common/engine_conf/servicedependency_helper.cc index 91e9c44226d..e8b57a6a39f 100644 --- a/common/engine_conf/servicedependency_helper.cc +++ b/common/engine_conf/servicedependency_helper.cc @@ -19,7 +19,6 @@ #include "common/engine_conf/servicedependency_helper.hh" #include "com/centreon/exceptions/msg_fmt.hh" -#include "common/engine_conf/state.pb.h" using com::centreon::exceptions::msg_fmt; @@ -77,7 +76,7 @@ servicedependency_helper::servicedependency_helper(Servicedependency* obj) * @param value The value corresponding to the key */ bool servicedependency_helper::hook(std::string_view key, - const std::string_view& 
value) { + std::string_view value) { Servicedependency* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ @@ -196,4 +195,145 @@ void servicedependency_helper::_init() { obj->set_inherits_parent(false); obj->set_notification_failure_options(action_sd_none); } + +/** + * @brief Expand service dependencies. + * + * @param s The configuration state to expand. + * @param err The error count object to update in case of errors. + */ +void servicedependency_helper::expand( + State& s, + error_cnt& err [[maybe_unused]], + absl::flat_hash_map& hostgroups, + absl::flat_hash_map& + servicegroups) { + // Browse all dependencies. + std::list> expanded; + + for (auto& dep : s.servicedependencies()) { + // Expand service dependency instances. + if (dep.hosts().data().size() != 1 || !dep.hostgroups().data().empty() || + dep.service_description().data().size() != 1 || + !dep.servicegroups().data().empty() || + dep.dependent_hosts().data().size() != 1 || + !dep.dependent_hostgroups().data().empty() || + dep.dependent_service_description().data().size() != 1 || + !dep.dependent_servicegroups().data().empty() || + dep.dependency_type() == DependencyKind::unknown) { + // Expand depended services. + absl::flat_hash_set> + depended_services; + _expand_services(dep.hosts().data(), dep.hostgroups().data(), + dep.service_description().data(), + dep.servicegroups().data(), depended_services, + hostgroups, servicegroups); + + // Expand dependent services. + absl::flat_hash_set> + dependent_services; + _expand_services(dep.dependent_hosts().data(), + dep.dependent_hostgroups().data(), + dep.dependent_service_description().data(), + dep.dependent_servicegroups().data(), dependent_services, + hostgroups, servicegroups); + + // Browse all depended and dependent services. + for (auto& p1 : depended_services) + for (auto& p2 : dependent_services) { + // Create service dependency instance. + for (int32_t i = 1; i <= 2; i++) { + if (dep.dependency_type() == DependencyKind::unknown || + static_cast(dep.dependency_type()) == i) { + auto sdep = std::make_unique(); + sdep->CopyFrom(dep); + sdep->clear_hostgroups(); + sdep->clear_hosts(); + sdep->mutable_hosts()->add_data(p1.first); + sdep->clear_servicegroups(); + sdep->clear_service_description(); + sdep->mutable_service_description()->add_data(p1.second); + sdep->clear_dependent_hostgroups(); + sdep->clear_dependent_hosts(); + sdep->mutable_dependent_hosts()->add_data(p2.first); + sdep->clear_dependent_servicegroups(); + sdep->clear_dependent_service_description(); + sdep->mutable_dependent_service_description()->add_data( + p2.second); + if (i == 2) { + sdep->set_dependency_type(DependencyKind::execution_dependency); + sdep->set_notification_failure_options(0); + } else { + sdep->set_dependency_type( + DependencyKind::notification_dependency); + sdep->set_execution_failure_options(0); + } + expanded.push_back(std::move(sdep)); + } + } + } + } + } + + // Set expanded service dependencies in configuration state. + s.clear_servicedependencies(); + for (auto& e : expanded) + s.mutable_servicedependencies()->AddAllocated(e.release()); +} + +/** + * @brief Expand services. + * + * @param hst Hosts. + * @param hg Host groups. + * @param svc Service descriptions. + * @param sg Service groups. + * @param s Configuration state. + * @param expanded Expanded services. 
+ */ +void servicedependency_helper::_expand_services( + const ::google::protobuf::RepeatedPtrField& hst, + const ::google::protobuf::RepeatedPtrField& hg, + const ::google::protobuf::RepeatedPtrField& svc, + const ::google::protobuf::RepeatedPtrField& sg, + absl::flat_hash_set>& expanded, + absl::flat_hash_map& hostgroups, + absl::flat_hash_map& + servicegroups) { + // Expanded hosts. + absl::flat_hash_set all_hosts; + + // Base hosts. + all_hosts.insert(hst.begin(), hst.end()); + + // Host groups. + for (auto& hgn : hg) { + // Find host group + auto found = hostgroups.find(hgn); + if (found == hostgroups.end()) + throw msg_fmt("Could not resolve host group '{}'", hgn); + // Add host group members. + all_hosts.insert(found->second->members().data().begin(), + found->second->members().data().end()); + } + + // Hosts * services. + for (auto& h : all_hosts) + for (auto& s : svc) + expanded.insert({h, s}); + + // Service groups. + for (auto& sgn : sg) { + // Find service group. + auto found = servicegroups.find(sgn); + ; + if (found == servicegroups.end()) + throw msg_fmt("Could not resolve service group '{}'", sgn); + + // Add service group members. + for (auto& m : found->second->members().data()) + expanded.insert({m.first(), m.second()}); + } +} + } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/servicedependency_helper.hh b/common/engine_conf/servicedependency_helper.hh index affa3b1df08..e035df4e610 100644 --- a/common/engine_conf/servicedependency_helper.hh +++ b/common/engine_conf/servicedependency_helper.hh @@ -29,13 +29,28 @@ size_t servicedependency_key(const Servicedependency& sd); class servicedependency_helper : public message_helper { void _init(); + static void _expand_services( + const ::google::protobuf::RepeatedPtrField& hst, + const ::google::protobuf::RepeatedPtrField& hg, + const ::google::protobuf::RepeatedPtrField& svc, + const ::google::protobuf::RepeatedPtrField& sg, + absl::flat_hash_set>& expanded, + absl::flat_hash_map& hostgroups, + absl::flat_hash_map& + servicegroups); public: servicedependency_helper(Servicedependency* obj); ~servicedependency_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; + static void expand( + State& s, + error_cnt& err, + absl::flat_hash_map& hostgroups, + absl::flat_hash_map& + servicegroups); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/serviceescalation_helper.cc b/common/engine_conf/serviceescalation_helper.cc index 0247bb5be80..8d5daf2f3ef 100644 --- a/common/engine_conf/serviceescalation_helper.cc +++ b/common/engine_conf/serviceescalation_helper.cc @@ -19,7 +19,6 @@ #include "common/engine_conf/serviceescalation_helper.hh" #include "com/centreon/exceptions/msg_fmt.hh" -#include "common/engine_conf/state.pb.h" using com::centreon::exceptions::msg_fmt; @@ -63,7 +62,7 @@ serviceescalation_helper::serviceescalation_helper(Serviceescalation* obj) * @param key The key to parse. * @param value The value corresponding to the key */ bool serviceescalation_helper::hook(std::string_view key, - const std::string_view& value) { + std::string_view value) { Serviceescalation* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation...
*/ @@ -91,6 +90,8 @@ bool serviceescalation_helper::hook(std::string_view key, return false; } obj->set_escalation_options(options); + set_changed( + obj->descriptor()->FindFieldByName("escalation_options")->index()); return true; } else if (key == "contactgroups") { fill_string_group(obj->mutable_contactgroups(), value); @@ -149,4 +150,72 @@ void serviceescalation_helper::_init() { obj->set_last_notification(-2); obj->set_notification_interval(0); } + +/** + * @brief Expand the Serviceescalation object. + * + * @param s The configuration state to expand. + * @param err The error count object to update in case of errors. + */ +void serviceescalation_helper::expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& hostgroups, + absl::flat_hash_map& + servicegroups) { + std::list> resolved; + + for (auto& se : *s.mutable_serviceescalations()) { + /* A set of all the hosts related to this escalation */ + absl::flat_hash_set host_names; + for (auto& hname : se.hosts().data()) + host_names.insert(hname); + if (se.hostgroups().data().size() > 0) { + for (auto& hg_name : se.hostgroups().data()) { + auto found_hg = hostgroups.find(hg_name); + if (found_hg != hostgroups.end()) { + for (auto& h : found_hg->second->members().data()) + host_names.emplace(h); + } else { + err.config_errors++; + throw msg_fmt("Could not expand non-existing host group '{}'", + hg_name); + } + } + } + + /* A set of all the pairs (hostname, service-description) impacted by this + * escalation. */ + absl::flat_hash_set> expanded; + for (auto& hn : host_names) { + for (auto& sn : se.service_description().data()) + expanded.emplace(hn, sn); + } + + for (auto& sg_name : se.servicegroups().data()) { + auto found = servicegroups.find(sg_name); + if (found == servicegroups.end()) { + err.config_errors++; + throw msg_fmt("Could not resolve service group '{}'", sg_name); + } + + for (auto& m : found->second->members().data()) + expanded.emplace(m.first(), m.second()); + } + se.mutable_hostgroups()->clear_data(); + se.mutable_hosts()->clear_data(); + se.mutable_servicegroups()->clear_data(); + se.mutable_service_description()->clear_data(); + for (auto& p : expanded) { + resolved.emplace_back(std::make_unique()); + auto& e = resolved.back(); + e->CopyFrom(se); + fill_string_group(e->mutable_hosts(), p.first); + fill_string_group(e->mutable_service_description(), p.second); + } + } + s.clear_serviceescalations(); + for (auto& e : resolved) + s.mutable_serviceescalations()->AddAllocated(e.release()); +} } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/serviceescalation_helper.hh b/common/engine_conf/serviceescalation_helper.hh index b161bd27c06..2fab869f392 100644 --- a/common/engine_conf/serviceescalation_helper.hh +++ b/common/engine_conf/serviceescalation_helper.hh @@ -35,7 +35,13 @@ class serviceescalation_helper : public message_helper { ~serviceescalation_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; + static void expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& hostgroups, + absl::flat_hash_map& + servicegroups); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/servicegroup_helper.cc b/common/engine_conf/servicegroup_helper.cc index 0ff2c23560f..1b589fdbc0a 100644 --- 
a/common/engine_conf/servicegroup_helper.cc +++ b/common/engine_conf/servicegroup_helper.cc @@ -19,7 +19,6 @@ #include "common/engine_conf/servicegroup_helper.hh" #include "com/centreon/exceptions/msg_fmt.hh" -#include "common/engine_conf/state.pb.h" using com::centreon::exceptions::msg_fmt; @@ -45,8 +44,7 @@ servicegroup_helper::servicegroup_helper(Servicegroup* obj) * @param key The key to parse. * @param value The value corresponding to the key */ -bool servicegroup_helper::hook(std::string_view key, - const std::string_view& value) { +bool servicegroup_helper::hook(std::string_view key, std::string_view value) { Servicegroup* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ @@ -79,4 +77,69 @@ void servicegroup_helper::_init() { Servicegroup* obj = static_cast(mut_obj()); obj->mutable_obj()->set_register_(true); } + +/** + * @brief Expand the Servicegroup object. + * + * @param s The configuration state to expand. + * @param err The error count object to update in case of errors. + */ +void servicegroup_helper::expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& + m_servicegroups) { + // This set stores resolved service groups. + absl::flat_hash_set resolved; + + // Each servicegroup can contain servicegroups, that is to mean the services + // in the sub servicegroups are also in our servicegroup. + // So, we iterate through all the servicegroups defined in the configuration, + // and for each one if it has servicegroup members, we fill its service + // members with theirs and then we clear the servicegroup members. At that + // step, a servicegroup is considered as resolved. + for (auto& sg_conf : *s.mutable_servicegroups()) { + if (!resolved.contains(sg_conf.servicegroup_name())) { + _resolve_members(s, &sg_conf, resolved, m_servicegroups, err); + } + } +} + +/** + * @brief Resolve the members of a service group. + * + * @param s The configuration state. + * @param sg_conf The service group to resolve. + * @param resolved The set of resolved service groups. + * @param sg_by_name The map of service groups by name. + * @param err The error counter. 
+ */ +void servicegroup_helper::_resolve_members( + configuration::State& s, + configuration::Servicegroup* sg_conf, + absl::flat_hash_set& resolved, + const absl::flat_hash_map& + sg_by_name, + configuration::error_cnt& err) { + for (auto& sgm : sg_conf->servicegroup_members().data()) { + std::cout << "Resolving service group member sgm " << sgm << std::endl; + auto sgm_conf = sg_by_name.find(sgm); + if (sgm_conf == sg_by_name.end()) { + err.config_errors++; + throw msg_fmt( + "Could not add non-existing service group member '{}' to service " + "group '{}'\n", + sgm, sg_conf->servicegroup_name()); + } + if (!resolved.contains(sgm_conf->second->servicegroup_name())) + _resolve_members(s, sgm_conf->second, resolved, sg_by_name, err); + + for (auto& sm : sgm_conf->second->members().data()) { + fill_pair_string_group(sg_conf->mutable_members(), sm.first(), + sm.second()); + } + } + sg_conf->clear_servicegroup_members(); + resolved.emplace(sg_conf->servicegroup_name()); +} } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/servicegroup_helper.hh b/common/engine_conf/servicegroup_helper.hh index 39230a3b9a0..0bdb8f2202f 100644 --- a/common/engine_conf/servicegroup_helper.hh +++ b/common/engine_conf/servicegroup_helper.hh @@ -27,13 +27,25 @@ namespace com::centreon::engine::configuration { class servicegroup_helper : public message_helper { void _init(); + static void _resolve_members( + configuration::State& s, + configuration::Servicegroup* sg_conf, + absl::flat_hash_set& resolved, + const absl::flat_hash_map& + sg_by_name, + configuration::error_cnt& err); public: servicegroup_helper(Servicegroup* obj); ~servicegroup_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; + static void expand( + configuration::State& s, + configuration::error_cnt& err, + absl::flat_hash_map& + m_servicegroups); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/severity_helper.cc b/common/engine_conf/severity_helper.cc index 086415045d0..3ec0e77be6d 100644 --- a/common/engine_conf/severity_helper.cc +++ b/common/engine_conf/severity_helper.cc @@ -19,7 +19,6 @@ #include "common/engine_conf/severity_helper.hh" #include "com/centreon/exceptions/msg_fmt.hh" -#include "common/engine_conf/state.pb.h" using com::centreon::exceptions::msg_fmt; @@ -50,8 +49,7 @@ severity_helper::severity_helper(Severity* obj) * @param key The key to parse. * @param value The value corresponding to the key */ -bool severity_helper::hook(std::string_view key, - const std::string_view& value) { +bool severity_helper::hook(std::string_view key, std::string_view value) { Severity* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... 
*/ diff --git a/common/engine_conf/severity_helper.hh b/common/engine_conf/severity_helper.hh index ef54828d091..c3d3c9dd25c 100644 --- a/common/engine_conf/severity_helper.hh +++ b/common/engine_conf/severity_helper.hh @@ -34,7 +34,7 @@ class severity_helper : public message_helper { ~severity_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/state.proto b/common/engine_conf/state.proto index a773c32c128..9736901d68a 100644 --- a/common/engine_conf/state.proto +++ b/common/engine_conf/state.proto @@ -466,38 +466,36 @@ message Anomalydetection { string host_name = 22; string icon_image = 23; string icon_image_alt = 24; - ServiceStatus initial_state = - 25; // - Default value: ServiceStatus::state_ok - bool is_volatile = 26; // Optional - Default value: false - uint32 low_flap_threshold = 27; // Optional - Default value: 0 - uint32 max_check_attempts = 28; // Optional - Default value: 3 - string notes = 29; - string notes_url = 30; - bool notifications_enabled = 31; // Optional - Default value: true - uint32 notification_interval = 32; // Optional - Default value: 0 + bool is_volatile = 25; // Optional - Default value: false + uint32 low_flap_threshold = 26; // Optional - Default value: 0 + uint32 max_check_attempts = 27; // Optional - Default value: 3 + string notes = 28; + string notes_url = 29; + bool notifications_enabled = 30; // Optional - Default value: true + uint32 notification_interval = 31; // Optional - Default value: 0 uint32 notification_options = - 33; // Optional - Default value: action_svc_ok | action_svc_warning + 32; // Optional - Default value: action_svc_ok | action_svc_warning // |action_svc_critical | action_svc_unknown |action_svc_flapping | // action_svc_downtime - optional string notification_period = 34; // Optional - bool obsess_over_service = 35; // Optional - Default value: true - bool process_perf_data = 36; // Optional - Default value: true - bool retain_nonstatus_information = 37; // Optional - Default value: true - bool retain_status_information = 38; // Optional - Default value: true - uint32 retry_interval = 39; // Optional - Default value: 1 - optional uint32 recovery_notification_delay = 40; // Optional - StringSet servicegroups = 41; - string service_description = 42; - uint64 host_id = 43; - uint64 service_id = 44; - uint64 internal_id = 45; - uint64 dependent_service_id = 46; - uint32 stalking_options = 47; // Optional - Default value: action_svc_none - optional string timezone = 48; // Optional - optional uint64 severity_id = 49; // Optional - optional uint64 icon_id = 50; // Optional - repeated PairUint64_32 tags = 51; - double sensitivity = 52; + optional string notification_period = 33; // Optional + bool obsess_over_service = 34; // Optional - Default value: true + bool process_perf_data = 35; // Optional - Default value: true + bool retain_nonstatus_information = 36; // Optional - Default value: true + bool retain_status_information = 37; // Optional - Default value: true + uint32 retry_interval = 38; // Optional - Default value: 1 + optional uint32 recovery_notification_delay = 39; // Optional + StringSet servicegroups = 40; + string service_description = 41; + uint64 host_id = 42; + uint64 service_id = 43; + uint64 internal_id = 44; + uint64 dependent_service_id = 45; + uint32 
stalking_options = 46; // Optional - Default value: action_svc_none + optional string timezone = 47; // Optional + optional uint64 severity_id = 48; // Optional + optional uint64 icon_id = 49; // Optional + repeated PairUint64_32 tags = 50; + double sensitivity = 51; } message Command { @@ -578,31 +576,30 @@ message Host { string host_name = 27; string icon_image = 28; string icon_image_alt = 29; - HostStatus initial_state = 30; // - Default value: HostStatus::state_up - uint32 low_flap_threshold = 31; // Optional - Default value: 0 - uint32 max_check_attempts = 32; // Optional - Default value: 3 - string notes = 33; - string notes_url = 34; - bool notifications_enabled = 35; // Optional - Default value: true - uint32 notification_interval = 36; // Optional - Default value: 0 + uint32 low_flap_threshold = 30; // Optional - Default value: 0 + uint32 max_check_attempts = 31; // Optional - Default value: 3 + string notes = 32; + string notes_url = 33; + bool notifications_enabled = 34; // Optional - Default value: true + uint32 notification_interval = 35; // Optional - Default value: 0 uint32 notification_options = - 37; // Optional - Default value: action_hst_up | action_hst_down + 36; // Optional - Default value: action_hst_up | action_hst_down // |action_hst_unreachable |action_hst_flapping |action_hst_downtime - string notification_period = 38; - bool obsess_over_host = 39; // Optional - Default value: true - StringSet parents = 40; - bool process_perf_data = 41; // Optional - Default value: true - bool retain_nonstatus_information = 42; // Optional - Default value: true - bool retain_status_information = 43; // Optional - Default value: true - uint32 retry_interval = 44; // Optional - Default value: 1 - optional uint32 recovery_notification_delay = 45; // Optional - uint32 stalking_options = 46; // Optional - Default value: action_hst_none - string statusmap_image = 47; - optional string timezone = 48; // Optional - string vrml_image = 49; - optional uint64 severity_id = 50; // Optional - optional uint64 icon_id = 51; // Optional - repeated PairUint64_32 tags = 52; + string notification_period = 37; + bool obsess_over_host = 38; // Optional - Default value: true + StringSet parents = 39; + bool process_perf_data = 40; // Optional - Default value: true + bool retain_nonstatus_information = 41; // Optional - Default value: true + bool retain_status_information = 42; // Optional - Default value: true + uint32 retry_interval = 43; // Optional - Default value: 1 + optional uint32 recovery_notification_delay = 44; // Optional + uint32 stalking_options = 45; // Optional - Default value: action_hst_none + string statusmap_image = 46; + optional string timezone = 47; // Optional + string vrml_image = 48; + optional uint64 severity_id = 49; // Optional + optional uint64 icon_id = 50; // Optional + repeated PairUint64_32 tags = 51; } message Hostdependency { @@ -670,35 +667,33 @@ message Service { string host_name = 22; string icon_image = 23; string icon_image_alt = 24; - ServiceStatus initial_state = - 25; // - Default value: ServiceStatus::state_ok - bool is_volatile = 26; // Optional - Default value: false - uint32 low_flap_threshold = 27; // Optional - Default value: 0 - uint32 max_check_attempts = 28; // Optional - Default value: 3 - string notes = 29; - string notes_url = 30; - bool notifications_enabled = 31; // Optional - Default value: true - uint32 notification_interval = 32; // Optional - Default value: 0 + bool is_volatile = 25; // Optional - Default value: false + uint32 
low_flap_threshold = 26; // Optional - Default value: 0 + uint32 max_check_attempts = 27; // Optional - Default value: 3 + string notes = 28; + string notes_url = 29; + bool notifications_enabled = 30; // Optional - Default value: true + uint32 notification_interval = 31; // Optional - Default value: 0 uint32 notification_options = - 33; // Optional - Default value: action_svc_ok | action_svc_warning | + 32; // Optional - Default value: action_svc_ok | action_svc_warning | // action_svc_critical | action_svc_unknown |action_svc_flapping | // action_svc_downtime - optional string notification_period = 34; // Optional - bool obsess_over_service = 35; // Optional - Default value: true - bool process_perf_data = 36; // Optional - Default value: true - bool retain_nonstatus_information = 37; // Optional - Default value: true - bool retain_status_information = 38; // Optional - Default value: true - uint32 retry_interval = 39; // Optional - Default value: 1 - optional uint32 recovery_notification_delay = 40; // Optional - StringSet servicegroups = 41; - string service_description = 42; - uint64 host_id = 43; - uint64 service_id = 44; - uint32 stalking_options = 45; // Optional - Default value: action_svc_none - optional string timezone = 46; // Optional - optional uint64 severity_id = 47; // Optional - optional uint64 icon_id = 48; // Optional - repeated PairUint64_32 tags = 49; + optional string notification_period = 33; // Optional + bool obsess_over_service = 34; // Optional - Default value: true + bool process_perf_data = 35; // Optional - Default value: true + bool retain_nonstatus_information = 36; // Optional - Default value: true + bool retain_status_information = 37; // Optional - Default value: true + uint32 retry_interval = 38; // Optional - Default value: 1 + optional uint32 recovery_notification_delay = 39; // Optional + StringSet servicegroups = 40; + string service_description = 41; + uint64 host_id = 42; + uint64 service_id = 43; + uint32 stalking_options = 44; // Optional - Default value: action_svc_none + optional string timezone = 45; // Optional + optional uint64 severity_id = 46; // Optional + optional uint64 icon_id = 47; // Optional + repeated PairUint64_32 tags = 48; } message Servicedependency { diff --git a/common/engine_conf/state_helper.cc b/common/engine_conf/state_helper.cc index a48afd55e44..c2204e86ed2 100644 --- a/common/engine_conf/state_helper.cc +++ b/common/engine_conf/state_helper.cc @@ -17,11 +17,19 @@ * */ #include "common/engine_conf/state_helper.hh" -#include #include #include #include "com/centreon/engine/events/sched_info.hh" #include "com/centreon/exceptions/msg_fmt.hh" +#include "common/engine_conf/contact_helper.hh" +#include "common/engine_conf/contactgroup_helper.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/hostdependency_helper.hh" +#include "common/engine_conf/hostescalation_helper.hh" +#include "common/engine_conf/service_helper.hh" +#include "common/engine_conf/servicedependency_helper.hh" +#include "common/engine_conf/serviceescalation_helper.hh" +#include "common/engine_conf/servicegroup_helper.hh" using com::centreon::exceptions::msg_fmt; using ::google::protobuf::Descriptor; @@ -67,7 +75,7 @@ state_helper::state_helper(State* obj) * @param key The key to parse. 
* @param value The value corresponding to the key */ -bool state_helper::hook(std::string_view key, const std::string_view& value) { +bool state_helper::hook(std::string_view key, std::string_view value) { State* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ @@ -75,13 +83,15 @@ bool state_helper::hook(std::string_view key, const std::string_view& value) { if (key.substr(0, 10) == "log_level_") { if (value == "off" || value == "critical" || value == "error" || - value == "warning" || value == "info" || value == "debug" || - value == "trace") { + value == "err" || value == "warning" || value == "info" || + value == "debug" || value == "trace") { + if (value == "err") + value = "error"; return set_global(key, value); } else throw msg_fmt( "Log level '{}' has value '{}' but it cannot be a different string " - "than off, critical, error, warning, info, debug or trace", + "than off, critical, error, err, warning, info, debug or trace", key, value); } else if (key == "date_format") { if (value == "euro") @@ -431,7 +441,10 @@ bool state_helper::set_global(const std::string_view& key, fill_string_group(lst, value); return true; } + } else { + assert(124 == 123); } + break; default: return false; } @@ -489,4 +502,100 @@ bool state_helper::apply_extended_conf( } return retval; } + +/** + * @brief Expand configuration objects. + * + * @param pb_config The protobuf configuration state to expand. + * @param err The error count object to update in case of errors. + */ +void state_helper::expand(configuration::error_cnt& err) { + configuration::State& pb_config = *static_cast(mut_obj()); + + absl::flat_hash_map m_host; + for (auto& h : pb_config.hosts()) { + m_host.emplace(h.host_name(), h); + } + + absl::flat_hash_map + m_contactgroups; + for (auto& cg : *pb_config.mutable_contactgroups()) { + m_contactgroups.emplace(cg.contactgroup_name(), &cg); + } + + absl::flat_hash_map m_hostgroups; + for (auto& hg : *pb_config.mutable_hostgroups()) { + m_hostgroups.emplace(hg.hostgroup_name(), &hg); + } + + absl::flat_hash_map + m_servicegroups; + for (auto& sg : *pb_config.mutable_servicegroups()) + m_servicegroups.emplace(sg.servicegroup_name(), &sg); + + // Expand contacts + contact_helper::expand(pb_config, err, m_contactgroups); + // Expand contactgroups + contactgroup_helper::expand(pb_config, err, m_contactgroups); + // Expand hosts + host_helper::expand(pb_config, err, m_hostgroups); + // Expand services + service_helper::expand(pb_config, err, m_host, m_servicegroups); + + // Expand servicegroups + servicegroup_helper::expand(pb_config, err, m_servicegroups); + + // Expand hostdependencies. + hostdependency_helper::expand(pb_config, err, m_hostgroups); + // Expand servicedependencies. + servicedependency_helper::expand(pb_config, err, m_hostgroups, + m_servicegroups); + + // Expand hostescalations + hostescalation_helper::expand(pb_config, err, m_hostgroups); + // Expand serviceescalations + serviceescalation_helper::expand(pb_config, err, m_hostgroups, + m_servicegroups); + // Expand custom variables + state_helper::_expand_cv(pb_config); +} + +void state_helper::_expand_cv(configuration::State& s) { + absl::flat_hash_set cvs; + for (auto& cv : s.macros_filter().data()) + cvs.emplace(cv); + + // Browse all anomalydetections. + for (auto& ad_cfg : *s.mutable_anomalydetections()) { + // Should custom variables be sent to broker ? 
+ for (auto& cv : *ad_cfg.mutable_customvariables()) { + if (!s.enable_macros_filter() || cvs.contains(cv.name())) + cv.set_is_sent(true); + } + } + // Browse all contacts. + for (auto& c : *s.mutable_contacts()) { + // Should custom variables be sent to broker ? + for (auto& cv : *c.mutable_customvariables()) { + if (!s.enable_macros_filter() || cvs.contains(cv.name())) + cv.set_is_sent(true); + } + } + // Browse all hosts. + for (auto& host_cfg : *s.mutable_hosts()) { + // Should custom variables be sent to broker ? + for (auto& cv : *host_cfg.mutable_customvariables()) { + if (!s.enable_macros_filter() || cvs.contains(cv.name())) + cv.set_is_sent(true); + } + } + // Browse all services. + for (auto& service_cfg : *s.mutable_services()) { + // Should custom variables be sent to broker ? + for (auto& cv : *service_cfg.mutable_customvariables()) { + if (!s.enable_macros_filter() || cvs.contains(cv.name())) + cv.set_is_sent(true); + } + } +} } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/state_helper.hh b/common/engine_conf/state_helper.hh index 112fab8ac22..5a89dc216ee 100644 --- a/common/engine_conf/state_helper.hh +++ b/common/engine_conf/state_helper.hh @@ -28,16 +28,18 @@ namespace com::centreon::engine::configuration { class state_helper : public message_helper { void _init(); + static void _expand_cv(configuration::State& s); public: state_helper(State* obj); ~state_helper() noexcept = default; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; bool apply_extended_conf(const std::string& file_path, const rapidjson::Document& json_doc, const std::shared_ptr& logger); bool set_global(const std::string_view& key, const std::string_view& value); + void expand(configuration::error_cnt& err); }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/tag_helper.cc b/common/engine_conf/tag_helper.cc index bb5cfecc6d4..e83b762674c 100644 --- a/common/engine_conf/tag_helper.cc +++ b/common/engine_conf/tag_helper.cc @@ -47,7 +47,7 @@ tag_helper::tag_helper(Tag* obj) * @param key The key to parse. * @param value The value corresponding to the key */ -bool tag_helper::hook(std::string_view key, const std::string_view& value) { +bool tag_helper::hook(std::string_view key, std::string_view value) { Tag* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ diff --git a/common/engine_conf/tag_helper.hh b/common/engine_conf/tag_helper.hh index 2b1e02fe93c..91d09a49c9e 100644 --- a/common/engine_conf/tag_helper.hh +++ b/common/engine_conf/tag_helper.hh @@ -33,7 +33,7 @@ class tag_helper : public message_helper { ~tag_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; }; } // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/timeperiod_helper.cc b/common/engine_conf/timeperiod_helper.cc index 63e77406b88..e64a0753eba 100644 --- a/common/engine_conf/timeperiod_helper.cc +++ b/common/engine_conf/timeperiod_helper.cc @@ -46,8 +46,7 @@ timeperiod_helper::timeperiod_helper(Timeperiod* obj) * @param key The key to parse. 
* @param value The value corresponding to the key */ -bool timeperiod_helper::hook(std::string_view key, - const std::string_view& value) { +bool timeperiod_helper::hook(std::string_view key, std::string_view value) { Timeperiod* obj = static_cast(mut_obj()); /* Since we use key to get back the good key value, it is faster to give key * by copy to the method. We avoid one key allocation... */ diff --git a/common/engine_conf/timeperiod_helper.hh b/common/engine_conf/timeperiod_helper.hh index e56c999bbb9..cff0cf4d83b 100644 --- a/common/engine_conf/timeperiod_helper.hh +++ b/common/engine_conf/timeperiod_helper.hh @@ -42,7 +42,7 @@ class timeperiod_helper : public message_helper { ~timeperiod_helper() noexcept = default; void check_validity(error_cnt& err) const override; - bool hook(std::string_view key, const std::string_view& value) override; + bool hook(std::string_view key, std::string_view value) override; }; std::string daterange_to_str(const Daterange& dr); diff --git a/common/engine_legacy_conf/CMakeLists.txt b/common/engine_legacy_conf/CMakeLists.txt index f7fb36ea749..4364dc0b476 100644 --- a/common/engine_legacy_conf/CMakeLists.txt +++ b/common/engine_legacy_conf/CMakeLists.txt @@ -44,7 +44,7 @@ if(LEGACY_ENGINE) tag.cc timeperiod.cc) - add_dependencies(engine_legacy_conf pb_neb_lib pb_bam_lib) + add_dependencies(engine_legacy_conf pb_neb_lib pb_common_lib pb_bam_lib pb_bam_state_lib) include_directories(${CMAKE_SOURCE_DIR}/common/inc) target_precompile_headers(engine_legacy_conf PRIVATE diff --git a/common/engine_legacy_conf/anomalydetection.cc b/common/engine_legacy_conf/anomalydetection.cc index b6e1ef13322..382642eab7d 100644 --- a/common/engine_legacy_conf/anomalydetection.cc +++ b/common/engine_legacy_conf/anomalydetection.cc @@ -18,10 +18,6 @@ */ #include "anomalydetection.hh" -#include -#include -#include -#include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon; using namespace com::centreon::engine::configuration; @@ -58,7 +54,6 @@ std::unordered_map const {"action_url", SETTER(std::string const&, _set_action_url)}, {"icon_image", SETTER(std::string const&, _set_icon_image)}, {"icon_image_alt", SETTER(std::string const&, _set_icon_image_alt)}, - {"initial_state", SETTER(std::string const&, _set_initial_state)}, {"max_check_attempts", SETTER(unsigned int, _set_max_check_attempts)}, {"check_interval", SETTER(unsigned int, _set_check_interval)}, {"normal_check_interval", SETTER(unsigned int, _set_check_interval)}, @@ -116,7 +111,6 @@ static unsigned short const default_flap_detection_options( anomalydetection::unknown | anomalydetection::critical); static unsigned int const default_freshness_threshold(0); static unsigned int const default_high_flap_threshold(0); -static unsigned int const default_initial_state(broker::Service_State_OK); static bool const default_is_volatile(false); static unsigned int const default_low_flap_threshold(0); static unsigned int const default_max_check_attempts(3); @@ -151,7 +145,6 @@ anomalydetection::anomalydetection() _flap_detection_options(default_flap_detection_options), _freshness_threshold(default_freshness_threshold), _high_flap_threshold(default_high_flap_threshold), - _initial_state(default_initial_state), _is_volatile(default_is_volatile), _low_flap_threshold(default_low_flap_threshold), _max_check_attempts(default_max_check_attempts), @@ -202,7 +195,6 @@ anomalydetection::anomalydetection(anomalydetection const& other) _host_name(other._host_name), _icon_image(other._icon_image), 
_icon_image_alt(other._icon_image_alt), - _initial_state(other._initial_state), _is_volatile(other._is_volatile), _low_flap_threshold(other._low_flap_threshold), _max_check_attempts(other._max_check_attempts), @@ -269,7 +261,6 @@ anomalydetection& anomalydetection::operator=(anomalydetection const& other) { _host_name = other._host_name; _icon_image = other._icon_image; _icon_image_alt = other._icon_image_alt; - _initial_state = other._initial_state; _is_volatile = other._is_volatile; _low_flap_threshold = other._low_flap_threshold; _max_check_attempts = other._max_check_attempts; @@ -451,12 +442,6 @@ bool anomalydetection::operator==( "equality => icon_image_alt don't match"); return false; } - if (_initial_state != other._initial_state) { - _logger->debug( - "configuration::anomalydetection::" - "equality => initial_state don't match"); - return false; - } if (_is_volatile != other._is_volatile) { _logger->debug( "configuration::anomalydetection::" @@ -696,8 +681,6 @@ bool anomalydetection::operator<(anomalydetection const& other) const noexcept { return _icon_image < other._icon_image; else if (_icon_image_alt != other._icon_image_alt) return _icon_image_alt < other._icon_image_alt; - else if (_initial_state != other._initial_state) - return _initial_state < other._initial_state; else if (_is_volatile != other._is_volatile) return _is_volatile < other._is_volatile; else if (_low_flap_threshold != other._low_flap_threshold) @@ -752,20 +735,26 @@ void anomalydetection::check_validity(error_cnt& err) const { if (_service_description.empty()) throw msg_fmt( "Service has no description (property 'service_description')"); - if (_host_name.empty()) + if (_host_name.empty()) { + err.config_errors++; throw msg_fmt( "Service '{}' is not attached to any host (property 'host_name')", _service_description); - if (_metric_name.empty()) + } + if (_metric_name.empty()) { + err.config_errors++; throw msg_fmt( "Anomaly detection service '{}' has no metric name specified (property " "'metric_name')", _service_description); - if (_thresholds_file.empty()) + } + if (_thresholds_file.empty()) { + err.config_errors++; throw msg_fmt( "Anomaly detection service '{}' has no thresholds file specified " "(property 'thresholds_file')", _service_description); + } } /** @@ -812,7 +801,6 @@ void anomalydetection::merge(object const& obj) { MRG_DEFAULT(_host_name); MRG_DEFAULT(_icon_image); MRG_DEFAULT(_icon_image_alt); - MRG_OPTION(_initial_state); MRG_OPTION(_is_volatile); MRG_OPTION(_low_flap_threshold); MRG_OPTION(_max_check_attempts); @@ -1117,15 +1105,6 @@ std::string const& anomalydetection::icon_image_alt() const noexcept { return _icon_image_alt; } -/** - * Get initial_state. - * - * @return The initial_state. - */ -unsigned int anomalydetection::initial_state() const noexcept { - return _initial_state; -} - /** * Get is_volatile. * @@ -1713,29 +1692,6 @@ bool anomalydetection::_set_icon_image_alt(std::string const& value) { return true; } -/** - * Set initial_state value. - * - * @param[in] value The new initial_state value. - * - * @return True on success, otherwise false. 
- */ -bool anomalydetection::_set_initial_state(std::string const& value) { - std::string_view data(value); - data = absl::StripAsciiWhitespace(data); - if (data == "o" || data == "ok") - _initial_state = broker::Service_State_OK; - else if (data == "w" || data == "warning") - _initial_state = broker::Service_State_WARNING; - else if (data == "u" || data == "unknown") - _initial_state = broker::Service_State_UNKNOWN; - else if (data == "c" || data == "critical") - _initial_state = broker::Service_State_CRITICAL; - else - return false; - return true; -} - /** * Set is_volatile value. * diff --git a/common/engine_legacy_conf/anomalydetection.hh b/common/engine_legacy_conf/anomalydetection.hh index 988768cc699..4ba2488b23a 100644 --- a/common/engine_legacy_conf/anomalydetection.hh +++ b/common/engine_legacy_conf/anomalydetection.hh @@ -87,7 +87,6 @@ class anomalydetection : public object { bool set_host_id(uint64_t id); std::string const& icon_image() const noexcept; std::string const& icon_image_alt() const noexcept; - unsigned int initial_state() const noexcept; bool is_volatile() const noexcept; unsigned int low_flap_threshold() const noexcept; unsigned int max_check_attempts() const noexcept; @@ -152,7 +151,6 @@ class anomalydetection : public object { bool _set_host_name(std::string const& value); bool _set_icon_image(std::string const& value); bool _set_icon_image_alt(std::string const& value); - bool _set_initial_state(std::string const& value); bool _set_is_volatile(bool value); bool _set_low_flap_threshold(unsigned int value); bool _set_max_check_attempts(unsigned int value); @@ -201,7 +199,6 @@ class anomalydetection : public object { std::string _host_name; std::string _icon_image; std::string _icon_image_alt; - opt _initial_state; opt _is_volatile; opt _low_flap_threshold; opt _max_check_attempts; diff --git a/common/engine_legacy_conf/command.cc b/common/engine_legacy_conf/command.cc index 77a1f0bb75c..2ea05eb75d3 100644 --- a/common/engine_legacy_conf/command.cc +++ b/common/engine_legacy_conf/command.cc @@ -18,7 +18,6 @@ * */ #include "command.hh" -#include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon; using namespace com::centreon::engine::configuration; diff --git a/common/engine_legacy_conf/connector.cc b/common/engine_legacy_conf/connector.cc index 95ed88ce3aa..3df0239d8d3 100644 --- a/common/engine_legacy_conf/connector.cc +++ b/common/engine_legacy_conf/connector.cc @@ -18,7 +18,6 @@ * */ #include "connector.hh" -#include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon; using namespace com::centreon::engine::configuration; diff --git a/common/engine_legacy_conf/contact.cc b/common/engine_legacy_conf/contact.cc index 0f5577669a9..83d724312ce 100644 --- a/common/engine_legacy_conf/contact.cc +++ b/common/engine_legacy_conf/contact.cc @@ -557,6 +557,10 @@ bool contact::_set_contactgroups(std::string const& value) { */ bool contact::_set_contact_name(std::string const& value) { _contact_name = value; + // if alias is empty we take the contact name, better than taking template + // alias + if (_alias.empty()) + _alias = value; return true; } diff --git a/common/engine_legacy_conf/contactgroup.hh b/common/engine_legacy_conf/contactgroup.hh index 8d3e2d14575..536b75fb15c 100644 --- a/common/engine_legacy_conf/contactgroup.hh +++ b/common/engine_legacy_conf/contactgroup.hh @@ -19,7 +19,6 @@ #ifndef CCE_CONFIGURATION_CONTACTGROUP_HH #define CCE_CONFIGURATION_CONTACTGROUP_HH -#include "com/centreon/common/opt.hh" #include "group.hh" #include 
"object.hh" diff --git a/common/engine_legacy_conf/host.cc b/common/engine_legacy_conf/host.cc index e64abf9e8f6..e16a569a321 100644 --- a/common/engine_legacy_conf/host.cc +++ b/common/engine_legacy_conf/host.cc @@ -19,10 +19,7 @@ */ #include "host.hh" -#include -#include #include "bbdo/neb.pb.h" -#include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon; using namespace com::centreon::engine::configuration; @@ -56,7 +53,6 @@ std::unordered_map const host::_setters{ {"vrml_image", SETTER(std::string const&, _set_vrml_image)}, {"gd2_image", SETTER(std::string const&, _set_statusmap_image)}, {"statusmap_image", SETTER(std::string const&, _set_statusmap_image)}, - {"initial_state", SETTER(std::string const&, _set_initial_state)}, {"check_interval", SETTER(unsigned int, _set_check_interval)}, {"normal_check_interval", SETTER(unsigned int, _set_check_interval)}, {"retry_interval", SETTER(unsigned int, _set_retry_interval)}, @@ -112,7 +108,6 @@ static unsigned short const default_flap_detection_options(host::up | host::unreachable); static unsigned int const default_freshness_threshold(0); static unsigned int const default_high_flap_threshold(0); -static unsigned short const default_initial_state(broker::Host_State_UP); static unsigned int const default_low_flap_threshold(0); static unsigned int const default_max_check_attempts(3); static bool const default_notifications_enabled(true); @@ -148,7 +143,6 @@ host::host(host::key_type const& key) _high_flap_threshold(default_high_flap_threshold), _host_id(key), _host_name(""), - _initial_state(default_initial_state), _low_flap_threshold(default_low_flap_threshold), _max_check_attempts(default_max_check_attempts), _notifications_enabled(default_notifications_enabled), @@ -211,7 +205,6 @@ host& host::operator=(host const& other) { _host_name = other._host_name; _icon_image = other._icon_image; _icon_image_alt = other._icon_image_alt; - _initial_state = other._initial_state; _low_flap_threshold = other._low_flap_threshold; _max_check_attempts = other._max_check_attempts; _notes = other._notes; @@ -270,7 +263,6 @@ bool host::operator==(host const& other) const noexcept { _hostgroups == other._hostgroups && _host_id == other._host_id && _host_name == other._host_name && _icon_image == other._icon_image && _icon_image_alt == other._icon_image_alt && - _initial_state == other._initial_state && _low_flap_threshold == other._low_flap_threshold && _max_check_attempts == other._max_check_attempts && _notes == other._notes && _notes_url == other._notes_url && @@ -369,8 +361,6 @@ bool host::operator<(host const& other) const noexcept { return _icon_image < other._icon_image; else if (_icon_image_alt != other._icon_image_alt) return _icon_image_alt < other._icon_image_alt; - else if (_initial_state != other._initial_state) - return _initial_state < other._initial_state; else if (_low_flap_threshold != other._low_flap_threshold) return _low_flap_threshold < other._low_flap_threshold; else if (_max_check_attempts != other._max_check_attempts) @@ -806,15 +796,6 @@ std::string const& host::icon_image_alt() const noexcept { return _icon_image_alt; } -/** - * Get initial_state. - * - * @return The initial_state. - */ -unsigned int host::initial_state() const noexcept { - return _initial_state; -} - /** * Get low_flap_threshold. * @@ -1390,27 +1371,6 @@ bool host::_set_icon_image_alt(std::string const& value) { return true; } -/** - * Set initial_state value. - * - * @param[in] value The new initial_state value. 
- * - * @return True on success, otherwise false. - */ -bool host::_set_initial_state(std::string const& value) { - std::string_view data(value); - data = absl::StripAsciiWhitespace(data); - if (data == "o" || data == "up") - _initial_state = broker::Host_State_UP; - else if (data == "d" || data == "down") - _initial_state = broker::Host_State_DOWN; - else if (data == "u" || data == "unreachable") - _initial_state = broker::Host_State_UNREACHABLE; - else - return false; - return true; -} - /** * Set low_flap_threshold value. * diff --git a/common/engine_legacy_conf/host.hh b/common/engine_legacy_conf/host.hh index 1c5d61b0632..d07f30db494 100644 --- a/common/engine_legacy_conf/host.hh +++ b/common/engine_legacy_conf/host.hh @@ -86,7 +86,6 @@ class host : public object { std::string const& host_name() const noexcept; std::string const& icon_image() const noexcept; std::string const& icon_image_alt() const noexcept; - unsigned int initial_state() const noexcept; unsigned int low_flap_threshold() const noexcept; unsigned int max_check_attempts() const noexcept; std::string const& notes() const noexcept; @@ -142,7 +141,6 @@ class host : public object { bool _set_hostgroups(std::string const& value); bool _set_icon_image(std::string const& value); bool _set_icon_image_alt(std::string const& value); - bool _set_initial_state(std::string const& value); bool _set_low_flap_threshold(unsigned int value); bool _set_max_check_attempts(unsigned int value); bool _set_notes(std::string const& value); @@ -195,7 +193,6 @@ class host : public object { std::string _host_name; std::string _icon_image; std::string _icon_image_alt; - uint32_t _initial_state; opt _low_flap_threshold; opt _max_check_attempts; std::string _notes; diff --git a/common/engine_legacy_conf/parser.cc b/common/engine_legacy_conf/parser.cc index 1158ff831e3..6494eeba9b8 100644 --- a/common/engine_legacy_conf/parser.cc +++ b/common/engine_legacy_conf/parser.cc @@ -17,13 +17,11 @@ * */ #include "parser.hh" -#include "com/centreon/exceptions/msg_fmt.hh" -#include "com/centreon/io/directory_entry.hh" +#include #include "common/log_v2/log_v2.hh" using namespace com::centreon; using namespace com::centreon::engine::configuration; -using namespace com::centreon::io; using com::centreon::common::log_v2::log_v2; using com::centreon::exceptions::msg_fmt; @@ -288,11 +286,12 @@ std::string const& parser::_map_object_type(map_object const& objects) const * @param[in] path The directory path. 
*/ void parser::_parse_directory_configuration(std::string const& path) { - directory_entry dir(path); - std::list const& lst(dir.entry_list("*.cfg")); - for (std::list::const_iterator it(lst.begin()), end(lst.end()); - it != end; ++it) - _parse_object_definitions(it->path()); + for (const auto& entry : std::filesystem::directory_iterator(path)) { + if (entry.is_regular_file() && entry.path().extension() == ".cfg") + _parse_object_definitions(entry.path().string()); + else if (entry.is_directory()) + _parse_directory_configuration(entry.path().string()); + } } /** diff --git a/common/engine_legacy_conf/service.cc b/common/engine_legacy_conf/service.cc index fa9c55d72e6..1432d48f295 100644 --- a/common/engine_legacy_conf/service.cc +++ b/common/engine_legacy_conf/service.cc @@ -57,7 +57,6 @@ std::unordered_map const service::_setters{ {"action_url", SETTER(std::string const&, _set_action_url)}, {"icon_image", SETTER(std::string const&, _set_icon_image)}, {"icon_image_alt", SETTER(std::string const&, _set_icon_image_alt)}, - {"initial_state", SETTER(std::string const&, _set_initial_state)}, {"max_check_attempts", SETTER(unsigned int, _set_max_check_attempts)}, {"check_interval", SETTER(unsigned int, _set_check_interval)}, {"normal_check_interval", SETTER(unsigned int, _set_check_interval)}, @@ -111,7 +110,6 @@ static unsigned short const default_flap_detection_options(service::ok | service::critical); static unsigned int const default_freshness_threshold(0); static unsigned int const default_high_flap_threshold(0); -static unsigned int const default_initial_state(broker::Service_State_OK); static bool const default_is_volatile(false); static unsigned int const default_low_flap_threshold(0); static unsigned int const default_max_check_attempts(3); @@ -145,7 +143,6 @@ service::service() _flap_detection_options(default_flap_detection_options), _freshness_threshold(default_freshness_threshold), _high_flap_threshold(default_high_flap_threshold), - _initial_state(default_initial_state), _is_volatile(default_is_volatile), _low_flap_threshold(default_low_flap_threshold), _max_check_attempts(default_max_check_attempts), @@ -194,7 +191,6 @@ service::service(service const& other) _host_name(other._host_name), _icon_image(other._icon_image), _icon_image_alt(other._icon_image_alt), - _initial_state(other._initial_state), _is_volatile(other._is_volatile), _low_flap_threshold(other._low_flap_threshold), _max_check_attempts(other._max_check_attempts), @@ -253,7 +249,6 @@ service& service::operator=(service const& other) { _host_name = other._host_name; _icon_image = other._icon_image; _icon_image_alt = other._icon_image_alt; - _initial_state = other._initial_state; _is_volatile = other._is_volatile; _low_flap_threshold = other._low_flap_threshold; _max_check_attempts = other._max_check_attempts; @@ -414,11 +409,6 @@ bool service::operator==(service const& other) const noexcept { "configuration::service::equality => icon_image_alt don't match"); return false; } - if (_initial_state != other._initial_state) { - _logger->debug( - "configuration::service::equality => initial_state don't match"); - return false; - } if (_is_volatile != other._is_volatile) { _logger->debug( "configuration::service::equality => is_volatile don't match"); @@ -618,8 +608,6 @@ bool service::operator<(service const& other) const noexcept { return _icon_image < other._icon_image; else if (_icon_image_alt != other._icon_image_alt) return _icon_image_alt < other._icon_image_alt; - else if (_initial_state != other._initial_state) - return 
_initial_state < other._initial_state; else if (_is_volatile != other._is_volatile) return _is_volatile < other._is_volatile; else if (_low_flap_threshold != other._low_flap_threshold) @@ -1030,15 +1018,6 @@ std::string const& service::icon_image_alt() const noexcept { return _icon_image_alt; } -/** - * Get initial_state. - * - * @return The initial_state. - */ -unsigned int service::initial_state() const noexcept { - return _initial_state; -} - /** * Get is_volatile. * @@ -1588,29 +1567,6 @@ bool service::_set_icon_image_alt(std::string const& value) { return true; } -/** - * Set initial_state value. - * - * @param[in] value The new initial_state value. - * - * @return True on success, otherwise false. - */ -bool service::_set_initial_state(std::string const& value) { - std::string_view data(value); - data = absl::StripAsciiWhitespace(data); - if (data == "o" || data == "ok") - _initial_state = broker::Service_State_OK; - else if (data == "w" || data == "warning") - _initial_state = broker::Service_State_WARNING; - else if (data == "u" || data == "unknown") - _initial_state = broker::Service_State_UNKNOWN; - else if (data == "c" || data == "critical") - _initial_state = broker::Service_State_CRITICAL; - else - return false; - return true; -} - /** * Set is_volatile value. * diff --git a/common/engine_legacy_conf/service.hh b/common/engine_legacy_conf/service.hh index b0edf116723..be19aeade64 100644 --- a/common/engine_legacy_conf/service.hh +++ b/common/engine_legacy_conf/service.hh @@ -83,7 +83,6 @@ class service : public object { void set_host_id(uint64_t id); std::string const& icon_image() const noexcept; std::string const& icon_image_alt() const noexcept; - unsigned int initial_state() const noexcept; bool is_volatile() const noexcept; unsigned int low_flap_threshold() const noexcept; unsigned int max_check_attempts() const noexcept; @@ -142,7 +141,6 @@ class service : public object { bool _set_host_name(const std::string& value); bool _set_icon_image(std::string const& value); bool _set_icon_image_alt(std::string const& value); - bool _set_initial_state(std::string const& value); bool _set_is_volatile(bool value); bool _set_low_flap_threshold(unsigned int value); bool _set_max_check_attempts(unsigned int value); @@ -190,7 +188,6 @@ class service : public object { std::string _host_name; std::string _icon_image; std::string _icon_image_alt; - uint32_t _initial_state; opt _is_volatile; opt _low_flap_threshold; opt _max_check_attempts; diff --git a/common/grpc/inc/com/centreon/common/grpc/grpc_config.hh b/common/grpc/inc/com/centreon/common/grpc/grpc_config.hh index 4d151fa0baa..552d25a4ae8 100644 --- a/common/grpc/inc/com/centreon/common/grpc/grpc_config.hh +++ b/common/grpc/inc/com/centreon/common/grpc/grpc_config.hh @@ -55,17 +55,32 @@ class grpc_config { bool _compress; int _second_keepalive_interval; + /** + * @brief (client side) if this parameter is > 0 this is the longest delay in + * second between two failed connection. 
It is the + * GRPC_ARG_MAX_RECONNECT_BACKOFF_MS parameter. + * + */ + unsigned _second_max_reconnect_backoff; + public: using pointer = std::shared_ptr; - grpc_config() : _compress(false), _second_keepalive_interval(30) {} + grpc_config() + : _compress(false), + _second_keepalive_interval(30), + _second_max_reconnect_backoff(0) {} grpc_config(const std::string& hostp) - : _hostport(hostp), _compress(false), _second_keepalive_interval(30) {} + : _hostport(hostp), + _compress(false), + _second_keepalive_interval(30), + _second_max_reconnect_backoff(0) {} grpc_config(const std::string& hostp, bool crypted) : _hostport(hostp), _crypted(crypted), _compress(false), - _second_keepalive_interval(30) {} + _second_keepalive_interval(30), + _second_max_reconnect_backoff(0) {} grpc_config(const std::string& hostp, bool crypted, const std::string& certificate, @@ -81,7 +96,27 @@ class grpc_config { _ca_cert(ca_cert), _ca_name(ca_name), _compress(compression), - _second_keepalive_interval(second_keepalive_interval) {} + _second_keepalive_interval(second_keepalive_interval), + _second_max_reconnect_backoff(0) {} + + grpc_config(const std::string& hostp, + bool crypted, + const std::string& certificate, + const std::string& cert_key, + const std::string& ca_cert, + const std::string& ca_name, + bool compression, + int second_keepalive_interval, + unsigned second_max_reconnect_backoff) + : _hostport(hostp), + _crypted(crypted), + _certificate(certificate), + _cert_key(cert_key), + _ca_cert(ca_cert), + _ca_name(ca_name), + _compress(compression), + _second_keepalive_interval(second_keepalive_interval), + _second_max_reconnect_backoff(second_max_reconnect_backoff) {} const std::string& get_hostport() const { return _hostport; } bool is_crypted() const { return _crypted; } @@ -94,12 +129,17 @@ class grpc_config { return _second_keepalive_interval; } + unsigned get_second_max_reconnect_backoff() const { + return _second_max_reconnect_backoff; + } + bool operator==(const grpc_config& right) const { return _hostport == right._hostport && _crypted == right._crypted && _certificate == right._certificate && _cert_key == right._cert_key && _ca_cert == right._ca_cert && _ca_name == right._ca_name && _compress == right._compress && - _second_keepalive_interval == right._second_keepalive_interval; + _second_keepalive_interval == right._second_keepalive_interval && + _second_max_reconnect_backoff == right._second_max_reconnect_backoff; } /** diff --git a/common/grpc/src/grpc_client.cc b/common/grpc/src/grpc_client.cc index e62396901f7..53614b1e845 100644 --- a/common/grpc/src/grpc_client.cc +++ b/common/grpc/src/grpc_client.cc @@ -79,5 +79,10 @@ grpc_client_base::grpc_client_base( creds = ::grpc::InsecureChannelCredentials(); } + if (conf->get_second_max_reconnect_backoff() > 0) { + args.SetInt(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, + conf->get_second_max_reconnect_backoff() * 1000); + } + _channel = ::grpc::CreateCustomChannel(conf->get_hostport(), creds, args); } diff --git a/common/http/CMakeLists.txt b/common/http/CMakeLists.txt index 2bb1e2c4fb8..c9fa11409da 100644 --- a/common/http/CMakeLists.txt +++ b/common/http/CMakeLists.txt @@ -1,5 +1,5 @@ # -# Copyright 2022 Centreon +# Copyright 2022-2024 Centreon # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of @@ -23,15 +23,10 @@ set(SRC_DIR "src") set(TEST_DIR "${PROJECT_SOURCE_DIR}/http/test") # Sources.
-set(SOURCES - ${SRC_DIR}/http_client.cc - ${SRC_DIR}/http_connection.cc - ${SRC_DIR}/https_connection.cc - ${SRC_DIR}/http_server.cc -) +set(SOURCES ${SRC_DIR}/http_client.cc ${SRC_DIR}/http_connection.cc + ${SRC_DIR}/https_connection.cc ${SRC_DIR}/http_server.cc) - -add_library(centreon_http STATIC ${SOURCES} ) +add_library(centreon_http STATIC ${SOURCES}) target_include_directories(centreon_http PRIVATE ${INC_DIR}) target_precompile_headers(centreon_http REUSE_FROM centreon_common) @@ -41,9 +36,8 @@ set_target_properties(centreon_http PROPERTIES COMPILE_FLAGS "-fPIC") # Testing. if(WITH_TESTING) set(TESTS_SOURCES - ${TESTS_SOURCES} - ${TEST_DIR}/http_client_test.cc - ${TEST_DIR}/http_connection_test.cc - ${TEST_DIR}/http_server_test.cc - PARENT_SCOPE) + ${TESTS_SOURCES} ${TEST_DIR}/http_client_test.cc + ${TEST_DIR}/http_connection_test.cc ${TEST_DIR}/http_server_test.cc + ${TEST_DIR}/vault_test.cc + PARENT_SCOPE) endif(WITH_TESTING) diff --git a/common/http/doc/common-http.md b/common/http/doc/common-http.md index 1448b32074b..08b2f3bea6b 100644 --- a/common/http/doc/common-http.md +++ b/common/http/doc/common-http.md @@ -210,13 +210,13 @@ void create_server(const std::shared_ptr & io_ctx, server_creator = [io_ctx, logger, conf]() { return std::make_shared>( io_ctx, logger, conf, https_connection::load_server_certificate); - }; + }; } else { server_creator = [io_ctx, logger, conf]() { return std::make_shared>( io_ctx, logger, conf); - }; + }; } auto server = http_server::load(io_ctx, logger, conf, std::move(server_creator)); diff --git a/common/http/inc/com/centreon/common/http/http_config.hh b/common/http/inc/com/centreon/common/http/http_config.hh index c20fdf5065c..d0690aa93e9 100644 --- a/common/http/inc/com/centreon/common/http/http_config.hh +++ b/common/http/inc/com/centreon/common/http/http_config.hh @@ -19,6 +19,7 @@ #ifndef CCB_HTTP_CLIENT_CONFIG_HH__ #define CCB_HTTP_CLIENT_CONFIG_HH__ +#include namespace com::centreon::common::http { @@ -33,6 +34,7 @@ using duration = system_clock::duration; class http_config { // destination or listen address asio::ip::tcp::endpoint _endpoint; + asio::ip::tcp::resolver::results_type _endpoints_list; std::string _server_name; bool _crypted; duration _connect_timeout; @@ -48,12 +50,14 @@ class http_config { std::string _certificate_path; // path to key file (server case) std::string _key_path; + // Should we verify peer (available for a https client, default value: true) + bool _verify_peer = true; public: using pointer = std::shared_ptr; http_config(const asio::ip::tcp::endpoint& endpoint, - const std::string& server_name, + const std::string_view& server_name, bool crypted = false, duration connect_timeout = std::chrono::seconds(10), duration send_timeout = std::chrono::seconds(30), @@ -82,12 +86,45 @@ class http_config { _certificate_path(certificate_path), _key_path(key_path) {} + http_config(const asio::ip::tcp::resolver::results_type& endpoints_list, + const std::string_view& server_name, + bool crypted = false, + duration connect_timeout = std::chrono::seconds(10), + duration send_timeout = std::chrono::seconds(30), + duration receive_timeout = std::chrono::seconds(30), + unsigned second_tcp_keep_alive_interval = 30, + duration max_retry_interval = std::chrono::seconds(10), + unsigned max_send_retry = 5, + duration default_http_keepalive_duration = std::chrono::hours(1), + unsigned max_connections = 10, + asio::ssl::context_base::method ssl_method = + asio::ssl::context_base::tlsv13_client, + const std::string& certificate_path = "", + 
const std::string& key_path = "") + : _endpoints_list(endpoints_list), + _server_name(server_name), + _crypted(crypted), + _connect_timeout(connect_timeout), + _send_timeout(send_timeout), + _receive_timeout(receive_timeout), + _second_tcp_keep_alive_interval(second_tcp_keep_alive_interval), + _max_retry_interval(max_retry_interval), + _max_send_retry(max_send_retry), + _default_http_keepalive_duration(default_http_keepalive_duration), + _max_connections(max_connections), + _ssl_method(ssl_method), + _certificate_path(certificate_path), + _key_path(key_path) {} + http_config() : _crypted(false), _second_tcp_keep_alive_interval(30), _max_send_retry(0), _max_connections(0) {} + const asio::ip::tcp::resolver::results_type& get_endpoints_list() const { + return _endpoints_list; + } const asio::ip::tcp::endpoint& get_endpoint() const { return _endpoint; } const std::string& get_server_name() const { return _server_name; } bool is_crypted() const { return _crypted; } @@ -106,6 +143,8 @@ class http_config { asio::ssl::context_base::method get_ssl_method() const { return _ssl_method; } const std::string& get_certificate_path() const { return _certificate_path; } const std::string& get_key_path() const { return _key_path; } + void set_verify_peer(bool verify_peer) { _verify_peer = verify_peer; } + bool verify_peer() const { return _verify_peer; } }; } // namespace com::centreon::common::http diff --git a/common/http/inc/com/centreon/common/http/http_connection.hh b/common/http/inc/com/centreon/common/http/http_connection.hh index b650bac138b..967958650a6 100644 --- a/common/http/inc/com/centreon/common/http/http_connection.hh +++ b/common/http/inc/com/centreon/common/http/http_connection.hh @@ -93,7 +93,7 @@ class request_base : public request_type { public: request_base(); request_base(boost::beast::http::verb method, - const std::string& server_name, + const std::string_view& server_name, boost::beast::string_view target); virtual ~request_base() {} diff --git a/common/http/inc/com/centreon/common/http/https_connection.hh b/common/http/inc/com/centreon/common/http/https_connection.hh index 5fe9daa2325..37a881eebde 100644 --- a/common/http/inc/com/centreon/common/http/https_connection.hh +++ b/common/http/inc/com/centreon/common/http/https_connection.hh @@ -20,6 +20,7 @@ #ifndef CCB_HTTPS_CLIENT_CONNECTION_HH__ #define CCB_HTTPS_CLIENT_CONNECTION_HH__ +#include #include "http_connection.hh" namespace com::centreon::common::http { diff --git a/common/http/src/http_connection.cc b/common/http/src/http_connection.cc index a964a78c730..fa56bf291e8 100644 --- a/common/http/src/http_connection.cc +++ b/common/http/src/http_connection.cc @@ -48,7 +48,7 @@ request_base::request_base() { } request_base::request_base(boost::beast::http::verb method, - const std::string& server_name, + const std::string_view& server_name, boost::beast::string_view target) : request_type(method, target, 11) { set(boost::beast::http::field::host, server_name); @@ -189,10 +189,17 @@ void http_connection::connect(connect_callback_type&& callback) { *_conf); std::lock_guard l(_socket_m); _socket.expires_after(_conf->get_connect_timeout()); - _socket.async_connect( - _conf->get_endpoint(), - [me = shared_from_this(), cb = std::move(callback)]( - const boost::beast::error_code& err) { me->on_connect(err, cb); }); + if (_conf->get_endpoints_list().empty()) + _socket.async_connect( + _conf->get_endpoint(), + [me = shared_from_this(), cb = std::move(callback)]( + const boost::beast::error_code& err) { me->on_connect(err, cb); }); + 
else + _socket.async_connect(_conf->get_endpoints_list(), + [me = shared_from_this(), cb = std::move(callback)]( + const boost::beast::error_code& err, + const asio::ip::tcp::endpoint& endpoint + [[maybe_unused]]) { me->on_connect(err, cb); }); } /** @@ -204,10 +211,16 @@ void http_connection::connect(connect_callback_type&& callback) { */ void http_connection::on_connect(const boost::beast::error_code& err, const connect_callback_type& callback) { + std::string detail; if (err) { - std::string detail = - fmt::format("{:p} fail connect to {}: {}", static_cast(this), - _conf->get_endpoint(), err.message()); + if (_conf->get_endpoints_list().empty()) + detail = + fmt::format("{:p} fail connect to {}: {}", static_cast(this), + _conf->get_endpoint(), err.message()); + else + detail = + fmt::format("{:p} fail connect to {}: {}", static_cast(this), + _conf->get_server_name(), err.message()); SPDLOG_LOGGER_ERROR(_logger, detail); callback(err, detail); shutdown(); @@ -236,7 +249,8 @@ void http_connection::on_connect(const boost::beast::error_code& err, * callback is useless in this case but is mandatory to have the same interface * than https_connection * - * @param callback called via io_context::post (must have the same signature as https) + * @param callback called via io_context::post (must have the same signature as + * https) */ void http_connection::_on_accept(connect_callback_type&& callback) { unsigned expected = e_not_connected; diff --git a/common/http/src/https_connection.cc b/common/http/src/https_connection.cc index 20defaa2685..15dbf2203cd 100644 --- a/common/http/src/https_connection.cc +++ b/common/http/src/https_connection.cc @@ -142,6 +142,13 @@ https_connection::https_connection( } _stream = std::make_unique(beast::net::make_strand(*io_context), _sslcontext); + if (!SSL_set_tlsext_host_name(_stream->native_handle(), + conf->get_server_name().c_str())) { + beast::error_code ec{static_cast(::ERR_get_error()), + boost::beast::net::error::get_ssl_category()}; + SPDLOG_LOGGER_ERROR(logger, "Failed to initialize the https connection: {}", + ec.message()); + } SPDLOG_LOGGER_DEBUG(_logger, "create https_connection {:p} to {}", static_cast(this), *conf); } @@ -191,10 +198,18 @@ void https_connection::connect(connect_callback_type&& callback) { *_conf); std::lock_guard l(_socket_m); beast::get_lowest_layer(*_stream).expires_after(_conf->get_connect_timeout()); - beast::get_lowest_layer(*_stream).async_connect( - _conf->get_endpoint(), - [me = shared_from_this(), cb = std::move(callback)]( - const beast::error_code& err) mutable { me->on_connect(err, cb); }); + if (_conf->get_endpoints_list().empty()) + beast::get_lowest_layer(*_stream).async_connect( + _conf->get_endpoint(), + [me = shared_from_this(), cb = std::move(callback)]( + const beast::error_code& err) mutable { me->on_connect(err, cb); }); + else + beast::get_lowest_layer(*_stream).async_connect( + _conf->get_endpoints_list(), + [me = shared_from_this(), cb = std::move(callback)]( + const beast::error_code& err, + const asio::ip::tcp::endpoint& endpoint + [[maybe_unused]]) mutable { me->on_connect(err, cb); }); } /** @@ -206,8 +221,13 @@ void https_connection::connect(connect_callback_type&& callback) { void https_connection::on_connect(const beast::error_code& err, connect_callback_type& callback) { if (err) { - std::string detail = fmt::format("fail connect to {}: {}", - _conf->get_endpoint(), err.message()); + std::string detail; + if (_conf->get_endpoints_list().empty()) + detail = fmt::format("fail connect to {}: {}", 
_conf->get_endpoint(), + err.message()); + else + detail = fmt::format("fail connect to {}: {}", _conf->get_server_name(), + err.message()); SPDLOG_LOGGER_ERROR(_logger, detail); callback(err, detail); shutdown(); diff --git a/common/http/test/http_client_test.cc b/common/http/test/http_client_test.cc index 27e1f1aa5d7..c98ad313f72 100644 --- a/common/http/test/http_client_test.cc +++ b/common/http/test/http_client_test.cc @@ -99,11 +99,12 @@ class connection_ok : public connection_base { ++_request_counter; } - void _on_accept(connect_callback_type&& callback) override {} + void _on_accept(connect_callback_type&& callback [[maybe_unused]]) override {} - void answer(const response_ptr& response, - answer_callback_type&& callback) override {} - void receive_request(request_callback_type&& callback) override {} + void answer(const response_ptr& response [[maybe_unused]], + answer_callback_type&& callback [[maybe_unused]]) override {} + void receive_request(request_callback_type&& callback + [[maybe_unused]]) override {} asio::ip::tcp::socket& get_socket() override { return _useless; } }; @@ -272,11 +273,12 @@ class connection_bagot : public connection_base { } } - void _on_accept(connect_callback_type&& callback) override {} + void _on_accept(connect_callback_type&& callback [[maybe_unused]]) override {} - void answer(const response_ptr& response, - answer_callback_type&& callback) override {} - void receive_request(request_callback_type&& callback) override {} + void answer(const response_ptr& response [[maybe_unused]], + answer_callback_type&& callback [[maybe_unused]]) override {} + void receive_request(request_callback_type&& callback + [[maybe_unused]]) override {} asio::ip::tcp::socket& get_socket() override { return _useless; } }; @@ -322,7 +324,7 @@ TEST_F(http_client_test, all_handler_called) { } std::unique_lock l(cond_m); - bool res_wait = var.wait_for(l, std::chrono::seconds(10), [&]() -> bool { + var.wait_for(l, std::chrono::seconds(10), [&]() -> bool { return error_handler_cpt + success_handler_cpt == 1000; }); diff --git a/common/http/test/http_connection_test.cc b/common/http/test/http_connection_test.cc index b093db90e12..9143f4cd85c 100644 --- a/common/http/test/http_connection_test.cc +++ b/common/http/test/http_connection_test.cc @@ -203,14 +203,16 @@ class dummy_connection : public connection_base { void shutdown() override { _state = e_not_connected; } - void connect(connect_callback_type&& callback) override {} + void connect(connect_callback_type&& callback [[maybe_unused]]) override {} - void send(request_ptr request, send_callback_type&& callback) override {} + void send(request_ptr request [[maybe_unused]], + send_callback_type&& callback [[maybe_unused]]) override {} - void _on_accept(connect_callback_type&& callback) override{}; - void answer(const response_ptr& response, - answer_callback_type&& callback) override{}; - void receive_request(request_callback_type&& callback) override{}; + void _on_accept(connect_callback_type&& callback [[maybe_unused]]) override{}; + void answer(const response_ptr& response [[maybe_unused]], + answer_callback_type&& callback [[maybe_unused]]) override{}; + void receive_request(request_callback_type&& callback + [[maybe_unused]]) override{}; asio::ip::tcp::socket& get_socket() override { return _useless; } }; @@ -339,8 +341,8 @@ class answer_no_keep_alive : public base_class { }); }); } - void add_keep_alive_to_server_response( - const response_ptr& response) const override {} + void add_keep_alive_to_server_response(const 
response_ptr& response + [[maybe_unused]]) const override {} }; TEST_P(http_test, connect_send_answer_without_keepalive) { diff --git a/common/http/test/http_server_test.cc b/common/http/test/http_server_test.cc index 52d06f7d510..978d80f5037 100644 --- a/common/http/test/http_server_test.cc +++ b/common/http/test/http_server_test.cc @@ -180,7 +180,7 @@ template void session_test::wait_for_request() { connection_class::receive_request( [me = shared_from_this()](const boost::beast::error_code& err, - const std::string& detail, + const std::string& detail [[maybe_unused]], const std::shared_ptr& request) { if (err) { SPDLOG_LOGGER_DEBUG(me->_logger, @@ -295,15 +295,15 @@ TEST_P(http_server_test, many_request_by_connection) { req->body() = fmt::format("hello server {}", send_cpt); req->content_length(req->body().length()); - client->send(req, - [&cond, req, &resp_cpt](const beast::error_code& err, - const std::string& detail, - const response_ptr& response) mutable { - ASSERT_FALSE(err); - ASSERT_EQ(req->body(), response->body()); - if (resp_cpt.fetch_add(1) == 199) - cond.notify_one(); - }); + client->send( + req, [&cond, req, &resp_cpt](const beast::error_code& err, + const std::string& detail [[maybe_unused]], + const response_ptr& response) mutable { + ASSERT_FALSE(err); + ASSERT_EQ(req->body(), response->body()); + if (resp_cpt.fetch_add(1) == 199) + cond.notify_one(); + }); } std::unique_lock l(cond_m); cond.wait(l); @@ -356,15 +356,15 @@ TEST_P(http_server_test, many_request_and_many_connection) { req->body() = fmt::format("hello server {}", send_cpt); req->content_length(req->body().length()); - client->send(req, - [&cond, req, &resp_cpt](const beast::error_code& err, - const std::string& detail, - const response_ptr& response) mutable { - ASSERT_FALSE(err); - ASSERT_EQ(req->body(), response->body()); - if (resp_cpt.fetch_add(1) == 999) - cond.notify_one(); - }); + client->send( + req, [&cond, req, &resp_cpt](const beast::error_code& err, + const std::string& detail [[maybe_unused]], + const response_ptr& response) mutable { + ASSERT_FALSE(err); + ASSERT_EQ(req->body(), response->body()); + if (resp_cpt.fetch_add(1) == 999) + cond.notify_one(); + }); } std::unique_lock l(cond_m); cond.wait(l); diff --git a/common/http/test/vault-server.pl b/common/http/test/vault-server.pl new file mode 100644 index 00000000000..e2171da8053 --- /dev/null +++ b/common/http/test/vault-server.pl @@ -0,0 +1,76 @@ +#!/usr/bin/perl +use strict; +use warnings; + +use HTTP::Daemon::SSL; +use HTTP::Status; +use HTTP::Request; +use JSON; + +my $pem_file = '/tmp/vault.pem'; + +system("openssl req -new -x509 -newkey rsa:2048 -nodes -keyout $pem_file -out $pem_file -days 365 -subj /CN=localhost"); + +# Create the HTTPS server on port 4443 +my $d = HTTP::Daemon::SSL->new( + LocalPort => 4443, + SSL_cert_file => $pem_file, + SSL_key_file => $pem_file, +) || die "Unable to start the HTTPS server: $!"; + +print "HTTPS server started on ", $d->url, "\n"; + +# Main loop accepting connections +while (my $client_conn = $d->accept) { + while (my $request = $client_conn->get_request) { + # Handle POST requests on the /v1/auth/approle/login path + if ($request->method eq 'POST' && $request->uri->path eq '/v1/auth/approle/login') { + + # Retrieve the body of the POST request + my $content = $request->content; + + # Parse the JSON content of the request (if it is JSON) + my $json_data = eval { decode_json($content) }; + if ($@) { + # If the JSON is invalid, return an error
400 (Bad Request) + $client_conn->send_response(400, 'Bad Request', undef, 'Invalid JSON'); + } else { + # Simulate a JSON response for the login + my $response_data = { + request_id => "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", + lease_id => "", + renewable => JSON::false, + lease_duration => 0, + data => undef, + wrap_info => undef, + warnings => undef, + auth => { + client_token => "hvs.key that does not exist", + accessor => "A0A0A0A0A0A0A0A0A0A0A0A0", + policies => ["default", "john-doe"], + token_policies => ["default", "john-doe"], + metadata => { + role_name => "john-doe" + }, + lease_duration => 2764800, + renewable => JSON::true, + entity_id => "bbbbbbbb-bbbb-cccc-dddd-ffffffffffff", + token_type => "service", + orphan => JSON::true, + mfa_requirement => undef, + num_uses => 0 + }, + mount_type => "" + }; + + # Send a JSON response + $client_conn->send_response(200, 'OK', undef, encode_json($response_data)); + } + } else { + # Handle other paths (404 Not Found) + $client_conn->send_error(RC_NOT_FOUND, "Path not found"); + } + } + $client_conn->close; + undef($client_conn); +} diff --git a/common/http/test/vault_test.cc b/common/http/test/vault_test.cc new file mode 100644 index 00000000000..a356388e308 --- /dev/null +++ b/common/http/test/vault_test.cc @@ -0,0 +1,103 @@ +/** + * Copyright 2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include +#include +#include +#include +#include "com/centreon/common/process/process.hh" +#include "http_config.hh" +#include "https_connection.hh" + +#include "defer.hh" + +using system_clock = std::chrono::system_clock; +using time_point = system_clock::time_point; +using duration = system_clock::duration; + +#include "http_client.hh" + +using namespace com::centreon::common; +using namespace com::centreon::common::http; +extern std::shared_ptr g_io_context; + +class vault_test : public ::testing::Test { + protected: + std::shared_ptr _logger; + + public: + void SetUp() override { + _logger = spdlog::stdout_color_mt("vault_test"); + _logger->set_level(spdlog::level::debug); + }; +}; + +TEST_F(vault_test, httpsConnection) { + auto p = std::make_shared>( + g_io_context, _logger, "/usr/bin/perl " HTTP_TEST_DIR "/vault-server.pl"); + p->start_process(false); + + std::promise promise; + std::future future = promise.get_future(); + asio::ip::tcp::resolver resolver(*g_io_context); + std::string_view server_name("localhost"); + std::string_view server_port("4443"); + const auto results = resolver.resolve(server_name, server_port); + ASSERT_FALSE(results.empty()) << "One endpoint expected at least on " + << server_name << ':' << server_port; + http_config::pointer client_conf = std::make_shared( + results, server_name, true, std::chrono::seconds(10), + std::chrono::seconds(30), std::chrono::seconds(30), 30, + std::chrono::seconds(10), 5, std::chrono::hours(1), 1, + asio::ssl::context_base::tlsv12_client); + connection_creator conn_creator = [client_conf, logger = _logger]() { + auto ssl_init = [](asio::ssl::context& ctx, + const http_config::pointer& conf [[maybe_unused]]) { + ctx.set_verify_mode(asio::ssl::context::verify_none); + // ctx.set_verify_mode(asio::ssl::context::verify_peer); + // ctx.set_default_verify_paths(); + }; + return https_connection::load(g_io_context, logger, client_conf, ssl_init); + }; + auto client = client::load(g_io_context, _logger, client_conf, conn_creator); + auto req = std::make_shared( + boost::beast::http::verb::post, server_name, "/v1/auth/approle/login"); + req->body() = fmt::format("{{ \"role_id\":\"{}\", \"secret_id\":\"{}\" }}", + "abababab-abab-abab-abab-abababababab", + "abababab-abab-abab-abab-abababababab"); + req->content_length(req->body().length()); + std::string resp; + client->send(req, [logger = _logger, &promise]( + const boost::beast::error_code& err, + const std::string& detail [[maybe_unused]], + const response_ptr& response) mutable { + logger->info("We are at the callback"); + if (err) + logger->error("Error from http server: {}", err.message()); + else + promise.set_value(response->body()); + }); + nlohmann::json js = nlohmann::json::parse(future.get()); + p->kill(); + ASSERT_EQ(js["auth"]["client_token"], + std::string_view("hvs." 
+ "key that does not exist")) + << "No result received from https server after 20s"; +} diff --git a/common/inc/com/centreon/common/defer.hh b/common/inc/com/centreon/common/defer.hh index 788a708de91..862ce7d0860 100644 --- a/common/inc/com/centreon/common/defer.hh +++ b/common/inc/com/centreon/common/defer.hh @@ -44,6 +44,29 @@ void defer(const std::shared_ptr<asio::io_context>& io_context, }); }; +/** + * @brief Execute the handler once the given time point is reached. + * + * @tparam handler_type + * @param io_context + * @param tp the time point at which to execute the handler + * @param handler the job to execute + */ +template <typename handler_type> +void defer(const std::shared_ptr<asio::io_context>& io_context, + const std::chrono::system_clock::time_point& tp, + handler_type&& handler) { + std::shared_ptr<asio::system_timer> timer( + std::make_shared<asio::system_timer>(*io_context)); + timer->expires_at(tp); + timer->async_wait([io_context, timer, m_handler = std::move(handler)]( + const boost::system::error_code& err) { + if (!err) { + m_handler(); + } + }); +}; + } // namespace com::centreon::common #endif diff --git a/common/inc/com/centreon/common/file.hh b/common/inc/com/centreon/common/file.hh new file mode 100644 index 00000000000..d4663476f2f --- /dev/null +++ b/common/inc/com/centreon/common/file.hh @@ -0,0 +1,28 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + * For more information : contact@centreon.com + */ +#ifndef CCCM_FILE_HH +#define CCCM_FILE_HH +#include + +namespace com::centreon::common { +std::string read_file_content(const std::filesystem::path& file_path); +std::string hash_directory(const std::filesystem::path& dir_path, + std::error_code& ec) noexcept; +} // namespace com::centreon::common + +#endif /* !CCCM_FILE_HH */ diff --git a/common/inc/com/centreon/common/perfdata.hh b/common/inc/com/centreon/common/perfdata.hh index cc863df3d21..d60f3dc2588 100644 --- a/common/inc/com/centreon/common/perfdata.hh +++ b/common/inc/com/centreon/common/perfdata.hh @@ -59,7 +59,7 @@ class perfdata { float min() const { return _min; } void min(float val) { _min = val; } const std::string& name() const { return _name; } - void name(const std::string&& val) { _name = val; } + void name(std::string_view val) { _name = val; } void resize_name(size_t new_size); const std::string& unit() const { return _unit; } void resize_unit(size_t new_size); @@ -76,11 +76,11 @@ class perfdata { void warning_mode(bool val) { _warning_mode = val; } }; -} // namespace com::centreon::common - bool operator==(com::centreon::common::perfdata const& left, com::centreon::common::perfdata const& right); bool operator!=(com::centreon::common::perfdata const& left, com::centreon::common::perfdata const& right); +} // namespace com::centreon::common + #endif diff --git a/common/inc/com/centreon/common/pool.hh b/common/inc/com/centreon/common/pool.hh index 8df447e2fba..a83bd54bb4c 100644 --- a/common/inc/com/centreon/common/pool.hh +++ b/common/inc/com/centreon/common/pool.hh @@ -1,20 +1,20 @@ -/* -** Copyright 2020-2021 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2020-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #ifndef CENTREON_COMMON_POOL_HH #define CENTREON_COMMON_POOL_HH diff --git a/common/inc/com/centreon/common/rapidjson_helper.hh b/common/inc/com/centreon/common/rapidjson_helper.hh index 5cecd1ba14d..42336fda6f9 100644 --- a/common/inc/com/centreon/common/rapidjson_helper.hh +++ b/common/inc/com/centreon/common/rapidjson_helper.hh @@ -244,6 +244,9 @@ class rapidjson_helper { return (member->value.*original_getter)(); } if (member->value.IsString()) { + if (!*member->value.GetString()) { + return default_value; + } return_type ret; if (!simple_ato(member->value.GetString(), &ret)) { throw exceptions::msg_fmt("field {} is not a {} string", field_name, @@ -263,6 +266,7 @@ class rapidjson_helper { float get_float(const char* field_name) const; uint64_t get_uint64_t(const char* field_name) const; + uint64_t get_uint64_t(const char* field_name, uint64_t default_value) const; int64_t get_int64_t(const char* field_name) const; uint32_t get_uint32_t(const char* field_name) const; diff --git a/common/log_v2/CMakeLists.txt b/common/log_v2/CMakeLists.txt index 9ac562a369f..5c117ac5933 100644 --- a/common/log_v2/CMakeLists.txt +++ b/common/log_v2/CMakeLists.txt @@ -25,7 +25,8 @@ add_library( # Sources. log_v2.cc log_v2.hh config.hh) -#set_target_properties(log_v2 PROPERTIES CXX_VISIBILITY_PRESET hidden) +set_target_properties(log_v2 + PROPERTIES POSITION_INDEPENDENT_CODE ON) target_link_libraries(log_v2 spdlog::spdlog stdc++fs gRPC::gpr absl::raw_hash_set) diff --git a/common/log_v2/log_v2.cc b/common/log_v2/log_v2.cc index 086e8ac0963..38f4a1ffa41 100644 --- a/common/log_v2/log_v2.cc +++ b/common/log_v2/log_v2.cc @@ -20,7 +20,6 @@ #include #include -#include #include #include #include @@ -30,7 +29,6 @@ #include #include -#include using namespace com::centreon::common::log_v2; using namespace spdlog; diff --git a/common/precomp_inc/precomp.hh b/common/precomp_inc/precomp.hh index 227f2533caa..d9649257d9a 100644 --- a/common/precomp_inc/precomp.hh +++ b/common/precomp_inc/precomp.hh @@ -43,11 +43,8 @@ #include #include -#include -#include - #include -#ifndef _WINDOWS +#ifndef _WIN32 #include #include #endif diff --git a/common/process/inc/com/centreon/common/process/detail/centreon_posix_process_launcher.hh b/common/process/inc/com/centreon/common/process/detail/centreon_posix_process_launcher.hh index 79dc1eac355..69cc9868e83 100644 --- a/common/process/inc/com/centreon/common/process/detail/centreon_posix_process_launcher.hh +++ b/common/process/inc/com/centreon/common/process/detail/centreon_posix_process_launcher.hh @@ -13,7 +13,8 @@ struct centreon_process_stdio { boost::process::v2::detail::process_output_binding out; boost::process::v2::detail::process_error_binding err; - error_code on_exec_setup(centreon_posix_default_launcher& launcher, + error_code on_exec_setup(centreon_posix_default_launcher& launcher + [[maybe_unused]], const filesystem::path&, const char* const*) { if (::dup2(in.fd, in.target) == -1) @@ -69,7 +70,7 @@ struct centreon_posix_default_launcher { template auto operator()( ExecutionContext& context, - error_code& ec, + error_code& ec [[maybe_unused]], const typename std::enable_if< std::is_convertible< ExecutionContext&, @@ -189,6 +190,10 @@ struct centreon_posix_default_launcher { BOOST_PROCESS_V2_ASSIGN_EC(ec, child_error, system_category()) if (ec) { + if (pid > 0) { + ::kill(pid, SIGKILL); + ::waitpid(pid, nullptr, 0); + } detail::on_error(*this, executable, argv, ec, inits...); return basic_process{exec}; } diff 
--git a/common/process/inc/com/centreon/common/process/process.hh b/common/process/inc/com/centreon/common/process/process.hh index 06a6799bd3b..adcd8602ba9 100644 --- a/common/process/inc/com/centreon/common/process/process.hh +++ b/common/process/inc/com/centreon/common/process/process.hh @@ -48,7 +48,7 @@ class mutex {}; template <> class lock { public: - lock(mutex* dummy_mut) {} + lock(mutex* /* dummy_mut*/) {} }; } // namespace detail @@ -129,6 +129,8 @@ class process : public std::enable_shared_from_this> { virtual ~process() = default; + int get_pid(); + template void write_to_stdin(const string_class& content); diff --git a/common/process/src/process.cc b/common/process/src/process.cc index 6036a0fca19..738c89b8b2a 100644 --- a/common/process/src/process.cc +++ b/common/process/src/process.cc @@ -16,9 +16,14 @@ * For more information : contact@centreon.com */ -#include #include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +#include +#include + #include "com/centreon/common/process/process.hh" #if !defined(BOOST_PROCESS_V2_WINDOWS) @@ -26,6 +31,7 @@ #endif #include +#pragma GCC diagnostic pop namespace proc = boost::process::v2; @@ -113,7 +119,7 @@ struct boost_process { boost_process(asio::io_context& io_context, const std::string& exe_path, const std::vector& args, - bool no_stdin) + bool no_stdin [[maybe_unused]]) : stdout_pipe(io_context), stderr_pipe(io_context), stdin_pipe(io_context), @@ -150,7 +156,7 @@ process::process( const std::shared_ptr& logger, const std::string_view& cmd_line) : _io_context(io_context), _logger(logger) { -#ifdef _WINDOWS +#ifdef _WIN32 auto split_res = boost::program_options::split_winmain(std::string(cmd_line)); #else auto split_res = boost::program_options::split_unix(std::string(cmd_line)); @@ -167,6 +173,21 @@ process::process( } } +/** + * @brief returns pid of process, -1 otherwise + * + * @tparam use_mutex + * @return int + */ +template +int process::get_pid() { + detail::lock l(&_protect); + if (_proc) { + return _proc->proc.id(); + } + return -1; +} + /** * @brief start a new process, if a previous one is running, it's killed * In this function, we start child process and stdout, stderr asynchronous read @@ -277,9 +298,9 @@ void process::stdin_write_no_lock( try { _write_pending = true; _proc->stdin_pipe.async_write_some( - asio::buffer(*data), - [me = shared_from_this(), caller = _proc, data]( - const boost::system::error_code& err, size_t nb_written) { + asio::buffer(*data), [me = shared_from_this(), caller = _proc, data]( + const boost::system::error_code& err, + size_t nb_written [[maybe_unused]]) { detail::lock l(&me->_protect); if (caller != me->_proc) { return; @@ -438,4 +459,4 @@ template class process; template class process; -} // namespace com::centreon::common \ No newline at end of file +} // namespace com::centreon::common diff --git a/common/src/file.cc b/common/src/file.cc new file mode 100644 index 00000000000..7c8fc9057bf --- /dev/null +++ b/common/src/file.cc @@ -0,0 +1,106 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "file.hh" +#include "com/centreon/exceptions/msg_fmt.hh" + +namespace com::centreon::common { + +/** + * @brief Reads the content of a text file and returns it in an std::string. + * + * @param file_path The file to read. + * + * @return The content as an std::string. + */ +std::string read_file_content(const std::filesystem::path& file_path) { + std::ifstream in(file_path, std::ios::in); + std::string retval; + if (in) { + in.seekg(0, std::ios::end); + retval.resize(in.tellg()); + in.seekg(0, std::ios::beg); + in.read(&retval[0], retval.size()); + in.close(); + } else + throw exceptions::msg_fmt("Can't open file '{}': {}", file_path.string(), + strerror(errno)); + return retval; +} + +/** + * @brief Compute the hash of a directory's content (only regular .cfg files + * are taken into account). + * + * @param dir_path The directory to parse. + * @param ec Set to the filesystem error encountered during traversal, if any. + * + * @return The SHA-256 digest as a lowercase hex string, or an empty string on + * error. + */ +std::string hash_directory(const std::filesystem::path& dir_path, + std::error_code& ec) noexcept { + std::list<std::filesystem::path> files; + ec.clear(); + + /* Recursively parse the directory */ + for (const auto& entry : + std::filesystem::recursive_directory_iterator(dir_path, ec)) { + if (entry.is_regular_file() && entry.path().extension() == ".cfg") + files.push_back(entry.path()); + } + + if (ec) + return ""; + + files.sort(); + + EVP_MD_CTX* mdctx = EVP_MD_CTX_new(); + EVP_DigestInit_ex(mdctx, EVP_sha256(), nullptr); + + for (auto& f : files) { + const std::string& fname = + std::filesystem::relative(f, dir_path, ec).string(); + if (ec) + break; + EVP_DigestUpdate(mdctx, fname.data(), fname.size()); + std::string content = read_file_content(f); + EVP_DigestUpdate(mdctx, content.data(), content.size()); + } + + unsigned char hash[SHA256_DIGEST_LENGTH]; + unsigned int size; + EVP_DigestFinal_ex(mdctx, hash, &size); + EVP_MD_CTX_free(mdctx); + + if (ec) + return ""; + + std::string retval; + retval.reserve(SHA256_DIGEST_LENGTH * 2); + auto digit = [](unsigned char d) -> char { + if (d < 10) + return '0' + d; + else + return 'a' + (d - 10); + }; + + for (auto h : hash) { + retval.push_back(digit(h >> 4)); + retval.push_back(digit(h & 0xf)); + } + return retval; +} +} // namespace com::centreon::common diff --git a/common/src/perfdata.cc b/common/src/perfdata.cc index 80945b75950..73b5306c742 100644 --- a/common/src/perfdata.cc +++ b/common/src/perfdata.cc @@ -54,6 +54,7 @@ static inline bool float_equal(float a, float b) { fabs(a - b) <= 0.01 * fabs(a)); } +namespace com::centreon::common { /** * Compare two perfdata objects.
* @@ -87,6 +88,9 @@ bool operator==(perfdata const& left, perfdata const& right) { bool operator!=(perfdata const& left, perfdata const& right) { return !(left == right); } + +} // namespace com::centreon::common + /** * @brief in case of db insertions we need to ensure that name can be stored in * table With it, you can reduce name size @@ -265,18 +269,21 @@ std::list<perfdata> perfdata::parse_perfdata( /* The label is given by s and finishes at end */ if (*end == ']') { - --end; if (strncmp(s, "a[", 2) == 0) { s += 2; + --end; p._value_type = perfdata::data_type::absolute; } else if (strncmp(s, "c[", 2) == 0) { s += 2; + --end; p._value_type = perfdata::data_type::counter; } else if (strncmp(s, "d[", 2) == 0) { s += 2; + --end; p._value_type = perfdata::data_type::derive; } else if (strncmp(s, "g[", 2) == 0) { s += 2; + --end; p._value_type = perfdata::data_type::gauge; } } diff --git a/common/src/process_stat.cc b/common/src/process_stat.cc index fa784d68ffa..ec98dd9ac37 100644 --- a/common/src/process_stat.cc +++ b/common/src/process_stat.cc @@ -17,7 +17,6 @@ * */ -#include #include #include diff --git a/common/src/rapidjson_helper.cc b/common/src/rapidjson_helper.cc index 2252cdc262f..a6907400a15 100644 --- a/common/src/rapidjson_helper.cc +++ b/common/src/rapidjson_helper.cc @@ -19,7 +19,6 @@ #include #include #include -#include #include "rapidjson_helper.hh" @@ -141,6 +140,23 @@ uint64_t rapidjson_helper::get_uint64_t(const char* field_name) const { &rapidjson::Value::GetUint64, &absl::SimpleAtoi); } +/** + * @brief Read a uint64_t field. + * + * @param field_name + * @param default_value value returned if member does not exist + * @return uint64_t field value + * @throw msg_fmt if the field value is neither an integer nor a + * string containing an integer + */ +uint64_t rapidjson_helper::get_uint64_t(const char* field_name, + uint64_t default_value) const { + return get_or_default( + field_name, "uint64_t", + [](const rapidjson::Value& val) { return val.IsUint64(); }, + &rapidjson::Value::GetUint64, &absl::SimpleAtoi, default_value); +} + /** + * @brief read a int64_t field + * diff --git a/common/src/utf8.cc b/common/src/utf8.cc index 7ef6ebed5ed..ac963b501f3 100644 --- a/common/src/utf8.cc +++ b/common/src/utf8.cc @@ -16,6 +16,8 @@ * For more information : contact@centreon.com */ +#include "utf8.hh" + /** * @brief Checks if the string given as parameter is a real UTF-8 string. * If it is not, it tries to convert it to UTF-8. Encodings correctly changed @@ -26,18 +28,18 @@ * @return The string itself or a new string converted to UTF-8. The output * string should always be an UTF-8 string.
*/ - -#include "utf8.hh" - std::string com::centreon::common::check_string_utf8( const std::string_view& str) noexcept { std::string_view::const_iterator it; - for (it = str.begin(); it != str.end();) { + for (it = str.begin(); it < str.end();) { uint32_t val = (*it & 0xff); if ((val & 0x80) == 0) { ++it; continue; } + if (it + 1 >= str.end()) { + break; + } val = (val << 8) | (*(it + 1) & 0xff); if ((val & 0xe0c0) == 0xc080) { val &= 0x1e00; @@ -47,6 +49,9 @@ std::string com::centreon::common::check_string_utf8( continue; } + if (it + 2 >= str.end()) { + break; + } val = (val << 8) | (*(it + 2) & 0xff); if ((val & 0xf0c0c0) == 0xe08080) { val &= 0xf2000; @@ -56,6 +61,9 @@ std::string com::centreon::common::check_string_utf8( continue; } + if (it + 3 >= str.end()) { + break; + } val = (val << 8) | (*(it + 3) & 0xff); if ((val & 0xf8c0c0c0) == 0xF0808080) { val &= 0x7300000; diff --git a/common/tests/CMakeLists.txt b/common/tests/CMakeLists.txt index bfc0cbbff4c..f5aaa290ae6 100644 --- a/common/tests/CMakeLists.txt +++ b/common/tests/CMakeLists.txt @@ -16,14 +16,17 @@ # For more information : contact@centreon.com # - if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") - add_executable(ut_common - process_stat_test.cc + add_executable( + ut_common + crypto/base64.cc + engine_conf/expand_conf.cc + file_test.cc hex_dump_test.cc log_v2/log_v2.cc node_allocator_test.cc perfdata_test.cc + process_stat_test.cc process_test.cc rapidjson_helper_test.cc test_main.cc @@ -39,6 +42,13 @@ else() ${TESTS_SOURCES}) endif() +set(TESTS_DIR "${PROJECT_SOURCE_DIR}/tests") + +target_compile_definitions( + ut_common PUBLIC -DHTTP_TEST_DIR="${PROJECT_SOURCE_DIR}/http/test" + -DCOMMON_CFG_TEST=\"${TESTS_DIR}/cfg_files\" + ) + set_target_properties( ut_common PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tests @@ -54,7 +64,6 @@ if(WITH_COVERAGE) set(GCOV gcov) endif() - file(COPY ${PROJECT_SOURCE_DIR}/tests/scripts/echo.bat DESTINATION ${CMAKE_BINARY_DIR}/tests) file(COPY ${PROJECT_SOURCE_DIR}/tests/scripts/bad_script.bat @@ -62,53 +71,55 @@ file(COPY ${PROJECT_SOURCE_DIR}/tests/scripts/bad_script.bat add_test(NAME tests COMMAND ut_common) - if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") target_link_libraries( ut_common PRIVATE centreon_common - centreon_http - centreon_process - -L${Boost_LIBRARY_DIR_RELEASE} - boost_program_options - re2::re2 - log_v2 - crypto - ssl - GTest::gtest - GTest::gtest_main - GTest::gmock - GTest::gmock_main - absl::any - absl::log - absl::base - absl::bits - fmt::fmt pthread) - - add_dependencies(ut_common centreon_common centreon_http) + engine_conf + ctncrypto + centreon_http + centreon_process + -L${Boost_LIBRARY_DIR_RELEASE} + boost_program_options + re2::re2 + log_v2 + OpenSSL::SSL + OpenSSL::Crypto + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + absl::any + absl::log + absl::base + absl::bits + fmt::fmt + pthread) + + add_dependencies(ut_common centreon_common centreon_http) else() target_link_libraries( ut_common PRIVATE centreon_common - centreon_process - Boost::program_options - re2::re2 - GTest::gtest - GTest::gtest_main - GTest::gmock - GTest::gmock_main - absl::any - absl::log - absl::base - absl::bits - fmt::fmt) - - add_dependencies(ut_common centreon_common) + centreon_process + Boost::program_options + re2::re2 + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + absl::any + absl::log + absl::base + absl::bits + fmt::fmt) + + add_dependencies(ut_common centreon_common) endif() - set_property(TARGET ut_common PROPERTY 
POSITION_INDEPENDENT_CODE ON) -target_precompile_headers(ut_common PRIVATE ${PROJECT_SOURCE_DIR}/precomp_inc/precomp.hh) +target_precompile_headers(ut_common PRIVATE + ${PROJECT_SOURCE_DIR}/precomp_inc/precomp.hh) diff --git a/common/tests/cfg_files/config0/anomaly_detection.cfg b/common/tests/cfg_files/config0/anomaly_detection.cfg new file mode 100644 index 00000000000..0c3666c2a20 --- /dev/null +++ b/common/tests/cfg_files/config0/anomaly_detection.cfg @@ -0,0 +1,13 @@ +define anomalydetection { + host_id 1 + host_name host_1 + internal_id 1 + _KEY1 _VAL01 + service_id 26 + service_description anomaly_26 + dependent_service_id 1 + metric_name metric + sensitivity 0.0 + status_change 1 + thresholds_file /tmp/anomaly_threshold.json +} \ No newline at end of file diff --git a/common/tests/cfg_files/config0/centengine.cfg b/common/tests/cfg_files/config0/centengine.cfg new file mode 100644 index 00000000000..1912c54a4c0 --- /dev/null +++ b/common/tests/cfg_files/config0/centengine.cfg @@ -0,0 +1,104 @@ +cfg_file=/tmp/etc/centreon-engine/config0/anomaly_detection.cfg +cfg_file=/tmp/etc/centreon-engine/config0/severities.cfg +cfg_file=/tmp/etc/centreon-engine/config0/tags.cfg +cfg_file=/tmp/etc/centreon-engine/config0/escalations.cfg +cfg_file=/tmp/etc/centreon-engine/config0/servicegroups.cfg +cfg_file=/tmp/etc/centreon-engine/config0/contactgroups.cfg +cfg_file=/tmp/etc/centreon-engine/config0/contacts.cfg +cfg_file=/tmp/etc/centreon-engine/config0/hosts.cfg +cfg_file=/tmp/etc/centreon-engine/config0/services.cfg +cfg_file=/tmp/etc/centreon-engine/config0/commands.cfg +#cfg_file=/tmp/etc/centreon-engine/config0/contactgroups.cfg +#cfg_file=/tmp/etc/centreon-engine/config0/contacts.cfg +cfg_file=/tmp/etc/centreon-engine/config0/hostgroups.cfg +cfg_file=/tmp/etc/centreon-engine/config0/timeperiods.cfg +#cfg_file=/tmp/etc/centreon-engine/config0/escalations.cfg +#cfg_file=/tmp/etc/centreon-engine/config0/dependencies.cfg +cfg_file=/tmp/etc/centreon-engine/config0/connectors.cfg +#cfg_file=/tmp/etc/centreon-engine/config0/meta_commands.cfg +#cfg_file=/tmp/etc/centreon-engine/config0/meta_timeperiod.cfg +#cfg_file=/tmp/etc/centreon-engine/config0/meta_host.cfg +#cfg_file=/tmp/etc/centreon-engine/config0/meta_services.cfg +broker_module=/usr/lib64/centreon-engine/externalcmd.so +broker_module=/usr/lib64/nagios/cbmod.so /tmp/etc/centreon-broker/central-module0.json +interval_length=60 +use_timezone=:Europe/Paris +resource_file=/tmp/etc/centreon-engine/config0/resource.cfg +log_file=/tmp/var/log/centreon-engine/config0/centengine.log +status_file=/tmp/var/log/centreon-engine/config0/status.dat +command_check_interval=1s +command_file=/tmp/var/lib/centreon-engine/config0/rw/centengine.cmd +state_retention_file=/tmp/var/log/centreon-engine/config0/retention.dat +retention_update_interval=60 +sleep_time=0.2 +service_inter_check_delay_method=s +service_interleave_factor=s +max_concurrent_checks=400 +max_service_check_spread=5 +check_result_reaper_frequency=5 +low_service_flap_threshold=25.0 +high_service_flap_threshold=50.0 +low_host_flap_threshold=25.0 +high_host_flap_threshold=50.0 +service_check_timeout=10 +host_check_timeout=12 +event_handler_timeout=30 +notification_timeout=30 +ocsp_timeout=5 +ochp_timeout=5 +perfdata_timeout=5 +date_format=euro +illegal_object_name_chars=~!$%^&*"|'<>?,()= +illegal_macro_output_chars=`~$^&"|'<> +admin_email=titus@bidibule.com +admin_pager=admin +event_broker_options=-1 +cached_host_check_horizon=60 +debug_file=/tmp/var/log/centreon-engine/config0/centengine.debug 
+debug_level=0 +debug_verbosity=2 +log_pid=1 +macros_filter=KEY80,KEY81,KEY82,KEY83,KEY84 +enable_macros_filter=0 +rpc_port=50001 +postpone_notification_to_timeperiod=0 +instance_heartbeat_interval=30 +enable_notifications=1 +execute_service_checks=1 +accept_passive_service_checks=1 +enable_event_handlers=1 +check_external_commands=1 +use_retained_program_state=1 +use_retained_scheduling_info=1 +use_syslog=0 +log_notifications=1 +log_service_retries=1 +log_host_retries=1 +log_event_handlers=1 +log_external_commands=1 +log_v2_enabled=1 +log_legacy_enabled=0 +log_file_line=1 +log_v2_logger=file +log_level_functions=trace +log_level_config=info +log_level_events=info +log_level_checks=info +log_level_notifications=info +log_level_eventbroker=info +log_level_external_command=trace +log_level_commands=info +log_level_downtimes=trace +log_level_comments=info +log_level_macros=info +log_level_process=info +log_level_runtime=info +log_level_otl=trace +log_flush_period=0 +soft_state_dependencies=0 +obsess_over_services=0 +process_performance_data=0 +check_for_orphaned_services=0 +check_for_orphaned_hosts=0 +check_service_freshness=1 +enable_flap_detection=0 diff --git a/common/tests/cfg_files/config0/commands.cfg b/common/tests/cfg_files/config0/commands.cfg new file mode 100644 index 00000000000..0b1c8bcd0ee --- /dev/null +++ b/common/tests/cfg_files/config0/commands.cfg @@ -0,0 +1,258 @@ +define command { + command_name command_1 + command_line /tmp/var/lib/centreon-engine/check.pl --id 1 +} +define command { + command_name command_2 + command_line /tmp/var/lib/centreon-engine/check.pl --id 2 + connector Perl Connector +} +define command { + command_name command_3 + command_line /tmp/var/lib/centreon-engine/check.pl --id 3 +} +define command { + command_name command_4 + command_line /tmp/var/lib/centreon-engine/check.pl --id 4 + connector Perl Connector +} +define command { + command_name command_5 + command_line /tmp/var/lib/centreon-engine/check.pl --id 5 +} +define command { + command_name command_6 + command_line /tmp/var/lib/centreon-engine/check.pl --id 6 + connector Perl Connector +} +define command { + command_name command_7 + command_line /tmp/var/lib/centreon-engine/check.pl --id 7 +} +define command { + command_name command_8 + command_line /tmp/var/lib/centreon-engine/check.pl --id 8 + connector Perl Connector +} +define command { + command_name command_9 + command_line /tmp/var/lib/centreon-engine/check.pl --id 9 +} +define command { + command_name command_10 + command_line /tmp/var/lib/centreon-engine/check.pl --id 10 + connector Perl Connector +} +define command { + command_name command_11 + command_line /tmp/var/lib/centreon-engine/check.pl --id 11 +} +define command { + command_name command_12 + command_line /tmp/var/lib/centreon-engine/check.pl --id 12 + connector Perl Connector +} +define command { + command_name command_13 + command_line /tmp/var/lib/centreon-engine/check.pl --id 13 +} +define command { + command_name command_14 + command_line /tmp/var/lib/centreon-engine/check.pl --id 14 + connector Perl Connector +} +define command { + command_name command_15 + command_line /tmp/var/lib/centreon-engine/check.pl --id 15 +} +define command { + command_name command_16 + command_line /tmp/var/lib/centreon-engine/check.pl --id 16 + connector Perl Connector +} +define command { + command_name command_17 + command_line /tmp/var/lib/centreon-engine/check.pl --id 17 +} +define command { + command_name command_18 + command_line /tmp/var/lib/centreon-engine/check.pl --id 18 + connector 
Perl Connector +} +define command { + command_name command_19 + command_line /tmp/var/lib/centreon-engine/check.pl --id 19 +} +define command { + command_name command_20 + command_line /tmp/var/lib/centreon-engine/check.pl --id 20 + connector Perl Connector +} +define command { + command_name command_21 + command_line /tmp/var/lib/centreon-engine/check.pl --id 21 +} +define command { + command_name command_22 + command_line /tmp/var/lib/centreon-engine/check.pl --id 22 + connector Perl Connector +} +define command { + command_name command_23 + command_line /tmp/var/lib/centreon-engine/check.pl --id 23 +} +define command { + command_name command_24 + command_line /tmp/var/lib/centreon-engine/check.pl --id 24 + connector Perl Connector +} +define command { + command_name command_25 + command_line /tmp/var/lib/centreon-engine/check.pl --id 25 +} +define command { + command_name command_26 + command_line /tmp/var/lib/centreon-engine/check.pl --id 26 + connector Perl Connector +} +define command { + command_name command_27 + command_line /tmp/var/lib/centreon-engine/check.pl --id 27 +} +define command { + command_name command_28 + command_line /tmp/var/lib/centreon-engine/check.pl --id 28 + connector Perl Connector +} +define command { + command_name command_29 + command_line /tmp/var/lib/centreon-engine/check.pl --id 29 +} +define command { + command_name command_30 + command_line /tmp/var/lib/centreon-engine/check.pl --id 30 + connector Perl Connector +} +define command { + command_name command_31 + command_line /tmp/var/lib/centreon-engine/check.pl --id 31 +} +define command { + command_name command_32 + command_line /tmp/var/lib/centreon-engine/check.pl --id 32 + connector Perl Connector +} +define command { + command_name command_33 + command_line /tmp/var/lib/centreon-engine/check.pl --id 33 +} +define command { + command_name command_34 + command_line /tmp/var/lib/centreon-engine/check.pl --id 34 + connector Perl Connector +} +define command { + command_name command_35 + command_line /tmp/var/lib/centreon-engine/check.pl --id 35 +} +define command { + command_name command_36 + command_line /tmp/var/lib/centreon-engine/check.pl --id 36 + connector Perl Connector +} +define command { + command_name command_37 + command_line /tmp/var/lib/centreon-engine/check.pl --id 37 +} +define command { + command_name command_38 + command_line /tmp/var/lib/centreon-engine/check.pl --id 38 + connector Perl Connector +} +define command { + command_name command_39 + command_line /tmp/var/lib/centreon-engine/check.pl --id 39 +} +define command { + command_name command_40 + command_line /tmp/var/lib/centreon-engine/check.pl --id 40 + connector Perl Connector +} +define command { + command_name command_41 + command_line /tmp/var/lib/centreon-engine/check.pl --id 41 +} +define command { + command_name command_42 + command_line /tmp/var/lib/centreon-engine/check.pl --id 42 + connector Perl Connector +} +define command { + command_name command_43 + command_line /tmp/var/lib/centreon-engine/check.pl --id 43 +} +define command { + command_name command_44 + command_line /tmp/var/lib/centreon-engine/check.pl --id 44 + connector Perl Connector +} +define command { + command_name command_45 + command_line /tmp/var/lib/centreon-engine/check.pl --id 45 +} +define command { + command_name command_46 + command_line /tmp/var/lib/centreon-engine/check.pl --id 46 + connector Perl Connector +} +define command { + command_name command_47 + command_line /tmp/var/lib/centreon-engine/check.pl --id 47 +} +define command { + 
command_name command_48 + command_line /tmp/var/lib/centreon-engine/check.pl --id 48 + connector Perl Connector +} +define command { + command_name command_49 + command_line /tmp/var/lib/centreon-engine/check.pl --id 49 +} +define command { + command_name command_50 + command_line /tmp/var/lib/centreon-engine/check.pl --id 50 + connector Perl Connector +} +define command { + command_name checkh1 + command_line /tmp/var/lib/centreon-engine/check.pl --id 0 +} +define command { + command_name checkh2 + command_line /tmp/var/lib/centreon-engine/check.pl --id 0 +} +define command { + command_name checkh3 + command_line /tmp/var/lib/centreon-engine/check.pl --id 0 +} +define command { + command_name checkh4 + command_line /tmp/var/lib/centreon-engine/check.pl --id 0 +} +define command { + command_name checkh5 + command_line /tmp/var/lib/centreon-engine/check.pl --id 0 +} +define command { + command_name notif + command_line /tmp/var/lib/centreon-engine/notif.pl +} +define command { + command_name test-notif + command_line /tmp/var/lib/centreon-engine/notif.pl +} +define command { + command_name command_notif + command_line /usr/bin/true + } + \ No newline at end of file diff --git a/common/tests/cfg_files/config0/connectors.cfg b/common/tests/cfg_files/config0/connectors.cfg new file mode 100644 index 00000000000..02f7d292117 --- /dev/null +++ b/common/tests/cfg_files/config0/connectors.cfg @@ -0,0 +1,9 @@ +define connector { + connector_name Perl Connector + connector_line /usr/lib64/centreon-connector/centreon_connector_perl --debug --log-file=/tmp/var/log/centreon-engine/config0/connector_perl.log +} + +define connector { + connector_name SSH Connector + connector_line /usr/lib64/centreon-connector/centreon_connector_ssh --debug --log-file=/tmp/var/log/centreon-engine/config0/connector_ssh.log +} diff --git a/common/tests/cfg_files/config0/contactgroups.cfg b/common/tests/cfg_files/config0/contactgroups.cfg new file mode 100644 index 00000000000..eb61f77968b --- /dev/null +++ b/common/tests/cfg_files/config0/contactgroups.cfg @@ -0,0 +1,13 @@ +#contactgroups.cfg +define contactgroup { + contactgroup_name contactgroup_1 + members U2 + contactgroup_members contactgroup_2 + alias contactgroup_1 + members U3 +} +define contactgroup { + contactgroup_name contactgroup_2 + alias contactgroup_2 + members U4 +} diff --git a/common/tests/cfg_files/config0/contacts.cfg b/common/tests/cfg_files/config0/contacts.cfg new file mode 100644 index 00000000000..ee585dfcb10 --- /dev/null +++ b/common/tests/cfg_files/config0/contacts.cfg @@ -0,0 +1,93 @@ +define contact { + contact_name John_Doe + _SNMPCOMMUNITY public + address6 dummy_address_6 + address5 dummy_address_5 + address4 dummy_address_4 + address3 dummy_address_3 + address2 dummy_address_2 + address1 dummy_address_1 + timezone GMT+01 + retain_nonstatus_information 1 + retain_status_information 1 + can_submit_commands 1 + service_notifications_enabled 1 + host_notifications_enabled 1 + service_notification_options none + host_notification_options none + service_notification_commands command_notif + service_notification_period workhours + host_notification_commands command_notif + host_notification_period workhours + pager John_Doepager + email John_Doe@gmail.com + contact_groups contactgroup_1 + alias John_Doe_alias + service_notification_commands command_notif + host_notification_commands command_notif + alias admin + email admin@admin.tld + host_notification_period 24x7 + service_notification_period 24x7 + host_notification_options d,u,r,f,s + 
service_notification_options w,c,r + register 1 + host_notifications_enabled 1 + service_notifications_enabled 1 +} +define contact { + contact_name U1 + alias U1 + email U1@gmail.com + host_notification_period 24x7 + service_notification_period 24x7 + host_notification_options d,u,r,f,s + service_notification_options w,u,c,r,f,s + register 1 + host_notifications_enabled 1 + service_notifications_enabled 1 + service_notification_commands command_notif + host_notification_commands command_notif +} +define contact { + contact_name U2 + alias U2 + email U2@gmail.com + host_notification_period 24x7 + service_notification_period 24x7 + host_notification_options d,u,r,f,s + service_notification_options w,u,c,r,f,s + register 1 + host_notifications_enabled 1 + service_notifications_enabled 1 + service_notification_commands command_notif + host_notification_commands command_notif +} +define contact { + contact_name U3 + alias U3 + email U3@gmail.com + host_notification_period 24x7 + service_notification_period 24x7 + host_notification_options d,u,r,f,s + service_notification_options w,u,c,r,f,s + register 1 + host_notifications_enabled 1 + service_notifications_enabled 1 + service_notification_commands command_notif + host_notification_commands command_notif +} +define contact { + contact_name U4 + alias U4 + email U4@gmail.com + host_notification_period 24x7 + service_notification_period 24x7 + host_notification_options d,u,r,f,s + service_notification_options w,u,c,r,f,s + register 1 + host_notifications_enabled 1 + service_notifications_enabled 1 + service_notification_commands command_notif + host_notification_commands command_notif +} diff --git a/common/tests/cfg_files/config0/dependencies.cfg b/common/tests/cfg_files/config0/dependencies.cfg new file mode 100644 index 00000000000..e86fc304f47 --- /dev/null +++ b/common/tests/cfg_files/config0/dependencies.cfg @@ -0,0 +1,22 @@ +#dependencies.cfg +define hostdependency { + ;dependency_name HD_test2 + execution_failure_criteria n + notification_failure_criteria d + inherits_parent 1 + dependent_host_name host_4 + host_name host_5 + + } + define servicedependency { + ;dependency_name HD_test + execution_failure_criteria n + notification_failure_criteria c + inherits_parent 1 + dependent_host_name host_3 + host_name host_4 + dependent_service_description service_11 + service_description service_16 + + } + \ No newline at end of file diff --git a/common/tests/cfg_files/config0/escalations.cfg b/common/tests/cfg_files/config0/escalations.cfg new file mode 100644 index 00000000000..ec7b9cdfa18 --- /dev/null +++ b/common/tests/cfg_files/config0/escalations.cfg @@ -0,0 +1,22 @@ +define serviceescalation { + ;escalation_name esc1 + hostgroup hostgroup_1 + notification_interval 1 + last_notification 2 + first_notification 2 + escalation_period 24x7 + escalation_options w,c,r +servicegroup_name servicegroup_1 + contact_groups contactgroup_1 + } + define hostescalation { + ;escalation_name esc2 + notification_interval 1 + last_notification 2 + first_notification 2 + escalation_period 24x7 + escalation_options all +hostgroup_name hostgroup_1 + contact_groups contactgroup_1 + } + \ No newline at end of file diff --git a/common/tests/cfg_files/config0/hostgroups.cfg b/common/tests/cfg_files/config0/hostgroups.cfg new file mode 100644 index 00000000000..a9c6aff6f39 --- /dev/null +++ b/common/tests/cfg_files/config0/hostgroups.cfg @@ -0,0 +1,12 @@ +define hostgroup { + hostgroup_id 1 + hostgroup_name hostgroup_1 + alias hostgroup_1 + members host_2,host_3 +} 
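Note: the hostgroup fixtures in this file are exercised by the Pb_Expand tests added later in this diff. host_1 declares "hostgroups hostgroup_2" in hosts.cfg, so after expansion the test expects hostgroup_2 to list host_1 next to host_4 and host_5. A minimal sketch of that expectation, checked directly against the protobuf state inside the Pb_Expand.host test rather than through the JSON dump the test actually uses; pb_config.hostgroups() and hostgroup_name() appear in the test code, while members().data() is an assumed accessor mirroring the "members"/"data" JSON fields:

    // Sketch only: hg.members().data() is assumed to mirror the
    // "members": {"data": [...]} layout seen in the JSON output.
    bool host1_added = false;
    for (const auto& hg : pb_config.hostgroups()) {
      if (hg.hostgroup_name() == "hostgroup_2") {
        for (const auto& m : hg.members().data())
          if (m == "host_1")
            host1_added = true;
      }
    }
    ASSERT_TRUE(host1_added) << "host_1 not folded into hostgroup_2 by expand";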
+define hostgroup { + hostgroup_id 2 + hostgroup_name hostgroup_2 + alias hostgroup_2 + members host_4,host_5 +} diff --git a/common/tests/cfg_files/config0/hosts.cfg b/common/tests/cfg_files/config0/hosts.cfg new file mode 100644 index 00000000000..2191bc5c005 --- /dev/null +++ b/common/tests/cfg_files/config0/hosts.cfg @@ -0,0 +1,110 @@ +define host { + host_name host_1 + category_tags 7 + group_tags 6 + _KEY3 VAL3 + _KEY2 VAL2 + _KEY1 VAL1 + icon_id 15 + severity_id 2 + timezone GMT+01 + retain_nonstatus_information 1 + retain_status_information 1 + obsess_over_host 1 + 3d_coords 3.5,400,500 + 2d_coords 50,50 + process_perf_data 0 + stalking_options none + first_notification_delay 6 + notification_interval 8 + notifications_enabled 0 + notification_options none + flap_detection_options none + flap_detection_enabled 1 + high_flap_threshold 15 + low_flap_threshold 53 + freshness_threshold 14 + check_freshness 0 + event_handler_enabled 1 + passive_checks_enabled 1 + active_checks_enabled 1 + max_check_attempts 8 + recovery_notification_delay 2 + retry_interval 2 + check_interval 3 + statusmap_image statusmap_image + gd2_image gd2_image + vrml_image vrml_image + icon_image_alt icon_image_alt + icon_image icon_image + action_url action_url + notes_url notes_url + notes notes + event_handler command_1 + check_period none + notification_period none + contacts U1 + contact_groups contactgroup_2 + hostgroups hostgroup_2 + parents host_3 + address 127.0.0.1 + acknowledgement_timeout 5 + alias host_1_alias + alias host_1 + address 1.0.0.0 + check_command checkh1 + check_period 24x7 + register 1 + _KEY1 VAL1 + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 1 +} +define host { + host_name host_2 + alias host_2 + address 2.0.0.0 + check_command checkh2 + check_period 24x7 + register 1 + _KEY2 VAL2 + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 2 +} +define host { + host_name host_3 + alias host_3 + address 3.0.0.0 + check_command checkh3 + check_period 24x7 + register 1 + _KEY3 VAL3 + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 3 +} +define host { + host_name host_4 + alias host_4 + address 4.0.0.0 + check_command checkh4 + check_period 24x7 + register 1 + _KEY4 VAL4 + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 4 +} +define host { + host_name host_5 + alias host_5 + address 5.0.0.0 + check_command checkh5 + check_period 24x7 + register 1 + _KEY5 VAL5 + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 5 +} diff --git a/common/tests/cfg_files/config0/resource.cfg b/common/tests/cfg_files/config0/resource.cfg new file mode 100644 index 00000000000..74b4b75a194 --- /dev/null +++ b/common/tests/cfg_files/config0/resource.cfg @@ -0,0 +1,2 @@ +$USER1$=/usr/lib64/nagios/plugins +$CENTREONPLUGINS$=/usr/lib/centreon/plugins \ No newline at end of file diff --git a/common/tests/cfg_files/config0/servicegroups.cfg b/common/tests/cfg_files/config0/servicegroups.cfg new file mode 100644 index 00000000000..b3637c2cb08 --- /dev/null +++ b/common/tests/cfg_files/config0/servicegroups.cfg @@ -0,0 +1,12 @@ +define servicegroup { + servicegroup_id 1 + servicegroup_name servicegroup_1 + alias servicegroup_1 + members host_3,service_11,host_3,service_12 +} +define servicegroup { + servicegroup_id 2 + servicegroup_name servicegroup_2 + alias servicegroup_2 + members host_2,service_9,host_2,service_10 +} diff --git a/common/tests/cfg_files/config0/services.cfg b/common/tests/cfg_files/config0/services.cfg new file mode 100644 index 00000000000..20f1124cb8c --- /dev/null +++ 
b/common/tests/cfg_files/config0/services.cfg @@ -0,0 +1,391 @@ +define service { + host_name host_1 + service_description service_1 + _SNMPCOMMUNITY public + icon_id 1 + category_tags 2 + severity_id 11 + timezone GMT+01 + retain_nonstatus_information 1 + retain_status_information 1 + process_perf_data 1 + stalking_options all + first_notification_delay 3 + notification_interval 6 + notifications_enabled 1 + notification_options all + flap_detection_options all + flap_detection_enabled 1 + high_flap_threshold 126 + low_flap_threshold 83 + freshness_threshold 123 + check_freshness 1 + event_handler_enabled 1 + obsess_over_service 1 + is_volatile 1 + passive_checks_enabled 1 + active_checks_enabled 1 + recovery_notification_delay 1 + retry_interval 1 + check_interval 2 + max_check_attempts 4 + icon_image_alt icon_image_alt + icon_image icon_image + action_url action_url + notes_url note_url + notes note + contacts John_Doe + contact_groups contactgroup_1 + notification_period workhours + event_handler command_notif + check_period workhours + check_command checkh2 + service_groups servicegroup_1 + acknowledgement_timeout 10 + _SERVICE_ID 1 + check_command command_39 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV1_1 VAL_SERV1 +} +define service { + host_name host_1 + service_description service_2 + _SERVICE_ID 2 + check_command command_49 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV1_2 VAL_SERV2 +} +define service { + host_name host_1 + service_description service_3 + _SERVICE_ID 3 + check_command command_35 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV1_3 VAL_SERV3 +} +define service { + host_name host_1 + service_description service_4 + _SERVICE_ID 4 + check_command command_10 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV1_4 VAL_SERV4 +} +define service { + host_name host_1 + service_description service_5 + _SERVICE_ID 5 + check_command command_36 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV1_5 VAL_SERV5 +} +define service { + host_name host_2 + service_description service_6 + _SERVICE_ID 6 + check_command command_28 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV2_6 VAL_SERV6 +} +define service { + host_name host_2 + service_description service_7 + _SERVICE_ID 7 + check_command command_6 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV2_7 VAL_SERV7 +} +define service { + host_name host_2 + service_description service_8 + _SERVICE_ID 8 + check_command command_38 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV2_8 VAL_SERV8 +} +define service { + host_name host_2 + service_description service_9 + _SERVICE_ID 9 + check_command command_45 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + 
retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV2_9 VAL_SERV9 +} +define service { + host_name host_2 + service_description service_10 + _SERVICE_ID 10 + check_command command_36 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV2_10 VAL_SERV10 +} +define service { + host_name host_3 + service_description service_11 + _SERVICE_ID 11 + check_command command_43 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV3_11 VAL_SERV11 +} +define service { + host_name host_3 + service_description service_12 + _SERVICE_ID 12 + check_command command_48 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV3_12 VAL_SERV12 +} +define service { + host_name host_3 + service_description service_13 + _SERVICE_ID 13 + check_command command_44 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV3_13 VAL_SERV13 +} +define service { + host_name host_3 + service_description service_14 + _SERVICE_ID 14 + check_command command_6 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV3_14 VAL_SERV14 +} +define service { + host_name host_3 + service_description service_15 + _SERVICE_ID 15 + check_command command_18 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV3_15 VAL_SERV15 +} +define service { + host_name host_4 + service_description service_16 + _SERVICE_ID 16 + check_command command_21 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV4_16 VAL_SERV16 +} +define service { + host_name host_4 + service_description service_17 + _SERVICE_ID 17 + check_command command_42 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV4_17 VAL_SERV17 +} +define service { + host_name host_4 + service_description service_18 + _SERVICE_ID 18 + check_command command_49 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV4_18 VAL_SERV18 +} +define service { + host_name host_4 + service_description service_19 + _SERVICE_ID 19 + check_command command_27 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV4_19 VAL_SERV19 +} +define service { + host_name host_4 + service_description service_20 + _SERVICE_ID 20 + check_command command_18 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV4_20 VAL_SERV20 +} +define service { + host_name host_5 + service_description service_21 + _SERVICE_ID 21 + check_command command_20 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + 
passive_checks_enabled 1 + _KEY_SERV5_21 VAL_SERV21 +} +define service { + host_name host_5 + service_description service_22 + _SERVICE_ID 22 + check_command command_48 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV5_22 VAL_SERV22 +} +define service { + host_name host_5 + service_description service_23 + _SERVICE_ID 23 + check_command command_10 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV5_23 VAL_SERV23 +} +define service { + host_name host_5 + service_description service_24 + _SERVICE_ID 24 + check_command command_5 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV5_24 VAL_SERV24 +} +define service { + host_name host_5 + service_description service_25 + _SERVICE_ID 25 + check_command command_10 + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 5 + register 1 + active_checks_enabled 1 + passive_checks_enabled 1 + _KEY_SERV5_25 VAL_SERV25 +} diff --git a/common/tests/cfg_files/config0/severities.cfg b/common/tests/cfg_files/config0/severities.cfg new file mode 100644 index 00000000000..fa5ba2e0c43 --- /dev/null +++ b/common/tests/cfg_files/config0/severities.cfg @@ -0,0 +1,140 @@ +define severity { + id 1 + severity_name severity1 + level 1 + icon_id 5 + type service +} +define severity { + id 2 + severity_name severity2 + level 2 + icon_id 4 + type host +} +define severity { + id 3 + severity_name severity3 + level 3 + icon_id 3 + type service +} +define severity { + id 4 + severity_name severity4 + level 4 + icon_id 2 + type host +} +define severity { + id 5 + severity_name severity5 + level 5 + icon_id 1 + type service +} +define severity { + id 6 + severity_name severity6 + level 1 + icon_id 5 + type host +} +define severity { + id 7 + severity_name severity7 + level 2 + icon_id 4 + type service +} +define severity { + id 8 + severity_name severity8 + level 3 + icon_id 3 + type host +} +define severity { + id 9 + severity_name severity9 + level 4 + icon_id 2 + type service +} +define severity { + id 10 + severity_name severity10 + level 5 + icon_id 1 + type host +} +define severity { + id 11 + severity_name severity11 + level 1 + icon_id 5 + type service +} +define severity { + id 12 + severity_name severity12 + level 2 + icon_id 4 + type host +} +define severity { + id 13 + severity_name severity13 + level 3 + icon_id 3 + type service +} +define severity { + id 14 + severity_name severity14 + level 4 + icon_id 2 + type host +} +define severity { + id 15 + severity_name severity15 + level 5 + icon_id 1 + type service +} +define severity { + id 16 + severity_name severity16 + level 1 + icon_id 5 + type host +} +define severity { + id 17 + severity_name severity17 + level 2 + icon_id 4 + type service +} +define severity { + id 18 + severity_name severity18 + level 3 + icon_id 3 + type host +} +define severity { + id 19 + severity_name severity19 + level 4 + icon_id 2 + type service +} +define severity { + id 20 + severity_name severity20 + level 5 + icon_id 1 + type host +} diff --git a/common/tests/cfg_files/config0/tags.cfg b/common/tests/cfg_files/config0/tags.cfg new file mode 100644 index 00000000000..4941ba667ea --- /dev/null +++ b/common/tests/cfg_files/config0/tags.cfg @@ -0,0 +1,200 @@ +define tag { + id 1 + tag_name tag1 
+ type servicegroup +} +define tag { + id 1 + tag_name tag2 + type hostgroup +} +define tag { + id 1 + tag_name tag3 + type servicecategory +} +define tag { + id 1 + tag_name tag4 + type hostcategory +} +define tag { + id 2 + tag_name tag5 + type servicegroup +} +define tag { + id 2 + tag_name tag6 + type hostgroup +} +define tag { + id 2 + tag_name tag7 + type servicecategory +} +define tag { + id 2 + tag_name tag8 + type hostcategory +} +define tag { + id 3 + tag_name tag9 + type servicegroup +} +define tag { + id 3 + tag_name tag10 + type hostgroup +} +define tag { + id 3 + tag_name tag11 + type servicecategory +} +define tag { + id 3 + tag_name tag12 + type hostcategory +} +define tag { + id 4 + tag_name tag13 + type servicegroup +} +define tag { + id 4 + tag_name tag14 + type hostgroup +} +define tag { + id 4 + tag_name tag15 + type servicecategory +} +define tag { + id 4 + tag_name tag16 + type hostcategory +} +define tag { + id 5 + tag_name tag17 + type servicegroup +} +define tag { + id 5 + tag_name tag18 + type hostgroup +} +define tag { + id 5 + tag_name tag19 + type servicecategory +} +define tag { + id 5 + tag_name tag20 + type hostcategory +} +define tag { + id 6 + tag_name tag21 + type servicegroup +} +define tag { + id 6 + tag_name tag22 + type hostgroup +} +define tag { + id 6 + tag_name tag23 + type servicecategory +} +define tag { + id 6 + tag_name tag24 + type hostcategory +} +define tag { + id 7 + tag_name tag25 + type servicegroup +} +define tag { + id 7 + tag_name tag26 + type hostgroup +} +define tag { + id 7 + tag_name tag27 + type servicecategory +} +define tag { + id 7 + tag_name tag28 + type hostcategory +} +define tag { + id 8 + tag_name tag29 + type servicegroup +} +define tag { + id 8 + tag_name tag30 + type hostgroup +} +define tag { + id 8 + tag_name tag31 + type servicecategory +} +define tag { + id 8 + tag_name tag32 + type hostcategory +} +define tag { + id 9 + tag_name tag33 + type servicegroup +} +define tag { + id 9 + tag_name tag34 + type hostgroup +} +define tag { + id 9 + tag_name tag35 + type servicecategory +} +define tag { + id 9 + tag_name tag36 + type hostcategory +} +define tag { + id 10 + tag_name tag37 + type servicegroup +} +define tag { + id 10 + tag_name tag38 + type hostgroup +} +define tag { + id 10 + tag_name tag39 + type servicecategory +} +define tag { + id 10 + tag_name tag40 + type hostcategory +} diff --git a/common/tests/cfg_files/config0/timeperiods.cfg b/common/tests/cfg_files/config0/timeperiods.cfg new file mode 100644 index 00000000000..27b053b01d8 --- /dev/null +++ b/common/tests/cfg_files/config0/timeperiods.cfg @@ -0,0 +1,41 @@ +define timeperiod { + name 24x7 + timeperiod_name 24x7 + alias 24_Hours_A_Day,_7_Days_A_Week + sunday 00:00-24:00 + monday 00:00-24:00 + tuesday 00:00-24:00 + wednesday 00:00-24:00 + thursday 00:00-24:00 + friday 00:00-24:00 + saturday 00:00-24:00 +} +define timeperiod { + name 24x6 + timeperiod_name 24x6 + alias 24_Hours_A_Day,_7_Days_A_Week + sunday 00:00-24:00 + monday 00:00-24:00 + tuesday 00:00-24:00 + wednesday 00:00-24:00 + thursday 00:00-24:00 + friday 00:00-24:00 + saturday 00:00-24:00 +} +define timeperiod { + name none + timeperiod_name none + alias Never +} +define timeperiod { + name workhours + timeperiod_name workhours + alias Work Hours + sunday 09:00-12:00,14:00-18:00 + monday 09:00-12:00,14:00-18:00 + tuesday 09:00-12:00,14:00-18:00 + wednesday 09:00-12:00,14:00-18:00 + thursday 09:00-12:00,14:00-18:00 + friday 09:00-12:00,14:00-18:00 + saturday 09:00-12:00,14:00-18:00 +} diff 
--git a/common/tests/crypto/base64.cc b/common/tests/crypto/base64.cc new file mode 100644 index 00000000000..a6808a8009b --- /dev/null +++ b/common/tests/crypto/base64.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include "common/crypto/base64.hh" + +using namespace com::centreon::common::crypto; + +TEST(StringBase64, Encode) { + ASSERT_EQ(base64_encode("A first little attempt."), + "QSBmaXJzdCBsaXR0bGUgYXR0ZW1wdC4="); + ASSERT_EQ(base64_encode("A"), "QQ=="); + ASSERT_EQ(base64_encode("AB"), "QUI="); + ASSERT_EQ(base64_encode("ABC"), "QUJD"); +} + +TEST(StringBase64, Decode) { + ASSERT_EQ( + base64_decode(base64_encode("A first little attempt.")), + "A first little attempt."); + ASSERT_EQ( + base64_decode(base64_encode( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")), + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"); + ASSERT_EQ(base64_decode(base64_encode("a")), "a"); + ASSERT_EQ(base64_decode(base64_encode("ab")), "ab"); + ASSERT_EQ(base64_decode(base64_encode("abc")), "abc"); + std::string str("告'警'数\\量"); + ASSERT_EQ(base64_decode(base64_encode(str)), str); +} diff --git a/common/tests/engine_conf/expand_conf.cc b/common/tests/engine_conf/expand_conf.cc new file mode 100644 index 00000000000..d4b1aba2a2e --- /dev/null +++ b/common/tests/engine_conf/expand_conf.cc @@ -0,0 +1,673 @@ +/** + * Copyright 2017 - 2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include +#include +#include + +#include "common/engine_conf/parser.hh" +#include "common/engine_conf/state.pb.h" +#include "common/engine_conf/state_helper.hh" + +#include "common/log_v2/log_v2.hh" + +#define CONFIG_PATH "./tests/config0/" + +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace rapidjson; +namespace fs = std::filesystem; + +static void RmConf() { + std::filesystem::remove_all("/tmp/etc/centreon-engine/config0"); +} + +static void CreateConf() { + if (!fs::exists("/tmp/etc/centreon-engine/config0/")) { + fs::create_directories("/tmp/etc/centreon-engine/config0/"); + } + + constexpr const char* cmd1 = + "for i in " COMMON_CFG_TEST + "/config0/*.cfg ; do cp $i /tmp/etc/centreon-engine/config0/ ; done"; + system(cmd1); +} + +class Pb_Expand : public ::testing::Test { + public: + static void SetUpTestSuite() { + com::centreon::common::log_v2::log_v2::load("expand-tests"); + CreateConf(); + } + static void TearDownTestSuite() { + RmConf(); + com::centreon::common::log_v2::log_v2::unload(); + std::cout << "Directories deleted: " << std::endl; + } + void SetUp() override {} + void TearDown() override {} +}; + +TEST_F(Pb_Expand, host) { + configuration::State pb_config; + configuration::state_helper state_hlp(&pb_config); + configuration::error_cnt err; + configuration::parser p; + + p.parse("/tmp/etc/centreon-engine/config0/centengine.cfg", &pb_config, err); + + absl::flat_hash_map m_hostgroups; + for (auto& hg : *pb_config.mutable_hostgroups()) { + m_hostgroups.emplace(hg.hostgroup_name(), &hg); + } + state_hlp.expand(err); + + google::protobuf::util::JsonPrintOptions options; + options.always_print_primitive_fields = true; + std::string json_output; + google::protobuf::util::MessageToJsonString(pb_config, &json_output, options); + + rapidjson::Document doc1; + + if (doc1.Parse(json_output.c_str()).HasParseError()) { + throw std::runtime_error("Error parsing JSON."); + } + if (!doc1.HasMember("hosts") || !doc1["hosts"].IsArray() || + doc1["hosts"].Empty()) { + throw std::runtime_error("Missing or invalid 'hosts' field."); + } + if (!doc1["hosts"][0].HasMember("hostId")) { + throw std::runtime_error("Missing 'hostId' field."); + } + if (!doc1["hosts"][0].HasMember("hostName")) { + throw std::runtime_error("Missing 'hostName' field."); + } + if (!doc1["hosts"][0].HasMember("customvariables") || + !doc1["hosts"][0]["customvariables"].IsArray()) { + throw std::runtime_error("Missing or invalid 'customvariables' field."); + } + if (!doc1.HasMember("hostgroups") || !doc1["hostgroups"].IsArray() || + doc1["hostgroups"].Size() <= 1) { + throw std::runtime_error("Missing or invalid 'hostgroups' field."); + } + if (!doc1["hostgroups"][1].HasMember("hostgroupId")) { + throw std::runtime_error("Missing 'hostgroupId' field."); + } + if (!doc1["hostgroups"][1].HasMember("hostgroupName")) { + throw std::runtime_error("Missing 'hostgroupName' field."); + } + if (!doc1["hostgroups"][1].HasMember("members") || + !doc1["hostgroups"][1]["members"].HasMember("data") || + !doc1["hostgroups"][1]["members"]["data"].IsArray()) { + throw std::runtime_error("Missing or invalid 'members' or 'data' field."); + } + + ASSERT_EQ(std::string(doc1["hosts"][0]["hostId"].GetString()), "1"); + ASSERT_EQ(std::string(doc1["hosts"][0]["hostName"].GetString()), "host_1"); + + bool found = false; + for (const auto& item : doc1["hosts"][0]["customvariables"].GetArray()) { + if 
(item["name"].GetString() == std::string("KEY3") && + item["value"].GetString() == std::string("VAL3") && + item["isSent"].GetBool() == true) { + found = true; + break; + } + } + ASSERT_TRUE(found) + << "Custom variable KEY3 with value VAL3 and isSent true not found."; + + found = false; + for (const auto& item : doc1["hosts"][0]["customvariables"].GetArray()) { + if (item["name"].GetString() == std::string("KEY2") && + item["value"].GetString() == std::string("VAL2") && + item["isSent"].GetBool() == true) { + found = true; + break; + } + } + ASSERT_TRUE(found) + << "Custom variable KEY2 with value VAL2 and isSent true not found."; + + found = false; + for (const auto& item : doc1["hosts"][0]["customvariables"].GetArray()) { + if (item["name"].GetString() == std::string("KEY1") && + item["value"].GetString() == std::string("VAL1") && + item["isSent"].GetBool() == true) { + found = true; + break; + } + } + ASSERT_TRUE(found) + << "Custom variable KEY1 with value VAL1 and isSent true not found."; + + found = false; + for (const auto& item : doc1["hosts"][0]["customvariables"].GetArray()) { + if (item["name"].GetString() == std::string("SNMPCOMMUNITY") && + item["value"].GetString() == std::string("public") && + item["isSent"].GetBool() == true) { + found = true; + break; + } + } + ASSERT_TRUE(found) << "Custom variable SNMPCOMMUNITY with value public and " + "isSent true not found."; + + found = false; + for (const auto& item : doc1["hosts"][0]["customvariables"].GetArray()) { + if (item["name"].GetString() == std::string("SNMPVERSION") && + item["value"].GetString() == std::string("2c") && + item["isSent"].GetBool() == true) { + found = true; + break; + } + } + ASSERT_TRUE(found) + << "Custom variable SNMPVERSION with value 2c and isSent true not found."; + + ASSERT_EQ(doc1["hostgroups"][1]["hostgroupId"].GetInt(), 2); + ASSERT_EQ(std::string(doc1["hostgroups"][1]["hostgroupName"].GetString()), + "hostgroup_2"); + + found = false; + // host expand add host to corresponding hostgroup + for (const auto& item : doc1["hostgroups"][1]["members"]["data"].GetArray()) { + if (item.GetString() == std::string("host_4") || + item.GetString() == std::string("host_5") || + item.GetString() == std::string("host_1")) { + found = true; + } else { + found = false; + break; + } + } + ASSERT_TRUE(found) + << "Hostgroup members data does not match expected values."; +} + +TEST_F(Pb_Expand, service) { + configuration::State pb_config; + configuration::state_helper state_hlp(&pb_config); + configuration::error_cnt err; + configuration::parser p; + + p.parse("/tmp/etc/centreon-engine/config0/centengine.cfg", &pb_config, err); + absl::flat_hash_map m_host; + for (auto& h : pb_config.hosts()) { + m_host.emplace(h.host_name(), h); + } + + absl::flat_hash_map m_servicegroups; + for (auto& sg : *pb_config.mutable_servicegroups()) + m_servicegroups.emplace(sg.servicegroup_name(), &sg); + + state_hlp.expand(err); + + google::protobuf::util::JsonPrintOptions options; + options.always_print_primitive_fields = true; + std::string json_output; + google::protobuf::util::MessageToJsonString(pb_config, &json_output, options); + + rapidjson::Document doc1; + + if (doc1.Parse(json_output.c_str()).HasParseError()) { + throw std::runtime_error("Error parsing JSON."); + } + if (!doc1.HasMember("services") || !doc1["services"].IsArray() || + doc1["services"].Empty()) { + throw std::runtime_error("Missing or invalid 'services' field."); + } + if (!doc1["services"][0].HasMember("serviceId")) { + throw std::runtime_error("Missing 
'serviceId' field."); + } + if (!doc1["services"][0].HasMember("hostId")) { + throw std::runtime_error("Missing 'hostId' field."); + } + if (!doc1["services"][0].HasMember("customvariables") || + !doc1["services"][0]["customvariables"].IsArray()) { + throw std::runtime_error("Missing or invalid 'customvariables' field."); + } + if (!doc1["services"][1].HasMember("contactgroups") || + !doc1["services"][1]["contactgroups"].HasMember("data") || + !doc1["services"][1]["contactgroups"]["data"].IsArray()) { + throw std::runtime_error( + "Missing or invalid 'contactgroups' or 'data' field."); + } + if (!doc1["services"][1].HasMember("contacts") || + !doc1["services"][1]["contacts"].HasMember("data") || + !doc1["services"][1]["contacts"]["data"].IsArray()) { + throw std::runtime_error("Missing or invalid 'contacts' or 'data' field."); + } + if (!doc1.HasMember("servicegroups") || !doc1["servicegroups"].IsArray() || + doc1["servicegroups"].Empty()) { + throw std::runtime_error("Missing or invalid 'servicegroups' field."); + } + if (!doc1["servicegroups"][0].HasMember("members") || + !doc1["servicegroups"][0]["members"].HasMember("data") || + !doc1["servicegroups"][0]["members"]["data"].IsArray()) { + throw std::runtime_error("Missing or invalid 'members' or 'data' field."); + } + if (!doc1["services"][1].HasMember("timezone")) { + throw std::runtime_error("Missing 'timezone' field."); + } + if (!doc1["services"][1].HasMember("notificationPeriod")) { + throw std::runtime_error("Missing 'notificationPeriod' field."); + } + if (!doc1["services"][1].HasMember("notificationInterval")) { + throw std::runtime_error("Missing 'notificationInterval' field."); + } + + ASSERT_EQ(std::string(doc1["services"][0]["serviceId"].GetString()), "1"); + ASSERT_EQ(std::string(doc1["services"][0]["hostId"].GetString()), "1"); + + bool found = false; + for (const auto& item : doc1["services"][0]["customvariables"].GetArray()) { + if (item["name"].GetString() == std::string("SNMPCOMMUNITY") && + item["value"].GetString() == std::string("public") && + item["isSent"].GetBool() == true) { + found = true; + break; + } + } + ASSERT_TRUE(found) << "Custom variable SNMPCOMMUNITY with value public and " + "isSent true not found."; + + found = false; + for (const auto& item : doc1["services"][0]["customvariables"].GetArray()) { + if (item["name"].GetString() == std::string("KEY_SERV1_1") && + item["value"].GetString() == std::string("VAL_SERV1") && + item["isSent"].GetBool() == true) { + found = true; + break; + } + } + ASSERT_TRUE(found) << "Custom variable KEY_SERV1_1 with value VAL_SERV1 and " + "isSent true not found."; + + ASSERT_EQ(std::string(doc1["services"][1]["serviceId"].GetString()), "2"); + ASSERT_EQ(std::string(doc1["services"][1]["hostId"].GetString()), "1"); + + found = false; + for (const auto& item : + doc1["services"][1]["contactgroups"]["data"].GetArray()) { + if (item.GetString() == std::string("contactgroup_2")) { + found = true; + break; + } + } + ASSERT_TRUE(found) + << "contactgroups members data does not match expected values."; + + found = false; + + for (const auto& item : doc1["services"][1]["contacts"]["data"].GetArray()) { + if (item.GetString() == std::string("U1")) { + found = true; + break; + } + } + ASSERT_TRUE(found) + << "contactgroups members data does not match expected values."; + + ASSERT_TRUE(!doc1["services"][1]["contactgroups"]["additive"].GetBool()); + ASSERT_TRUE(!doc1["services"][1]["contacts"]["additive"].GetBool()); + ASSERT_EQ(doc1["services"][1]["notificationInterval"].GetInt(), 
8); + + ASSERT_EQ(std::string(doc1["services"][1]["notificationPeriod"].GetString()), + "none"); + ASSERT_EQ(std::string(doc1["services"][1]["timezone"].GetString()), "GMT+01"); + + // service expand add service to corresponding SERVICEGROUP + + found = false; + // host expand add host to corresponding hostgroup + for (const auto& item : + doc1["servicegroups"][0]["members"]["data"].GetArray()) { + if (item["first"].GetString() == std::string("host_1") && + item["second"].GetString() == std::string("service_1")) { + found = true; + break; + } + } + ASSERT_TRUE(found) + << "servicegroups members data does not match expected values."; +} + +TEST_F(Pb_Expand, contact) { + configuration::State pb_config; + configuration::state_helper state_hlp(&pb_config); + configuration::error_cnt err; + configuration::parser p; + + p.parse("/tmp/etc/centreon-engine/config0/centengine.cfg", &pb_config, err); + + absl::flat_hash_map + m_contactgroups; + for (auto& cg : *pb_config.mutable_contactgroups()) { + m_contactgroups[cg.contactgroup_name()] = &cg; + } + + state_hlp.expand(err); + + google::protobuf::util::JsonPrintOptions options; + options.always_print_primitive_fields = true; + std::string json_output; + google::protobuf::util::MessageToJsonString(pb_config, &json_output, options); + + rapidjson::Document doc1; + + if (doc1.Parse(json_output.c_str()).HasParseError()) { + throw std::runtime_error("Error parsing JSON."); + } + + if (!doc1.HasMember("contacts") || !doc1["contacts"].IsArray() || + doc1["contacts"].Empty()) { + throw std::runtime_error("Missing or invalid 'contacts' field."); + } + if (!doc1["contacts"][0].HasMember("contactName")) { + throw std::runtime_error("Missing 'contactName' field."); + } + if (!doc1["contacts"][0].HasMember("customvariables") || + !doc1["contacts"][0]["customvariables"].IsArray()) { + throw std::runtime_error("Missing or invalid 'customvariables' field."); + } + if (!doc1.HasMember("contactgroups") || !doc1["contactgroups"].IsArray() || + doc1["contactgroups"].Empty()) { + throw std::runtime_error("Missing or invalid 'contactgroups' field."); + } + if (!doc1["contactgroups"][0].HasMember("members") || + !doc1["contactgroups"][0]["members"].HasMember("data") || + !doc1["contactgroups"][0]["members"]["data"].IsArray()) { + throw std::runtime_error("Missing or invalid 'members' or 'data' field."); + } + + ASSERT_EQ(std::string(doc1["contacts"][0]["contactName"].GetString()), + "John_Doe"); + + bool found = false; + for (const auto& item : doc1["contacts"][0]["customvariables"].GetArray()) { + if (item["name"].GetString() == std::string("SNMPCOMMUNITY") && + item["value"].GetString() == std::string("public") && + item["isSent"].GetBool() == true) { + found = true; + break; + } + } + ASSERT_TRUE(found) << "Custom variable SNMPCOMMUNITY with value public and " + "isSent true not found."; + found = false; + for (const auto& item : + doc1["contactgroups"][0]["members"]["data"].GetArray()) { + if (item.GetString() == std::string("John_Doe") || + item.GetString() == std::string("U2") || + item.GetString() == std::string("U3") || + item.GetString() == std::string("U4")) { + found = true; + } else { + found = false; + break; + } + } + ASSERT_TRUE(found) + << "contactgroups members data does not match expected values."; +} + +TEST_F(Pb_Expand, contactgroup) { + configuration::State pb_config; + configuration::state_helper state_hlp(&pb_config); + configuration::error_cnt err; + configuration::parser p; + + p.parse("/tmp/etc/centreon-engine/config0/centengine.cfg", &pb_config, 
err); + absl::flat_hash_map + m_contactgroups; + for (auto& cg : *pb_config.mutable_contactgroups()) { + m_contactgroups[cg.contactgroup_name()] = &cg; + } + state_hlp.expand(err); + + google::protobuf::util::JsonPrintOptions options; + options.always_print_primitive_fields = true; + std::string json_output; + google::protobuf::util::MessageToJsonString(pb_config, &json_output, options); + + rapidjson::Document doc1; + + if (doc1.Parse(json_output.c_str()).HasParseError()) { + throw std::runtime_error("Error parsing JSON."); + } + + if (!doc1["contactgroups"][0].HasMember("contactgroupName")) { + throw std::runtime_error("Missing 'contactgroupName' field."); + } + if (!doc1["contactgroups"][0].HasMember("contactgroupMembers") || + !doc1["contactgroups"][0]["contactgroupMembers"].HasMember("data") || + !doc1["contactgroups"][0]["contactgroupMembers"]["data"].IsArray()) { + throw std::runtime_error( + "Missing or invalid 'contactgroupMembers' or 'data' field."); + } + if (!doc1["contactgroups"][0].HasMember("members") || + !doc1["contactgroups"][0]["members"].HasMember("data") || + !doc1["contactgroups"][0]["members"]["data"].IsArray()) { + throw std::runtime_error("Missing or invalid 'members' or 'data' field."); + } + + ASSERT_EQ( + std::string(doc1["contactgroups"][0]["contactgroupName"].GetString()), + "contactgroup_1"); + + ASSERT_TRUE(doc1["contactgroups"][0]["contactgroupMembers"]["data"].Empty()); + + bool found = false; + for (const auto& item : + doc1["contactgroups"][0]["members"]["data"].GetArray()) { + if (item.GetString() == std::string("John_Doe") || + item.GetString() == std::string("U2") || + item.GetString() == std::string("U3") || + item.GetString() == std::string("U4")) { + found = true; + } else { + found = false; + break; + } + } + ASSERT_TRUE(found) + << "contactgroups members data does not match expected values."; +} + +TEST_F(Pb_Expand, serviceescalation) { + configuration::State pb_config; + configuration::state_helper state_hlp(&pb_config); + configuration::error_cnt err; + configuration::parser p; + + p.parse("/tmp/etc/centreon-engine/config0/centengine.cfg", &pb_config, err); + + absl::flat_hash_map m_hostgroups; + for (auto& hg : *pb_config.mutable_hostgroups()) { + m_hostgroups.emplace(hg.hostgroup_name(), &hg); + } + + absl::flat_hash_map + m_servicegroups; + for (auto& sg : *pb_config.mutable_servicegroups()) + m_servicegroups.emplace(sg.servicegroup_name(), &sg); + + state_hlp.expand(err); + + google::protobuf::util::JsonPrintOptions options; + options.always_print_primitive_fields = true; + std::string json_output; + google::protobuf::util::MessageToJsonString(pb_config, &json_output, options); + + rapidjson::Document doc1; + + if (doc1.Parse(json_output.c_str()).HasParseError()) { + throw std::runtime_error("Error parsing JSON."); + } + + if (!doc1.HasMember("serviceescalations") || + !doc1["serviceescalations"].IsArray() || + doc1["serviceescalations"].Empty()) { + throw std::runtime_error("Missing or invalid 'serviceescalations' field."); + } + if (!doc1["serviceescalations"][0].HasMember("hosts") || + !doc1["serviceescalations"][0]["hosts"].HasMember("data") || + !doc1["serviceescalations"][0]["hosts"]["data"].IsArray()) { + throw std::runtime_error("Missing 'hosts' field in serviceescalation."); + } + if (!doc1["serviceescalations"][0].HasMember("serviceDescription") || + !doc1["serviceescalations"][0]["serviceDescription"].HasMember("data") || + !doc1["serviceescalations"][0]["serviceDescription"]["data"].IsArray()) { + throw std::runtime_error( + 
"Missing 'serviceDescription' field in serviceescalation."); + } + + bool found = false; + for (const auto& item : doc1["serviceescalations"].GetArray()) { + if (item["hosts"]["data"][0].GetString() == std::string("host_3") && + item["serviceDescription"]["data"][0].GetString() == + std::string("service_11")) { + ASSERT_TRUE(item["hostgroups"]["data"].Empty()) << "Hostgroups not empty"; + ASSERT_TRUE(item["servicegroups"]["data"].Empty()) + << "Servicegroups not empty"; + found = true; + break; + } + } + ASSERT_TRUE(found) + << "Service escalation with host_3 and service_11 not found."; + + found = false; + for (const auto& item : doc1["serviceescalations"].GetArray()) { + if (item["hosts"]["data"][0].GetString() == std::string("host_3") && + item["serviceDescription"]["data"][0].GetString() == + std::string("service_12")) { + ASSERT_TRUE(item["hostgroups"]["data"].Empty()) << "Hostgroups not empty"; + ASSERT_TRUE(item["servicegroups"]["data"].Empty()) + << "Servicegroups not empty"; + found = true; + break; + } + } + ASSERT_TRUE(found) + << "Service escalation with host_3 and service_12 not found."; +} + +TEST_F(Pb_Expand, hostescalation) { + configuration::State pb_config; + configuration::state_helper state_hlp(&pb_config); + configuration::error_cnt err; + configuration::parser p; + + p.parse("/tmp/etc/centreon-engine/config0/centengine.cfg", &pb_config, err); + + absl::flat_hash_map m_hostgroups; + for (auto& hg : *pb_config.mutable_hostgroups()) { + m_hostgroups.emplace(hg.hostgroup_name(), &hg); + } + state_hlp.expand(err); + + google::protobuf::util::JsonPrintOptions options; + options.always_print_primitive_fields = true; + std::string json_output; + google::protobuf::util::MessageToJsonString(pb_config, &json_output, options); + + rapidjson::Document doc1; + + if (doc1.Parse(json_output.c_str()).HasParseError()) { + throw std::runtime_error("Error parsing JSON."); + } + + if (!doc1.HasMember("hostescalations") || + !doc1["hostescalations"].IsArray() || doc1["hostescalations"].Empty()) { + throw std::runtime_error("Missing or invalid 'hostescalations' field."); + } + if (!doc1["hostescalations"][0].HasMember("hosts") || + !doc1["hostescalations"][0]["hosts"].HasMember("data") || + !doc1["hostescalations"][0]["hosts"]["data"].IsArray()) { + throw std::runtime_error("Missing 'hosts' field in hostescalations."); + } + + bool found = false; + for (const auto& item : doc1["hostescalations"].GetArray()) { + if (item["hosts"]["data"][0].GetString() == std::string("host_3")) { + ASSERT_TRUE(item["hostgroups"]["data"].Empty()) << "Hostgroups not empty"; + found = true; + break; + } + } + ASSERT_TRUE(found) << "Host escalation with host_3 not found."; + + found = false; + for (const auto& item : doc1["hostescalations"].GetArray()) { + if (item["hosts"]["data"][0].GetString() == std::string("host_2")) { + ASSERT_TRUE(item["hostgroups"]["data"].Empty()) << "Hostgroups not empty"; + found = true; + break; + } + } + ASSERT_TRUE(found) << "Host escalation with host_2 not found."; +} + +TEST_F(Pb_Expand, anomalydetection) { + configuration::State pb_config; + configuration::state_helper state_hlp(&pb_config); + configuration::error_cnt err; + configuration::parser p; + + p.parse("/tmp/etc/centreon-engine/config0/centengine.cfg", &pb_config, err); + state_hlp.expand(err); + + google::protobuf::util::JsonPrintOptions options; + options.always_print_primitive_fields = true; + std::string json_output; + google::protobuf::util::MessageToJsonString(pb_config, &json_output, options); + + 
rapidjson::Document doc1; + + if (doc1.Parse(json_output.c_str()).HasParseError()) { + throw std::runtime_error("Error parsing JSON."); + } + + if (!doc1.HasMember("anomalydetections") || + !doc1["anomalydetections"].IsArray() || + doc1["anomalydetections"].Empty()) { + throw std::runtime_error("Missing or invalid 'anomalydetections' field."); + } + if (!doc1["anomalydetections"][0].HasMember("customvariables") || + !doc1["anomalydetections"][0]["customvariables"].IsArray()) { + throw std::runtime_error( + "Missing or invalid 'customvariables' field in anomalydetections."); + } + + bool found = false; + for (const auto& item : + doc1["anomalydetections"][0]["customvariables"].GetArray()) { + if (item["name"].GetString() == std::string("KEY1") && + item["value"].GetString() == std::string("_VAL01") && + item["isSent"].GetBool() == true) { + found = true; + break; + } + } + ASSERT_TRUE(found) + << "Custom variable KEY1 with value _VAL01 and isSent true not found."; +} \ No newline at end of file diff --git a/common/tests/engine_conf/parser.cc b/common/tests/engine_conf/parser.cc new file mode 100644 index 00000000000..52e8ceabab1 --- /dev/null +++ b/common/tests/engine_conf/parser.cc @@ -0,0 +1,28 @@ +/** + * Copyright 2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/parser.hh" +#include + +using namespace com::centreon::engine::configuration; + +class TestParser : public ::testing::Test { + public: + // void SetUp() override {} + void TearDown() override {} +}; diff --git a/common/tests/file_test.cc b/common/tests/file_test.cc new file mode 100644 index 00000000000..79597473f9f --- /dev/null +++ b/common/tests/file_test.cc @@ -0,0 +1,100 @@ +/** + * Copyright 2024 Centreon + * Licensed under the Apache License, Version 2.0(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include + +#include "file.hh" + +using namespace com::centreon::common; + +TEST(TestParser, hashDirectory_empty) { + system("mkdir -p /tmp/foo ; rm -rf /tmp/foo/*"); + system("mkdir -p /tmp/bar ; rm -rf /tmp/bar/*"); + std::error_code ec1, ec2; + std::string hash_foo = hash_directory("/tmp/foo", ec1); + std::string hash_bar = hash_directory("/tmp/bar", ec2); + ASSERT_FALSE(ec1); + ASSERT_FALSE(ec2); + ASSERT_EQ(hash_foo, hash_bar); +} + +TEST(TestParser, hashDirectory_simple) { + system( + "mkdir -p /tmp/foo ; rm -rf /tmp/foo/* ; mkdir -p /tmp/foo/a ; mkdir -p " + "/tmp/foo/b ; mkdir -p /tmp/foo/b/a ; touch /tmp/foo/b/a/foobar"); + system( + "mkdir -p /tmp/bar ; rm -rf /tmp/bar/* ; mkdir -p /tmp/bar/b ; mkdir -p " + "/tmp/bar/b/a ; touch /tmp/bar/b/a/foobar ; mkdir -p /tmp/bar/a"); + std::error_code ec1, ec2; + std::string hash_foo = hash_directory("/tmp/foo", ec1); + std::string hash_bar = hash_directory("/tmp/bar", ec2); + ASSERT_FALSE(ec1); + ASSERT_FALSE(ec2); + ASSERT_EQ(hash_foo, hash_bar); +} + +TEST(TestParser, hashDirectory_multifiles) { + system("mkdir -p /tmp/foo ; rm -rf /tmp/foo/*"); + system("mkdir -p /tmp/bar ; rm -rf /tmp/bar/*"); + for (int i = 0; i < 20; i++) { + system(fmt::format("touch /tmp/foo/file_{}", i).c_str()); + } + for (int i = 19; i >= 0; i--) { + system(fmt::format("touch /tmp/bar/file_{}", i).c_str()); + } + std::error_code ec1, ec2; + std::string hash_foo = hash_directory("/tmp/foo", ec1); + std::string hash_bar = hash_directory("/tmp/bar", ec2); + ASSERT_FALSE(ec1); + ASSERT_FALSE(ec2); + ASSERT_EQ(hash_foo, + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); + ASSERT_EQ(hash_foo, hash_bar); +} + +TEST(TestParser, hashDirectory_realSituation) { + system("rm -rf /tmp/tests_foo ; cp -rf tests /tmp/tests_foo"); + std::error_code ec1, ec2; + std::string hash = hash_directory("tests", ec1); + std::string hash1 = hash_directory("/tmp/tests_foo", ec2); + ASSERT_FALSE(ec1); + ASSERT_FALSE(ec2); + ASSERT_EQ(hash, hash1); + + // A new line added to a file. 
+ system("echo test >> /tmp/tests_foo/timeperiods.cfg"); + hash = hash_directory("tests", ec1); + hash1 = hash_directory("/tmp/tests_foo", ec2); + ASSERT_FALSE(ec1); + ASSERT_FALSE(ec2); + ASSERT_NE(hash, hash1); +} + +TEST(TestParser, hashDirectory_error) { + std::error_code ec; + std::string hash = hash_directory("/tmp/doesnotexist", ec); + ASSERT_TRUE(ec); + ASSERT_EQ(hash, ""); +} + +TEST(TestParser, with_file_error) { + std::error_code ec; + system("echo test > /tmp/my_file"); + std::string hash = hash_directory("/tmp/my_file", ec); + ASSERT_TRUE(ec); + ASSERT_EQ(hash, ""); +} diff --git a/common/tests/perfdata_test.cc b/common/tests/perfdata_test.cc index bab234f9522..c64d9fe623a 100644 --- a/common/tests/perfdata_test.cc +++ b/common/tests/perfdata_test.cc @@ -623,3 +623,18 @@ TEST_F(PerfdataParser, BadMetric1) { ++i; } } + +TEST_F(PerfdataParser, ExtractPerfdataBrackets) { + std::string perfdata( + "'xx[aa a aa]'=2;3;7;1;9 '[a aa]'=12;25;50;0;118 'aa a]'=28;13;54;0;80"); + auto lst{common::perfdata::parse_perfdata(0, 0, perfdata.c_str(), _logger)}; + auto it = lst.begin(); + ASSERT_NE(it, lst.end()); + ASSERT_EQ(it->name(), "xx[aa a aa]"); + ++it; + ASSERT_NE(it, lst.end()); + ASSERT_EQ(it->name(), "[a aa]"); + ++it; + ASSERT_NE(it, lst.end()); + ASSERT_EQ(it->name(), "aa a]"); +} diff --git a/common/tests/process_test.cc b/common/tests/process_test.cc index 325524a406a..92d1b7d25c7 100644 --- a/common/tests/process_test.cc +++ b/common/tests/process_test.cc @@ -18,16 +18,20 @@ #include #include +#include +#include #include "com/centreon/common/process/process.hh" using namespace com::centreon::common; -#ifdef _WINDOWS +#ifdef _WIN32 #define ECHO_PATH "tests\\echo.bat" +#define SLEEP_PATH "tests\\sleep.bat" #define END_OF_LINE "\r\n" #else #define ECHO_PATH "/bin/echo" +#define SLEEP_PATH "/bin/sleep" #define END_OF_LINE "\n" #endif @@ -143,7 +147,7 @@ TEST_F(process_test, throw_on_error) { TEST_F(process_test, script_error) { using namespace std::literals; -#ifdef _WINDOWS +#ifdef _WIN32 std::shared_ptr to_wait( new process_wait(g_io_context, _logger, "tests\\\\bad_script.bat")); #else @@ -188,7 +192,7 @@ TEST_F(process_test, call_start_several_time_no_args) { ASSERT_EQ(to_wait->get_stderr(), ""); } -#ifndef _WINDOWS +#ifndef _WIN32 TEST_F(process_test, stdin_to_stdout) { ::remove("toto.sh"); @@ -233,3 +237,27 @@ TEST_F(process_test, shell_stdin_to_stdout) { } #endif + +TEST_F(process_test, kill_process) { + std::shared_ptr to_wait( + new process_wait(g_io_context, _logger, SLEEP_PATH, {"10"})); + to_wait->start_process(true); + + // wait process starts + std::this_thread::sleep_for(std::chrono::seconds(1)); + int pid = to_wait->get_pid(); + // kill process + to_wait->kill(); + std::this_thread::sleep_for(std::chrono::seconds(1)); +#ifdef _WIN32 + auto process_handle = + OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid); + ASSERT_NE(process_handle, nullptr); + DWORD exit_code; + ASSERT_EQ(GetExitCodeProcess(process_handle, &exit_code), TRUE); + ASSERT_NE(exit_code, STILL_ACTIVE); + CloseHandle(process_handle); +#else + ASSERT_EQ(kill(pid, 0), -1); +#endif +} diff --git a/common/tests/rapidjson_helper_test.cc b/common/tests/rapidjson_helper_test.cc index b88afa599f9..1bbdb6fb416 100644 --- a/common/tests/rapidjson_helper_test.cc +++ b/common/tests/rapidjson_helper_test.cc @@ -30,7 +30,7 @@ using namespace com::centreon; using namespace com::centreon::common; -#ifdef _WINDOWS +#ifdef _WIN32 #define JSON_FILE_PATH "C:/Users/Public/toto.json" #else #define JSON_FILE_PATH 
"/tmp/toto.json" diff --git a/common/tests/scripts/sleep.bat b/common/tests/scripts/sleep.bat new file mode 100644 index 00000000000..0866e1576ff --- /dev/null +++ b/common/tests/scripts/sleep.bat @@ -0,0 +1,2 @@ +@echo off +ping 127.0.0.1 -n1 %~1 diff --git a/common/tests/test_main.cc b/common/tests/test_main.cc index 09955d482aa..8b3b9e8523e 100644 --- a/common/tests/test_main.cc +++ b/common/tests/test_main.cc @@ -18,6 +18,7 @@ */ #include +#include "common/log_v2/log_v2.hh" #include "pool.hh" std::shared_ptr g_io_context( diff --git a/common/tests/utf8_test.cc b/common/tests/utf8_test.cc index 98376f390ce..77dbe2e3f31 100644 --- a/common/tests/utf8_test.cc +++ b/common/tests/utf8_test.cc @@ -48,6 +48,16 @@ TEST(string_check_utf8, cp1252) { ASSERT_EQ(check_string_utf8(txt), "Le ticket coûte 12€\n"); } +/* + * Given a string encoded in CP-1252 + * Then the check_string_utf8 function converts it to UTF-8. + */ +TEST(string_check_utf8, cp1252_bis) { + std::string txt("Service de plateforme des appareils connect\xe9s"); + ASSERT_EQ(check_string_utf8(txt), + "Service de plateforme des appareils connectés"); +} + /* * Given a string encoded in ISO-8859-15 * Then the check_string_utf8 function converts it to UTF-8. diff --git a/common/vault/CMakeLists.txt b/common/vault/CMakeLists.txt new file mode 100644 index 00000000000..e5f1e8483b4 --- /dev/null +++ b/common/vault/CMakeLists.txt @@ -0,0 +1,29 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${PROJECT_SOURCE_DIR}/inc + ${PROJECT_SOURCE_DIR}/http/inc) + +add_library( + ctnvault STATIC + # Sources. + vault_access.cc) + +target_precompile_headers(ctnvault REUSE_FROM centreon_common) + +set_property(TARGET ctnvault PROPERTY POSITION_INDEPENDENT_CODE ON) diff --git a/common/vault/vault_access.cc b/common/vault/vault_access.cc new file mode 100644 index 00000000000..bb6ef750a4d --- /dev/null +++ b/common/vault/vault_access.cc @@ -0,0 +1,212 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ +#include "common/vault/vault_access.hh" +#include +#include +#include +#include "com/centreon/common/http/http_config.hh" +#include "com/centreon/common/http/https_connection.hh" +#include "com/centreon/common/pool.hh" +#include "com/centreon/exceptions/msg_fmt.hh" + +using namespace com::centreon::common::http; +using namespace com::centreon::common::vault; +using com::centreon::common::crypto::aes256; + +/** + * @brief Construct a new vault_access object. The constructor needs the path to + * the env file and the path to the vault file. The vault file should contain + * the following keys: 'salt', 'role_id', 'secret_id', 'url', 'port', + * 'root_path'. The env file should contain the key 'APP_SECRET'. + * The parameter verify_peer is used to verify the certificate of the vault + * server. + * + * @param env_file The path to the env file. + * @param vault_file The path to the vault file. + * @param verify_peer A boolean to verify the certificate of the vault server. + * @param logger The logger. + */ +vault_access::vault_access(const std::string& env_file, + const std::string& vault_file, + bool verify_peer, + const std::shared_ptr<spdlog::logger>& logger) + : _logger{logger} { + if (env_file.empty()) + _set_env_informations("/usr/share/centreon/.env"); + else + _set_env_informations(env_file); + + if (_app_secret.empty()) + throw exceptions::msg_fmt("No APP_SECRET provided."); + _set_vault_informations(vault_file); + + _aes_encryptor = std::make_unique<aes256>(_app_secret, _salt); + + _role_id = _aes_encryptor->decrypt(_role_id); + _secret_id = _aes_encryptor->decrypt(_secret_id); + + asio::ip::tcp::resolver resolver(common::pool::io_context()); + const auto results = resolver.resolve(_url, fmt::format("{}", _port)); + if (results.empty()) + throw exceptions::msg_fmt("Unable to resolve the vault server '{}'", _url); + else { + http_config::pointer client_conf = std::make_shared<http_config>( + results, _url, true, std::chrono::seconds(10), std::chrono::seconds(30), + std::chrono::seconds(30), 30, std::chrono::seconds(10), 5, + std::chrono::hours(1), 1, asio::ssl::context_base::tlsv12_client); + client_conf->set_verify_peer(verify_peer); + connection_creator conn_creator = [client_conf, logger = _logger]() { + auto ssl_init = [](asio::ssl::context& ctx, + const http_config::pointer& conf [[maybe_unused]]) { + if (conf->verify_peer()) + ctx.set_verify_mode(asio::ssl::context::verify_peer); + else + ctx.set_verify_mode(asio::ssl::context::verify_none); + ctx.set_default_verify_paths(); + }; + return https_connection::load(common::pool::io_context_ptr(), logger, + client_conf, ssl_init); + }; + _client = client::load(common::pool::io_context_ptr(), _logger, client_conf, + conn_creator); + } +} + +/** + * @brief Read the vault file and set its information in the object. Throw an + * exception if the file cannot be opened or if the file is malformed. + * + * @param vault_file The path to the vault file.
+ */ +void vault_access::_set_vault_informations(const std::string& vault_file) { + std::ifstream ifs(vault_file); + nlohmann::json vault_configuration = nlohmann::json::parse(ifs); + if (vault_configuration.contains("salt") && + vault_configuration.contains("role_id") && + vault_configuration.contains("secret_id") && + vault_configuration.contains("url") && + vault_configuration.contains("port") && + vault_configuration.contains("root_path")) { + _salt = vault_configuration["salt"]; + _url = vault_configuration["url"]; + _port = vault_configuration["port"]; + _root_path = vault_configuration["root_path"]; + _role_id = vault_configuration["role_id"]; + _secret_id = vault_configuration["secret_id"]; + } else + throw exceptions::msg_fmt( + "The '{}' file is malformed, we should have keys 'salt', 'role_id', " + "'secret_id', 'url', 'port', 'root_path'.", + vault_file); +} + +/** + * @brief Read the env file and set the APP_SECRET in the object. Throw an + * exception if the file cannot be opened. + * + * @param env_file The path to the env file. + */ +void vault_access::_set_env_informations(const std::string& env_file) { + std::ifstream ifs(env_file); + if (ifs.is_open()) { + std::string line; + while (std::getline(ifs, line)) { + if (line.find("APP_SECRET=") == 0) { + _app_secret = line.substr(11); + _app_secret = absl::StripAsciiWhitespace(_app_secret); + break; + } + } + } else + throw exceptions::msg_fmt("The env file could not be opened"); +} + +/** + * @brief Get the value referenced by the input string from the Vault. If the + * input string is not a vault reference (prefix 'secret::hashicorp_vault::'), it is returned unchanged. + * + * @param encrypted The string to decrypt. + * + * @return The decrypted string. + */ +std::string vault_access::decrypt(const std::string& encrypted) { + std::string_view head = encrypted; + if (head.substr(0, 25) != "secret::hashicorp_vault::") { + _logger->debug("Password is not stored in the vault"); + return encrypted; + } else + head.remove_prefix(25); + + /* We get the token */ + auto req = std::make_shared(boost::beast::http::verb::post, + _url, "/v1/auth/approle/login"); + req->body() = fmt::format("{{ \"role_id\":\"{}\", \"secret_id\":\"{}\" }}", + _role_id, _secret_id); + req->content_length(req->body().length()); + + std::promise<std::string> promise; + std::future<std::string> future = promise.get_future(); + _client->send(req, [logger = _logger, &promise]( + const boost::beast::error_code& err, + const std::string& detail [[maybe_unused]], + const response_ptr& response) mutable { + if (err && err != boost::asio::ssl::error::stream_truncated) { + auto exc = std::make_exception_ptr( + exceptions::msg_fmt("Error from http server: {}", err.message())); + promise.set_exception(exc); + } else { + nlohmann::json resp = nlohmann::json::parse(response->body()); + std::string token = resp["auth"]["client_token"].get<std::string>(); + promise.set_value(std::move(token)); + } + }); + + std::pair<std::string_view, std::string_view> p = + absl::StrSplit(head, absl::ByString("::")); + + std::string token(future.get()); + req = std::make_shared(boost::beast::http::verb::get, _url, + fmt::format("/v1/{}", p.first)); + req->set("X-Vault-Token", token); + std::promise<std::string> promise_decrypted; + std::future<std::string> future_decrypted = promise_decrypted.get_future(); + _client->send( + req, [logger = _logger, &promise_decrypted, field = p.second]( + const boost::beast::error_code& err, const std::string& detail, + const response_ptr& response) mutable { + if (err && err != boost::asio::ssl::error::stream_truncated) { + logger->error("Error from http server: {}", err.message()); + auto exc =
std::make_exception_ptr( + exceptions::msg_fmt("Error from http server: {}", err.message())); + promise_decrypted.set_exception(exc); + } else { + logger->info("We got the result: detail = {} ; response = {}", + detail, response ? response->body() : "nullptr"); + try { + nlohmann::json resp = nlohmann::json::parse(response->body()); + std::string result = resp["data"]["data"][field]; + promise_decrypted.set_value(result); + } catch (const std::exception& e) { + auto exc = std::make_exception_ptr(exceptions::msg_fmt( + "Response is not as expected: {}", e.what())); + promise_decrypted.set_exception(exc); + } + } + }); + return future_decrypted.get(); +} diff --git a/common/vault/vault_access.hh b/common/vault/vault_access.hh new file mode 100644 index 00000000000..16222b87ae6 --- /dev/null +++ b/common/vault/vault_access.hh @@ -0,0 +1,65 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ +#ifndef CCC_VAULT_VAULT_ACCESS_HH +#define CCC_VAULT_VAULT_ACCESS_HH +#include "com/centreon/common/http/http_client.hh" +#include "common/crypto/aes256.hh" + +using com::centreon::common::crypto::aes256; +using com::centreon::common::http::client; + +namespace com::centreon::common::vault { +class vault_access { + /* The url and port to access the Vault. */ + std::string _url; + uint16_t _port; + std::shared_ptr<spdlog::logger> _logger; + + std::string _root_path; + + /* The AES256 encrypt/decrypt tool to access the vault. */ + std::unique_ptr<aes256> _aes_encryptor; + + /* First key needed to use _aes_encryptor. */ + std::string _app_secret; + /* Second key needed to use _aes_encryptor. */ + std::string _salt; + + /* The main credentials to access the Vault. */ + std::string _role_id; + std::string _secret_id; + + /* The http client to the vault */ + std::shared_ptr<client> _client; + + /* The token to ask for a password */ + std::string _token; + + void _decrypt_role_and_secret(); + void _set_vault_informations(const std::string& vault_file); + void _set_env_informations(const std::string& env_file); + + public: + vault_access(const std::string& env_file, + const std::string& vault_file, + bool verify_peer, + const std::shared_ptr<spdlog::logger>& logger); + std::string decrypt(const std::string& encrypted); +}; +} // namespace com::centreon::common::vault +#endif /* !CCC_VAULT_VAULT_ACCESS_HH */
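A minimal usage sketch of the vault_access API above, for illustration only and not part of the patch: the vault.json path, the logger and the secret reference are assumed values; the env file must provide APP_SECRET, and the vault file must provide the keys 'salt', 'role_id', 'secret_id', 'url', 'port' and 'root_path'.

// Illustrative sketch only; the paths and the secret reference are examples.
#include "common/vault/vault_access.hh"

std::string resolve_password(const std::shared_ptr<spdlog::logger>& logger) {
  com::centreon::common::vault::vault_access vault(
      "/usr/share/centreon/.env",        // env file providing APP_SECRET
      "/etc/centreon/vault/vault.json",  // hypothetical vault configuration
      /*verify_peer=*/true, logger);
  // A value of the form "secret::hashicorp_vault::<path>::<field>" is fetched
  // from the Vault; any other value is returned unchanged.
  return vault.decrypt("secret::hashicorp_vault::centreon/data::password");
}

diff --git a/connectors/perl/test/connector.cc index a0396074875..1b44b63ada3 100644 --- a/connectors/perl/test/connector.cc +++ b/connectors/perl/test/connector.cc @@ -364,7 +364,7 @@ class TestConnector : public testing::Test { return p.read_std_out(std::chrono::seconds(5)); } - static void _write_file(char const* filename, + static void _write_file(const char* filename, char const* content, unsigned int size = 0) { // Check size. @@ -372,7 +372,7 @@ size = strlen(content); // Open file.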
- FILE* f(fopen(filename, "w")); + FILE* f = fopen(filename, "w"); if (!f) throw msg_fmt("could not open file {}", filename); @@ -404,8 +404,8 @@ TEST_F(TestConnector, EofOnStdin) { TEST_F(TestConnector, ExecuteModuleLoading) { // Write Perl script. - std::string script_path(com::centreon::io::file_stream::temp_path()); - _write_file(script_path.c_str(), + const char* script_path = com::centreon::io::file_stream::temp_path(); + _write_file(script_path, "#!/usr/bin/perl\n" "\n" "use Sys::Hostname;\n" @@ -432,7 +432,7 @@ TEST_F(TestConnector, ExecuteModuleLoading) { int retval{wait_for_termination(*p)}; // Remove temporary files. - remove(script_path.c_str()); + remove(script_path); ASSERT_EQ(retval, 0); std::string expected(result, result + sizeof(result) - 1); @@ -648,9 +648,9 @@ TEST_F(TestConnector, ExecuteSingleScriptLogFile) { TEST_F(TestConnector, ExecuteWithAdditionalCode) { // Write Perl script. - std::string script_path(com::centreon::io::file_stream::temp_path()); + const char* script_path(com::centreon::io::file_stream::temp_path()); _write_file( - script_path.c_str(), + script_path, "#!/usr/bin/perl\n" "\n" "print \"$Centreon::Test::company is $Centreon::Test::attribute\\n\";\n" @@ -678,7 +678,7 @@ TEST_F(TestConnector, ExecuteWithAdditionalCode) { int retval{wait_for_termination(*p)}; // Remove temporary files. - remove(script_path.c_str()); + remove(script_path); ASSERT_EQ(retval, 0); std::string expected(result, result + sizeof(result) - 1); diff --git a/deps.py b/deps.py new file mode 100755 index 00000000000..274eba281bc --- /dev/null +++ b/deps.py @@ -0,0 +1,348 @@ +#!/usr/bin/python3 +""" + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com +""" + +import argparse +import re +import os +import json +import sys +from collections import defaultdict + +ESC = '\x1b' +YELLOW = ESC + '[1;33m' +CYAN = ESC + '[1;36m' +GREEN = ESC + '[1;32m' +RESET = ESC + '[0m' + +home = os.getcwd() + +parser = argparse.ArgumentParser( + prog="deps.py", description='Draw a header dependency tree from one file.') +parser.add_argument('filename', nargs='+', + help='Specify the file whose headers are to be analyzed.') +parser.add_argument('--output', '-o', type=str, + help='Specify the DOT file to store the result graph.') +parser.add_argument('--depth', '-d', type=int, default=2, + help='Specify the depth to look up headers.') +parser.add_argument('--compile-commands', '-c', type=str, default="compile_commands.json", + help='Specify the path to the compile_commands.json file.') +parser.add_argument('--explain', '-e', action='store_true', default=False, + help='Explain what header to remove from the file.') +parser.add_argument('--fix', '-f', action='store_true', default=False, + help='Remove headers that seem unneeded (this action is dangerous).') +args = parser.parse_args() + + +# Graph class to represent a directed graph +class Graph: + def __init__(self): + self.graph = defaultdict(list) + + # Method to add an edge between two nodes u and v (strings) + def add_edge(self, u, v): + self.graph[u].append(v) + + # Method to find all paths from source to destination + def find_all_paths(self, source, destination): + paths = [] + current_path = [] + + # Check for a direct path (single edge path) + if destination in self.graph[source]: + paths.append([source, destination]) + + # Find all other paths using DFS + self.dfs(source, destination, current_path, paths) + + return paths + + # Recursive DFS function to explore paths + def dfs(self, current_node, destination, current_path, paths): + # Add the current node to the current path + current_path.append(current_node) + + # If we reach the destination node, store the current path + if current_node == destination and len(current_path) > 2: # Ensure this is not the single-edge path + paths.append(list(current_path)) + else: + # Otherwise, explore all neighbors of the current node + for neighbor in self.graph[current_node]: + if neighbor not in current_path: # Avoid immediate cycles + self.dfs(neighbor, destination, current_path, paths) + + # Backtrack: remove the current node from the path before returning + current_path.pop() + + # Method to check if at least two paths exist from source to destination + def find_paths_with_at_least_two(self, source, destination): + all_paths = self.find_all_paths(source, destination) + + # Return paths only if there are at least two + if len(all_paths) >= 2: + return all_paths + else: + return None + + # Method to find all node pairs with at least two paths + def find_all_pairs_with_at_least_two_paths(self): + result = {} + destinations = set() + # We get all the leaves. + for v in self.graph.values(): + destinations.update(v) + nodes = set(self.graph.keys()) # All the nodes except leaves. + destinations.update(nodes) # And the union of them.
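+ # A pair (source, destination) is reported only when there is both a direct edge (an explicit #include in the source file) and a longer indirect path: the direct include is then redundant because the header is already reachable transitively.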
+ # Iterate through all pairs of nodes in the graph + for source in nodes: + for destination in destinations: + if source != destination: + paths = self.find_paths_with_at_least_two(source, destination) + if paths: + for path in paths: + if len(path) == 2: + result[(source, destination)] = paths + return result + + +def parse_command(entry): + """ + Returns all the include directories used in the compilation command and, + if one is used, the precompiled header file. They are stored in a dictionary + under the keys 'include_dirs' and 'pch' respectively. + + Args: + entry: An entry of the compile_commands.json + + Returns: A dictionary with the include directories (absolute paths) and, optionally, the precompiled header. + """ + command = entry['command'] + args = command.split(' ') + + retval = {} + + if "clang" in args[0]: + for a in args: + if 'cmake_pch.hxx.pch' in a: + retval['pch'] = a[:-4] + break + elif "g++" in args[0]: + for a in args: + if 'cmake_pch.hxx' in a: + retval['pch'] = a + break + + # -I at first + retval['include_dirs'] = [a[2:].strip() + for a in args if a.startswith('-I')] + + # -isystem at second + try: + idx = args.index("-isystem") + retval['include_dirs'].append(args[idx + 1]) + except ValueError: + pass + + # and system headers at last + retval['include_dirs'].append("/usr/include") + if os.path.exists("/usr/include/c++/12"): + retval['include_dirs'].append("/usr/include/c++/12") + elif os.path.exists("/usr/include/c++/10"): + retval['include_dirs'].append("/usr/include/c++/10") + return retval + + +def get_headers(full_name): + headers = [] + with open(full_name, 'r') as f: + lines = f.readlines() + + r_include = re.compile(r"^\s*#include\s*[\"<](.*)[\">]") + for line in lines: + m = r_include.match(line) + if m: + headers.append(m.group(1)) + return headers + + +def build_full_headers(includes, headers): + retval = [] + for header in headers: + for inc in includes: + file = f"{inc}/{header}" + if os.path.isfile(file): + retval.append((file, header)) + break + return retval + + +def get_precomp_headers(precomp): + try: + with open(precomp, 'r') as f: + lines = f.readlines() + r_include = re.compile(r"^\s*#include\s*[\"<](.*)[\">]") + for line in lines: + m = r_include.match(line) + if m: + my_precomp = m.group(1) + return get_headers(my_precomp) + except FileNotFoundError: + return [] + + +def build_recursive_headers(parent, includes, headers, precomp_headers, level, output): + if level == 0: + return + level -= 1 + + full_precomp_headers = build_full_headers(includes, precomp_headers) + for pair in full_precomp_headers: + output.add(f"\"{parent[1]}\" -> \"{pair[1]}\" [color=red]\n") + + full_headers = build_full_headers(includes, headers) + for pair in full_headers: + if level == args.depth - 1: + output.add(f"\"{parent[1]}\" -> \"{pair[1]}\" [color=blue]\n") + else: + output.add(f"\"{parent[1]}\" -> \"{pair[1]}\"\n") + new_headers = get_headers(pair[0]) + if not pair[0].startswith("/usr/include") and "vcpkg" not in pair[0]: + build_recursive_headers( + pair, includes, new_headers, [], level, output) + + +def build_recursive_headers_explain(parent, includes, headers, precomp_headers, level, output): + global args + if level == 0: + return + + if level == args.depth: + full_precomp_headers = build_full_headers(includes, precomp_headers) + for pair in full_precomp_headers: + output.add_edge(parent[0], pair[0]) + + level -= 1 + full_headers = build_full_headers(includes, headers) + for pair in full_headers: + output.add_edge(parent[0], pair[0]) + new_headers = get_headers(pair[0]) + if not
pair[0].startswith("/usr/include") and "vcpkg" not in pair[0] and ".pb." not in pair[0]: + build_recursive_headers_explain( + pair, includes, new_headers, [], level, output) + + +def remove_header_from_file(header, filename): + print(f" * {YELLOW}{header}{RESET} removed from {CYAN}{filename}{RESET}.") + r = re.compile(r"^#include\s*[\"<](.*)[\">]") + with open(filename, "r") as f: + lines = f.readlines() + with open(filename, "w") as f: + for l in lines: + ls = l.strip() + if l.startswith("#include"): + m = r.match(ls) + if m and header.endswith(m.group(1)): + continue + f.write(l) + + +if not os.path.isfile(args.compile_commands): + print("the compile_commands.json file must be provided, by default deps looks for it in the current path, otherwise you can provide it with the -c option.", file=sys.stderr) + sys.exit(1) + +with open(args.compile_commands, "r") as f: + js = json.load(f) + +# An array of pairs with (fullname, shortname) of the files given on the command line. +filename = [] +# new_js contains only files matching those in filename. +new_js = [] +for f in args.filename: + if f.startswith(home): + full_name = (f, f) + else: + full_name = (home + '/' + f, f) + for ff in js: + if ff['file'] == full_name[0]: + filename.append(full_name) + new_js.append(ff) + +if not args.explain: + for full_name in filename: + output = set() + for entry in new_js: + if entry["file"] == full_name[0]: + result = parse_command(entry) + if 'pch' in result: + precomp_headers = get_precomp_headers(result['pch']) + else: + precomp_headers = [] + includes = result['include_dirs'] + + headers = get_headers(full_name[0]) + build_recursive_headers( + full_name, includes, headers, precomp_headers, args.depth, output) + break + + if args.output: + output_file = args.output + else: + output_file = "/tmp/deps.dot" + + with open(output_file, "w") as f: + f.write("digraph deps {\n") + for o in output: + f.write(o) + f.write("}\n") + + if os.path.exists("/usr/bin/dot"): + os.system(f"/usr/bin/dot -Tpng {output_file} -o /tmp/deps.png") + if os.path.exists("/usr/bin/lximage-qt"): + os.system("/usr/bin/lximage-qt /tmp/deps.png") + elif os.path.exists("/usr/bin/eog"): + os.system("/usr/bin/eog /tmp/deps.png") + else: + print(f"Output written at '{output_file}'.") +else: + for full_name in filename: + print(f"Analyzing '{full_name[0]}'") + output = Graph() + for entry in new_js: + # A little hack so that if we specify no file, they are all analyzed. 
+ if entry["file"] == full_name[0]: + result = parse_command(entry) + if 'pch' in result: + precomp_headers = get_precomp_headers(result['pch']) + else: + precomp_headers = [] + includes = result['include_dirs'] + + headers = get_headers(full_name[0]) + build_recursive_headers_explain( + full_name, includes, headers, precomp_headers, args.depth, output) + + result = output.find_all_pairs_with_at_least_two_paths() + if result: + if args.fix: + for (source, destination), paths in result.items(): + remove_header_from_file(destination, source) + else: + print(f"{GREEN}{full_name[0]}{RESET}:") + for (source, destination), paths in result.items(): + print(f" * {YELLOW}{destination}{RESET} can be removed from {CYAN}{source}{RESET}.") + break diff --git a/engine/CMakeLists.txt b/engine/CMakeLists.txt index 56b06efcddf..5a868a692db 100644 --- a/engine/CMakeLists.txt +++ b/engine/CMakeLists.txt @@ -494,7 +494,7 @@ target_precompile_headers(centenginestats PRIVATE ${PRECOMP_HEADER}) if(LEGACY_ENGINE) add_library(cce_core ${LIBRARY_TYPE} ${FILES}) - add_dependencies(cce_core engine_rpc centreon_clib pb_neb_lib) + add_dependencies(cce_core engine_rpc centreon_clib pb_neb_lib pb_common_lib) target_precompile_headers(cce_core PRIVATE ${PRECOMP_HEADER}) @@ -555,7 +555,7 @@ install( COMPONENT "runtime") else() add_library(cce_core ${LIBRARY_TYPE} ${FILES}) - add_dependencies(cce_core engine_rpc centreon_clib pb_neb_lib) + add_dependencies(cce_core engine_rpc centreon_clib pb_neb_lib pb_common_lib) target_precompile_headers(cce_core PRIVATE ${PRECOMP_HEADER}) diff --git a/engine/enginerpc/engine.proto b/engine/enginerpc/engine.proto index eeb9d0bda19..c9d4ebdba7e 100644 --- a/engine/enginerpc/engine.proto +++ b/engine/enginerpc/engine.proto @@ -32,9 +32,18 @@ service Engine { returns (com.centreon.common.pb_process_stat) {} rpc GetVersion(google.protobuf.Empty) returns (Version) {} rpc GetStats(GenericString) returns (Stats) {} - rpc GetHost(HostIdentifier) returns (EngineHost) {} - rpc GetContact(ContactIdentifier) returns (EngineContact) {} + rpc GetHost(NameOrIdIdentifier) returns (EngineHost) {} + rpc GetContact(NameIdentifier) returns (EngineContact) {} rpc GetService(ServiceIdentifier) returns (EngineService) {} + rpc GetHostGroup(NameIdentifier) returns (EngineHostGroup) {} + rpc GetServiceGroup(NameIdentifier) returns (EngineServiceGroup) {} + rpc GetContactGroup(NameIdentifier) returns (EngineContactGroup) {} + rpc GetTag(IdOrTypeIdentifier) returns (EngineTag) {} + rpc GetSeverity(IdOrTypeIdentifier) returns (EngineSeverity) {} + rpc GetCommand(NameIdentifier) returns (EngineCommand) {} + rpc GetConnector(NameIdentifier) returns (EngineConnector) {} + rpc GetHostEscalation(NameIdentifier) returns (EngineHostEscalation) {} + rpc GetServiceEscalation(PairNamesIdentifier) returns (EngineServiceEscalation) {} rpc GetHostsCount(google.protobuf.Empty) returns (GenericValue) {} rpc GetContactsCount(google.protobuf.Empty) returns (GenericValue) {} rpc GetServicesCount(google.protobuf.Empty) returns (GenericValue) {} @@ -50,9 +59,9 @@ service Engine { rpc AddHostComment(EngineComment) returns (CommandSuccess) {} rpc AddServiceComment(EngineComment) returns (CommandSuccess) {} rpc DeleteComment(GenericValue) returns (CommandSuccess) {} - rpc DeleteAllHostComments(HostIdentifier) returns (CommandSuccess) {} + rpc DeleteAllHostComments(NameOrIdIdentifier) returns (CommandSuccess) {} rpc DeleteAllServiceComments(ServiceIdentifier) returns (CommandSuccess) {} - rpc RemoveHostAcknowledgement(HostIdentifier) 
returns (CommandSuccess) {} + rpc RemoveHostAcknowledgement(NameOrIdIdentifier) returns (CommandSuccess) {} rpc RemoveServiceAcknowledgement(ServiceIdentifier) returns (CommandSuccess) { } rpc AcknowledgementHostProblem(EngineAcknowledgement) @@ -108,12 +117,12 @@ service Engine { rpc ChangeContactObjectCustomVar(ChangeObjectCustomVar) returns (CommandSuccess) {} rpc ShutdownProgram(google.protobuf.Empty) returns (google.protobuf.Empty) {} - rpc EnableHostAndChildNotifications(HostIdentifier) returns (CommandSuccess) { + rpc EnableHostAndChildNotifications(NameOrIdIdentifier) returns (CommandSuccess) { } - rpc DisableHostAndChildNotifications(HostIdentifier) + rpc DisableHostAndChildNotifications(NameOrIdIdentifier) returns (CommandSuccess) {} - rpc DisableHostNotifications(HostIdentifier) returns (CommandSuccess) {} - rpc EnableHostNotifications(HostIdentifier) returns (CommandSuccess) {} + rpc DisableHostNotifications(NameOrIdIdentifier) returns (CommandSuccess) {} + rpc EnableHostNotifications(NameOrIdIdentifier) returns (CommandSuccess) {} rpc DisableNotifications(google.protobuf.Empty) returns (CommandSuccess) {} rpc EnableNotifications(google.protobuf.Empty) returns (CommandSuccess) {} rpc DisableServiceNotifications(ServiceIdentifier) returns (CommandSuccess) {} @@ -337,7 +346,7 @@ message ThresholdsFile { string filename = 1; } -message HostIdentifier { +message NameOrIdIdentifier { oneof identifier { string name = 1; uint32 id = 2; @@ -470,11 +479,15 @@ message EngineHost { bool contains_circular_path = 109; string timezone = 110; uint64 icon_id = 111; - string group_name = 112; - repeated string custom_variables = 113; + repeated string group_name = 112; + int32 acknowledgement_timeout = 113; + uint32 severity_level = 114; + uint64 severity_id = 115; + repeated string tag = 116; + repeated string custom_variables = 117; } -message ContactIdentifier { +message NameIdentifier { string name = 1; } @@ -482,22 +495,51 @@ message EngineContact { string name = 1; string alias = 2; string email = 3; -} - -message NameIdentifier { + repeated string contact_groups = 4; + string pager = 5; + string host_notification_period = 6; + repeated string host_notification_commands = 7; + string service_notification_period = 8; + repeated string service_notification_commands = 9; + bool host_notification_on_up = 10; + bool host_notification_on_down = 11; + bool host_notification_on_unreachable = 12; + bool host_notification_on_flappingstart = 13; + bool host_notification_on_flappingstop = 14; + bool host_notification_on_flappingdisabled = 15; + bool host_notification_on_downtime = 16; + bool service_notification_on_ok = 17; + bool service_notification_on_warning = 18; + bool service_notification_on_critical = 19; + bool service_notification_on_unknown = 20; + bool service_notification_on_flappingstart = 21; + bool service_notification_on_flappingstop = 22; + bool service_notification_on_flappingdisabled = 23; + bool service_notification_on_downtime = 24; + bool host_notifications_enabled = 25; + bool service_notifications_enabled = 26; + bool can_submit_commands = 27; + bool retain_status_information = 28; + bool retain_nonstatus_information = 29; + string timezone = 30; + repeated string addresses = 31; + repeated string custom_variables = 32; +} + +message PairNamesIdentifier { string host_name = 1; string service_name = 2; } -message IdIdentifier { +message PairIdsIdentifier { uint32 host_id = 1; uint32 service_id = 2; } message ServiceIdentifier { oneof identifier { - NameIdentifier names = 1; - 
IdIdentifier ids = 2; + PairNamesIdentifier names = 1; + PairIdsIdentifier ids = 2; } } @@ -514,6 +556,229 @@ message EngineService { UNKNOWN = 3; } State current_state = 6; + string display_name = 7; + string check_command = 8; + string event_handler = 9; + State initial_state = 10; + uint32 check_interval = 11; + double retry_interval = 12; + int32 max_check_attempts = 13; + repeated string contactgroups = 14; + repeated string contacts = 15; + uint32 notification_interval = 16; + uint32 first_notification_delay = 17 ; + uint32 recovery_notification_delay = 18 ; + bool notify_on_unknown = 19 ; + bool notify_on_warning = 20 ; + bool notify_on_critical = 21 ; + bool notify_on_ok = 22 ; + bool notify_on_flappingstart = 23 ; + bool notify_on_flappingstop = 24 ; + bool notify_on_flappingdisabled = 25 ; + bool notify_on_downtime = 26 ; + bool stalk_on_ok = 27 ; + bool stalk_on_unknown = 28 ; + bool stalk_on_warning = 29 ; + bool stalk_on_critical = 30 ; + bool is_volatile = 31 ; + string notification_period = 32 ; + bool flap_detection_enabled = 33; + double low_flap_threshold = 34; + double high_flap_threshold = 35; + bool flap_detection_on_ok = 36; + bool flap_detection_on_warning = 37; + bool flap_detection_on_unknown = 38; + bool flap_detection_on_critical = 39; + int32 process_performance_data = 40; + bool check_freshness_enabled = 41; + int32 freshness_threshold = 42; + bool passive_checks_enabled = 43; + bool event_handler_enabled = 44; + bool active_checks_enabled = 45; + int32 retain_status_information = 46; + bool retain_nonstatus_information = 47; + bool notifications_enabled = 48; + bool obsess_over = 49; + string notes = 50; + string notes_url = 51; + string action_url = 52; + string icon_image = 53; + string icon_image_alt = 54; + enum AckType { + NONE = 0; + NORMAL = 1; + STICKY = 2; + } + AckType acknowledgement = 55; + bool host_problem_at_last_check = 56; + enum CheckType { + CHECK_ACTIVE = 0; + CHECK_PASSIVE = 1; + } + CheckType check_type = 57; + State last_state = 58; + State last_hard_state = 59; + string plugin_output = 60; + string long_plugin_output = 61; + string perf_data = 62; + State state_type = 63; + string next_check = 65; + bool should_be_scheduled = 66; + string last_check = 67; + int32 current_attempt = 68; + uint64 current_event_id = 69; + uint64 last_event_id = 70; + uint64 current_problem_id = 71; + uint64 last_problem_id = 72; + string last_notification = 73; + string next_notification = 74; + bool no_more_notifications = 75; + string last_state_change = 76; + string last_hard_state_change = 77; + string last_time_ok = 78; + string last_time_warning = 79; + string last_time_unknown = 80; + string last_time_critical = 81; + bool has_been_checked = 82; + bool is_being_freshened = 83; + bool notified_on_unknown = 84; + bool notified_on_warning = 85; + bool notified_on_critical = 86; + int32 notification_number = 87; + uint64 current_notification_id = 88; + double latency = 89; + double execution_time = 90; + bool is_executing = 91; + int32 check_options = 92; + int32 scheduled_downtime_depth = 93; + int32 pending_flex_downtime = 94; + string state_history = 95; + uint32 state_history_index = 96; + bool is_flapping = 97; + uint64 flapping_comment_id = 98; + double percent_state_change = 99; + uint32 modified_attributes = 100; + string host_ptr = 101; + string event_handler_args = 102; + string check_command_args = 103; + repeated string custom_variables = 104; + int32 acknowledgement_timeout = 105; + repeated string servicegroups = 106; + uint32 
severity_level = 107; + uint64 severity_id = 108; + repeated string tag = 109; + string timezone = 110; + uint64 icon_id = 111; + uint64 internal_id = 112; + string metric_name = 113; + string thresholds_file = 114; + double sensitivity = 115; + uint64 dependent_service_id = 116; + enum ServiceType { + NONE_TYPE = 0; + SERVICE = 1; + METASERVICE = 3; + BA = 4; + ANOMALY_DETECTION = 5; + } + ServiceType service_type = 117; +} + +message EngineHostGroup { + uint32 id = 1; + string name = 2; + string alias = 3; + repeated string members = 4; + string notes = 5; + string notes_url = 6; + string action_url = 7; +} + +message EngineServiceGroup { + uint64 id = 1; + string name = 2; + string alias = 3; + string notes = 4; + string notes_url = 5; + string action_url = 6; + repeated string members = 7; +} + +message EngineContactGroup { + string name = 1; + string alias = 2; + repeated string members = 3; +} + +message IdOrTypeIdentifier { + uint64 id = 1; + uint32 type = 2; +} + +message EngineTag { + enum TagType { + SERVICEGROUP = 0; + HOSTGROUP = 1; + SERVICECATEGORY = 2; + HOSTCATEGORY = 3; + } + uint64 id = 1; + TagType type = 2; + string name = 3; +} + +message EngineSeverity { + enum SeverityType { + NONE = 0; + SERVICE = 1; + HOST = 2; + } + uint64 id = 1; + uint32 level = 2; + uint64 icon_id = 3; + string name = 4; + SeverityType type = 5; +} + +message EngineCommand { + string command_line = 1; + string command_name = 2; + enum CmdType { + EXEC = 0; + FORWARD = 1; + RAW = 2; + CONNECTOR = 3; + OTEL = 4; + } + CmdType type = 3; +} + +message EngineConnector { + string connector_line = 1; + string connector_name = 2; +} + +message EngineHostEscalation { + string host_name = 1; + repeated string contact_group = 2; + string escalation_option = 3; + string escalation_period = 4; + uint32 first_notification = 5; + uint32 last_notification = 6; + uint32 notification_interval = 7; +} + +message EngineServiceEscalation { + string host = 1; + string service_description = 2; + string service_group = 3; + string host_group = 4; + repeated string contact_group = 5; + string escalation_option = 6; + string escalation_period = 7; + uint32 first_notification = 8; + uint32 last_notification = 9; + uint32 notification_interval = 10; } message EngineComment { @@ -535,8 +800,8 @@ message HostDelayIdentifier { message ServiceDelayIdentifier { oneof identifier { - NameIdentifier names = 1; - IdIdentifier ids = 2; + PairNamesIdentifier names = 1; + PairIdsIdentifier ids = 2; } uint32 delay_time = 3; } diff --git a/engine/enginerpc/engine_impl.cc b/engine/enginerpc/engine_impl.cc index 07ce45d2fc9..37809f7a31e 100644 --- a/engine/enginerpc/engine_impl.cc +++ b/engine/enginerpc/engine_impl.cc @@ -28,19 +28,22 @@ #include #include - #include "com/centreon/common/process_stat.hh" #include "com/centreon/common/time.hh" #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/command_manager.hh" +#include "com/centreon/engine/commands/command.hh" #include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/commands/connector.hh" #include "com/centreon/engine/commands/processing.hh" #include "com/centreon/engine/downtimes/downtime_finder.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" #include "com/centreon/engine/downtimes/service_downtime.hh" #include "com/centreon/engine/events/loop.hh" #include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/hostescalation.hh" +#include "com/centreon/engine/serviceescalation.hh" #include 
"com/centreon/engine/severity.hh" #include "com/centreon/engine/statusdata.hh" #include "com/centreon/engine/string.hh" @@ -54,12 +57,12 @@ using com::centreon::common::log_v2::log_v2; namespace com::centreon::engine { -std::ostream& operator<<(std::ostream& str, const HostIdentifier& host_id) { +std::ostream& operator<<(std::ostream& str, const NameOrIdIdentifier& host_id) { switch (host_id.identifier_case()) { - case HostIdentifier::kName: + case NameOrIdIdentifier::kName: str << "host name=" << host_id.name(); break; - case HostIdentifier::kId: + case NameOrIdIdentifier::kId: str << "host id=" << host_id.id(); break; default: @@ -88,7 +91,7 @@ std::ostream& operator<<(std::ostream& str, const ServiceIdentifier& serv_id) { namespace fmt { template <> -struct formatter : ostream_formatter {}; +struct formatter : ostream_formatter {}; template <> struct formatter : ostream_formatter {}; @@ -209,10 +212,11 @@ grpc::Status engine_impl::NewThresholdsFile(grpc::ServerContext* context * @param request Host's identifier (it can be a hostname or a hostid) * @param response The filled fields * - *@return Status::OK + * @return Status::OK if the Host is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. */ grpc::Status engine_impl::GetHost(grpc::ServerContext* context [[maybe_unused]], - const HostIdentifier* request + const NameOrIdIdentifier* request [[maybe_unused]], EngineHost* response) { std::string err; @@ -390,8 +394,23 @@ grpc::Status engine_impl::GetHost(grpc::ServerContext* context [[maybe_unused]], host->set_icon_id(selectedhost->get_icon_id()); // locals - hostgroup* hg{selectedhost->get_parent_groups().front()}; - host->set_group_name(hg ? hg->get_group_name() : ""); + for (const auto& hg : selectedhost->get_parent_groups()) + if (hg) + host->add_group_name(hg->get_group_name()); + + host->set_acknowledgement_timeout(selectedhost->acknowledgement_timeout()); + + const auto& host_severity = selectedhost->get_severity(); + + if (host_severity) { + host->set_severity_level(host_severity->level()); + host->set_severity_id(host_severity->id()); + } + + if (!selectedhost->tags().empty()) + for (const auto& tag : selectedhost->tags()) + host->add_tag(fmt::format("id:{},name:{},type:{}", tag->id(), + tag->name(), static_cast(tag->type()))); for (const auto& cv : selectedhost->custom_variables) host->add_custom_variables(fmt::format( @@ -417,30 +436,112 @@ grpc::Status engine_impl::GetHost(grpc::ServerContext* context [[maybe_unused]], * @param request Contact's identifier * @param response The filled fields * - * @return Status::OK + * @return Status::OK if the Contact is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. 
**/ grpc::Status engine_impl::GetContact(grpc::ServerContext* context [[maybe_unused]], - const ContactIdentifier* request, + const NameIdentifier* request, EngineContact* response) { std::string err; - auto fn = std::packaged_task( - [&err, request, contact = response]() -> int32_t { - std::shared_ptr selectedcontact; - /* get the contact by his name */ - auto itcontactname = contact::contacts.find(request->name()); - if (itcontactname != contact::contacts.end()) - selectedcontact = itcontactname->second; - else { - err = fmt::format("could not find contact '{}'", request->name()); - return 1; - } - /* recovering contact's information */ - contact->set_name(selectedcontact->get_name()); - contact->set_alias(selectedcontact->get_alias()); - contact->set_email(selectedcontact->get_email()); - return 0; - }); + auto fn = std::packaged_task([&err, request, + contact = response]() -> int32_t { + std::shared_ptr selectedcontact; + /* get the contact by his name */ + auto itcontactname = contact::contacts.find(request->name()); + if (itcontactname != contact::contacts.end()) + selectedcontact = itcontactname->second; + else { + err = fmt::format("could not find contact '{}'", request->name()); + return 1; + } + /* recovering contact's information */ + contact->set_name(selectedcontact->get_name()); + contact->set_alias(selectedcontact->get_alias()); + contact->set_email(selectedcontact->get_email()); + + if (!selectedcontact->get_parent_groups().empty()) + for (const auto& [key, _] : selectedcontact->get_parent_groups()) + contact->add_contact_groups(key); + + contact->set_pager(selectedcontact->get_pager()); + contact->set_host_notification_period( + selectedcontact->get_host_notification_period()); + + if (!selectedcontact->get_host_notification_commands().empty()) { + for (const auto& cmd : selectedcontact->get_host_notification_commands()) + contact->add_host_notification_commands(cmd->get_name()); + } + contact->set_service_notification_period( + selectedcontact->get_service_notification_period()); + + if (!selectedcontact->get_service_notification_commands().empty()) { + for (const auto& cmd : + selectedcontact->get_service_notification_commands()) + contact->add_service_notification_commands(cmd->get_name()); + } + + contact->set_host_notification_on_up( + selectedcontact->notify_on(notifier::host_notification, notifier::up)); + contact->set_host_notification_on_down(selectedcontact->notify_on( + notifier::host_notification, notifier::down)); + contact->set_host_notification_on_unreachable(selectedcontact->notify_on( + notifier::host_notification, notifier::unreachable)); + contact->set_host_notification_on_flappingstart(selectedcontact->notify_on( + notifier::host_notification, notifier::flappingstart)); + contact->set_host_notification_on_flappingstop(selectedcontact->notify_on( + notifier::host_notification, notifier::flappingstop)); + contact->set_host_notification_on_flappingdisabled( + selectedcontact->notify_on(notifier::host_notification, + notifier::flappingdisabled)); + contact->set_host_notification_on_downtime(selectedcontact->notify_on( + notifier::host_notification, notifier::downtime)); + + contact->set_service_notification_on_ok(selectedcontact->notify_on( + notifier::service_notification, notifier::ok)); + contact->set_service_notification_on_warning(selectedcontact->notify_on( + notifier::service_notification, notifier::warning)); + contact->set_service_notification_on_unknown(selectedcontact->notify_on( + notifier::service_notification, notifier::unknown)); + 
contact->set_service_notification_on_critical(selectedcontact->notify_on( + notifier::service_notification, notifier::critical)); + contact->set_service_notification_on_flappingstart( + selectedcontact->notify_on(notifier::service_notification, + notifier::flappingstart)); + contact->set_service_notification_on_flappingstop( + selectedcontact->notify_on(notifier::service_notification, + notifier::flappingstop)); + contact->set_service_notification_on_flappingdisabled( + selectedcontact->notify_on(notifier::service_notification, + notifier::flappingdisabled)); + contact->set_service_notification_on_downtime(selectedcontact->notify_on( + notifier::service_notification, notifier::downtime)); + + contact->set_host_notifications_enabled( + selectedcontact->get_host_notifications_enabled()); + contact->set_service_notifications_enabled( + selectedcontact->get_service_notifications_enabled()); + contact->set_can_submit_commands( + selectedcontact->get_can_submit_commands()); + contact->set_retain_status_information( + selectedcontact->get_retain_status_information()); + contact->set_retain_nonstatus_information( + selectedcontact->get_retain_nonstatus_information()); + contact->set_timezone(selectedcontact->get_timezone()); + + if (!selectedcontact->get_addresses().empty()) + for (const auto& addr : selectedcontact->get_addresses()) + contact->add_addresses(addr); + + for (const auto& [key, custom_variable] : + selectedcontact->get_custom_variables()) + contact->add_custom_variables(fmt::format( + "key : {}, value :{}, is_sent :{}, has_been_modified: {} ", key, + custom_variable.value(), custom_variable.is_sent(), + custom_variable.has_been_modified())); + + return 0; + }); std::future result = fn.get_future(); command_manager::instance().enqueue(std::move(fn)); if (result.get() == 0) @@ -457,29 +558,518 @@ grpc::Status engine_impl::GetContact(grpc::ServerContext* context * hostid & serviceid) * @param response The filled fields * - *@return Status::OK + * @return Status::OK if the Service is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. 
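+ *
+ * Illustrative only: a minimal client-side sketch using the `ids` variant of
+ * ServiceIdentifier, assuming an Engine::Stub named `stub`; the host and
+ * service ids are placeholders:
+ * @code
+ * com::centreon::engine::ServiceIdentifier request;
+ * request.mutable_ids()->set_host_id(12);
+ * request.mutable_ids()->set_service_id(42);
+ * com::centreon::engine::EngineService reply;
+ * grpc::ClientContext ctx;
+ * grpc::Status status = stub->GetService(&ctx, request, &reply);
+ * @endcode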
*/ grpc::Status engine_impl::GetService(grpc::ServerContext* context [[maybe_unused]], const ServiceIdentifier* request, EngineService* response) { std::string err; + auto fn = std::packaged_task([&err, request, + service = response]() -> int32_t { + std::shared_ptr selectedservice; + std::tie(selectedservice, err) = get_serv(*request); + if (!err.empty()) { + return 1; + } + /* recovering service's information */ + service->set_host_id(selectedservice->host_id()); + service->set_service_id(selectedservice->service_id()); + service->set_host_name(selectedservice->get_hostname()); + service->set_description(selectedservice->description()); + service->set_check_period(selectedservice->check_period()); + service->set_current_state(static_cast( + selectedservice->get_current_state())); + service->set_display_name(selectedservice->get_display_name()); + service->set_check_command(selectedservice->check_command()); + service->set_event_handler(selectedservice->event_handler()); + service->set_initial_state(static_cast( + selectedservice->get_initial_state())); + service->set_check_interval(selectedservice->check_interval()); + service->set_retry_interval(selectedservice->retry_interval()); + service->set_max_check_attempts(selectedservice->max_check_attempts()); + service->set_acknowledgement_timeout( + selectedservice->acknowledgement_timeout()); + + if (!selectedservice->get_contactgroups().empty()) + for (const auto& [key, _] : selectedservice->get_contactgroups()) + service->add_contactgroups(key); + + if (!selectedservice->contacts().empty()) + for (const auto& [key, _] : selectedservice->contacts()) + service->add_contacts(key); + + if (!selectedservice->get_parent_groups().empty()) + for (const auto& grp : selectedservice->get_parent_groups()) + if (grp) + service->add_servicegroups(grp->get_group_name()); + + service->set_notification_interval( + selectedservice->get_notification_interval()); + service->set_first_notification_delay( + selectedservice->get_first_notification_delay()); + service->set_recovery_notification_delay( + selectedservice->get_recovery_notification_delay()); + service->set_notify_on_unknown( + selectedservice->get_notify_on(notifier::unknown)); + service->set_notify_on_warning( + selectedservice->get_notify_on(notifier::warning)); + service->set_notify_on_critical( + selectedservice->get_notify_on(notifier::critical)); + service->set_notify_on_ok(selectedservice->get_notify_on(notifier::ok)); + service->set_notify_on_flappingstart( + selectedservice->get_notify_on(notifier::flappingstart)); + service->set_notify_on_flappingstop( + selectedservice->get_notify_on(notifier::flappingstop)); + service->set_notify_on_flappingdisabled( + selectedservice->get_notify_on(notifier::flappingdisabled)); + service->set_notify_on_downtime( + selectedservice->get_notify_on(notifier::downtime)); + service->set_stalk_on_ok(selectedservice->get_stalk_on(notifier::ok)); + service->set_stalk_on_warning( + selectedservice->get_stalk_on(notifier::warning)); + service->set_stalk_on_unknown( + selectedservice->get_stalk_on(notifier::unknown)); + service->set_stalk_on_critical( + selectedservice->get_stalk_on(notifier::critical)); + service->set_is_volatile(selectedservice->get_is_volatile()); + service->set_notification_period(selectedservice->notification_period()); + service->set_flap_detection_enabled( + selectedservice->flap_detection_enabled()); + service->set_low_flap_threshold(selectedservice->get_low_flap_threshold()); + service->set_high_flap_threshold( + 
selectedservice->get_high_flap_threshold()); + service->set_flap_detection_on_ok( + selectedservice->get_flap_detection_on(notifier::ok)); + service->set_flap_detection_on_warning( + selectedservice->get_flap_detection_on(notifier::warning)); + service->set_flap_detection_on_unknown( + selectedservice->get_flap_detection_on(notifier::unknown)); + service->set_flap_detection_on_critical( + selectedservice->get_flap_detection_on(notifier::critical)); + service->set_process_performance_data( + selectedservice->get_process_performance_data()); + service->set_check_freshness_enabled( + selectedservice->check_freshness_enabled()); + service->set_freshness_threshold( + selectedservice->get_freshness_threshold()); + service->set_passive_checks_enabled( + selectedservice->passive_checks_enabled()); + service->set_event_handler_enabled( + selectedservice->event_handler_enabled()); + service->set_active_checks_enabled( + selectedservice->active_checks_enabled()); + service->set_retain_status_information( + selectedservice->get_retain_status_information()); + service->set_retain_nonstatus_information( + selectedservice->get_retain_nonstatus_information()); + service->set_notifications_enabled( + selectedservice->get_notifications_enabled()); + service->set_obsess_over(selectedservice->obsess_over()); + service->set_notes(selectedservice->get_notes()); + service->set_notes_url(selectedservice->get_notes_url()); + service->set_action_url(selectedservice->get_action_url()); + service->set_icon_image(selectedservice->get_icon_image()); + service->set_icon_image_alt(selectedservice->get_icon_image_alt()); + service->set_acknowledgement(static_cast( + selectedservice->get_acknowledgement())); + service->set_host_problem_at_last_check( + selectedservice->get_host_problem_at_last_check()); + service->set_check_type(static_cast( + selectedservice->get_check_type())); + service->set_last_state( + static_cast(selectedservice->get_last_state())); + service->set_last_hard_state(static_cast( + selectedservice->get_last_hard_state())); + service->set_plugin_output(selectedservice->get_plugin_output()); + service->set_long_plugin_output(selectedservice->get_long_plugin_output()); + service->set_perf_data(selectedservice->get_perf_data()); + service->set_state_type( + static_cast(selectedservice->get_state_type())); + service->set_next_check(string::ctime(selectedservice->get_next_check())); + service->set_should_be_scheduled( + selectedservice->get_should_be_scheduled()); + service->set_last_check(string::ctime(selectedservice->get_last_check())); + service->set_current_attempt(selectedservice->get_current_attempt()); + service->set_current_event_id(selectedservice->get_current_event_id()); + service->set_last_event_id(selectedservice->get_last_event_id()); + service->set_current_problem_id(selectedservice->get_current_problem_id()); + service->set_last_problem_id(selectedservice->get_last_problem_id()); + service->set_last_notification( + string::ctime(selectedservice->get_last_notification())); + service->set_next_notification( + string::ctime(selectedservice->get_next_notification())); + service->set_no_more_notifications( + selectedservice->get_no_more_notifications()); + service->set_last_state_change( + string::ctime(selectedservice->get_last_state_change())); + service->set_last_hard_state_change( + string::ctime(selectedservice->get_last_hard_state_change())); + service->set_last_time_ok( + string::ctime(selectedservice->get_last_time_ok())); + service->set_last_time_warning( + 
string::ctime(selectedservice->get_last_time_warning())); + service->set_last_time_unknown( + string::ctime(selectedservice->get_last_time_unknown())); + service->set_last_time_critical( + string::ctime(selectedservice->get_last_time_critical())); + service->set_has_been_checked(selectedservice->has_been_checked()); + service->set_is_being_freshened(selectedservice->get_is_being_freshened()); + service->set_notified_on_unknown( + selectedservice->get_notified_on(notifier::unknown)); + service->set_notified_on_warning( + selectedservice->get_notified_on(notifier::warning)); + service->set_notified_on_critical( + selectedservice->get_notified_on(notifier::critical)); + service->set_notification_number( + selectedservice->get_notification_number()); + service->set_current_notification_id( + selectedservice->get_current_notification_id()); + service->set_latency(selectedservice->get_latency()); + service->set_execution_time(selectedservice->get_execution_time()); + service->set_is_executing(selectedservice->get_is_executing()); + service->set_check_options(selectedservice->get_check_options()); + service->set_scheduled_downtime_depth( + selectedservice->get_scheduled_downtime_depth()); + service->set_pending_flex_downtime( + selectedservice->get_pending_flex_downtime()); + service->set_state_history(fmt::format( + "[{}]", fmt::join(selectedservice->get_state_history(), ", "))); + service->set_state_history_index( + selectedservice->get_state_history_index()); + service->set_is_flapping(selectedservice->get_is_flapping()); + service->set_flapping_comment_id( + selectedservice->get_flapping_comment_id()); + service->set_percent_state_change( + selectedservice->get_percent_state_change()); + service->set_modified_attributes( + selectedservice->get_modified_attributes()); + service->set_host_ptr(selectedservice->get_host_ptr() + ? 
selectedservice->get_host_ptr()->name() + : ""); + service->set_event_handler_args(selectedservice->get_event_handler_args()); + service->set_check_command_args(selectedservice->get_check_command_args()); + service->set_timezone(selectedservice->get_timezone()); + service->set_icon_id(selectedservice->get_icon_id()); + + const auto& service_severity = selectedservice->get_severity(); + + if (service_severity) { + service->set_severity_level(service_severity->level()); + service->set_severity_id(service_severity->id()); + } + + if (!selectedservice->tags().empty()) + for (const auto& tag : selectedservice->tags()) + service->add_tag(fmt::format("id:{},name:{},type:{}", tag->id(), + tag->name(), + static_cast(tag->type()))); + + for (auto const& cv : selectedservice->custom_variables) + service->add_custom_variables(fmt::format( + "key : {}, value :{}, is_sent :{}, has_been_modified: {} ", cv.first, + cv.second.value(), cv.second.is_sent(), + cv.second.has_been_modified())); + + service->set_service_type(static_cast( + selectedservice->get_service_type() + 1)); + + // if anomaly detection , set the anomaly detection fields + if (selectedservice->get_service_type() == + service_type::ANOMALY_DETECTION) { + auto selectedanomaly = + std::static_pointer_cast( + selectedservice); + + service->set_internal_id(selectedanomaly->get_internal_id()); + service->set_metric_name(selectedanomaly->get_metric_name()); + service->set_thresholds_file(selectedanomaly->get_thresholds_file()); + service->set_sensitivity(selectedanomaly->get_sensitivity()); + service->set_dependent_service_id( + selectedanomaly->get_dependent_service()->service_id()); + } + + return 0; + }); + + std::future result = fn.get_future(); + command_manager::instance().enqueue(std::move(fn)); + + if (result.get() == 0) + return grpc::Status::OK; + else + return grpc::Status(grpc::INVALID_ARGUMENT, err); +} + +/** + * @brief Return hostgroup informations. + * + * @param context gRPC context + * @param request hostgroup's identifier (it can be a hostgroupename or + * hostgroupid) + * @param response The filled fields + * + * @return Status::OK if the HostGroup is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. 
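+ *
+ * Illustrative only: a minimal client-side sketch, assuming an Engine::Stub
+ * named `stub`; the group name is a placeholder. The `members` field comes
+ * back as a repeated string:
+ * @code
+ * com::centreon::engine::NameIdentifier request;
+ * request.set_name("linux-servers");
+ * com::centreon::engine::EngineHostGroup reply;
+ * grpc::ClientContext ctx;
+ * if (stub->GetHostGroup(&ctx, request, &reply).ok())
+ *   for (const auto& member : reply.members())
+ *     std::cout << member << '\n';
+ * @endcode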
+ */ +grpc::Status engine_impl::GetHostGroup(grpc::ServerContext* context + [[maybe_unused]], + const NameIdentifier* request, + EngineHostGroup* response) { + std::string err; + auto fn = std::packaged_task( + [&err, request, hostgroup = response]() -> int32_t { + std::shared_ptr selectedhostgroup; + auto ithostgroup = hostgroup::hostgroups.find(request->name()); + if (ithostgroup != hostgroup::hostgroups.end()) + selectedhostgroup = ithostgroup->second; + else { + err = fmt::format("could not find hostgroup '{}'", request->name()); + return 1; + } + + hostgroup->set_id(selectedhostgroup->get_id()); + hostgroup->set_name(selectedhostgroup->get_group_name()); + hostgroup->set_alias(selectedhostgroup->get_alias()); + + if (!selectedhostgroup->members.empty()) + for (const auto& [key, _] : selectedhostgroup->members) + hostgroup->add_members(key); + + hostgroup->set_notes(selectedhostgroup->get_notes()); + hostgroup->set_notes_url(selectedhostgroup->get_notes_url()); + hostgroup->set_action_url(selectedhostgroup->get_action_url()); + + return 0; + }); + + std::future result = fn.get_future(); + command_manager::instance().enqueue(std::move(fn)); + + if (result.get() == 0) + return grpc::Status::OK; + else + return grpc::Status(grpc::INVALID_ARGUMENT, err); +} + +/** + * @brief Return ServiceGroup informations. + * + * @param context gRPC context + * @param request ServiceGroup's identifier (by ServiceGroup name) + * @param response The filled fields + * + * @return Status::OK if the ServiceGroup is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. + */ +grpc::Status engine_impl::GetServiceGroup(grpc::ServerContext* context + [[maybe_unused]], + const NameIdentifier* request, + EngineServiceGroup* response) { + std::string err; + auto fn = std::packaged_task([&err, request, + servicegroup = + response]() -> int32_t { + std::shared_ptr selectedservicegroup; + auto itservicegroup = servicegroup::servicegroups.find(request->name()); + if (itservicegroup != servicegroup::servicegroups.end()) + selectedservicegroup = itservicegroup->second; + else { + err = fmt::format("could not find servicegroup '{}'", request->name()); + return 1; + } + servicegroup->set_id(selectedservicegroup->get_id()); + servicegroup->set_name(selectedservicegroup->get_group_name()); + servicegroup->set_alias(selectedservicegroup->get_alias()); + + servicegroup->set_notes(selectedservicegroup->get_notes()); + servicegroup->set_notes_url(selectedservicegroup->get_notes_url()); + servicegroup->set_action_url(selectedservicegroup->get_action_url()); + + if (!selectedservicegroup->members.empty()) { + for (const auto& [host_serv_pair, _] : selectedservicegroup->members) { + servicegroup->add_members( + fmt::format("{},{}", host_serv_pair.first, host_serv_pair.second)); + } + } + return 0; + }); + + std::future result = fn.get_future(); + command_manager::instance().enqueue(std::move(fn)); + + if (result.get() == 0) + return grpc::Status::OK; + else + return grpc::Status(grpc::INVALID_ARGUMENT, err); +} + +/** + * @brief Return ContactGroup informations. + * + * @param context gRPC context + * @param request ContactGroup's identifier (by ContactGroup name) + * @param response The filled fields + * + * @return Status::OK if the ContactGroup is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. 
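+ *
+ * Illustrative only: a minimal client-side sketch, assuming an Engine::Stub
+ * named `stub`; the group name is a placeholder. An unknown name yields
+ * INVALID_ARGUMENT with an explanatory error message:
+ * @code
+ * com::centreon::engine::NameIdentifier request;
+ * request.set_name("supervisors");
+ * com::centreon::engine::EngineContactGroup reply;
+ * grpc::ClientContext ctx;
+ * grpc::Status status = stub->GetContactGroup(&ctx, request, &reply);
+ * if (!status.ok())
+ *   std::cerr << status.error_message() << '\n';
+ * @endcode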
+ */ +grpc::Status engine_impl::GetContactGroup(grpc::ServerContext* context + [[maybe_unused]], + const NameIdentifier* request, + EngineContactGroup* response) { + std::string err; + auto fn = std::packaged_task([&err, request, + contactgroup = + response]() -> int32_t { + std::shared_ptr selectedcontactgroup; + auto itcontactgroup = contactgroup::contactgroups.find(request->name()); + if (itcontactgroup != contactgroup::contactgroups.end()) + selectedcontactgroup = itcontactgroup->second; + else { + err = fmt::format("could not find contactgroup '{}'", request->name()); + return 1; + } + + contactgroup->set_name(selectedcontactgroup->get_name()); + contactgroup->set_alias(selectedcontactgroup->get_alias()); + + if (!selectedcontactgroup->get_members().empty()) + for (const auto& [key, _] : selectedcontactgroup->get_members()) + contactgroup->add_members(key); + + return 0; + }); + + std::future result = fn.get_future(); + command_manager::instance().enqueue(std::move(fn)); + + if (result.get() == 0) + return grpc::Status::OK; + else + return grpc::Status(grpc::INVALID_ARGUMENT, err); +} + +/** + * @brief Return Tag informations. + * + * @param context gRPC context + * @param request Tag's identifier (by Tag id and type) + * @param response The filled fields + * + * @return Status::OK if the tag is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. + */ +grpc::Status engine_impl::GetTag(grpc::ServerContext* context [[maybe_unused]], + const IdOrTypeIdentifier* request, + EngineTag* response) { + std::string err; + auto fn = std::packaged_task([&err, request, + tag = response]() -> int32_t { + std::shared_ptr selectedtag; + auto ittag = tag::tags.find(std::make_pair(request->id(), request->type())); + if (ittag != tag::tags.end()) + selectedtag = ittag->second; + else { + err = fmt::format("could not find tag id:'{}', type:'{}' ", request->id(), + request->type()); + return 1; + } + + tag->set_name(selectedtag->name()); + tag->set_id(selectedtag->id()); + tag->set_type(static_cast(selectedtag->type())); + + return 0; + }); + + std::future result = fn.get_future(); + command_manager::instance().enqueue(std::move(fn)); + + if (result.get() == 0) + return grpc::Status::OK; + else + return grpc::Status(grpc::INVALID_ARGUMENT, err); +} + +/** + * @brief Return Severity informations. + * + * @param context gRPC context + * @param request Severity's identifier (by Severity id and type) + * @param response The filled fields + * + * @return Status::OK if the Severity is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. 
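+ *
+ * Illustrative only: a minimal client-side sketch, assuming an Engine::Stub
+ * named `stub`; the id/type values are placeholders and must match an
+ * existing severity:
+ * @code
+ * com::centreon::engine::IdOrTypeIdentifier request;
+ * request.set_id(3);
+ * request.set_type(0);
+ * com::centreon::engine::EngineSeverity reply;
+ * grpc::ClientContext ctx;
+ * grpc::Status status = stub->GetSeverity(&ctx, request, &reply);
+ * @endcode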
+ */ +grpc::Status engine_impl::GetSeverity(grpc::ServerContext* context + [[maybe_unused]], + const IdOrTypeIdentifier* request, + EngineSeverity* response) { + std::string err; + auto fn = std::packaged_task( + [&err, request, severity = response]() -> int32_t { + std::shared_ptr selectedseverity; + auto itseverity = severity::severities.find( + std::make_pair(request->id(), request->type())); + if (itseverity != severity::severities.end()) + selectedseverity = itseverity->second; + else { + err = fmt::format("could not find tag id:'{}', type:'{}' ", + request->id(), request->type()); + return 1; + } + + severity->set_name(selectedseverity->name()); + severity->set_id(selectedseverity->id()); + severity->set_type(static_cast( + selectedseverity->type() + 1)); + severity->set_level(selectedseverity->level()); + severity->set_icon_id(selectedseverity->icon_id()); + + return 0; + }); + + std::future result = fn.get_future(); + command_manager::instance().enqueue(std::move(fn)); + + if (result.get() == 0) + return grpc::Status::OK; + else + return grpc::Status(grpc::INVALID_ARGUMENT, err); +} + +/** + * @brief Return Command informations. + * + * @param context gRPC context + * @param request Command's identifier (by name) + * @param response The filled fields + * + * @return Status::OK if the Command is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. + */ +grpc::Status engine_impl::GetCommand(grpc::ServerContext* context + [[maybe_unused]], + const NameIdentifier* request, + EngineCommand* response) { + std::string err; auto fn = std::packaged_task( - [&err, request, service = response]() -> int32_t { - std::shared_ptr selectedservice; - std::tie(selectedservice, err) = get_serv(*request); - if (!err.empty()) { + [&err, request, command = response]() -> int32_t { + std::shared_ptr selectedcommand; + auto itcommand = commands::command::commands.find(request->name()); + if (itcommand != commands::command::commands.end()) + selectedcommand = itcommand->second; + else { + err = fmt::format("could not find Command '{}'", request->name()); return 1; } + command->set_command_name(selectedcommand->get_name()); + command->set_command_line(selectedcommand->get_command_line()); + command->set_type( + static_cast(selectedcommand->get_type())); - /* recovering service's information */ - service->set_host_id(selectedservice->host_id()); - service->set_service_id(selectedservice->service_id()); - service->set_host_name(selectedservice->get_hostname()); - service->set_description(selectedservice->description()); - service->set_check_period(selectedservice->check_period()); - service->set_current_state(static_cast( - selectedservice->get_current_state())); return 0; }); @@ -492,6 +1082,181 @@ grpc::Status engine_impl::GetService(grpc::ServerContext* context return grpc::Status(grpc::INVALID_ARGUMENT, err); } +/** + * @brief Return Connector informations. + * + * @param context gRPC context + * @param request Connector's identifier (by name) + * @param response The filled fields + * + * @return Status::OK if the Connector is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. 
+ */ +grpc::Status engine_impl::GetConnector(grpc::ServerContext* context + [[maybe_unused]], + const NameIdentifier* request, + EngineConnector* response) { + std::string err; + auto fn = std::packaged_task([&err, request, + connector = response]() -> int32_t { + std::shared_ptr + selectedconnector; + auto itconnector = commands::connector::connectors.find(request->name()); + if (itconnector != commands::connector::connectors.end()) + selectedconnector = itconnector->second; + else { + err = fmt::format("could not find Connector '{}'", request->name()); + return 1; + } + connector->set_connector_name(selectedconnector->get_name()); + connector->set_connector_line(selectedconnector->get_command_line()); + + return 0; + }); + + std::future result = fn.get_future(); + command_manager::instance().enqueue(std::move(fn)); + + if (result.get() == 0) + return grpc::Status::OK; + else + return grpc::Status(grpc::INVALID_ARGUMENT, err); +} + +/** + * @brief Return HostEscalation informations. + * + * @param context gRPC context + * @param request HostEscalation's identifier (by name) + * @param response The filled fields + * + * @return Status::OK if the HostEscalation is found and populated successfully, + * otherwise returns Status::INVALID_ARGUMENT with an error message. + */ +grpc::Status engine_impl::GetHostEscalation(grpc::ServerContext* context + [[maybe_unused]], + const NameIdentifier* request, + EngineHostEscalation* response) { + std::string err; + auto fn = std::packaged_task([&err, request, + escalation = response]() -> int32_t { + std::shared_ptr selectedescalation; + auto itescalation = hostescalation::hostescalations.find(request->name()); + if (itescalation != hostescalation::hostescalations.end()) + selectedescalation = itescalation->second; + else { + err = fmt::format("could not find hostescalation '{}'", request->name()); + return 1; + } + escalation->set_host_name(selectedescalation->get_hostname()); + if (!selectedescalation->get_contactgroups().empty()) + for (const auto& [name, _] : selectedescalation->get_contactgroups()) + escalation->add_contact_group(name); + + escalation->set_first_notification( + selectedescalation->get_first_notification()); + escalation->set_last_notification( + selectedescalation->get_last_notification()); + escalation->set_notification_interval( + selectedescalation->get_notification_interval()); + escalation->set_escalation_period( + selectedescalation->get_escalation_period()); + auto options = fmt::format( + "{}{}{}", + selectedescalation->get_escalate_on(notifier::down) ? "d" : "", + selectedescalation->get_escalate_on(notifier::unreachable) ? "u" : "", + selectedescalation->get_escalate_on(notifier::up) ? "r" : ""); + + if (options == "dur") + options = "all"; + + if (!options.empty() && options != "all" && options.length() != 1) + options = fmt::format("{}", fmt::join(options, ",")); + + escalation->set_escalation_option(options); + return 0; + }); + + std::future result = fn.get_future(); + command_manager::instance().enqueue(std::move(fn)); + + if (result.get() == 0) + return grpc::Status::OK; + else + return grpc::Status(grpc::INVALID_ARGUMENT, err); +} + +/** + * @brief Return ServiceEscalation informations. + * + * @param context gRPC context + * @param request ServiceEscalation's identifier (by host and service name) + * @param response The filled fields + * + * @return Status::OK if the ServiceEscalation is found and populated + * successfully, otherwise returns Status::INVALID_ARGUMENT with an error + * message. 
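+ *
+ * Illustrative only: a minimal client-side sketch, assuming an Engine::Stub
+ * named `stub`; the host and service names are placeholders:
+ * @code
+ * com::centreon::engine::PairNamesIdentifier request;
+ * request.set_host_name("central");
+ * request.set_service_name("Ping");
+ * com::centreon::engine::EngineServiceEscalation reply;
+ * grpc::ClientContext ctx;
+ * grpc::Status status = stub->GetServiceEscalation(&ctx, request, &reply);
+ * @endcode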
+ */ +grpc::Status engine_impl::GetServiceEscalation( + grpc::ServerContext* context [[maybe_unused]], + const PairNamesIdentifier* request, + EngineServiceEscalation* response) { + std::string err; + auto fn = std::packaged_task([&err, request, + escalation = response]() -> int32_t { + std::shared_ptr + selectedescalation; + auto itescalation = serviceescalation::serviceescalations.find( + std::make_pair(request->host_name(), request->service_name())); + if (itescalation != serviceescalation::serviceescalations.end()) + selectedescalation = itescalation->second; + else { + err = fmt::format( + "could not find serviceescalation with : host '{}',service '{}'", + request->host_name(), request->service_name()); + return 1; + } + escalation->set_host(selectedescalation->get_hostname()); + escalation->set_service_description(selectedescalation->get_description()); + if (!selectedescalation->get_contactgroups().empty()) + for (const auto& [name, _] : selectedescalation->get_contactgroups()) + escalation->add_contact_group(name); + + escalation->set_first_notification( + selectedescalation->get_first_notification()); + escalation->set_last_notification( + selectedescalation->get_last_notification()); + escalation->set_notification_interval( + selectedescalation->get_notification_interval()); + escalation->set_escalation_period( + selectedescalation->get_escalation_period()); + + auto options = fmt::format( + "{}{}{}{}", + selectedescalation->get_escalate_on(notifier::warning) ? "w" : "", + selectedescalation->get_escalate_on(notifier::unknown) ? "u" : "", + selectedescalation->get_escalate_on(notifier::critical) ? "c" : "", + selectedescalation->get_escalate_on(notifier::ok) ? "r" : ""); + + if (options == "wucr") + options = "all"; + + if (!options.empty() && options != "all" && options.length() != 1) + options = fmt::format("{}", fmt::join(options, ",")); + + escalation->set_escalation_option(options); + + return 0; + }); + std::future result = fn.get_future(); + command_manager::instance().enqueue(std::move(fn)); + + if (result.get() == 0) + return grpc::Status::OK; + else + return grpc::Status(grpc::INVALID_ARGUMENT, err); +} + /** * @brief Return the total number of hosts. 
* @@ -840,11 +1605,10 @@ grpc::Status engine_impl::DeleteComment(grpc::ServerContext* context * * @return Status::OK */ -grpc::Status engine_impl::DeleteAllHostComments(grpc::ServerContext* context - [[maybe_unused]], - const HostIdentifier* request, - CommandSuccess* response - [[maybe_unused]]) { +grpc::Status engine_impl::DeleteAllHostComments( + grpc::ServerContext* context [[maybe_unused]], + const NameOrIdIdentifier* request, + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_host; @@ -911,7 +1675,7 @@ grpc::Status engine_impl::DeleteAllServiceComments( */ grpc::Status engine_impl::RemoveHostAcknowledgement( grpc::ServerContext* context [[maybe_unused]], - const HostIdentifier* request, + const NameOrIdIdentifier* request, CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { @@ -2414,7 +3178,7 @@ grpc::Status engine_impl::DelayServiceNotification( switch (request->identifier_case()) { case ServiceDelayIdentifier::kNames: { - NameIdentifier names = request->names(); + PairNamesIdentifier names = request->names(); auto it = service::services.find({names.host_name(), names.service_name()}); if (it != service::services.end()) @@ -2426,7 +3190,7 @@ grpc::Status engine_impl::DelayServiceNotification( } } break; case ServiceDelayIdentifier::kIds: { - IdIdentifier ids = request->ids(); + PairIdsIdentifier ids = request->ids(); auto it = service::services_by_id.find({ids.host_id(), ids.service_id()}); if (it != service::services_by_id.end()) @@ -3228,7 +3992,7 @@ grpc::Status engine_impl::ShutdownProgram( ::grpc::Status engine_impl::EnableHostAndChildNotifications( ::grpc::ServerContext* context [[maybe_unused]], - const ::com::centreon::engine::HostIdentifier* request, + const ::com::centreon::engine::NameOrIdIdentifier* request, ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { HOST_METHOD_BEGIN commands::processing::wrapper_enable_host_and_child_notifications( @@ -3238,7 +4002,7 @@ ::grpc::Status engine_impl::EnableHostAndChildNotifications( ::grpc::Status engine_impl::DisableHostAndChildNotifications( ::grpc::ServerContext* context [[maybe_unused]], - const ::com::centreon::engine::HostIdentifier* request, + const ::com::centreon::engine::NameOrIdIdentifier* request, ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { HOST_METHOD_BEGIN commands::processing::wrapper_disable_host_and_child_notifications( @@ -3248,7 +4012,7 @@ ::grpc::Status engine_impl::DisableHostAndChildNotifications( ::grpc::Status engine_impl::DisableHostNotifications( ::grpc::ServerContext* context [[maybe_unused]], - const ::com::centreon::engine::HostIdentifier* request, + const ::com::centreon::engine::NameOrIdIdentifier* request, ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { HOST_METHOD_BEGIN disable_host_notifications(host_info.first.get()); @@ -3257,7 +4021,7 @@ ::grpc::Status engine_impl::DisableHostNotifications( ::grpc::Status engine_impl::EnableHostNotifications( ::grpc::ServerContext* context [[maybe_unused]], - const ::com::centreon::engine::HostIdentifier* request, + const ::com::centreon::engine::NameOrIdIdentifier* request, ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { HOST_METHOD_BEGIN enable_host_notifications(host_info.first.get()); @@ -3344,10 +4108,10 @@ ::grpc::Status engine_impl::ChangeAnomalyDetectionSensitivity( */ std::pair, std::string> 
engine_impl::get_host( - const ::com::centreon::engine::HostIdentifier& host_info) { + const ::com::centreon::engine::NameOrIdIdentifier& host_info) { std::string err; switch (host_info.identifier_case()) { - case HostIdentifier::kName: { + case NameOrIdIdentifier::kName: { /* get the host */ auto ithostname = host::hosts.find(host_info.name()); if (ithostname != host::hosts.end()) @@ -3358,7 +4122,7 @@ engine_impl::get_host( err); } } break; - case HostIdentifier::kId: { + case NameOrIdIdentifier::kId: { /* get the host */ auto ithostid = host::hosts_by_id.find(host_info.id()); if (ithostid != host::hosts_by_id.end()) @@ -3386,7 +4150,7 @@ engine_impl::get_serv( /* checking identifier sesrname (by names or by ids) */ switch (serv_info.identifier_case()) { case ServiceIdentifier::kNames: { - const NameIdentifier& names = serv_info.names(); + const PairNamesIdentifier& names = serv_info.names(); /* get the service */ auto itservicenames = service::services.find( std::make_pair(names.host_name(), names.service_name())); @@ -3400,7 +4164,7 @@ engine_impl::get_serv( } } break; case ServiceIdentifier::kIds: { - const IdIdentifier& ids = serv_info.ids(); + const PairIdsIdentifier& ids = serv_info.ids(); /* get the service */ auto itserviceids = service::services_by_id.find( std::make_pair(ids.host_id(), ids.service_id())); diff --git a/engine/inc/com/centreon/engine/anomalydetection.hh b/engine/inc/com/centreon/engine/anomalydetection.hh index 29785ed6bb5..85142ca5cc7 100644 --- a/engine/inc/com/centreon/engine/anomalydetection.hh +++ b/engine/inc/com/centreon/engine/anomalydetection.hh @@ -87,7 +87,6 @@ class anomalydetection : public service { bool status_change, bool checks_enabled, bool accept_passive_checks, - enum service::service_state initial_state, uint32_t check_interval, uint32_t retry_interval, uint32_t notification_interval, @@ -164,7 +163,6 @@ com::centreon::engine::anomalydetection* add_anomalydetection( std::string const& metric_name, std::string const& thresholds_file, bool status_change, - enum com::centreon::engine::service::service_state initial_state, int max_attempts, double check_interval, double retry_interval, diff --git a/engine/inc/com/centreon/engine/broker.hh b/engine/inc/com/centreon/engine/broker.hh index d1bececa863..e755b1d3cf1 100644 --- a/engine/inc/com/centreon/engine/broker.hh +++ b/engine/inc/com/centreon/engine/broker.hh @@ -22,6 +22,7 @@ #ifndef CCE_BROKER_HH #define CCE_BROKER_HH +#include "bbdo/neb.pb.h" #include "com/centreon/engine/commands/command.hh" #include "com/centreon/engine/comment.hh" #include "com/centreon/engine/events/timed_event.hh" @@ -520,6 +521,10 @@ struct timeval get_broker_timestamp(struct timeval const* timestamp); void broker_bench(unsigned id, const std::chrono::system_clock::time_point& mess_create); +struct nebstruct_agent_stats_data; + +void broker_agent_stats(nebstruct_agent_stats_data& stats); + #ifdef __cplusplus } #endif /* C++ */ diff --git a/engine/inc/com/centreon/engine/commands/processing.hh b/engine/inc/com/centreon/engine/commands/processing.hh index f5cf0036382..f05e8fcc841 100644 --- a/engine/inc/com/centreon/engine/commands/processing.hh +++ b/engine/inc/com/centreon/engine/commands/processing.hh @@ -152,6 +152,6 @@ class processing { }; } // namespace commands -} +} // namespace com::centreon::engine #endif // !CCE_MOD_EXTCMD_PROCESSING_HH diff --git a/engine/inc/com/centreon/engine/configuration/whitelist.hh b/engine/inc/com/centreon/engine/configuration/whitelist.hh index 26991502388..c8aab98c4cc 100644 --- 
a/engine/inc/com/centreon/engine/configuration/whitelist.hh +++ b/engine/inc/com/centreon/engine/configuration/whitelist.hh @@ -105,6 +105,8 @@ whitelist::whitelist(string_iter dir_path_begin, string_iter dir_path_end) "whitelist directory found, but no restrictions, " "all commands are accepted"); break; + default: + break; } } diff --git a/engine/inc/com/centreon/engine/contact.hh b/engine/inc/com/centreon/engine/contact.hh index fc6069a017d..0077a19178e 100644 --- a/engine/inc/com/centreon/engine/contact.hh +++ b/engine/inc/com/centreon/engine/contact.hh @@ -1,5 +1,5 @@ -/* - * Copyright 2017 - 2019 Centreon (https://www.centreon.com/) +/** + * Copyright 2017 - 2024 Centreon (https://www.centreon.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/engine/inc/com/centreon/engine/engine_impl.hh b/engine/inc/com/centreon/engine/engine_impl.hh index f1bba7a6791..4dafee7d002 100644 --- a/engine/inc/com/centreon/engine/engine_impl.hh +++ b/engine/inc/com/centreon/engine/engine_impl.hh @@ -49,14 +49,41 @@ class engine_impl final : public Engine::Service { const ::google::protobuf::Empty*, GenericValue*) override; grpc::Status GetHost(grpc::ServerContext* context, - const HostIdentifier* request, + const NameOrIdIdentifier* request, EngineHost* response) override; grpc::Status GetContact(grpc::ServerContext* context, - const ContactIdentifier* request, + const NameIdentifier* request, EngineContact* response) override; grpc::Status GetService(grpc::ServerContext* context, const ServiceIdentifier* request, EngineService* response) override; + grpc::Status GetHostGroup(grpc::ServerContext* context, + const NameIdentifier*, + EngineHostGroup*) override; + grpc::Status GetServiceGroup(grpc::ServerContext* context, + const NameIdentifier* request, + EngineServiceGroup* response) override; + grpc::Status GetContactGroup(grpc::ServerContext* context, + const NameIdentifier* request, + EngineContactGroup* response) override; + grpc::Status GetTag(grpc::ServerContext* context, + const IdOrTypeIdentifier* request, + EngineTag* response) override; + grpc::Status GetSeverity(grpc::ServerContext* context, + const IdOrTypeIdentifier* request, + EngineSeverity* response) override; + grpc::Status GetCommand(grpc::ServerContext* context, + const NameIdentifier* request, + EngineCommand* response) override; + grpc::Status GetConnector(grpc::ServerContext* context, + const NameIdentifier* request, + EngineConnector* response) override; + grpc::Status GetHostEscalation(grpc::ServerContext* context, + const NameIdentifier* request, + EngineHostEscalation* response) override; + grpc::Status GetServiceEscalation(grpc::ServerContext* context, + const PairNamesIdentifier* request, + EngineServiceEscalation* response) override; grpc::Status AddHostComment(grpc::ServerContext* context, const EngineComment* request, CommandSuccess* response) override; @@ -70,10 +97,10 @@ class engine_impl final : public Engine::Service { const ServiceIdentifier* request, CommandSuccess* response) override; grpc::Status DeleteAllHostComments(grpc::ServerContext* context, - const HostIdentifier* request, + const NameOrIdIdentifier* request, CommandSuccess* response) override; grpc::Status RemoveHostAcknowledgement(grpc::ServerContext* context, - const HostIdentifier* request, + const NameOrIdIdentifier* request, CommandSuccess* response) override; grpc::Status RemoveServiceAcknowledgement(grpc::ServerContext* context, const 
ServiceIdentifier* request, @@ -194,21 +221,21 @@ class engine_impl final : public Engine::Service { ::google::protobuf::Empty*) override; ::grpc::Status EnableHostAndChildNotifications( ::grpc::ServerContext* context, - const ::com::centreon::engine::HostIdentifier* request, + const ::com::centreon::engine::NameOrIdIdentifier* request, ::com::centreon::engine::CommandSuccess* response) override; ::grpc::Status DisableHostAndChildNotifications( ::grpc::ServerContext* context, - const ::com::centreon::engine::HostIdentifier* request, + const ::com::centreon::engine::NameOrIdIdentifier* request, ::com::centreon::engine::CommandSuccess* response) override; ::grpc::Status DisableHostNotifications( ::grpc::ServerContext* context, - const ::com::centreon::engine::HostIdentifier* request, + const ::com::centreon::engine::NameOrIdIdentifier* request, ::com::centreon::engine::CommandSuccess* response) override; ::grpc::Status EnableHostNotifications( ::grpc::ServerContext* context, - const ::com::centreon::engine::HostIdentifier* request, + const ::com::centreon::engine::NameOrIdIdentifier* request, ::com::centreon::engine::CommandSuccess* response) override; ::grpc::Status DisableNotifications( @@ -238,7 +265,7 @@ class engine_impl final : public Engine::Service { static std::pair, std::string /*error*/> - get_host(const ::com::centreon::engine::HostIdentifier& host_info); + get_host(const ::com::centreon::engine::NameOrIdIdentifier& host_info); static std::pair, std::string /*error*/> @@ -262,5 +289,5 @@ class engine_impl final : public Engine::Service { google::protobuf::Empty* response) override; }; -} +} // namespace com::centreon::engine #endif /* !CCE_ENGINERPC_ENGINE_IMPL_HH */ diff --git a/engine/inc/com/centreon/engine/host.hh b/engine/inc/com/centreon/engine/host.hh index d9c00c7fb4a..13215ea11e1 100644 --- a/engine/inc/com/centreon/engine/host.hh +++ b/engine/inc/com/centreon/engine/host.hh @@ -50,7 +50,6 @@ class host : public notifier { std::string const& alias, std::string const& address, std::string const& check_period, - enum host::host_state initial_state, uint32_t check_interval, uint32_t retry_interval, int max_attempts, @@ -250,7 +249,7 @@ class host : public notifier { void set_check_command_ptr( const std::shared_ptr& cmd) override; - host_map_unsafe parent_hosts; + host_map parent_hosts; host_map_unsafe child_hosts; static host_map hosts; static host_id_map hosts_by_id; @@ -288,10 +287,11 @@ class host : public notifier { int _circular_path_checked; bool _contains_circular_path; + enum host_state _initial_state; enum host_state _last_state; enum host_state _last_hard_state; enum host_state _current_state; - enum host_state _initial_state; + std::list _hostgroups; }; @@ -309,6 +309,7 @@ int number_of_total_parent_hosts(com::centreon::engine::host* hst); std::ostream& operator<<(std::ostream& os, com::centreon::engine::host const& obj); std::ostream& operator<<(std::ostream& os, host_map_unsafe const& obj); +std::ostream& operator<<(std::ostream& os, host_map const& obj); namespace com::centreon::engine { @@ -320,6 +321,4 @@ std::string get_host_name(const uint64_t host_id); } // namespace com::centreon::engine -std::ostream& operator<<(std::ostream& os, host_map_unsafe const& obj); - #endif // !CCE_HOST_HH diff --git a/engine/inc/com/centreon/engine/nebcallbacks.hh b/engine/inc/com/centreon/engine/nebcallbacks.hh index d382b6950f7..ac333b2c047 100644 --- a/engine/inc/com/centreon/engine/nebcallbacks.hh +++ b/engine/inc/com/centreon/engine/nebcallbacks.hh @@ -75,7 +75,8 @@ 
#define NEBCALLBACK_BENCH_DATA 45 #define NEBCALLBACK_OTL_METRICS 46 -#define NEBCALLBACK_NUMITEMS 47 /* Total number of callback types we have. */ +#define NEBCALLBACK_AGENT_STATS 47 +#define NEBCALLBACK_NUMITEMS 48 /* Total number of callback types we have. */ #ifdef __cplusplus extern "C" { diff --git a/engine/inc/com/centreon/engine/nebstructs.hh b/engine/inc/com/centreon/engine/nebstructs.hh index 271aa3b53d5..2b175542c6d 100644 --- a/engine/inc/com/centreon/engine/nebstructs.hh +++ b/engine/inc/com/centreon/engine/nebstructs.hh @@ -21,6 +21,8 @@ #ifndef CCE_NEBSTRUCTS_HH #define CCE_NEBSTRUCTS_HH +#include +#include "bbdo/neb.pb.h" #include "com/centreon/engine/comment.hh" /* Acknowledgement structure. */ @@ -255,4 +257,33 @@ typedef struct nebstruct_bench_struct { std::chrono::system_clock::time_point mess_create; } nebstruct_bench_data; +struct nebstruct_agent_stats_data { + struct cumul_data { + cumul_data(unsigned maj, + unsigned min, + unsigned pat, + bool rev, + const std::string& operating_system, + const std::string& os_ver, + size_t nb_ag) + : major(maj), + minor(min), + patch(pat), + reverse(rev), + os(operating_system), + os_version(os_ver), + nb_agent(nb_ag) {} + + unsigned major; + unsigned minor; + unsigned patch; + bool reverse; + std::string os; + std::string os_version; + size_t nb_agent; + }; + + std::unique_ptr> data; +}; + #endif /* !CCE_NEBSTRUCTS_HH */ diff --git a/engine/inc/com/centreon/engine/service.hh b/engine/inc/com/centreon/engine/service.hh index 25874da44d2..411fbd6fea5 100644 --- a/engine/inc/com/centreon/engine/service.hh +++ b/engine/inc/com/centreon/engine/service.hh @@ -124,7 +124,6 @@ class service : public notifier { std::string const& check_command, bool checks_enabled, bool accept_passive_checks, - enum service::service_state initial_state, uint32_t check_interval, uint32_t retry_interval, uint32_t notification_interval, @@ -295,7 +294,6 @@ com::centreon::engine::service* add_service( std::string const& description, std::string const& display_name, std::string const& check_period, - enum com::centreon::engine::service::service_state initial_state, int max_attempts, double check_interval, double retry_interval, diff --git a/engine/modules/bench/passive/engine_cfg.cc b/engine/modules/bench/passive/engine_cfg.cc index b1006d6bbd9..def42f26dc1 100644 --- a/engine/modules/bench/passive/engine_cfg.cc +++ b/engine/modules/bench/passive/engine_cfg.cc @@ -44,8 +44,9 @@ engine_cfg::engine_cfg(std::string const& additional, bool auto_delete) : _auto_delete(auto_delete) { // Create directory. - _directory = tmpnam(NULL); - ::mkdir(_directory.c_str(), 0777); + char temp_dir[] = "/tmp/centengine.XXXXXX"; + _directory = mkdtemp(temp_dir); + chmod(_directory.c_str(), 0777); // Write object files. std::string object_file(_directory); diff --git a/engine/modules/external_commands/CMakeLists.txt b/engine/modules/external_commands/CMakeLists.txt index de832e231ce..0100595f13c 100644 --- a/engine/modules/external_commands/CMakeLists.txt +++ b/engine/modules/external_commands/CMakeLists.txt @@ -34,7 +34,7 @@ if(LEGACY_ENGINE) "${INC_DIR}/utils.hh") set_property(TARGET "externalcmd" PROPERTY PREFIX "") target_precompile_headers(externalcmd PRIVATE precomp_inc/precomp.hh) - add_dependencies(externalcmd centreon_clib pb_neb_lib) + add_dependencies(externalcmd centreon_clib pb_neb_lib pb_common_lib) target_link_libraries(externalcmd centreon_clib spdlog::spdlog) # Install rule. 
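Aside: the engine_cfg.cc hunk above replaces the racy tmpnam() + mkdir() pair with mkdtemp(). A minimal standalone sketch of that pattern, with error handling added here for illustration only (not taken from the patch):

#include <sys/stat.h>  // chmod
#include <cstdlib>     // mkdtemp (POSIX)
#include <stdexcept>
#include <string>

// mkdtemp() rewrites the trailing XXXXXX in place and creates the directory
// atomically with mode 0700, so there is no window between "pick a name" and
// "create it" as there was with tmpnam() + mkdir().
std::string make_scratch_dir() {
  char tmpl[] = "/tmp/centengine.XXXXXX";
  if (mkdtemp(tmpl) == nullptr)
    throw std::runtime_error("mkdtemp failed");
  ::chmod(tmpl, 0777);  // the bench module widens permissions afterwards
  return tmpl;
}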
@@ -55,7 +55,7 @@ else() set_property(TARGET "externalcmd" PROPERTY PREFIX "") target_precompile_headers("externalcmd" PRIVATE precomp_inc/precomp.hh) - add_dependencies(externalcmd centreon_clib pb_neb_lib) + add_dependencies(externalcmd centreon_clib pb_neb_lib pb_common_lib) target_link_libraries(externalcmd centreon_clib spdlog::spdlog) install( diff --git a/engine/modules/opentelemetry/CMakeLists.txt b/engine/modules/opentelemetry/CMakeLists.txt index f6a8b94974f..ad04953c771 100644 --- a/engine/modules/opentelemetry/CMakeLists.txt +++ b/engine/modules/opentelemetry/CMakeLists.txt @@ -69,6 +69,7 @@ ${SRC_DIR}/centreon_agent/agent_config.cc ${SRC_DIR}/centreon_agent/agent_impl.cc ${SRC_DIR}/centreon_agent/agent_reverse_client.cc ${SRC_DIR}/centreon_agent/agent_service.cc +${SRC_DIR}/centreon_agent/agent_stat.cc ${SRC_DIR}/centreon_agent/to_agent_connector.cc ${SRC_DIR}/grpc_config.cc ${SRC_DIR}/host_serv_extractor.cc @@ -96,6 +97,7 @@ target_link_libraries(opentelemetry add_dependencies(opentelemetry pb_open_telemetry_lib pb_neb_lib + pb_common_lib engine_rpc) target_include_directories(opentelemetry PRIVATE diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh index f65940cbf92..41eaa24e675 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh @@ -46,6 +46,8 @@ class agent_config { public: agent_config(const rapidjson::Value& json_config_v); + agent_config(); + // used for tests agent_config(uint32_t check_interval, uint32_t max_concurrent_checks, diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_impl.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_impl.hh index c1e1a4a5c06..614cd4490b6 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_impl.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_impl.hh @@ -19,7 +19,8 @@ #ifndef CCE_MOD_OTL_CENTREON_AGENT_AGENT_IMPL_HH #define CCE_MOD_OTL_CENTREON_AGENT_AGENT_IMPL_HH -#include "centreon_agent/agent.grpc.pb.h" +#include "agent_stat.hh" + #include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh" #include "com/centreon/engine/modules/opentelemetry/otl_data_point.hh" @@ -39,6 +40,7 @@ class agent_impl public std::enable_shared_from_this> { std::shared_ptr _io_context; const std::string_view _class_name; + const bool _reversed; whitelist_cache _whitelist_cache; @@ -51,7 +53,7 @@ class agent_impl std::shared_ptr _last_sent_config ABSL_GUARDED_BY(_protect); - static std::set> _instances + static std::set>* _instances ABSL_GUARDED_BY(_instances_m); static absl::Mutex _instances_m; @@ -70,6 +72,9 @@ class agent_impl protected: std::shared_ptr _logger; bool _alive ABSL_GUARDED_BY(_protect); + + agent_stat::pointer _stats; + mutable absl::Mutex _protect; public: @@ -77,7 +82,9 @@ class agent_impl const std::string_view class_name, const agent_config::pointer& conf, const metric_handler& handler, - const std::shared_ptr& logger); + const std::shared_ptr& logger, + bool reversed, + const agent_stat::pointer& stats); virtual ~agent_impl(); diff --git 
a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh index cc02b91e8af..3d01a5ca314 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh @@ -20,6 +20,7 @@ #define CCE_MOD_OTL_CENTREON_AGENT_AGENT_REVERSE_CLIENT_HH #include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh" +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_stat.hh" #include "com/centreon/engine/modules/opentelemetry/otl_data_point.hh" namespace com::centreon::engine::modules::opentelemetry::centreon_agent { @@ -39,6 +40,8 @@ class agent_reverse_client { absl::Mutex _agents_m; config_to_client _agents ABSL_GUARDED_BY(_agents_m); + agent_stat::pointer _agent_stats; + virtual config_to_client::iterator _create_new_client_connection( const grpc_config::pointer& agent_endpoint, const agent_config::pointer& agent_conf) @@ -50,7 +53,8 @@ class agent_reverse_client { agent_reverse_client( const std::shared_ptr& io_context, const metric_handler& handler, - const std::shared_ptr& logger); + const std::shared_ptr& logger, + const agent_stat::pointer& stats); virtual ~agent_reverse_client(); diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_service.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_service.hh index a58f8263a50..54abebd58c7 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_service.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_service.hh @@ -19,6 +19,7 @@ #ifndef CCE_MOD_OTL_CENTREON_AGENT_AGENT_SERVICE_HH #define CCE_MOD_OTL_CENTREON_AGENT_AGENT_SERVICE_HH +#include "centreon_agent/agent.grpc.pb.h" #include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh" #include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_impl.hh" @@ -38,11 +39,14 @@ class agent_service : public agent::AgentService::Service, metric_handler _metric_handler; std::shared_ptr _logger; + agent_stat::pointer _stats; + public: agent_service(const std::shared_ptr& io_context, const agent_config::pointer& conf, const metric_handler& handler, - const std::shared_ptr& logger); + const std::shared_ptr& logger, + const agent_stat::pointer& stats); void init(); @@ -50,7 +54,8 @@ class agent_service : public agent::AgentService::Service, const std::shared_ptr& io_context, const agent_config::pointer& conf, const metric_handler& handler, - const std::shared_ptr& logger); + const std::shared_ptr& logger, + const agent_stat::pointer& stats); // disable synchronous version of this method ::grpc::Status Export( diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_stat.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_stat.hh new file mode 100644 index 00000000000..90f50a3f124 --- /dev/null +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_stat.hh @@ -0,0 +1,78 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CCE_MOD_OTL_CENTREON_AGENT_AGENT_STAT_HH +#define CCE_MOD_OTL_CENTREON_AGENT_AGENT_STAT_HH + +#include +#include +#include +#include +#include "centreon_agent/agent.pb.h" + +namespace com::centreon::engine::modules::opentelemetry::centreon_agent { + +class agent_stat : public std::enable_shared_from_this { + using agent_info_set = std::set; + struct group_by_key + : public std::tuple< + unsigned /*agent major version*/, + unsigned /*agent minor version*/, + unsigned /*agent patch*/, + bool /*reverse*/, + std::string /*os almalinux, windows, windows-server...*/, + std::string /*os version*/> { + public: + group_by_key(const com::centreon::agent::AgentInfo& agent_info, + bool reversed); + }; + + using agent_info_map = absl::flat_hash_map; + + agent_info_map _data ABSL_GUARDED_BY(_protect); + + std::shared_ptr _io_context; + asio::system_timer _send_timer ABSL_GUARDED_BY(_protect); + bool _dirty ABSL_GUARDED_BY(_protect); + + mutable absl::Mutex _protect; + + void _on_stat_update() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(_protect); + + void _start_send_timer(); + void _send_timer_handler(const boost::system::error_code& err); + + public: + using pointer = std::shared_ptr; + + agent_stat(const std::shared_ptr& io_context); + + static pointer load(const std::shared_ptr& io_context); + + void stop_send_timer(); + + void add_agent(const com::centreon::agent::AgentInfo& agent_info, + bool reversed, + const void* reactor); + void remove_agent(const com::centreon::agent::AgentInfo& agent_info, + bool reversed, + const void* reactor); +}; + +} // namespace com::centreon::engine::modules::opentelemetry::centreon_agent +#endif \ No newline at end of file diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh index 3fc016aebb9..31c8d943849 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh @@ -20,6 +20,7 @@ #define CCE_MOD_OTL_CENTREON_AGENT_AGENT_CLIENT_HH #include "centreon_agent/agent.grpc.pb.h" +#include "centreon_agent/agent_stat.hh" #include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh" #include "com/centreon/common/grpc/grpc_client.hh" @@ -47,12 +48,15 @@ class to_agent_connector absl::Mutex _connection_m; std::shared_ptr _connection ABSL_GUARDED_BY(_connection_m); + agent_stat::pointer _stats; + public: to_agent_connector(const grpc_config::pointer& agent_endpoint_conf, const std::shared_ptr& io_context, const agent_config::pointer& agent_conf, const metric_handler& handler, - const std::shared_ptr& logger); + const std::shared_ptr& logger, + const agent_stat::pointer& stats); virtual ~to_agent_connector(); @@ 
-63,7 +67,8 @@ class to_agent_connector const std::shared_ptr& io_context, const agent_config::pointer& agent_conf, const metric_handler& handler, - const std::shared_ptr& logger); + const std::shared_ptr& logger, + const agent_stat::pointer& stats); void refresh_agent_configuration_if_needed( const agent_config::pointer& new_conf); diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh index b30ba4664b3..a2777f95376 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh @@ -59,6 +59,8 @@ class open_telemetry : public commands::otel::open_telemetry_base { std::shared_ptr _io_context; mutable std::mutex _protect; + centreon_agent::agent_stat::pointer _agent_stats; + void _forward_to_broker(const std::vector& unknown); void _create_telegraf_conf_server( diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh index 76c79038413..0235577f7f4 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh @@ -207,6 +207,8 @@ void otl_data_point::extract_data_points(const metric_request_ptr& metrics, handler( otl_data_point(metrics, resource, scope, pb_metric, iter)); break; + default: + break; } } } diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_server.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_server.hh index 935aac30d9c..728f96e1dab 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_server.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_server.hh @@ -46,7 +46,8 @@ class otl_server : public common::grpc::grpc_server_base { const grpc_config::pointer& conf, const centreon_agent::agent_config::pointer& agent_config, const metric_handler& handler, - const std::shared_ptr& logger); + const std::shared_ptr& logger, + const centreon_agent::agent_stat::pointer& agent_stats); void start(); public: @@ -59,7 +60,8 @@ class otl_server : public common::grpc::grpc_server_base { const grpc_config::pointer& conf, const centreon_agent::agent_config::pointer& agent_config, const metric_handler& handler, - const std::shared_ptr& logger); + const std::shared_ptr& logger, + const centreon_agent::agent_stat::pointer& agent_stats); void update_agent_config( const centreon_agent::agent_config::pointer& agent_config); diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc index 0d49927f5c7..cbea64c98f4 100644 --- a/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc +++ b/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc @@ -62,6 +62,11 @@ static constexpr std::string_view _config_schema(R"( } )"); +constexpr unsigned default_check_interval = 60; +constexpr unsigned default_max_concurrent_checks = 100; +constexpr unsigned default_export_period = 60; +constexpr unsigned default_check_timeout = 30; + /** * @brief Construct a new agent config::agent from json data * @@ 
-74,11 +79,14 @@ agent_config::agent_config(const rapidjson::Value& json_config_v) { file_content.validate(validator); - _check_interval = file_content.get_unsigned("check_interval", 60); - _max_concurrent_checks = - file_content.get_unsigned("max_concurrent_checks", 100); - _export_period = file_content.get_unsigned("export_period", 60); - _check_timeout = file_content.get_unsigned("check_timeout", 30); + _check_interval = + file_content.get_unsigned("check_interval", default_check_interval); + _max_concurrent_checks = file_content.get_unsigned( + "max_concurrent_checks", default_max_concurrent_checks); + _export_period = + file_content.get_unsigned("export_period", default_export_period); + _check_timeout = + file_content.get_unsigned("check_timeout", default_check_timeout); if (file_content.has_member("reverse_connections")) { const auto& reverse_array = file_content.get_member("reverse_connections"); @@ -90,6 +98,16 @@ agent_config::agent_config(const rapidjson::Value& json_config_v) { } } +/** + * @brief default constructor with the same values as default json values + * + */ +agent_config::agent_config() + : _check_interval(default_check_interval), + _max_concurrent_checks(default_max_concurrent_checks), + _export_period(default_export_period), + _check_timeout(default_check_timeout) {} + /** * @brief Constructor used by tests * diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_impl.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_impl.cc index 2514549e998..c5ca22158f0 100644 --- a/engine/modules/opentelemetry/src/centreon_agent/agent_impl.cc +++ b/engine/modules/opentelemetry/src/centreon_agent/agent_impl.cc @@ -36,8 +36,9 @@ using namespace com::centreon::engine::modules::opentelemetry::centreon_agent; * @tparam bireactor_class */ template -std::set>> - agent_impl::_instances; +std::set>>* + agent_impl::_instances = + new std::set>>; template absl::Mutex agent_impl::_instances_m; @@ -57,14 +58,18 @@ agent_impl::agent_impl( const std::string_view class_name, const agent_config::pointer& conf, const metric_handler& handler, - const std::shared_ptr& logger) + const std::shared_ptr& logger, + bool reversed, + const agent_stat::pointer& stats) : _io_context(io_context), _class_name(class_name), + _reversed(reversed), _conf(conf), _metric_handler(handler), - _logger(logger), _write_pending(false), - _alive(true) { + _logger(logger), + _alive(true), + _stats(stats) { SPDLOG_LOGGER_DEBUG(logger, "create {} this={:p}", _class_name, static_cast(this)); } @@ -76,6 +81,9 @@ agent_impl::agent_impl( */ template agent_impl::~agent_impl() { + if (_agent_info && _agent_info->has_init()) { + _stats->remove_agent(_agent_info->init(), _reversed, this); + } SPDLOG_LOGGER_DEBUG(_logger, "delete {} this={:p}", _class_name, static_cast(this)); } @@ -112,7 +120,7 @@ template void agent_impl::all_agent_calc_and_send_config_if_needed( const agent_config::pointer& new_conf) { absl::MutexLock l(&_instances_m); - for (auto& instance : _instances) { + for (auto& instance : *_instances) { instance->calc_and_send_config_if_needed(new_conf); } } @@ -216,6 +224,7 @@ void agent_impl::on_request( agent_conf = _conf; _last_sent_config.reset(); } + _stats->add_agent(_agent_info->init(), _reversed, this); SPDLOG_LOGGER_DEBUG(_logger, "init from {}", get_peer()); calc_and_send_config_if_needed(agent_conf); } @@ -254,7 +263,7 @@ template void agent_impl::register_stream( const std::shared_ptr& strm) { absl::MutexLock l(&_instances_m); - _instances.insert(strm); + _instances->insert(strm); } /** 
@@ -371,7 +380,8 @@ void agent_impl::OnDone() { absl::MutexLock l(&_instances_m); SPDLOG_LOGGER_DEBUG(logger, "{:p} server::OnDone()", static_cast(me.get())); - _instances.erase(std::static_pointer_cast>(me)); + _instances->erase( + std::static_pointer_cast>(me)); }); } @@ -402,7 +412,8 @@ void agent_impl::OnDone(const ::grpc::Status& status) { static_cast(me.get()), status.error_message(), status.error_details()); } - _instances.erase(std::static_pointer_cast>(me)); + _instances->erase( + std::static_pointer_cast>(me)); }); } @@ -424,12 +435,13 @@ void agent_impl::shutdown() { */ template void agent_impl::shutdown_all() { - std::set> to_shutdown; + std::set>* to_shutdown; { absl::MutexLock l(&_instances_m); - to_shutdown = std::move(_instances); + to_shutdown = _instances; + _instances = new std::set>>; } - for (std::shared_ptr conn : to_shutdown) { + for (std::shared_ptr conn : *to_shutdown) { conn->shutdown(); } } diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_reverse_client.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_reverse_client.cc index 7c38cee5ad4..3f5ceaa54e3 100644 --- a/engine/modules/opentelemetry/src/centreon_agent/agent_reverse_client.cc +++ b/engine/modules/opentelemetry/src/centreon_agent/agent_reverse_client.cc @@ -31,8 +31,12 @@ using namespace com::centreon::engine::modules::opentelemetry::centreon_agent; agent_reverse_client::agent_reverse_client( const std::shared_ptr& io_context, const metric_handler& handler, - const std::shared_ptr& logger) - : _io_context(io_context), _metric_handler(handler), _logger(logger) {} + const std::shared_ptr& logger, + const agent_stat::pointer& stats) + : _io_context(io_context), + _metric_handler(handler), + _logger(logger), + _agent_stats(stats) {} /** * @brief Destroy the agent reverse client::agent reverse client object @@ -112,7 +116,7 @@ agent_reverse_client::_create_new_client_connection( auto insert_res = _agents.try_emplace( agent_endpoint, to_agent_connector::load(agent_endpoint, _io_context, agent_conf, - _metric_handler, _logger)); + _metric_handler, _logger, _agent_stats)); return insert_res.first; } diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_service.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_service.cc index 8fea6fcb1bc..fa403d03cf3 100644 --- a/engine/modules/opentelemetry/src/centreon_agent/agent_service.cc +++ b/engine/modules/opentelemetry/src/centreon_agent/agent_service.cc @@ -37,14 +37,17 @@ class server_bireactor const agent_config::pointer& conf, const otel_request_handler& handler, const std::shared_ptr& logger, - const std::string& peer) + const std::string& peer, + agent_stat::pointer& stats) : agent_impl<::grpc::ServerBidiReactor>( io_context, "agent_server", conf, handler, - logger), + logger, + false, + stats), _peer(peer) { SPDLOG_LOGGER_DEBUG(_logger, "connected with agent {}", _peer); } @@ -84,11 +87,13 @@ agent_service::agent_service( const std::shared_ptr& io_context, const agent_config::pointer& conf, const metric_handler& handler, - const std::shared_ptr& logger) + const std::shared_ptr& logger, + const agent_stat::pointer& stats) : _io_context(io_context), _conf(conf), _metric_handler(handler), - _logger(logger) { + _logger(logger), + _stats(stats) { if (!_conf) { _conf = std::make_shared(60, 100, 10, 30); SPDLOG_LOGGER_INFO(logger, @@ -109,9 +114,10 @@ std::shared_ptr agent_service::load( const std::shared_ptr& io_context, const agent_config::pointer& conf, const metric_handler& handler, - const std::shared_ptr& logger) { + 
const std::shared_ptr& logger, + const agent_stat::pointer& stats) { std::shared_ptr ret = std::make_shared( - io_context, conf, std::move(handler), logger); + io_context, conf, std::move(handler), logger, stats); ret->init(); return ret; } @@ -144,7 +150,7 @@ agent_service::Export(::grpc::CallbackServerContext* context) { { absl::MutexLock l(&_conf_m); new_reactor = std::make_shared( - _io_context, _conf, _metric_handler, _logger, context->peer()); + _io_context, _conf, _metric_handler, _logger, context->peer(), _stats); } server_bireactor::register_stream(new_reactor); new_reactor->start_read(); diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_stat.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_stat.cc new file mode 100644 index 00000000000..62c794c4ddc --- /dev/null +++ b/engine/modules/opentelemetry/src/centreon_agent/agent_stat.cc @@ -0,0 +1,188 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/host.hh" +#include "com/centreon/engine/nebstructs.hh" +#include "com/centreon/engine/service.hh" + +#include "com/centreon/engine/broker.hh" +#include "com/centreon/engine/command_manager.hh" + +#include +#include +#include +#include +#include "centreon_agent/agent_stat.hh" + +using namespace com::centreon::engine::modules::opentelemetry::centreon_agent; + +/** + * @brief Construct a new agent stat::agent stat object don't use it, use load + * instead + * + * @param io_context + */ +agent_stat::agent_stat(const std::shared_ptr& io_context) + : _io_context(io_context), _send_timer(*io_context), _dirty(false) {} + +/** + * @brief static method to construct a agent_stat object + * + * @param context + * @return agent_stat::pointer + */ +agent_stat::pointer agent_stat::load( + const std::shared_ptr& io_context) { + pointer ret = std::make_shared(io_context); + ret->_start_send_timer(); + return ret; +} + +/** + * @brief Construct a new agent stat::group by key::group by key object + * + * @param agent_info + * @param reversed + */ +agent_stat::group_by_key::group_by_key( + const com::centreon::agent::AgentInfo& agent_info, + bool reversed) + : std::tuple( + agent_info.centreon_version().major(), + agent_info.centreon_version().minor(), + agent_info.centreon_version().patch(), + reversed, + agent_info.os(), + agent_info.os_version()) {} + +/** + * @brief Adds an agent to the agent statistics. + * + * This function adds an agent to the internal data structure that keeps track + * of agent statistics. If the agent is not already present, it is added to the + * data structure. + * + * @param agent_info The information about the agent to be added. 
+ * @param reversed A boolean flag indicating whether the agent is connected in + * reverse mode + * @param reactor A pointer to the reactor object associated with the agent + */ +void agent_stat::add_agent(const com::centreon::agent::AgentInfo& agent_info, + bool reversed, + const void* reactor) { + group_by_key key(agent_info, reversed); + absl::MutexLock l(&_protect); + auto it = _data.find(key); + if (it == _data.end()) { + it = _data.emplace(key, agent_info_set()).first; + } + if (it->second.insert(reactor).second) { + // The agent was added. + _dirty = true; + } +} + +/** + * @brief Removes an agent from the agent statistics. + * + * This function removes an agent from the internal data structure that keeps + * track of agent statistics. If the agent is present, it is removed from the + * data structure. If the set of agents for the given key becomes empty after + * removal, the key is also removed from the data structure. + * + * @param agent_info The information about the agent to be removed. + * @param reversed A boolean flag indicating whether the agent is connected in + * reverse mode. + * @param reactor The pointer to the reactor object that is removed. + */ +void agent_stat::remove_agent(const com::centreon::agent::AgentInfo& agent_info, + bool reversed, + const void* reactor) { + group_by_key key(agent_info, reversed); + absl::MutexLock l(&_protect); + auto it = _data.find(key); + if (it != _data.end()) { + size_t erased = it->second.erase(reactor); + if (it->second.empty()) { + _data.erase(it); + } + if (erased) { + // The agent was removed. + _dirty = true; + } + } +} + +/** + * @brief When an agent connect or disconnect from engine, we send a message to + * broker + * + */ +void agent_stat::_on_stat_update() const { + nebstruct_agent_stats_data stats; + stats.data = + std::make_unique>(); + stats.data->reserve(_data.size()); + for (const auto& agent : _data) { + stats.data->emplace_back(std::get<0>(agent.first), std::get<1>(agent.first), + std::get<2>(agent.first), std::get<3>(agent.first), + std::get<4>(agent.first), std::get<5>(agent.first), + agent.second.size()); + } + + // we post all check results in the main thread + auto fn = + std::packaged_task([to_send = std::move(stats)]() mutable { + broker_agent_stats(to_send); + return OK; + }); + command_manager::instance().enqueue(std::move(fn)); +} + +void agent_stat::_start_send_timer() { + absl::MutexLock l(&_protect); + _send_timer.expires_from_now(std::chrono::minutes(1)); + _send_timer.async_wait( + [this, me = shared_from_this()](const boost::system::error_code& err) { + _send_timer_handler(err); + }); +} + +void agent_stat::_send_timer_handler(const boost::system::error_code& err) { + if (err) { + return; + } + { + absl::MutexLock l(&_protect); + if (_dirty) { + _dirty = false; + _on_stat_update(); + } + } + _start_send_timer(); +} + +/** + * @brief to call on module unload + * + */ +void agent_stat::stop_send_timer() { + absl::MutexLock l(&_protect); + _send_timer.cancel(); +} diff --git a/engine/modules/opentelemetry/src/centreon_agent/to_agent_connector.cc b/engine/modules/opentelemetry/src/centreon_agent/to_agent_connector.cc index f8cce8607a9..5b9e402dd74 100644 --- a/engine/modules/opentelemetry/src/centreon_agent/to_agent_connector.cc +++ b/engine/modules/opentelemetry/src/centreon_agent/to_agent_connector.cc @@ -43,7 +43,8 @@ class agent_connection const std::shared_ptr& parent, const agent_config::pointer& conf, const metric_handler& handler, - const std::shared_ptr& logger); + const std::shared_ptr& 
logger, + const agent_stat::pointer& stats); ::grpc::ClientContext& get_context() { return _context; } @@ -67,14 +68,17 @@ agent_connection::agent_connection( const std::shared_ptr& parent, const agent_config::pointer& conf, const metric_handler& handler, - const std::shared_ptr& logger) + const std::shared_ptr& logger, + const agent_stat::pointer& stats) : agent_impl<::grpc::ClientBidiReactor>( io_context, "reverse_client", conf, handler, - logger), + logger, + true, + stats), _parent(parent) { _peer = parent->get_conf()->get_hostport(); } @@ -119,13 +123,15 @@ to_agent_connector::to_agent_connector( const std::shared_ptr& io_context, const agent_config::pointer& agent_conf, const metric_handler& handler, - const std::shared_ptr& logger) + const std::shared_ptr& logger, + const agent_stat::pointer& stats) : common::grpc::grpc_client_base(agent_endpoint_conf, logger), _io_context(io_context), - _conf(agent_conf), _metric_handler(handler), - _alive(true) { - _stub = std::move(agent::ReversedAgentService::NewStub(_channel)); + _conf(agent_conf), + _alive(true), + _stats(stats) { + _stub = agent::ReversedAgentService::NewStub(_channel); } /** @@ -150,10 +156,11 @@ std::shared_ptr to_agent_connector::load( const std::shared_ptr& io_context, const agent_config::pointer& agent_conf, const metric_handler& handler, - const std::shared_ptr& logger) { + const std::shared_ptr& logger, + const agent_stat::pointer& stats) { std::shared_ptr ret = std::make_shared(agent_endpoint_conf, io_context, - agent_conf, handler, logger); + agent_conf, handler, logger, stats); ret->start(); return ret; } @@ -172,8 +179,9 @@ void to_agent_connector::start() { _connection->shutdown(); _connection.reset(); } - _connection = std::make_shared( - _io_context, shared_from_this(), _conf, _metric_handler, get_logger()); + _connection = + std::make_shared(_io_context, shared_from_this(), _conf, + _metric_handler, get_logger(), _stats); agent_connection::register_stream(_connection); _stub->async()->Import(&_connection->get_context(), _connection.get()); _connection->start_read(); diff --git a/engine/modules/opentelemetry/src/grpc_config.cc b/engine/modules/opentelemetry/src/grpc_config.cc index 0cf4569f2ff..964735843f6 100644 --- a/engine/modules/opentelemetry/src/grpc_config.cc +++ b/engine/modules/opentelemetry/src/grpc_config.cc @@ -67,6 +67,12 @@ static constexpr std::string_view _grpc_config_schema(R"( "type": "integer", "minimum": -1, "maximum": 3600 + }, + "second_max_reconnect_backoff": { + "description": "maximum time between subsequent connection attempts, in seconds. 
Default: 60s", + "type": "integer", + "minimum": 0, + "maximum": 600 } }, "required": [ @@ -121,9 +127,12 @@ grpc_config::grpc_config(const rapidjson::Value& json_config_v) { else second_keepalive_interval = 30; + unsigned second_max_reconnect_backoff = + json_config.get_unsigned("second_max_reconnect_backoff", 60); + static_cast(*this) = common::grpc::grpc_config( hostport, crypted, certificate, cert_key, ca_cert, ca_name, compress, - second_keepalive_interval); + second_keepalive_interval, second_max_reconnect_backoff); } /** diff --git a/engine/modules/opentelemetry/src/open_telemetry.cc b/engine/modules/opentelemetry/src/open_telemetry.cc index 1e8e6cdcdef..33934574764 100644 --- a/engine/modules/opentelemetry/src/open_telemetry.cc +++ b/engine/modules/opentelemetry/src/open_telemetry.cc @@ -46,7 +46,8 @@ open_telemetry::open_telemetry( const std::shared_ptr& logger) : _config_file_path(config_file_path), _logger(logger), - _io_context(io_context) { + _io_context(io_context), + _agent_stats(centreon_agent::agent_stat::load(io_context)) { SPDLOG_LOGGER_INFO(_logger, "load of open telemetry module"); } @@ -104,7 +105,7 @@ void open_telemetry::_reload() { [me = shared_from_this()](const metric_request_ptr& request) { me->on_metric(request); }, - _logger); + _logger, _agent_stats); } _agent_reverse_client->update(_conf->get_centreon_agent_config()); } @@ -157,7 +158,7 @@ void open_telemetry::_create_otl_server( [me = shared_from_this()](const metric_request_ptr& request) { me->on_metric(request); }, - _logger); + _logger, _agent_stats); } catch (const std::exception& e) { SPDLOG_LOGGER_ERROR(_logger, "fail to create opentelemetry grpc server: {}", e.what()); @@ -249,6 +250,7 @@ void open_telemetry::_shutdown() { if (to_shutdown) { to_shutdown->shutdown(std::chrono::seconds(10)); } + _agent_stats->stop_send_timer(); } /** @@ -398,4 +400,4 @@ void open_telemetry::on_metric(const metric_request_ptr& metrics) { * @param unknown */ void open_telemetry::_forward_to_broker( - const std::vector& unknown) {} + [[maybe_unused]] const std::vector& unknown) {} diff --git a/engine/modules/opentelemetry/src/otl_config.cc b/engine/modules/opentelemetry/src/otl_config.cc index 386615aaf19..e93d5210fbd 100644 --- a/engine/modules/opentelemetry/src/otl_config.cc +++ b/engine/modules/opentelemetry/src/otl_config.cc @@ -119,6 +119,10 @@ otl_config::otl_config(const std::string_view& file_path, "nor an grpc server, nor a reverse client configured"); } + if (!_centreon_agent_config) { + _centreon_agent_config = std::make_shared(); + } + if (file_content.has_member("telegraf_conf_server")) { try { _telegraf_conf_server_config = diff --git a/engine/modules/opentelemetry/src/otl_server.cc b/engine/modules/opentelemetry/src/otl_server.cc index b502953ddb3..ec599b1a8c1 100644 --- a/engine/modules/opentelemetry/src/otl_server.cc +++ b/engine/modules/opentelemetry/src/otl_server.cc @@ -258,7 +258,7 @@ std::shared_ptr metric_service::load( ::grpc::ServerUnaryReactor* metric_service::Export( ::grpc::CallbackServerContext* context, const otl_col_metrics::ExportMetricsServiceRequest* request, - otl_col_metrics::ExportMetricsServiceResponse* response) { + otl_col_metrics::ExportMetricsServiceResponse* response [[maybe_unused]]) { metric_request_ptr shared_request = _allocator->get_metric_request_ptr_from_raw(request); @@ -288,14 +288,16 @@ otl_server::otl_server( const grpc_config::pointer& conf, const centreon_agent::agent_config::pointer& agent_config, const metric_handler& handler, - const std::shared_ptr& logger) + 
const std::shared_ptr& logger, + const centreon_agent::agent_stat::pointer& agent_stats) : common::grpc::grpc_server_base(conf, logger), _service(detail::metric_service::load(handler, logger)), _agent_service(centreon_agent::agent_service::load(io_context, agent_config, handler, - logger)) {} + logger, + agent_stats)) {} /** * @brief Destroy the otl server::otl server object @@ -317,9 +319,10 @@ otl_server::pointer otl_server::load( const grpc_config::pointer& conf, const centreon_agent::agent_config::pointer& agent_config, const metric_handler& handler, - const std::shared_ptr& logger) { - otl_server::pointer ret( - new otl_server(io_context, conf, agent_config, handler, logger)); + const std::shared_ptr& logger, + const centreon_agent::agent_stat::pointer& agent_stats) { + otl_server::pointer ret(new otl_server(io_context, conf, agent_config, + handler, logger, agent_stats)); ret->start(); return ret; } diff --git a/engine/modules/opentelemetry/src/telegraf/conf_server.cc b/engine/modules/opentelemetry/src/telegraf/conf_server.cc index d38e8e3034e..59d22255683 100644 --- a/engine/modules/opentelemetry/src/telegraf/conf_server.cc +++ b/engine/modules/opentelemetry/src/telegraf/conf_server.cc @@ -324,10 +324,10 @@ void conf_session::answer_to_request( bool at_least_one_found = get_otel_commands( host, - [this, &resp, &host](const std::string& cmd_name, - const std::string& cmd_line, - const std::string& service, - const std::shared_ptr& logger) { + [this, &resp, &host]( + const std::string& cmd_name, const std::string& cmd_line, + const std::string& service, + [[maybe_unused]] const std::shared_ptr& logger) { return _otel_connector_to_stream(cmd_name, cmd_line, host, service, resp->body()); }, diff --git a/engine/src/anomalydetection.cc b/engine/src/anomalydetection.cc index 1227881cf25..d251649c733 100644 --- a/engine/src/anomalydetection.cc +++ b/engine/src/anomalydetection.cc @@ -347,7 +347,6 @@ const anomalydetection::pointer_set& anomalydetection::get_anomaly( * metric thresholds. * @param[in] status_change Should we follow the thresholds file * to determine status. - * @param[in] initial_state Initial service state. * @param[in] max_attempts Max check attempts. * @param[in] accept_passive_checks Does this service accept * check result submission ? @@ -424,7 +423,6 @@ anomalydetection::anomalydetection(uint64_t host_id, bool status_change, bool checks_enabled, bool accept_passive_checks, - enum service::service_state initial_state, uint32_t check_interval, uint32_t retry_interval, uint32_t notification_interval, @@ -456,7 +454,6 @@ anomalydetection::anomalydetection(uint64_t host_id, "", checks_enabled, accept_passive_checks, - initial_state, check_interval, retry_interval, notification_interval, @@ -515,7 +512,6 @@ anomalydetection::~anomalydetection() { * @param[in] thresholds_file, fullname to the thresholds file. * @param[in] status_change, should we follow the thresholds file * to determine status. - * @param[in] initial_state Initial service state. * @param[in] max_attempts Max check attempts. * @param[in] accept_passive_checks Does this service accept * check result submission ? 
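broker_agent_stats() above forwards the aggregated agent counters through neb_make_callbacks() with the new NEBCALLBACK_AGENT_STATS id. As an illustration only, assuming the usual neb_register_callback() interface (the handler and registration function below are hypothetical, not part of this patch), a broker module could consume the event roughly like this:

#include <cstdio>

#include "com/centreon/engine/nebcallbacks.hh"
#include "com/centreon/engine/nebstructs.hh"

// Invoked by neb_make_callbacks(NEBCALLBACK_AGENT_STATS, &stats).
static int on_agent_stats(int callback_type [[maybe_unused]], void* data) {
  auto* stats = static_cast<nebstruct_agent_stats_data*>(data);
  if (!stats || !stats->data)
    return 0;
  for (const auto& row : *stats->data) {
    // One row per (agent version, reverse-mode, os, os_version) group.
    std::printf("agent %u.%u.%u reversed=%d %s %s -> %zu agent(s)\n",
                row.major, row.minor, row.patch, row.reverse ? 1 : 0,
                row.os.c_str(), row.os_version.c_str(), row.nb_agent);
  }
  return 0;
}

// Typically called from the module's init entry point; module_handle is the
// handle Engine passes to the module.
void subscribe_agent_stats(void* module_handle) {
  neb_register_callback(NEBCALLBACK_AGENT_STATS, module_handle, 0,
                        &on_agent_stats);
}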
@@ -591,7 +587,6 @@ com::centreon::engine::anomalydetection* add_anomalydetection( std::string const& metric_name, std::string const& thresholds_file, bool status_change, - com::centreon::engine::service::service_state initial_state, int max_attempts, double check_interval, double retry_interval, @@ -761,14 +756,14 @@ com::centreon::engine::anomalydetection* add_anomalydetection( host_id, service_id, host_name, description, display_name.empty() ? description : display_name, internal_id, dependent_service, metric_name, thresholds_file, status_change, - checks_enabled, accept_passive_checks, initial_state, check_interval, - retry_interval, notification_interval, max_attempts, - first_notification_delay, recovery_notification_delay, - notification_period, notifications_enabled, is_volatile, event_handler, - event_handler_enabled, notes, notes_url, action_url, icon_image, - icon_image_alt, flap_detection_enabled, low_flap_threshold, - high_flap_threshold, check_freshness, freshness_threshold, - obsess_over_service, timezone, icon_id, sensitivity)}; + checks_enabled, accept_passive_checks, check_interval, retry_interval, + notification_interval, max_attempts, first_notification_delay, + recovery_notification_delay, notification_period, notifications_enabled, + is_volatile, event_handler, event_handler_enabled, notes, notes_url, + action_url, icon_image, icon_image_alt, flap_detection_enabled, + low_flap_threshold, high_flap_threshold, check_freshness, + freshness_threshold, obsess_over_service, timezone, icon_id, + sensitivity)}; try { obj->set_acknowledgement(AckType::NONE); obj->set_check_options(CHECK_OPTION_NONE); diff --git a/engine/src/broker.cc b/engine/src/broker.cc index a382422ee1b..08a8b81f6ce 100644 --- a/engine/src/broker.cc +++ b/engine/src/broker.cc @@ -1124,4 +1124,15 @@ void broker_bench(unsigned id, // Make callbacks. neb_make_callbacks(NEBCALLBACK_BENCH_DATA, &ds); } + +/** + * @brief send agent usage statistics to broker + * + * @param stats + */ +void broker_agent_stats(nebstruct_agent_stats_data& stats) { + // Fill struct with relevant data. + // Make callbacks. + neb_make_callbacks(NEBCALLBACK_AGENT_STATS, &stats); +} } diff --git a/engine/src/broker/loader.cc b/engine/src/broker/loader.cc index a16b499002f..d0fad9bf5de 100644 --- a/engine/src/broker/loader.cc +++ b/engine/src/broker/loader.cc @@ -19,12 +19,11 @@ */ #include "com/centreon/engine/broker/loader.hh" +#include #include "com/centreon/engine/broker/handle.hh" #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/logging/logger.hh" -#include "com/centreon/io/directory_entry.hh" -#include "com/centreon/io/file_stream.hh" using namespace com::centreon; using namespace com::centreon::engine; @@ -92,43 +91,45 @@ loader& loader::instance() { */ unsigned int loader::load_directory(std::string const& dir) { // Get directory entries. - io::directory_entry directory(dir); - std::list const& files(directory.entry_list("*.so")); + + std::filesystem::path directory(dir); + std::list files; + for (const auto& entry : std::filesystem::directory_iterator(directory)) { + if (entry.is_regular_file() && entry.path().extension() == ".so") { + files.push_back(entry.path()); + } + } // Sort by file name. 
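Aside: the std::filesystem rewrite of load_directory() in this hunk condenses to the following standalone sketch (simplified, illustrative names; not the loader's exact code): collect the *.so entries, order them by file name, and pair each module with an optional side-by-side .cfg file.

#include <filesystem>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace fs = std::filesystem;

// Returns (module path, config path) pairs; the config path is left empty
// when no matching .cfg file sits next to the module, mirroring the loader.
std::vector<std::pair<fs::path, fs::path>> discover_modules(const fs::path& dir) {
  std::multimap<std::string, fs::path> by_name;  // sorted by file name
  for (const auto& entry : fs::directory_iterator(dir))
    if (entry.is_regular_file() && entry.path().extension() == ".so")
      by_name.emplace(entry.path().filename().string(), entry.path());

  std::vector<std::pair<fs::path, fs::path>> out;
  for (const auto& kv : by_name) {
    const fs::path& so_path = kv.second;
    fs::path cfg = dir / (so_path.stem().string() + ".cfg");
    if (!fs::exists(cfg))
      cfg.clear();
    out.emplace_back(so_path, cfg);
  }
  return out;
}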
- std::multimap sort_files; - for (std::list::const_iterator it(files.begin()), - end(files.end()); - it != end; ++it) - sort_files.insert(std::make_pair(it->file_name(), *it)); + std::multimap sort_files; + for (const auto& file : files) + sort_files.insert(std::make_pair(file.filename().string(), file)); // Load modules. - unsigned int loaded(0); - for (std::multimap::const_iterator - it(sort_files.begin()), - end(sort_files.end()); - it != end; ++it) { - io::file_entry const& f(it->second); - std::string config_file(dir + "/" + f.base_name() + ".cfg"); - if (io::file_stream::exists(config_file.c_str()) == false) - config_file = ""; + unsigned int loaded = 0; + for (const auto& [name, f] : sort_files) { + std::string cfg_file = f.stem().string() + ".cfg"; + std::filesystem::path config_file = dir / std::filesystem::path(cfg_file); + if (!std::filesystem::exists(config_file)) + config_file.clear(); + std::shared_ptr module; try { - module = add_module(dir + "/" + f.file_name(), config_file); + module = add_module(dir / f, config_file); module->open(); engine_logger(log_info_message, basic) - << "Event broker module '" << f.file_name() + << "Event broker module '" << f.filename() << "' initialized successfully."; events_logger->info("Event broker module '{}' initialized successfully.", - f.file_name()); + f.filename().string()); ++loaded; } catch (error const& e) { del_module(module); engine_logger(log_runtime_error, basic) - << "Error: Could not load module '" << f.file_name() << "' -> " + << "Error: Could not load module '" << f.filename() << "' -> " << e.what(); runtime_logger->error("Error: Could not load module '{}' -> {}", - f.file_name(), e.what()); + f.filename().string(), e.what()); } } return loaded; diff --git a/engine/src/command_manager.cc b/engine/src/command_manager.cc index 18207450d8a..c14333b0d52 100644 --- a/engine/src/command_manager.cc +++ b/engine/src/command_manager.cc @@ -411,19 +411,17 @@ void command_manager::schedule_and_propagate_downtime( unsigned long triggered_by, unsigned long duration) { /* check all child hosts... */ - for (host_map_unsafe::iterator it(temp_host->child_hosts.begin()), - end(temp_host->child_hosts.end()); - it != end; ++it) { - if (it->second == nullptr) + for (const auto& [_, ptr_host] : temp_host->child_hosts) { + if (ptr_host == nullptr) continue; /* recurse... */ - schedule_and_propagate_downtime(it->second, entry_time, author, - comment_data, start_time, end_time, fixed, - triggered_by, duration); + schedule_and_propagate_downtime(ptr_host, entry_time, author, comment_data, + start_time, end_time, fixed, triggered_by, + duration); /* schedule downtime for this host */ downtime_manager::instance().schedule_downtime( - downtime::host_downtime, it->second->host_id(), 0, entry_time, author, + downtime::host_downtime, ptr_host->host_id(), 0, entry_time, author, comment_data, start_time, end_time, fixed, triggered_by, duration, nullptr); } diff --git a/engine/src/commands/commands.cc b/engine/src/commands/commands.cc index b0feacaf58f..1a00488f72a 100644 --- a/engine/src/commands/commands.cc +++ b/engine/src/commands/commands.cc @@ -725,28 +725,36 @@ int cmd_process_host_check_result(int cmd, time_t check_time, char* args) { return ERROR; // Get the host name. - char* host_name(args); + auto split = absl::StrSplit(args, ';'); + auto split_it = split.begin(); + + if (split_it == split.end()) + return ERROR; // Get the host check return code and output. 
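Aside: the cmd_process_host_check_result() rewrite in this hunk parses the "host;return_code[;output]" payload with absl::StrSplit instead of manual strchr() surgery. A standalone sketch of the same parsing; absl::MaxSplits is an illustrative choice here (not the patch's exact code) so that any further ';' stays inside the output field:

#include <optional>
#include <string>
#include <string_view>
#include <vector>

#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"

struct passive_host_result {
  std::string host_name;
  int return_code = 0;
  std::string output;  // may stay empty, as in the command handler above
};

std::optional<passive_host_result> parse_passive_host_check(
    std::string_view args) {
  // At most three pieces: host name, return code, free-form output.
  std::vector<std::string_view> parts =
      absl::StrSplit(args, absl::MaxSplits(';', 2));
  if (parts.size() < 2)
    return std::nullopt;

  passive_host_result res;
  res.host_name = std::string(parts[0]);
  if (!absl::SimpleAtoi(parts[1], &res.return_code))
    return std::nullopt;
  if (parts.size() == 3)
    res.output = std::string(parts[2]);
  return res;
}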
- char* delimiter(strchr(host_name, ';')); - if (!delimiter) - return ERROR; - *delimiter = '\0'; - ++delimiter; - char* output(strchr(delimiter, ';')); - if (output) { - *output = '\0'; - ++output; - } else - output = ""; - int return_code(strtol(delimiter, nullptr, 0)); + std::string host_name = std::string(*split_it); - // replace \\n with \n - string::unescape(output); + int return_code; + ++split_it; + + if (split_it == split.end()) + return ERROR; + + if (!absl::SimpleAtoi(*split_it, &return_code)) + return ERROR; + + ++split_it; + + std::string output = ""; + if (split_it != split.end()) { + output = split_it->data(); + // replace \\n with \n + string::unescape(output); + } // Submit the check result. - return ( - process_passive_host_check(check_time, host_name, return_code, output)); + return process_passive_host_check(check_time, host_name.c_str(), return_code, + output.c_str()); } /* process passive host check result */ @@ -2441,28 +2449,24 @@ void enable_and_propagate_notifications(host* hst, enable_host_notifications(hst); /* check all child hosts... */ - for (host_map_unsafe::iterator it(hst->child_hosts.begin()), - end(hst->child_hosts.end()); - it != end; ++it) { - if (it->second == nullptr) + for (const auto& [_, ptr_host] : hst->child_hosts) { + if (ptr_host == nullptr) continue; /* recurse... */ - enable_and_propagate_notifications(it->second, level + 1, affect_top_host, + enable_and_propagate_notifications(ptr_host, level + 1, affect_top_host, affect_hosts, affect_services); /* enable notifications for this host */ if (affect_hosts) - enable_host_notifications(it->second); + enable_host_notifications(ptr_host); /* enable notifications for all services on this host... */ if (affect_services) { - for (service_map_unsafe::iterator it2(it->second->services.begin()), - end2(it->second->services.end()); - it2 != end2; ++it2) { - if (!it2->second) + for (const auto& [_, ptr_srv] : ptr_host->services) { + if (!ptr_srv) continue; - enable_service_notifications(it2->second); + enable_service_notifications(ptr_srv); } } } @@ -2482,28 +2486,24 @@ void disable_and_propagate_notifications(host* hst, disable_host_notifications(hst); /* check all child hosts... */ - for (host_map_unsafe::iterator it(hst->child_hosts.begin()), - end(hst->child_hosts.begin()); - it != end; ++it) { - if (!it->second) + for (const auto& [_, ptr_host] : hst->child_hosts) { + if (!ptr_host) continue; /* recurse... */ - disable_and_propagate_notifications(it->second, level + 1, affect_top_host, + disable_and_propagate_notifications(ptr_host, level + 1, affect_top_host, affect_hosts, affect_services); /* disable notifications for this host */ if (affect_hosts) - disable_host_notifications(it->second); + disable_host_notifications(ptr_host); /* disable notifications for all services on this host... */ if (affect_services) { - for (service_map_unsafe::iterator it2(it->second->services.begin()), - end2(it->second->services.end()); - it2 != end2; ++it2) { - if (!it2->second) + for (const auto& [_, ptr_srv] : ptr_host->services) { + if (!ptr_srv) continue; - disable_service_notifications(it2->second); + disable_service_notifications(ptr_srv); } } } @@ -2624,20 +2624,18 @@ void schedule_and_propagate_downtime(host* temp_host, unsigned long triggered_by, unsigned long duration) { /* check all child hosts... 
*/ - for (host_map_unsafe::iterator it(temp_host->child_hosts.begin()), - end(temp_host->child_hosts.end()); - it != end; ++it) { - if (it->second == nullptr) + for (const auto& [_, ptr_host] : temp_host->child_hosts) { + if (ptr_host == nullptr) continue; /* recurse... */ - schedule_and_propagate_downtime(it->second, entry_time, author, - comment_data, start_time, end_time, fixed, - triggered_by, duration); + schedule_and_propagate_downtime(ptr_host, entry_time, author, comment_data, + start_time, end_time, fixed, triggered_by, + duration); /* schedule downtime for this host */ downtime_manager::instance().schedule_downtime( - downtime::host_downtime, it->second->host_id(), 0, entry_time, author, + downtime::host_downtime, ptr_host->host_id(), 0, entry_time, author, comment_data, start_time, end_time, fixed, triggered_by, duration, nullptr); } diff --git a/engine/src/commands/connector.cc b/engine/src/commands/connector.cc index 1b48116397f..b18bc84ecbc 100644 --- a/engine/src/commands/connector.cc +++ b/engine/src/commands/connector.cc @@ -731,10 +731,10 @@ void connector::_recv_query_version(char const* data) { try { // Parse query version response to get major and minor // engine version supported by the connector. - int version[2]; + unsigned version[2]; char* endptr(nullptr); for (uint32_t i(0); i < 2; ++i) { - version[i] = strtol(data, &endptr, 10); + version[i] = strtoul(data, &endptr, 10); if (data == endptr) throw engine_error() << "Invalid version query: Bad format"; data = endptr + 1; diff --git a/engine/src/commands/otel_connector.cc b/engine/src/commands/otel_connector.cc index 6b8433e4d15..a7e9ae1a1ba 100644 --- a/engine/src/commands/otel_connector.cc +++ b/engine/src/commands/otel_connector.cc @@ -172,11 +172,12 @@ void otel_connector::update(const std::string& cmd_line) { * @param caller * @return uint64_t */ -uint64_t otel_connector::run(const std::string& processed_cmd, - nagios_macros& macros, - uint32_t timeout, - const check_result::pointer& to_push_to_checker, - const void* caller) { +uint64_t otel_connector::run(const std::string& processed_cmd [[maybe_unused]], + nagios_macros& macros [[maybe_unused]], + uint32_t timeout [[maybe_unused]], + const check_result::pointer& to_push_to_checker + [[maybe_unused]], + const void* caller [[maybe_unused]]) { SPDLOG_LOGGER_ERROR(_logger, "open telemetry services must be passive"); throw exceptions::msg_fmt("open telemetry services must be passive"); } @@ -191,10 +192,10 @@ uint64_t otel_connector::run(const std::string& processed_cmd, * @param timeout timeout in seconds * @param res check result */ -void otel_connector::run(const std::string& processed_cmd, - nagios_macros& macros, - uint32_t timeout, - result& res) { +void otel_connector::run(const std::string& processed_cmd [[maybe_unused]], + nagios_macros& macros [[maybe_unused]], + uint32_t timeout [[maybe_unused]], + result& res [[maybe_unused]]) { SPDLOG_LOGGER_ERROR(_logger, "open telemetry services must be passive"); throw exceptions::msg_fmt("open telemetry services must be passive"); } diff --git a/engine/src/config.cc b/engine/src/config.cc index 544d2be9a0d..6fc4941c5bb 100644 --- a/engine/src/config.cc +++ b/engine/src/config.cc @@ -63,19 +63,17 @@ static int dfs_host_path(host* root) { dfs_set_status(root, DFS_TEMP_CHECKED); /* We are scanning the children */ - for (host_map_unsafe::iterator it(root->child_hosts.begin()), - end(root->child_hosts.end()); - it != end; it++) { - int child_status = dfs_get_status(it->second); + for (const auto& [_, ptr_host] : 
root->child_hosts) { + int child_status = dfs_get_status(ptr_host); /* If a child is not checked, check it */ if (child_status == DFS_UNCHECKED) - child_status = dfs_host_path(it->second); + child_status = dfs_host_path(ptr_host); /* If a child already temporary checked, its a problem, * loop inside, and its a acked status */ if (child_status == DFS_TEMP_CHECKED) { - dfs_set_status(it->second, DFS_LOOPY); + dfs_set_status(ptr_host, DFS_LOOPY); dfs_set_status(root, DFS_LOOPY); } @@ -86,7 +84,7 @@ static int dfs_host_path(host* root) { dfs_set_status(root, DFS_NEAR_LOOP); /* we already saw this child, it's a problem */ - dfs_set_status(it->second, DFS_LOOPY); + dfs_set_status(ptr_host, DFS_LOOPY); } } diff --git a/engine/src/configuration/applier/anomalydetection.cc b/engine/src/configuration/applier/anomalydetection.cc index 9feef230b3b..d0208faa001 100644 --- a/engine/src/configuration/applier/anomalydetection.cc +++ b/engine/src/configuration/applier/anomalydetection.cc @@ -62,11 +62,10 @@ void applier::anomalydetection::add_object( obj.host_id(), obj.service_id(), obj.host_name(), obj.service_description(), obj.display_name(), obj.internal_id(), obj.dependent_service_id(), obj.metric_name(), obj.thresholds_file(), - obj.status_change(), - static_cast(obj.initial_state()), - obj.max_check_attempts(), obj.check_interval(), obj.retry_interval(), - obj.notification_interval(), obj.first_notification_delay(), - obj.recovery_notification_delay(), obj.notification_period(), + obj.status_change(), obj.max_check_attempts(), obj.check_interval(), + obj.retry_interval(), obj.notification_interval(), + obj.first_notification_delay(), obj.recovery_notification_delay(), + obj.notification_period(), static_cast(obj.notification_options() & configuration::anomalydetection::ok), static_cast(obj.notification_options() & @@ -179,11 +178,10 @@ void applier::anomalydetection::add_object( obj.host_id(), obj.service_id(), obj.host_name(), obj.service_description(), obj.display_name(), obj.internal_id(), obj.dependent_service_id(), obj.metric_name(), obj.thresholds_file(), - obj.status_change(), - static_cast(obj.initial_state()), - obj.max_check_attempts(), obj.check_interval(), obj.retry_interval(), - obj.notification_interval(), obj.first_notification_delay(), - obj.recovery_notification_delay(), obj.notification_period(), + obj.status_change(), obj.max_check_attempts(), obj.check_interval(), + obj.retry_interval(), obj.notification_interval(), + obj.first_notification_delay(), obj.recovery_notification_delay(), + obj.notification_period(), static_cast(obj.notification_options() & action_svc_ok), static_cast(obj.notification_options() & action_svc_unknown), static_cast(obj.notification_options() & action_svc_warning), @@ -357,8 +355,6 @@ void applier::anomalydetection::modify_object( s->set_thresholds_file(obj.thresholds_file()); s->set_event_handler(obj.event_handler()); s->set_event_handler_enabled(obj.event_handler_enabled()); - s->set_initial_state(static_cast( - obj.initial_state())); s->set_check_interval(obj.check_interval()); s->set_retry_interval(obj.retry_interval()); s->set_max_attempts(obj.max_check_attempts()); @@ -550,8 +546,6 @@ void applier::anomalydetection::modify_object( s->set_thresholds_file(new_obj.thresholds_file()); s->set_event_handler(new_obj.event_handler()); s->set_event_handler_enabled(new_obj.event_handler_enabled()); - s->set_initial_state(static_cast( - new_obj.initial_state())); s->set_check_interval(new_obj.check_interval()); s->set_retry_interval(new_obj.retry_interval()); 
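The dfs_host_path() hunk above is the classic three-state depth-first search used to flag circular parent/child host chains (DFS_UNCHECKED / DFS_TEMP_CHECKED / DFS_LOOPY in the engine code). A self-contained sketch of the underlying idea on a generic graph, with illustrative names only:

#include <unordered_map>
#include <vector>

enum class mark { unchecked, in_progress, done };

// Returns true when a cycle is reachable from 'node'. 'children' maps a node
// id to its child ids; 'state' plays the role of the engine's DFS statuses.
bool has_cycle(int node,
               const std::unordered_map<int, std::vector<int>>& children,
               std::unordered_map<int, mark>& state) {
  state[node] = mark::in_progress;  // equivalent of DFS_TEMP_CHECKED
  auto it = children.find(node);
  if (it != children.end()) {
    for (int child : it->second) {
      mark m = state.count(child) ? state.at(child) : mark::unchecked;
      if (m == mark::in_progress)
        return true;  // back edge: we reached a node still being explored
      if (m == mark::unchecked && has_cycle(child, children, state))
        return true;
    }
  }
  state[node] = mark::done;
  return false;
}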
s->set_max_attempts(new_obj.max_check_attempts()); diff --git a/engine/src/configuration/applier/host.cc b/engine/src/configuration/applier/host.cc index ea2a7dc2f76..fe07d4645bb 100644 --- a/engine/src/configuration/applier/host.cc +++ b/engine/src/configuration/applier/host.cc @@ -23,9 +23,7 @@ #include "com/centreon/engine/common.hh" #include "com/centreon/engine/config.hh" #include "com/centreon/engine/configuration/applier/scheduler.hh" -#include "com/centreon/engine/configuration/applier/state.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" -#include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/severity.hh" @@ -58,9 +56,8 @@ void applier::host::add_object(const configuration::host& obj) { // Create host. auto h = std::make_shared( obj.host_id(), obj.host_name(), obj.display_name(), obj.alias(), - obj.address(), obj.check_period(), - static_cast(obj.initial_state()), - obj.check_interval(), obj.retry_interval(), obj.max_check_attempts(), + obj.address(), obj.check_period(), obj.check_interval(), + obj.retry_interval(), obj.max_check_attempts(), static_cast(obj.notification_options() & configuration::host::up), static_cast(obj.notification_options() & configuration::host::down), static_cast(obj.notification_options() & @@ -183,9 +180,8 @@ void applier::host::add_object(const configuration::Host& obj) { // Create host. auto h = std::make_shared( obj.host_id(), obj.host_name(), obj.display_name(), obj.alias(), - obj.address(), obj.check_period(), - static_cast(obj.initial_state()), - obj.check_interval(), obj.retry_interval(), obj.max_check_attempts(), + obj.address(), obj.check_period(), obj.check_interval(), + obj.retry_interval(), obj.max_check_attempts(), static_cast(obj.notification_options() & action_hst_up), static_cast(obj.notification_options() & action_hst_down), static_cast(obj.notification_options() & action_hst_unreachable), @@ -319,10 +315,8 @@ void applier::host::modify_object(configuration::host const& obj) { else it_obj->second->set_alias(obj.host_name()); it_obj->second->set_address(obj.address()); - if (obj.check_period().empty()) + if (!obj.check_period().empty()) it_obj->second->set_check_period(obj.check_period()); - it_obj->second->set_initial_state( - static_cast(obj.initial_state())); it_obj->second->set_check_interval(static_cast(obj.check_interval())); it_obj->second->set_retry_interval(static_cast(obj.retry_interval())); it_obj->second->set_max_attempts(static_cast(obj.max_check_attempts())); @@ -371,6 +365,7 @@ void applier::host::modify_object(configuration::host const& obj) { configuration::host::unreachable ? notifier::unreachable : notifier::none); + it_obj->second->set_stalk_on(notifier::none); it_obj->second->add_stalk_on(obj.stalking_options() & configuration::host::up ? notifier::up : notifier::none); @@ -411,6 +406,7 @@ void applier::host::modify_object(configuration::host const& obj) { config->interval_length()); it_obj->second->set_recovery_notification_delay( obj.recovery_notification_delay()); + it_obj->second->set_icon_id(obj.icon_id()); // Contacts. if (obj.contacts() != obj_old.contacts()) { @@ -482,10 +478,8 @@ void applier::host::modify_object(configuration::host const& obj) { if (obj.parents() != obj_old.parents()) { // Delete old parents. 
{ - for (host_map_unsafe::iterator it(it_obj->second->parent_hosts.begin()), - end(it_obj->second->parent_hosts.end()); - it != end; it++) - broker_relation_data(NEBTYPE_PARENT_DELETE, it->second, nullptr, + for (const auto& [_, sptr_host] : it_obj->second->parent_hosts) + broker_relation_data(NEBTYPE_PARENT_DELETE, sptr_host.get(), nullptr, it_obj->second.get(), nullptr); } it_obj->second->parent_hosts.clear(); @@ -548,10 +542,8 @@ void applier::host::modify_object(configuration::Host* old_obj, else h->set_alias(new_obj.host_name()); h->set_address(new_obj.address()); - if (new_obj.check_period().empty()) + if (!new_obj.check_period().empty()) h->set_check_period(new_obj.check_period()); - h->set_initial_state( - static_cast(new_obj.initial_state())); h->set_check_interval(static_cast(new_obj.check_interval())); h->set_retry_interval(static_cast(new_obj.retry_interval())); h->set_max_attempts(static_cast(new_obj.max_check_attempts())); @@ -596,6 +588,7 @@ void applier::host::modify_object(configuration::Host* old_obj, action_hst_unreachable ? notifier::unreachable : notifier::none); + h->set_stalk_on(notifier::none); h->add_stalk_on(new_obj.stalking_options() & action_hst_up ? notifier::up : notifier::none); h->add_stalk_on(new_obj.stalking_options() & action_hst_down @@ -632,6 +625,7 @@ void applier::host::modify_object(configuration::Host* old_obj, h->set_acknowledgement_timeout(new_obj.acknowledgement_timeout() * pb_config.interval_length()); h->set_recovery_notification_delay(new_obj.recovery_notification_delay()); + h->set_icon_id(new_obj.icon_id()); // Contacts. if (!MessageDifferencer::Equals(new_obj.contacts(), old_obj->contacts())) { @@ -727,10 +721,9 @@ void applier::host::modify_object(configuration::Host* old_obj, if (parents_changed) { // Delete old parents. - for (auto it = h->parent_hosts.begin(), end = h->parent_hosts.end(); - it != end; it++) - broker_relation_data(NEBTYPE_PARENT_DELETE, it->second, nullptr, h.get(), - nullptr); + for (const auto& [_, sptr_host] : h->parent_hosts) + broker_relation_data(NEBTYPE_PARENT_DELETE, sptr_host.get(), nullptr, + h.get(), nullptr); h->parent_hosts.clear(); // Create parents. @@ -787,6 +780,11 @@ void applier::host::remove_object(configuration::host const& obj) { for (auto& it_h : it->second->get_parent_groups()) it_h->members.erase(it->second->name()); + // remove any relations + for (const auto& [_, sptr_host] : it->second->parent_hosts) + broker_relation_data(NEBTYPE_PARENT_DELETE, sptr_host.get(), nullptr, + it->second.get(), nullptr); + // Notify event broker. for (auto it_s = it->second->services.begin(); it_s != it->second->services.end(); ++it_s) @@ -833,6 +831,11 @@ void applier::host::remove_object(ssize_t idx) { for (auto& it_h : it->second->get_parent_groups()) it_h->members.erase(it->second->name()); + // remove any relations + for (const auto& [_, sptr_host] : it->second->parent_hosts) + broker_relation_data(NEBTYPE_PARENT_DELETE, sptr_host.get(), nullptr, + it->second.get(), nullptr); + // Notify event broker. for (auto it_s = it->second->services.begin(); it_s != it->second->services.end(); ++it_s) @@ -870,10 +873,8 @@ void applier::host::resolve_object(const configuration::host& obj, // It is necessary to do it only once to prevent the removal // of valid child backlinks. 
if (obj == *config->hosts().begin()) { - for (host_map::iterator it(engine::host::hosts.begin()), - end(engine::host::hosts.end()); - it != end; ++it) - it->second->child_hosts.clear(); + for (const auto& [_, sptr_host] : engine::host::hosts) + sptr_host->child_hosts.clear(); } // Find host. @@ -911,10 +912,8 @@ void applier::host::resolve_object(const configuration::Host& obj, // It is necessary to do it only once to prevent the removal // of valid child backlinks. if (&obj == &(*pb_config.hosts().begin())) { - for (host_map::iterator it(engine::host::hosts.begin()), - end(engine::host::hosts.end()); - it != end; ++it) - it->second->child_hosts.clear(); + for (const auto& [_, sptr_host] : engine::host::hosts) + sptr_host->child_hosts.clear(); } // Find host. diff --git a/engine/src/configuration/applier/service.cc b/engine/src/configuration/applier/service.cc index 43800266ab2..fcf4e5bd70b 100644 --- a/engine/src/configuration/applier/service.cc +++ b/engine/src/configuration/applier/service.cc @@ -23,7 +23,6 @@ #include "com/centreon/engine/config.hh" #include "com/centreon/engine/configuration/applier/scheduler.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" -#include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/severity.hh" @@ -65,7 +64,6 @@ void applier::service::add_object(configuration::service const& obj) { engine::service* svc{add_service( obj.host_id(), obj.service_id(), obj.host_name(), obj.service_description(), obj.display_name(), obj.check_period(), - static_cast(obj.initial_state()), obj.max_check_attempts(), obj.check_interval(), obj.retry_interval(), obj.notification_interval(), obj.first_notification_delay(), obj.recovery_notification_delay(), obj.notification_period(), @@ -206,7 +204,6 @@ void applier::service::add_object(const configuration::Service& obj) { engine::service* svc{add_service( obj.host_id(), obj.service_id(), obj.host_name(), obj.service_description(), obj.display_name(), obj.check_period(), - static_cast(obj.initial_state()), obj.max_check_attempts(), obj.check_interval(), obj.retry_interval(), obj.notification_interval(), obj.first_notification_delay(), obj.recovery_notification_delay(), obj.notification_period(), @@ -419,8 +416,6 @@ void applier::service::modify_object(configuration::service const& obj) { s->set_check_command(obj.check_command()); s->set_event_handler(obj.event_handler()); s->set_event_handler_enabled(obj.event_handler_enabled()); - s->set_initial_state( - static_cast(obj.initial_state())); s->set_check_interval(obj.check_interval()); s->set_retry_interval(obj.retry_interval()); s->set_max_attempts(obj.max_check_attempts()); @@ -450,7 +445,7 @@ void applier::service::modify_object(configuration::service const& obj) { static_cast(obj.notification_interval())); s->set_first_notification_delay( static_cast(obj.first_notification_delay())); - + s->set_stalk_on(configuration::service::none); s->add_stalk_on(obj.stalking_options() & configuration::service::ok ? notifier::ok : notifier::none); @@ -512,6 +507,7 @@ void applier::service::modify_object(configuration::service const& obj) { s->set_acknowledgement_timeout(obj.acknowledgement_timeout() * config->interval_length()); s->set_recovery_notification_delay(obj.recovery_notification_delay()); + s->set_icon_id(obj.icon_id()); // Contacts. 
if (obj.contacts() != obj_old.contacts()) { @@ -632,8 +628,6 @@ void applier::service::modify_object(configuration::Service* old_obj, s->set_check_command(new_obj.check_command()); s->set_event_handler(new_obj.event_handler()); s->set_event_handler_enabled(new_obj.event_handler_enabled()); - s->set_initial_state( - static_cast(new_obj.initial_state())); s->set_check_interval(new_obj.check_interval()); s->set_retry_interval(new_obj.retry_interval()); s->set_max_attempts(new_obj.max_check_attempts()); @@ -658,7 +652,7 @@ void applier::service::modify_object(configuration::Service* old_obj, static_cast(new_obj.notification_interval())); s->set_first_notification_delay( static_cast(new_obj.first_notification_delay())); - + s->set_stalk_on(notifier::none); s->add_stalk_on(new_obj.stalking_options() & action_svc_ok ? notifier::ok : notifier::none); s->add_stalk_on(new_obj.stalking_options() & action_svc_warning @@ -717,6 +711,7 @@ void applier::service::modify_object(configuration::Service* old_obj, s->set_acknowledgement_timeout(new_obj.acknowledgement_timeout() * pb_config.interval_length()); s->set_recovery_notification_delay(new_obj.recovery_notification_delay()); + s->set_icon_id(new_obj.icon_id()); // Contacts. if (!MessageDifferencer::Equals(new_obj.contacts(), old_obj->contacts())) { diff --git a/engine/src/configuration/applier/state.cc b/engine/src/configuration/applier/state.cc index 8cfd81d0e0f..58f846f587a 100644 --- a/engine/src/configuration/applier/state.cc +++ b/engine/src/configuration/applier/state.cc @@ -1894,9 +1894,10 @@ void applier::state::_processing(configuration::state& new_cfg, _apply(diff_contacts, err); _apply( diff_contactgroups, err); + + _resolve(config->contacts(), err); _resolve( config->contactgroups(), err); - _resolve(config->contacts(), err); // Apply severities. _apply(diff_severities, err); @@ -2317,10 +2318,10 @@ void applier::state::_processing(configuration::State& new_cfg, err); _apply( diff_contactgroups, err); - _resolve( - pb_config.contactgroups(), err); _resolve(pb_config.contacts(), err); + _resolve( + pb_config.contactgroups(), err); // Apply severities. _apply, diff --git a/engine/src/downtimes/service_downtime.cc b/engine/src/downtimes/service_downtime.cc index 93c4e2ac093..3861a7fd2a2 100644 --- a/engine/src/downtimes/service_downtime.cc +++ b/engine/src/downtimes/service_downtime.cc @@ -323,8 +323,7 @@ int service_downtime::handle() { time_t event_time(0L); int attr(0); - engine_logger(dbg_functions, basic) << "handle_downtime()"; - SPDLOG_LOGGER_TRACE(functions_logger, "handle_downtime()"); + SPDLOG_LOGGER_TRACE(functions_logger, "service_downtime::handle()"); auto found = service::services_by_id.find({host_id(), service_id()}); @@ -415,7 +414,7 @@ int service_downtime::handle() { /* update the status data */ /* We update with CHECK_RESULT level, so notifications numbers, downtimes, * and check infos will be updated. 
*/ - found->second->update_status(); + found->second->update_status(service::STATUS_DOWNTIME_DEPTH); /* decrement pending flex downtime if necessary */ if (!is_fixed() && _incremented_pending_downtime) { diff --git a/engine/src/events/loop.cc b/engine/src/events/loop.cc index 7f99df26f35..9236e391c43 100644 --- a/engine/src/events/loop.cc +++ b/engine/src/events/loop.cc @@ -174,7 +174,8 @@ void loop::_dispatching() { engine_logger(log_info_message, most) << "Reloading..."; process_logger->info("Reloading..."); reloading = true; - std::async(std::launch::async, apply_conf, &reloading); + auto future [[maybe_unused]] = + std::async(std::launch::async, apply_conf, &reloading); } else { engine_logger(log_info_message, most) << "Already reloading..."; process_logger->info("Already reloading..."); diff --git a/engine/src/host.cc b/engine/src/host.cc index f3d48bcd95a..a99dc21256e 100644 --- a/engine/src/host.cc +++ b/engine/src/host.cc @@ -61,7 +61,6 @@ host_id_map host::hosts_by_id; * @param[in] alias Host alias. * @param[in] address Host address. * @param[in] check_period Check period. - * @param[in] initial_state Initial host state. * @param[in] check_interval Normal check interval. * @param[in] retry_interval Retry check interval. * @param[in] max_attempts Max check attempts. @@ -136,7 +135,6 @@ host::host(uint64_t host_id, const std::string& alias, const std::string& address, const std::string& check_period, - enum host::host_state initial_state, uint32_t check_interval, uint32_t retry_interval, int max_attempts, @@ -251,10 +249,10 @@ host::host(uint64_t host_id, _total_service_check_interval{0}, _circular_path_checked{false}, _contains_circular_path{false}, - _last_state{initial_state}, - _last_hard_state{initial_state}, - _current_state{initial_state}, - _initial_state{initial_state} { + _initial_state{state_up}, + _last_state{_initial_state}, + _last_hard_state{_initial_state}, + _current_state{_initial_state} { // Make sure we have the data we need. if (name.empty() || address.empty()) { engine_logger(log_config_error, basic) @@ -284,7 +282,7 @@ host::host(uint64_t host_id, // Duplicate string vars. _alias = !alias.empty() ? alias : name; - set_current_attempt(initial_state == host::state_up ? 1 : max_attempts); + set_current_attempt(1); set_modified_attributes(MODATTR_NONE); set_state_type(hard); @@ -577,13 +575,25 @@ int host::get_current_state_int() const { } std::ostream& operator<<(std::ostream& os, host_map_unsafe const& obj) { - for (host_map_unsafe::const_iterator it{obj.begin()}, end{obj.end()}; - it != end; ++it) { - os << it->first; - if (std::next(it) != end) + bool first = true; + for (const auto& [key, _] : obj) { + if (!first) { os << ", "; - else - os << ""; + } + os << key; + first = false; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, host_map const& obj) { + bool first = true; + for (const auto& [key, _] : obj) { + if (!first) { + os << ", "; + } + os << key; + first = false; } return os; } @@ -1031,8 +1041,7 @@ int is_host_immediate_child_of_host(com::centreon::engine::host* parent_host, } // Mid-level/bottom hosts. 
else { - host_map_unsafe::const_iterator it{ - child_host->parent_hosts.find(parent_host->name())}; + auto it{child_host->parent_hosts.find(parent_host->name())}; return it != child_host->parent_hosts.end(); } @@ -1066,9 +1075,8 @@ int is_host_immediate_parent_of_host(com::centreon::engine::host* child_host, */ int number_of_immediate_child_hosts(com::centreon::engine::host* hst) { int children(0); - for (host_map::iterator it{host::hosts.begin()}, end{host::hosts.end()}; - it != end; ++it) - if (is_host_immediate_child_of_host(hst, it->second.get())) + for (const auto& [_, sptr_host] : host::hosts) + if (is_host_immediate_child_of_host(hst, sptr_host.get())) ++children; return children; } @@ -1084,9 +1092,8 @@ int number_of_immediate_child_hosts(com::centreon::engine::host* hst) { */ int number_of_immediate_parent_hosts(com::centreon::engine::host* hst) { int parents(0); - for (host_map::iterator it{host::hosts.begin()}, end{host::hosts.end()}; - it != end; ++it) - if (is_host_immediate_parent_of_host(hst, it->second.get())) + for (const auto& [_, sptr_host] : host::hosts) + if (is_host_immediate_parent_of_host(hst, sptr_host.get())) ++parents; return parents; } @@ -1102,10 +1109,9 @@ int number_of_immediate_parent_hosts(com::centreon::engine::host* hst) { */ int number_of_total_child_hosts(com::centreon::engine::host* hst) { int children(0); - for (host_map::iterator it{host::hosts.begin()}, end{host::hosts.end()}; - it != end; ++it) - if (is_host_immediate_child_of_host(hst, it->second.get())) - children += number_of_total_child_hosts(it->second.get()) + 1; + for (const auto& [_, sptr_host] : host::hosts) + if (is_host_immediate_child_of_host(hst, sptr_host.get())) + children += number_of_total_child_hosts(sptr_host.get()) + 1; return children; } @@ -3255,17 +3261,15 @@ int host::process_check_result_3x(enum host::host_state new_state, SPDLOG_LOGGER_DEBUG(checks_logger, "Propagating checks to parent host(s)..."); - for (host_map_unsafe::iterator it{parent_hosts.begin()}, - end{parent_hosts.end()}; - it != end; it++) { - if (!it->second) + for (const auto& [key, sptr_host] : parent_hosts) { + if (!sptr_host) continue; - if (it->second->get_current_state() != host::state_up) { + if (sptr_host->get_current_state() != host::state_up) { engine_logger(dbg_checks, more) - << "Check of parent host '" << it->first << "' queued."; + << "Check of parent host '" << key << "' queued."; SPDLOG_LOGGER_DEBUG(checks_logger, - "Check of parent host '{}' queued.", it->first); - check_hostlist.push_back(it->second); + "Check of parent host '{}' queued.", key); + check_hostlist.push_back(sptr_host.get()); } } @@ -3277,17 +3281,15 @@ int host::process_check_result_3x(enum host::host_state new_state, SPDLOG_LOGGER_DEBUG(checks_logger, "Propagating checks to child host(s)..."); - for (host_map_unsafe::iterator it{child_hosts.begin()}, - end{child_hosts.end()}; - it != end; it++) { - if (!it->second) + for (const auto& [key, ptr_host] : child_hosts) { + if (!ptr_host) continue; - if (it->second->get_current_state() != host::state_up) { + if (ptr_host->get_current_state() != host::state_up) { engine_logger(dbg_checks, more) - << "Check of child host '" << it->first << "' queued."; + << "Check of child host '" << key << "' queued."; SPDLOG_LOGGER_DEBUG(checks_logger, "Check of child host '{}' queued.", - it->first); - check_hostlist.push_back(it->second); + key); + check_hostlist.push_back(ptr_host); } } } @@ -3377,24 +3379,21 @@ int host::process_check_result_3x(enum host::host_state new_state, "** WARNING: Max 
attempts = 1, so we have to run serial " "checks of all parent hosts!"); - for (host_map_unsafe::iterator it{parent_hosts.begin()}, - end{parent_hosts.end()}; - it != end; it++) { - if (!it->second) + for (const auto& [key, sptr_host] : parent_hosts) { + if (!sptr_host) continue; has_parent = true; engine_logger(dbg_checks, more) - << "Running serial check parent host '" << it->first << "'..."; - SPDLOG_LOGGER_DEBUG(checks_logger, - "Running serial check parent host '{}'...", - it->first); + << "Running serial check parent host '" << key << "'..."; + SPDLOG_LOGGER_DEBUG( + checks_logger, "Running serial check parent host '{}'...", key); /* run an immediate check of the parent host */ - it->second->run_sync_check_3x(&parent_state, check_options, - use_cached_result, - check_timestamp_horizon); + sptr_host->run_sync_check_3x(&parent_state, check_options, + use_cached_result, + check_timestamp_horizon); /* bail out as soon as we find one parent host that is UP */ if (parent_state == host::state_up) { @@ -3444,17 +3443,15 @@ int host::process_check_result_3x(enum host::host_state new_state, checks_logger, "Propagating check to immediate non-UNREACHABLE child hosts..."); - for (host_map_unsafe::iterator it{child_hosts.begin()}, - end{child_hosts.end()}; - it != end; it++) { - if (!it->second) + for (const auto& [key, ptr_host] : child_hosts) { + if (!ptr_host) continue; - if (it->second->get_current_state() != host::state_unreachable) { + if (ptr_host->get_current_state() != host::state_unreachable) { engine_logger(dbg_checks, more) - << "Check of child host '" << it->first << "' queued."; + << "Check of child host '" << key << "' queued."; SPDLOG_LOGGER_DEBUG(checks_logger, - "Check of child host '{}' queued.", it->first); - check_hostlist.push_back(it->second); + "Check of child host '{}' queued.", key); + check_hostlist.push_back(ptr_host); } } } @@ -3488,17 +3485,15 @@ int host::process_check_result_3x(enum host::host_state new_state, "Propagating checks to immediate parent hosts that " "are UP..."); - for (host_map_unsafe::iterator it{parent_hosts.begin()}, - end{parent_hosts.end()}; - it != end; it++) { - if (it->second == nullptr) + for (const auto& [key, sptr_host] : parent_hosts) { + if (sptr_host == nullptr) continue; - if (it->second->get_current_state() == host::state_up) { - check_hostlist.push_back(it->second); + if (sptr_host->get_current_state() == host::state_up) { + check_hostlist.push_back(sptr_host.get()); engine_logger(dbg_checks, more) - << "Check of host '" << it->first << "' queued."; + << "Check of host '" << key << "' queued."; SPDLOG_LOGGER_DEBUG(checks_logger, "Check of host '{}' queued.", - it->first); + key); } } @@ -3511,17 +3506,15 @@ int host::process_check_result_3x(enum host::host_state new_state, "Propagating checks to immediate non-UNREACHABLE " "child hosts..."); - for (host_map_unsafe::iterator it{child_hosts.begin()}, - end{child_hosts.end()}; - it != end; it++) { - if (!it->second) + for (const auto& [key, ptr_host] : child_hosts) { + if (!ptr_host) continue; - if (it->second->get_current_state() != host::state_unreachable) { + if (ptr_host->get_current_state() != host::state_unreachable) { engine_logger(dbg_checks, more) - << "Check of child host '" << it->first << "' queued."; + << "Check of child host '" << key << "' queued."; SPDLOG_LOGGER_DEBUG(checks_logger, - "Check of child host '{}' queued.", it->first); - check_hostlist.push_back(it->second); + "Check of child host '{}' queued.", key); + check_hostlist.push_back(ptr_host); } } @@ -3739,22 
+3732,20 @@ enum host::host_state host::determine_host_reachability( /* check all parent hosts to see if we're DOWN or UNREACHABLE */ else { - for (host_map_unsafe::iterator it{parent_hosts.begin()}, - end{parent_hosts.end()}; - it != end; it++) { - if (!it->second) + for (const auto& [key, sptr_host] : parent_hosts) { + if (!sptr_host) continue; /* bail out as soon as we find one parent host that is UP */ - if (it->second->get_current_state() == host::state_up) { + if (sptr_host->get_current_state() == host::state_up) { is_host_present = true; /* set the current state */ state = host::state_down; - engine_logger(dbg_checks, most) << "At least one parent (" << it->first - << ") is up, so host is DOWN."; + engine_logger(dbg_checks, most) + << "At least one parent (" << key << ") is up, so host is DOWN."; SPDLOG_LOGGER_DEBUG(checks_logger, "At least one parent ({}) is up, so host is DOWN.", - it->first); + key); break; } } @@ -4102,22 +4093,20 @@ void host::resolve(uint32_t& w, uint32_t& e) { } /* check all parent parent host */ - for (host_map_unsafe::iterator it(parent_hosts.begin()), - end(parent_hosts.end()); - it != end; it++) { - host_map::const_iterator it_host{host::hosts.find(it->first)}; + for (auto& [key, sptr_host] : parent_hosts) { + host_map::const_iterator it_host{host::hosts.find(key)}; if (it_host == host::hosts.end() || !it_host->second) { - engine_logger(log_verification_error, basic) << "Error: '" << it->first + engine_logger(log_verification_error, basic) << "Error: '" << key << "' is not a " "valid parent for host '" << name() << "'!"; config_logger->error("Error: '{}' is not a valid parent for host '{}'!", - it->first, name()); + key, name()); errors++; } else { - it->second = it_host->second.get(); - it_host->second->add_child_host( - this); // add a reverse (child) link to make searches faster later on + sptr_host = it_host->second; + it_host->second->add_child_host(this); // add a reverse (child) link to + // make searches faster later on } } diff --git a/engine/src/macros/grab_host.cc b/engine/src/macros/grab_host.cc index c611278d665..22cf6899213 100644 --- a/engine/src/macros/grab_host.cc +++ b/engine/src/macros/grab_host.cc @@ -184,12 +184,10 @@ std::string get_host_total_services(host& hst, nagios_macros* mac) { static std::string get_host_parents(host& hst, nagios_macros* mac) { (void)mac; std::string retval; - for (host_map_unsafe::const_iterator it(hst.parent_hosts.begin()), - end(hst.parent_hosts.end()); - it != end; it++) { + for (const auto& [key, _] : hst.parent_hosts) { if (!retval.empty()) retval.append(","); - retval.append(it->first); + retval.append(key); } return retval; } @@ -205,12 +203,10 @@ static std::string get_host_parents(host& hst, nagios_macros* mac) { static std::string get_host_children(host& hst, nagios_macros* mac) { (void)mac; std::string retval; - for (host_map_unsafe::const_iterator it(hst.child_hosts.begin()), - end(hst.child_hosts.end()); - it != end; it++) { + for (const auto& [key, _] : hst.child_hosts) { if (!retval.empty()) retval.append(","); - retval.append(it->first); + retval.append(key); } return retval; } diff --git a/engine/src/main.cc b/engine/src/main.cc index 9dee3f52c85..1f85d5f871c 100644 --- a/engine/src/main.cc +++ b/engine/src/main.cc @@ -67,7 +67,6 @@ namespace asio = boost::asio; #include "com/centreon/engine/statusdata.hh" #include "com/centreon/engine/string.hh" #include "com/centreon/engine/version.hh" -#include "com/centreon/io/directory_entry.hh" #include "com/centreon/logging/engine.hh" #ifdef 
LEGACY_CONF #include "common/engine_legacy_conf/parser.hh" @@ -127,6 +126,9 @@ int main(int argc, char* argv[]) { * threads and we'll only be able to change loggers atomic values. */ #ifdef LEGACY_CONF config = new configuration::state; +#else + // init pb_config to default values + configuration::state_helper state_hlp(&pb_config); #endif init_loggers(); diff --git a/engine/src/service.cc b/engine/src/service.cc index 4219833c22e..30bc2fc2503 100644 --- a/engine/src/service.cc +++ b/engine/src/service.cc @@ -59,7 +59,6 @@ service::service(const std::string& hostname, const std::string& check_command, bool checks_enabled, bool accept_passive_checks, - enum service::service_state initial_state, uint32_t check_interval, uint32_t retry_interval, uint32_t notification_interval, @@ -131,10 +130,10 @@ service::service(const std::string& hostname, _last_time_warning{0}, _last_time_unknown{0}, _last_time_critical{0}, - _initial_state{initial_state}, - _current_state{initial_state}, - _last_hard_state{initial_state}, - _last_state{initial_state}, + _initial_state{service::state_ok}, + _current_state{_initial_state}, + _last_hard_state{_initial_state}, + _last_state{_initial_state}, _host_ptr{nullptr}, _host_problem_at_last_check{false} { if (st == NONE) { @@ -147,7 +146,7 @@ service::service(const std::string& hostname, else _service_type = SERVICE; } - set_current_attempt(initial_state == service::state_ok ? 1 : max_attempts); + set_current_attempt(1); } service::~service() noexcept { @@ -563,7 +562,6 @@ std::ostream& operator<<(std::ostream& os, * @param[in] description Service description. * @param[in] display_name Display name. * @param[in] check_period Check timeperiod name. - * @param[in] initial_state Initial service state. * @param[in] max_attempts Max check attempts. * @param[in] accept_passive_checks Does this service accept * check result submission ? @@ -636,7 +634,6 @@ com::centreon::engine::service* add_service( const std::string& description, const std::string& display_name, const std::string& check_period, - com::centreon::engine::service::service_state initial_state, int max_attempts, double check_interval, double retry_interval, @@ -763,8 +760,8 @@ com::centreon::engine::service* add_service( // Allocate memory. auto obj{std::make_shared( host_name, description, display_name.empty() ? description : display_name, - check_command, checks_enabled, accept_passive_checks, initial_state, - check_interval, retry_interval, notification_interval, max_attempts, + check_command, checks_enabled, accept_passive_checks, check_interval, + retry_interval, notification_interval, max_attempts, first_notification_delay, recovery_notification_delay, notification_period, notifications_enabled, is_volatile, check_period, event_handler, event_handler_enabled, notes, notes_url, action_url, diff --git a/engine/tests/configuration/applier/applier-host.cc b/engine/tests/configuration/applier/applier-host.cc index ebc1f3cb65c..7a1c9fc2d1a 100644 --- a/engine/tests/configuration/applier/applier-host.cc +++ b/engine/tests/configuration/applier/applier-host.cc @@ -45,7 +45,6 @@ class ApplierHost : public ::testing::Test { // Then the applier add_object throws an exception. 
TEST_F(ApplierHost, NewHostWithoutHostId) { configuration::applier::host hst_aply; - configuration::applier::service svc_aply; configuration::service svc; configuration::host hst; ASSERT_TRUE(hst.parse("host_name", "test_host")); diff --git a/engine/tests/configuration/applier/applier-pbservice.cc b/engine/tests/configuration/applier/applier-pbservice.cc index 62045656d29..8413e9c7d2e 100644 --- a/engine/tests/configuration/applier/applier-pbservice.cc +++ b/engine/tests/configuration/applier/applier-pbservice.cc @@ -309,19 +309,6 @@ TEST_F(ApplierService, PbServicesFlapOptionsAll) { action_svc_unknown); } -// Given a service configuration, -// When the initial_state value is set to unknown, -// Then it is well recorded with unknown. -// When the initial_state value is set to whatever -// Then the parse method returns false. -TEST_F(ApplierService, PbServicesInitialState) { - configuration::Service csvc; - configuration::service_helper csvc_hlp(&csvc); - csvc_hlp.hook("initial_state", "u"); - ASSERT_EQ(csvc.initial_state(), engine::service::state_unknown); - ASSERT_FALSE(csvc_hlp.hook("initial_state", "g")); -} - // Given a service configuration, // When the stalking options are set to "c,w", // Then they are well recorded with "critical | warning" diff --git a/engine/tests/configuration/applier/applier-service.cc b/engine/tests/configuration/applier/applier-service.cc index 4833395a9b8..d5d77efcfd6 100644 --- a/engine/tests/configuration/applier/applier-service.cc +++ b/engine/tests/configuration/applier/applier-service.cc @@ -337,22 +337,10 @@ TEST_F(ApplierService, ServicesFlapOptionsAll) { configuration::service::unknown); } -// Given a service configuration, -// When the initial_state value is set to unknown, -// Then it is well recorded with unknown. -// When the initial_state value is set to whatever -// Then the parse method returns false. 
-TEST_F(ApplierService, ServicesInitialState) { - configuration::service csvc; - ASSERT_TRUE(csvc.parse("initial_state", "u")); - ASSERT_EQ(csvc.initial_state(), engine::service::state_unknown); - ASSERT_FALSE(csvc.parse("initial_state", "g")); -} - // Given a service configuration, // When the stalking options are set to "c,w", // Then they are well recorded with "critical | warning" -// When the initial_state value is set to "a" +// When the stalking options value is set to "a" // Then they are well recorded with "ok | warning | unknown | critical" TEST_F(ApplierService, ServicesStalkingOptions) { configuration::service csvc; diff --git a/engine/tests/enginerpc/client.cc b/engine/tests/enginerpc/client.cc index a8fbe166de4..dcf3c1bc32d 100644 --- a/engine/tests/enginerpc/client.cc +++ b/engine/tests/enginerpc/client.cc @@ -56,7 +56,7 @@ class EngineRPCClient { } bool GetHostByHostName(std::string const& req, EngineHost* response) { - HostIdentifier request; + NameOrIdIdentifier request; grpc::ClientContext context; request.set_name(req); @@ -70,7 +70,7 @@ class EngineRPCClient { } bool GetHostByHostId(uint32_t& req, EngineHost* response) { - HostIdentifier request; + NameOrIdIdentifier request; grpc::ClientContext context; request.set_id(req); @@ -84,7 +84,7 @@ class EngineRPCClient { } bool GetContact(std::string const& req, EngineContact* response) { - ContactIdentifier request; + NameIdentifier request; grpc::ClientContext context; request.set_name(req); @@ -333,7 +333,7 @@ class EngineRPCClient { bool DeleteAllHostCommentsByName(std::string const& req, CommandSuccess* response) { - HostIdentifier request; + NameOrIdIdentifier request; grpc::ClientContext context; request.set_name(req); @@ -347,7 +347,7 @@ class EngineRPCClient { } bool DeleteAllHostCommentsById(uint32_t& req, CommandSuccess* response) { - HostIdentifier request; + NameOrIdIdentifier request; grpc::ClientContext context; request.set_id(req); @@ -398,7 +398,7 @@ class EngineRPCClient { bool RemoveHostAcknowledgementByNames(std::string const& hostname, CommandSuccess* response) { - HostIdentifier request; + NameOrIdIdentifier request; grpc::ClientContext context; request.set_name(hostname); grpc::Status status = @@ -412,7 +412,7 @@ class EngineRPCClient { bool RemoveHostAcknowledgementByIds(uint32_t& hostid, CommandSuccess* response) { - HostIdentifier request; + NameOrIdIdentifier request; grpc::ClientContext context; request.set_id(hostid); diff --git a/engine/tests/opentelemetry/host_serv_extractor_test.cc b/engine/tests/opentelemetry/host_serv_extractor_test.cc index 1e20e23d69a..b4db5ad82e6 100644 --- a/engine/tests/opentelemetry/host_serv_extractor_test.cc +++ b/engine/tests/opentelemetry/host_serv_extractor_test.cc @@ -34,7 +34,9 @@ TEST(otl_host_serv_extractor_test, empty_request) { ExportMetricsServiceRequest>(); otl_data_point::extract_data_points( - request, [](const otl_data_point& data_pt) { ASSERT_TRUE(false); }); + request, [](const otl_data_point& data_pt [[maybe_unused]]) { + ASSERT_TRUE(false); + }); } class otl_host_serv_attributes_extractor_test : public ::testing::Test { diff --git a/engine/tests/opentelemetry/otl_server_test.cc b/engine/tests/opentelemetry/otl_server_test.cc index 5d6291a6cc3..b74c9b9ef4f 100644 --- a/engine/tests/opentelemetry/otl_server_test.cc +++ b/engine/tests/opentelemetry/otl_server_test.cc @@ -85,8 +85,9 @@ class otl_server_test : public ::testing::Test { const metric_handler_type& handler) { std::shared_ptr agent_conf = std::make_shared(60, 100, 60, 10); - _server = 
otl_server::load(g_io_context, conf, agent_conf, handler, - spdlog::default_logger()); + _server = otl_server::load( + g_io_context, conf, agent_conf, handler, spdlog::default_logger(), + std::make_shared(g_io_context)); } }; diff --git a/engine/tests/string/string.cc b/engine/tests/string/string.cc index 3486ba7e15d..e0adeb7217d 100644 --- a/engine/tests/string/string.cc +++ b/engine/tests/string/string.cc @@ -62,6 +62,17 @@ TEST(string_utils, extractPerfdataGaugeDiff) { "d[aa a]=28;13;54;0;80"); } +TEST(string_utils, extractPerfdataBrackets) { + std::string perfdata( + "'xx[aa a aa]'=2;3;7;1;9 '[a aa]'=12;25;50;0;118 'aa a]'=28;13;54;0;80"); + ASSERT_EQ(string::extract_perfdata(perfdata, "xx[aa a aa]"), + "'xx[aa a aa]'=2;3;7;1;9"); + ASSERT_EQ(string::extract_perfdata(perfdata, "[a aa]"), + "'[a aa]'=12;25;50;0;118"); + ASSERT_EQ(string::extract_perfdata(perfdata, "aa a]"), + "'aa a]'=28;13;54;0;80"); +} + TEST(string_utils, removeThresholdsWithoutThresholds) { std::string perfdata("a=2V"); ASSERT_EQ(string::remove_thresholds(perfdata), "a=2V"); diff --git a/gorgone/docs/configuration.md b/gorgone/docs/configuration.md index 0789429e9b7..b634673c72d 100644 --- a/gorgone/docs/configuration.md +++ b/gorgone/docs/configuration.md @@ -100,6 +100,22 @@ configuration: proxy_name: proxy ``` +## *centreon vault* + +Centreon Vault is a tool that secures the passwords present in the Centreon configuration.\ + +It stores passwords in a vault and retrieves them when needed by each component.\ + +Gorgone can store any string from its configuration in the vault; it cannot store an array or a hash. + + +To use the vault, follow the official documentation to set up Vault and to create the configuration file `/var/lib/centreon/vault/vault.json`.\ + +Then replace any password in the Gorgone configuration with a vault reference string. The expected format is implemented here: + + +https://github.com/centreon/centreon-collect/blob/develop/perl-libs/lib/centreon/common/centreonvault.pm#L391 + ## *modules* See the *configuration* titles of the modules documentations listed [here](../docs/modules.md). diff --git a/gorgone/docs/modules/core/proxy.md b/gorgone/docs/modules/core/proxy.md index 69899491411..b1d1ee14af3 100644 --- a/gorgone/docs/modules/core/proxy.md +++ b/gorgone/docs/modules/core/proxy.md @@ -12,16 +12,17 @@ A SSH client library make routing to non-gorgoned nodes possible.
## Configuration -| Directive | Description | Default value | -|:---------------------|:-------------------------------------------------------------------|:---------------| -| pool | Number of children to instantiate to process events | `5` | -| synchistory_time | Time in seconds between two log synchronisations | `60` | -| synchistory_timeout | Time in seconds before log synchronisation is considered timed out | `30` | -| ping | Time in seconds between two node pings | `60` | -| pong_discard_timeout | Time in seconds before a ping is considered lost | `300` | +| Directive | Description | Default value | +|:---------------------|:-----------------------------------------------------------------------------------------------------------------------------------|:--------------| +| pool | Number of children to instantiate to process events | `5` | +| synchistory_time | Time in seconds between two log synchronisations | `60` | +| synchistory_timeout | Time in seconds before log synchronisation is considered timed out | `30` | +| ping | Time in seconds between two node pings | `60` | +| pong_discard_timeout | Time in seconds before a ping is considered lost | `300` | +| buffer_size | Maximum size of the packet sent from a node to another. This is mainly used by legacycmd to send files from the central to the poller. | `150000` | -This part of the configuration is only used if some poller must connect with the pullwss module. +This part of the configuration is only used if some poller must connect with the pullwss module. | Directive | Description | Default value | |:--------------|:-----------------------------------------------------------------------------------------------|:--------------| diff --git a/gorgone/gorgone/class/core.pm b/gorgone/gorgone/class/core.pm index b432de30721..0e4b0e909e9 100644 --- a/gorgone/gorgone/class/core.pm +++ b/gorgone/gorgone/class/core.pm @@ -35,7 +35,7 @@ use gorgone::class::listener; use gorgone::class::frame; use Time::HiRes; use Try::Tiny; - +use centreon::common::centreonvault; my ($gorgone); use base qw(gorgone::class::script); @@ -163,10 +163,17 @@ sub init { $self->{logger}->writeLogError("[core] can't find config file '$self->{config_file}'"); exit(1); } + # before loading the config, we need to load initialize vault. + # Gorgone don't know how to reload for now, but once it will be done, we will need to retry the vault connexion if it failed when starting, and read again the configuration + $self->{vault_file} = defined($self->{vault_file}) ? 
$self->{vault_file} : '/var/lib/centreon/vault/vault.json'; + $self->{vault} = centreon::common::centreonvault->new(logger => $self->{logger}, 'config_file' => $self->{vault_file}); + $self->{config} = $self->yaml_load_config( - file => $self->{config_file}, + file => $self->{config_file}, + # the filter is used to remove anything from the configuration not related to gorgone or centreon filter => '!($ariane eq "configuration##" || $ariane =~ /^configuration##(?:gorgone|centreon)##/)' ); + $self->init_server_keys(); $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_tcp_keepalive} = diff --git a/gorgone/gorgone/class/logger.pm b/gorgone/gorgone/class/logger.pm deleted file mode 100644 index 90b13859819..00000000000 --- a/gorgone/gorgone/class/logger.pm +++ /dev/null @@ -1,256 +0,0 @@ -# -# Copyright 2019 Centreon (http://www.centreon.com/) -# -# Centreon is a full-fledged industry-strength solution that meets -# the needs in IT infrastructure and application monitoring for -# service performance. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -package gorgone::class::logger; - -=head1 NOM - -gorgone::class::logger - Simple logging module - -=head1 SYNOPSIS - - #!/usr/bin/perl -w - - use strict; - use warnings; - - use centreon::polling; - - my $logger = new gorgone::class::logger(); - - $logger->writeLogInfo("information"); - -=head1 DESCRIPTION - -This module offers a simple interface to write log messages to various output: - -* standard output -* file -* syslog - -=cut - -use strict; -use warnings; -use Sys::Syslog qw(:standard :macros); -use IO::Handle; -use Encode; - -my %severities = ( - 1 => LOG_INFO, - 2 => LOG_ERR, - 4 => LOG_DEBUG -); - -sub new { - my $class = shift; - - my $self = bless - { - file => 0, - filehandler => undef, - # 0 = nothing, 1 = critical, 3 = info, 7 = debug - severity => 3, - old_severity => 3, - # 0 = stdout, 1 = file, 2 = syslog - log_mode => 0, - # Output pid of current process - withpid => 0, - # syslog - log_facility => undef, - log_option => LOG_PID, - }, $class; - return $self; -} - -sub file_mode($$) { - my ($self, $file) = @_; - - if (defined($self->{filehandler})) { - $self->{filehandler}->close(); - } - if (open($self->{filehandler}, ">>", $file)){ - $self->{log_mode} = 1; - $self->{filehandler}->autoflush(1); - $self->{file_name} = $file; - return 1; - } - $self->{filehandler} = undef; - print STDERR "Cannot open file $file: $!\n"; - return 0; -} - -sub is_file_mode { - my $self = shift; - - if ($self->{log_mode} == 1) { - return 1; - } - return 0; -} - -sub is_debug { - my $self = shift; - - if (($self->{severity} & 4) == 0) { - return 0; - } - return 1; -} - -sub syslog_mode($$$) { - my ($self, $logopt, $facility) = @_; - - $self->{log_mode} = 2; - openlog($0, $logopt, $facility); - return 1; -} - -# For daemons -sub redirect_output { - my $self = shift; - - if ($self->is_file_mode()) { - open my $lfh, '>>', $self->{file_name}; - open STDOUT, '>&', $lfh; - open STDERR, '>&', $lfh; - } -} - -sub flush_output { - my 
($self, %options) = @_; - - $| = 1 if (defined($options{enabled})); -} - -sub force_default_severity { - my ($self, %options) = @_; - - $self->{old_severity} = defined($options{severity}) ? $options{severity} : $self->{severity}; -} - -sub set_default_severity { - my $self = shift; - - $self->{severity} = $self->{old_severity}; -} - -# Getter/Setter Log severity -sub severity { - my $self = shift; - if (@_) { - my $save_severity = $self->{severity}; - if ($_[0] =~ /^[012347]$/) { - $self->{severity} = $_[0]; - } elsif ($_[0] eq 'none') { - $self->{severity} = 0; - } elsif ($_[0] eq 'error') { - $self->{severity} = 1; - } elsif ($_[0] eq 'info') { - $self->{severity} = 3; - } elsif ($_[0] eq 'debug') { - $self->{severity} = 7; - } else { - $self->writeLogError('Wrong severity value set.'); - return -1; - } - $self->{old_severity} = $save_severity; - } - return $self->{severity}; -} - -sub withpid { - my $self = shift; - if (@_) { - $self->{withpid} = $_[0]; - } - return $self->{withpid}; -} - -sub get_date { - my $self = shift; - my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time()); - return sprintf( - '%04d-%02d-%02d %02d:%02d:%02d', - $year+1900, $mon+1, $mday, $hour, $min, $sec - ); -} - -sub writeLog { - my ($self) = shift; - - my $withdate = (defined $_[0]->{withdate}) ? $_[0]->{withdate} : 1; - my $withseverity = (defined $_[0]->{withseverity}) ? $_[0]->{withseverity} : 1; - - if (($self->{severity} & $_[0]->{severity}) == 0) { - return; - } - - if (length($_[0]->{message}) > 20000) { - $_[0]->{message} = substr($_[0]->{message}, 0, 20000) . '...'; - } - if ($self->{log_mode} == 2) { - syslog($severities{$_[0]->{severity}}, $_[0]->{message}); - return; - } - - $_[0]->{message} = (($self->{withpid} == 1) ? "$$ - $_[0]->{message} " : $_[0]->{message}); - $_[0]->{message} = ($withseverity) - ? $_[0]->{severity_str} . " - $_[0]->{message}" : $_[0]->{message}; - $_[0]->{message} = ($withdate) - ? $self->get_date . 
" - $_[0]->{message}" : $_[0]->{message}; - - chomp($_[0]->{message}); - if ($self->{log_mode} == 0) { - print "$_[0]->{message}\n"; - } elsif ($self->{log_mode} == 1) { - if (defined $self->{filehandler}) { - print { $self->{filehandler} } "$_[0]->{message}\n"; - } - } -} - -sub writeLogDebug { - my ($self) = shift; - - $self->writeLog({ severity => 4, severity_str => 'DEBUG', message => $_[0] }); -} - -sub writeLogInfo { - my ($self) = shift; - - $self->writeLog({ severity => 2, severity_str => 'INFO', message => $_[0] }); -} - -sub writeLogError { - my ($self) = shift; - - $self->writeLog({ severity => 1, severity_str => 'ERROR', message => $_[0] }); -} - -sub DESTROY { - my $self = shift; - - if (defined $self->{filehandler}) { - $self->{filehandler}->close(); - } -} - -1; diff --git a/gorgone/gorgone/class/script.pm b/gorgone/gorgone/class/script.pm index a5891101799..63318549a7c 100644 --- a/gorgone/gorgone/class/script.pm +++ b/gorgone/gorgone/class/script.pm @@ -25,7 +25,7 @@ use warnings; use FindBin; use Getopt::Long; use Pod::Usage; -use gorgone::class::logger; +use centreon::common::logger; use gorgone::class::db; use gorgone::class::lock; use YAML::XS; @@ -53,9 +53,10 @@ sub new { bless $self, $class; $self->{name} = $name; - $self->{logger} = gorgone::class::logger->new(); + $self->{logger} = centreon::common::logger->new(); $self->{options} = { 'config=s' => \$self->{config_file}, + 'vault=s' => \$self->{vault_config_file}, 'logfile=s' => \$self->{log_file}, 'severity=s' => \$self->{severity}, 'flushoutput' => \$self->{flushoutput}, @@ -141,6 +142,11 @@ sub run { $self->init(); } +# yaml_get_include: return a flat array of files defined by an !include directive. +# it will resolve the wildcard and return a sorted list of files. +# include: string with the directive. It can be a comma separated list, each element can contain '*' at the start of the string to specify 0 or more character (any character). +# current_dir: current directory to resolve relative path of !include directive. +# if the path is not absolute, it will be prefixed by the binary current path, so the first top level include should be an absolute path. sub yaml_get_include { my ($self, %options) = @_; @@ -151,16 +157,19 @@ sub yaml_get_include { my $dirname = File::Basename::dirname($dir); $dirname = $options{current_dir} . '/' . $dirname if ($dirname !~ /^\//); my $match_files = File::Basename::basename($dir); + # \Q\E is used to escape every special characters in the regex. + # we replace * by .* to match any character and disable \Q\E locally. + # so the extension will correctly match the file. $match_files =~ s/\*/\\E.*\\Q/g; $match_files = '\Q' . $match_files . '\E'; - my @sorted_files = (); my $DIR; + if (!opendir($DIR, $dirname)) { $self->{logger}->writeLogError("config - cannot opendir '$dirname' error: $!"); return (); } - + # opened the directory for the tested file, we will now test every file in the directory to see if they match the pattern. while (readdir($DIR)) { if (-f "$dirname/$_" && eval "/^$match_files\$/") { push @sorted_files, "$dirname/$_"; @@ -170,13 +179,17 @@ sub yaml_get_include { @sorted_files = sort { $a cmp $b } @sorted_files; push @all_files, @sorted_files; } - + # the list can be empty, for exemple if the client disable all the cron or whitelist of gorgone there should not be any error. return @all_files; } - +# yaml_parse_config: recursive function to parse yaml content and honor the inclusion of other files and vault password decryption. 
+# depending on the type of the yaml object, it will call itself recursively. +# config: yaml object as a perl reference (hash, array, scalar, hash of hash...). $YAML::XS::LoadBlessed should be set to 1 to turn !include into a blessed reference. +# current_dir: current directory used to resolve relative paths of !include directives. +# filter: a string to eval to filter the yaml content. You can for example return only the children of a node. +# ariane: Ariadne's thread to know where we are in the yaml content. It is used by the filter. example: 'configuration##gorgone##gorgonecore##' sub yaml_parse_config { my ($self, %options) = @_; - if (ref(${$options{config}}) eq 'HASH') { foreach (keys %{${$options{config}}}) { my $ariane = $options{ariane} . $_ . '##'; @@ -206,6 +219,7 @@ sub yaml_parse_config { ariane => $ariane ); } + # $YAML::XS::LoadBlessed must be set: when YAML::XS loads a property tagged with !include, it becomes a blessed reference instead of a scalar. } elsif (ref(${$options{config}}) eq 'include') { my @files = $self->yaml_get_include( include => ${${$options{config}}}, @@ -236,9 +250,23 @@ sub yaml_parse_config { } else { ${$options{config}} = 'false'; } + + } elsif (ref(${$options{config}}) eq '') { + # this is a scalar value; check whether it is a vault path and replace it with the secret if so. + if ($self->{vault} and $self->{vault}->can('get_secret')) { + ${$options{config}} = $self->{vault}->get_secret( ${$options{config}}); + } + } else { + $self->{logger}->writeLogError("config - unknown type of data: " . ref(${$options{config}})); } } +# yaml_load_config: entry point for yaml parsing. +# It can be called by yaml_parse_config when there is an !include in the yaml, and it calls yaml_parse_config to parse the content of each included file. +# file: filename to parse. The file can contain !include directives to include other files. +# filter: a string to eval to filter the yaml content. You can for example return only the children of a node named configuration with this filter: +# '$ariane eq "configuration##"' +# ariane: Ariadne's thread to know where we are in the yaml content. It is used by the filter.
example : 'configuration##gorgone##gorgonecore##' sub yaml_load_config { my ($self, %options) = @_; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIMetric.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIMetric.pm index d240db5ab7c..4b6ca9b0ad0 100644 --- a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIMetric.pm +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIMetric.pm @@ -83,7 +83,7 @@ sub insertMetricsIntoTable { $query .= " s.`sc_id`, s.`sc_name`, s.`host_id`, s.`host_name`, `hc_id`, `hc_name`, `hg_id`, `hg_name`"; $query .= " FROM `mod_bi_tmp_today_services` s, `metrics` m, `index_data` i"; $query .= " WHERE i.id = m.index_id and i.host_id=s.host_id and i.service_id=s.service_id"; - $query .= " group by s.hg_id, s.hc_id, s.sc_id, m.index_id, m.metric_id"; + $query .= " group by s.hg_id, s.hc_id, s.sc_id, m.index_id, m.metric_id, m.metric_name, m.unit_name, s.service_description, s.sc_name, s.host_name, s.host_id, hc_name, s.hg_name"; my $sth = $db->query({ query => $query }); return $sth; } diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/HostAvailability.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/HostAvailability.pm index f908982669a..e243b5f1e18 100644 --- a/gorgone/gorgone/modules/centreon/mbi/libs/bi/HostAvailability.pm +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/HostAvailability.pm @@ -151,7 +151,7 @@ sub getHGMonthAvailability { $query .= " STRAIGHT_JOIN mod_bi_hostgroups hg ON (h.hg_name=hg.hg_name AND h.hg_id=hg.hg_id)"; $query .= " STRAIGHT_JOIN mod_bi_hostcategories hc ON (h.hc_name=hc.hc_name AND h.hc_id=hc.hc_id)"; $query .= " WHERE t.year = YEAR('".$start."') AND t.month = MONTH('".$start."') and t.hour=0"; - $query .= " GROUP BY h.hg_id, h.hc_id, ha.liveservice_id"; + $query .= " GROUP BY h.hg_id, h.hc_id,hc.id,hg.id, ha.liveservice_id"; my $sth = $db->query({ query => $query }); $self->{"logger"}->writeLog("DEBUG","[HOST] Calculating MTBF/MTRS/MTBSI for Host"); diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/ServiceAvailability.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/ServiceAvailability.pm index dee44a610b3..4f8218cf2e6 100644 --- a/gorgone/gorgone/modules/centreon/mbi/libs/bi/ServiceAvailability.pm +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/ServiceAvailability.pm @@ -156,7 +156,7 @@ sub getHGMonthAvailability { $query .= " STRAIGHT_JOIN mod_bi_hostcategories hc ON (s.hc_name=hc.hc_name AND s.hc_id=hc.hc_id)"; $query .= " STRAIGHT_JOIN mod_bi_servicecategories sc ON (s.sc_id=sc.sc_id AND s.sc_name=sc.sc_name)"; $query .= " WHERE t.year = YEAR('".$start."') AND t.month = MONTH('".$start."') and t.hour=0"; - $query .= " GROUP BY s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id"; + $query .= " GROUP BY s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id, hc.id, hg.id, sc.id"; my $sth = $db->query({ query => $query }); my @data = (); @@ -194,9 +194,9 @@ sub getHGMonthAvailability_optimised { $query .= "STRAIGHT_JOIN mod_bi_hostcategories hc ON (s.hc_name=hc.hc_name AND s.hc_id=hc.hc_id) "; $query .= "STRAIGHT_JOIN mod_bi_servicecategories sc ON (s.sc_id=sc.sc_id AND s.sc_name=sc.sc_name)"; $query .= " WHERE YEAR(from_unixtime(time_id)) = YEAR('".$start."') AND MONTH(from_unixtime(time_id)) = MONTH('".$start."') and hour(from_unixtime(time_id)) = 0 "; - $query .= "GROUP BY s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id ) availability "; + $query .= "GROUP BY s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id, hc.id, sc.id, hg.id ) availability "; $query .= "LEFT JOIN ( SELECT s.hg_id,s.hc_id,s.sc_id,e.modbiliveservice_id, 
"; - $query .= "SUM(IF(state=1,1,0)) as warningEvents, SUM(IF(state=2,1,0)) as criticalEvents, "; + $query .= "SUM(IF(state=1,1,0)) as warningEvents, SUM(IF(state=2,1,0)) as criticalEvents, "; $query .= "SUM(IF(state=3,1,0)) as unknownEvents FROM mod_bi_servicestateevents e "; $query .= "STRAIGHT_JOIN mod_bi_services s ON (e.modbiservice_id = s.id) "; $query .= "STRAIGHT_JOIN mod_bi_hostgroups hg ON (s.hg_name=hg.hg_name AND s.hg_id=hg.hg_id) "; @@ -204,7 +204,7 @@ sub getHGMonthAvailability_optimised { $query .= "STRAIGHT_JOIN mod_bi_servicecategories sc ON (s.sc_id=sc.sc_id AND s.sc_name=sc.sc_name) "; $query .= "AND s.id = e.modbiservice_id AND start_time < UNIX_TIMESTAMP('".$end."') "; $query .= "AND end_time > UNIX_TIMESTAMP('".$start."') AND e.state in (1,2,3) "; - $query .= "GROUP BY s.hg_id, s.hc_id, s.sc_id, e.modbiliveservice_id ) events "; + $query .= "GROUP BY s.hg_id, s.hc_id, s.sc_id, e.modbiliveservice_id) events "; $query .= "ON availability.hg_id = events.hg_id AND availability.hc_id = events.hc_id "; $query .= "AND availability.sc_id = events.sc_id "; $query .= "AND availability.liveservice_id = events.modbiliveservice_id"; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/Metrics.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/Metrics.pm index 93f6b2df733..c6abff375b5 100644 --- a/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/Metrics.pm +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/Metrics.pm @@ -210,7 +210,7 @@ sub getFirstAndLastValues { $query .= " FROM data_bin as d, data_bin as d2, " . $self->{name_minmaxctime_tmp} . " as db"; $query .= " WHERE db.id_metric=d.id_metric AND db.min_val=d.ctime"; $query .= " AND db.id_metric=d2.id_metric AND db.max_val=d2.ctime"; - $query .= " GROUP BY db.id_metric"; + $query .= " GROUP BY db.id_metric, d.value, d2.value, d.id_metric"; my $sth = $db->query({ query => $query }); $self->addIndexTempTableMetricDayFirstLastValues(); $self->dropTempTableCtimeMinMaxValues(); diff --git a/gorgone/gorgone/modules/core/proxy/hooks.pm b/gorgone/gorgone/modules/core/proxy/hooks.pm index 1319abad40e..25b5c5dc2e4 100644 --- a/gorgone/gorgone/modules/core/proxy/hooks.pm +++ b/gorgone/gorgone/modules/core/proxy/hooks.pm @@ -1169,7 +1169,7 @@ sub prepare_remote_copy { sysopen(FH, $localsrc, O_RDONLY); binmode(FH); - my $buffer_size = (defined($config->{buffer_size})) ? $config->{buffer_size} : 500_000; + my $buffer_size = (defined($config->{buffer_size})) ? $config->{buffer_size} : 150_000; my $buffer; while (my $bytes = sysread(FH, $buffer, $buffer_size)) { my $action = JSON::XS->new->encode({ diff --git a/gorgone/gorgoned b/gorgone/gorgoned index fdb423af470..8006a14463b 100644 --- a/gorgone/gorgoned +++ b/gorgone/gorgoned @@ -46,6 +46,10 @@ gorgoned [options] Specify the path to the yaml configuration file (default: ''). +=item B<--vault> + +Specify the path to the vault json configuration file (default: '/var/lib/centreon/vault/vault.json'). + =item B<--help> Print a brief help message and exits. 
diff --git a/gorgone/packaging/centreon-gorgone-centreon-config.yaml b/gorgone/packaging/centreon-gorgone-centreon-config.yaml index b6485a84883..2e4ae90da5f 100644 --- a/gorgone/packaging/centreon-gorgone-centreon-config.yaml +++ b/gorgone/packaging/centreon-gorgone-centreon-config.yaml @@ -57,11 +57,11 @@ overrides: depends: - centreon-gorgone (= ${VERSION}-${RELEASE}${DIST}) replaces: - - centreon-gorgone (<< 24.10.0) + - centreon-gorgone (<< 24.11.0) deb: breaks: - - centreon-gorgone (<< 24.10.0) + - centreon-gorgone (<< 24.11.0) rpm: summary: Configure Centreon Gorgone for use with Centreon Web diff --git a/gorgone/packaging/centreon-gorgone.yaml b/gorgone/packaging/centreon-gorgone.yaml index 16e86d297b1..a94092299df 100644 --- a/gorgone/packaging/centreon-gorgone.yaml +++ b/gorgone/packaging/centreon-gorgone.yaml @@ -157,6 +157,7 @@ overrides: rpm: depends: - centreon-common + - centreon-perl-libs-common - bzip2 - perl-Libssh-Session >= 0.8 - perl-CryptX @@ -196,6 +197,7 @@ overrides: deb: depends: # those dependencies are taken from centreon-gorgone/packaging/debian/control - centreon-common + - centreon-perl-libs-common - libdatetime-perl - libtime-parsedate-perl - libtry-tiny-perl diff --git a/gorgone/tests/robot/resources/LogResearch.py b/gorgone/tests/robot/resources/LogResearch.py index 0fe4afbd9d4..fe272a870c0 100644 --- a/gorgone/tests/robot/resources/LogResearch.py +++ b/gorgone/tests/robot/resources/LogResearch.py @@ -129,7 +129,7 @@ def ctn_find_in_log(log: str, date, content, regex=False): def ctn_extract_date_from_log(line: str): - p = re.compile(r"(^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})") + p = re.compile(r"^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})") m = p.match(line) if m is None: return None diff --git a/gorgone/tests/robot/tests/centreon/legacycmd.robot b/gorgone/tests/robot/tests/centreon/legacycmd.robot index 96fd52b4863..1d423b3b9fa 100644 --- a/gorgone/tests/robot/tests/centreon/legacycmd.robot +++ b/gorgone/tests/robot/tests/centreon/legacycmd.robot @@ -40,6 +40,8 @@ Legacycmd Teardown Terminate Process pipeWatcher_${comm} Run rm -rf /var/cache/centreon/config Run rm -rf /etc/centreon/centreon_vmware.json + Run rm -rf /etc/centreon-engine/randomBigFile.cfg + Run rm -rf /etc/centreon-engine/engine-hosts.cfg Push Engine And vmware Configuration [Arguments] ${comm}= ${poller_id}=2 @@ -51,23 +53,30 @@ Push Engine And vmware Configuration Run sed -i -e 's/@COMMUNICATION_MODE@/${comm}/g' /var/cache/centreon/config/vmware/${poller_id}/centreon_vmware.json Run sed -i -e 's/@COMMUNICATION_MODE@/${comm}/g' /var/cache/centreon/config/broker/${poller_id}/broker.cfg Run sed -i -e 's/@COMMUNICATION_MODE@/${comm}/g' /var/cache/centreon/config/engine/${poller_id}/engine-hosts.cfg + Run dd if=/dev/urandom of=/var/cache/centreon/config/engine/${poller_id}/randomBigFile.cfg bs=200MB count=1 iflag=fullblock + ${MD5Start}= Run md5sum /var/cache/centreon/config/engine/${poller_id}/randomBigFile.cfg | cut -f 1 -d " " Run chown www-data:www-data /var/cache/centreon/config/*/${poller_id}/* Run chmod 644 /var/cache/centreon/config/*/${poller_id}/* - + # gorgone central should get these files, and send it to poller in /etc/centreon/, /etc/centreon-broker/, /etc/centreon-engine/ - ${log_query} Create List centreon_vmware.json + # we are checking that the poller has the last bit of centreon-engine before continuing. + ${log_query} Create List Copy to '/etc/centreon-engine//' finished successfully # SENDCFGFILE say to gorgone to push conf to poller for a poller id.
Run echo SENDCFGFILE:${poller_id} > /var/lib/centreon/centcore/random.cmd - ${log_status} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${comm}_gorgone_central_legacycmd/gorgoned.log content=${log_query} regex=0 timeout=20 + ${log_status} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${comm}_gorgone_poller${poller_id}_legacycmd/gorgoned.log content=${log_query} regex=0 timeout=40 Should Be True ${log_status} Didn't found the logs : ${log_status} Log To Console File should be set in /etc/centreon/ now + # check vmware conf file ${res}= Run cat /etc/centreon/centreon_vmware.json Should Be Equal As Strings ${res} {"communication mode": "${comm}"} data in /etc/centreon/centreon_vmware.json is not correct. # check the user/group and permission are right. as gorgone run as root in the tests and as centreon-gorgone in prod, this might be different from real life. ${vmware_stat}= Run stat -c "%a %U %G" /etc/centreon/centreon_vmware.json Should Be Equal As Strings ${vmware_stat} 644 centreon-gorgone centreon for vmware file + ${MD5Result}= Run md5sum /etc/centreon-engine/randomBigFile.cfg | cut -f 1 -d " " + Should Be Equal ${MD5Start} ${MD5Result} MD5 doesn't match, the big file might have been corrupted. + # check engine conf file # for now gorgone don't set user/group after it untar, it's only done when copying single files. # We can't check the user in the test as "www-data" user is "httpd" on rhel based system diff --git a/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/30-centreon.yaml b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/30-centreon.yaml new file mode 100644 index 00000000000..35c0259e1b6 --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/30-centreon.yaml @@ -0,0 +1,12 @@ +name: centreon.yaml +description: Configure Centreon Gorgone to work with Centreon Web. +centreon: + database: + db_configuration: + dsn: "mysql:host=localhost:port=3306;dbname=centreon" + username: "centreon" + password: "secret::hashicorp_vault::SecretPathArg::secretNameFromApiResponse" + db_realtime: + dsn: "mysql:host=localhost:port=3306;dbname=centreon_storage" + username: "centreon" + password: "password" diff --git a/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/31-centreon-api.yaml b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/31-centreon-api.yaml new file mode 100644 index 00000000000..2929e7829de --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/31-centreon-api.yaml @@ -0,0 +1,9 @@ +gorgone: + tpapi: + - name: centreonv2 + base_url: "http://127.0.0.1/centreon/api/latest/" + username: "centreon-gorgone" + password: "secret::hashicorp_vault::SecretPathArg::secretNameFromApiResponse" + - name: clapi + username: "centreon-gorgone" + password: "webapiPassword!"
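The two config.d examples above carry vault placeholders of the form secret::hashicorp_vault::<path>::<name>. When the configuration is loaded, values of that shape are expected to be swapped for the secret returned by centreon::common::centreonvault::get_secret, while malformed tokens stay plain strings; this is what the core.t unit test further down asserts by mocking get_secret to return 'VaultSentASecret'. A minimal sketch of that substitution, using a hypothetical resolve_secrets() helper and a stand-in vault object rather than the real gorgone::class::core loader:

use strict;
use warnings;

# Hypothetical resolver: walks a parsed config structure and replaces vault tokens in place.
# $vault is expected to expose get_secret(), like centreon::common::centreonvault.
sub resolve_secrets {
    my ($vault, $node) = @_;
    if (ref($node) eq 'HASH') {
        $node->{$_} = resolve_secrets($vault, $node->{$_}) for keys %$node;
    } elsif (ref($node) eq 'ARRAY') {
        $_ = resolve_secrets($vault, $_) for @$node;
    } elsif (defined($node) && $node =~ /^secret::hashicorp_vault::[^:]+::[^:]+$/) {
        return $vault->get_secret($node);
    }
    return $node;
}

# Stand-in vault object for the sketch (the unit test mocks the real class in a similar way).
my $vault = bless({}, 'FakeVault');
sub FakeVault::get_secret { return 'VaultSentASecret' }

my $config = {
    password  => 'secret::hashicorp_vault::SecretPathArg::secretNameFromApiResponse',
    badFormat => 'secret::hashicorp::thereIsOnlyOneColon',   # not the expected four-part form, left untouched
};
resolve_secrets($vault, $config);
print "$config->{password} / $config->{badFormat}\n";   # VaultSentASecret / secret::hashicorp::thereIsOnlyOneColon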
diff --git a/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/39-action.yaml b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/39-action.yaml new file mode 100644 index 00000000000..91cef328e94 --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/39-action.yaml @@ -0,0 +1,8 @@ +gorgone: + modules: + - name: action + package: "gorgone::modules::core::action::hooks" + enable: true + command_timeout: 30 + whitelist_cmds: secret::hashicorp_vault::SecretPathArg::secretNameFromApiResponse + allowed_cmds: !include whitelist.conf.d/*.yaml diff --git a/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/40-gorgoned.yaml b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/40-gorgoned.yaml new file mode 100644 index 00000000000..bcb66cbe241 --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/40-gorgoned.yaml @@ -0,0 +1,97 @@ +gorgone: + gorgonecore: + privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem" + pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem" + id: 1 + + modules: + - name: httpserver + package: "gorgone::modules::core::httpserver::hooks" + enable: true + address: "0.0.0.0" + port: "8085" + ssl: true + ssl_cert_file: /var/lib/centreon-gorgone/.keys/server_api_cert.pem + ssl_key_file: /var/lib/centreon-gorgone/.keys/server_api_key.pem + auth: + enabled: false + user: web-user-gorgone-api + password: password + allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 + + - name: action + package: "gorgone::modules::core::action::hooks" + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: cron + package: "gorgone::modules::core::cron::hooks" + enable: true + cron: !include cron.d/*.yaml + + - name: register + package: "gorgone::modules::core::register::hooks" + enable: true + + - name: nodes + package: "gorgone::modules::centreon::nodes::hooks" + enable: true + + - name: proxy + package: "gorgone::modules::core::proxy::hooks" + enable: true + buffer_size: 10 + pool: 1 + httpserver: + enable: true + token: "^$*ù^é&àérç(é/*-+$$z@ze%r¨£µ~zz" + address: "0.0.0.0" + port: 8099 + + + - name: legacycmd + package: "gorgone::modules::centreon::legacycmd::hooks" + enable: true + buffer_size: 100 + cmd_dir: "/var/lib/centreon/centcore/" + cmd_file: "/var/lib/centreon/centcore.cmd" + cache_dir: "/var/cache/centreon/" + cache_dir_trap: "/etc/snmp/centreon_traps" + remote_dir: "/var/cache/centreon//config/remote-data/" + + - name: engine + package: "gorgone::modules::centreon::engine::hooks" + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" + + - name: statistics + package: 
"gorgone::modules::centreon::statistics::hooks" + enable: true + broker_cache_dir: "/var/cache/centreon//broker-stats/" + cron: + - id: broker_stats + timespec: "*/5 * * * *" + action: BROKERSTATS + parameters: + timeout: 10 + - id: engine_stats + timespec: "*/5 * * * *" + action: ENGINESTATS + parameters: + timeout: 10 diff --git a/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/41-autodiscovery.yaml b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/41-autodiscovery.yaml new file mode 100644 index 00000000000..56bae5eb0fb --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/41-autodiscovery.yaml @@ -0,0 +1,5 @@ +gorgone: + modules: + - name: autodiscovery + package: "gorgone::modules::centreon::autodiscovery::hooks" + enable: true diff --git a/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/50-centreon-audit.yaml b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/50-centreon-audit.yaml new file mode 100644 index 00000000000..ae0f8c96c62 --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/50-centreon-audit.yaml @@ -0,0 +1,5 @@ +gorgone: + modules: + - name: audit + package: "gorgone::modules::centreon::audit::hooks" + enable: true diff --git a/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/cron.d/41-service-discovery.yaml b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/cron.d/41-service-discovery.yaml new file mode 100644 index 00000000000..b2796c7d284 --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/cron.d/41-service-discovery.yaml @@ -0,0 +1,3 @@ +- id: service_discovery + timespec: "30 22 * * *" + action: LAUNCHSERVICEDISCOVERY diff --git a/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/whitelist.conf.d/centreon.yaml b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/whitelist.conf.d/centreon.yaml new file mode 100644 index 00000000000..e4d0ce5e784 --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.d/whitelist.conf.d/centreon.yaml @@ -0,0 +1,21 @@ +# Configuration brought by Centreon Gorgone package. +# SHOULD NOT BE EDITED! CREATE YOUR OWN FILE IN WHITELIST.CONF.D DIRECTORY! 
+- ^sudo\s+(/bin/|/usr/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ +- ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ +- ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/+centengine\.cfg\s*$ +- ^cat\s+/var/lib/centreon-engine/+[a-zA-Z0-9\-]+-stats\.json\s*$ +- ^/usr/lib/centreon/plugins/.*$ +- ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ +- ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ +- ^centreon +- ^mkdir +- ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host +- ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ +- ^/usr/bin/php (-q )?/usr/share/centreon/cron/[\w,\s.-]+ >> /var/log/centreon-gorgone/[\w,\s.-]+\s+2>&1$ +- ^/usr/bin/php -q /usr/share/centreon/www/modules/centreon-bi-server/tools/purgeArchivesFiles\.php >> /var/log/centreon-gorgone/centreon-bi-archive-retention\.log 2>&1$ +- ^/usr/share/centreon/cron/eventReportBuilder --config=/etc/centreon/conf\.pm >> /var/log/centreon-gorgone/eventReportBuilder\.log 2>&1$ +- ^/usr/share/centreon/cron/dashboardBuilder --config=/etc/centreon/conf\.pm >> /var/log/centreon-gorgone/dashboardBuilder\.log 2>&1$ +- ^/usr/share/centreon/www/modules/centreon-dsm/+cron/centreon_dsm_purge\.pl --config=\"/etc/centreon/conf.pm\" --severity=\S+ >> /var/log/centreon-gorgone/centreon_dsm_purge\.log 2>&1\s*$ +- ^/usr/share/centreon-bi-backup/centreon-bi-backup-web\.sh >> /var/log/centreon-gorgone/centreon-bi-backup-web\.log 2>&1$ +- ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/+cron/centreon_autodisco.pl --config='/etc/centreon/conf.pm' --config-extra='/etc/centreon/centreon_autodisco.pm' --severity=\S+ >> /var/log/centreon-gorgone/centreon_service_discovery.log 2>&1$ +- secret::hashicorp_vault::SecretPathArg::secretNameFromApiResponse \ No newline at end of file diff --git a/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.yaml b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.yaml new file mode 100644 index 00000000000..d843c9b7d46 --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/centreon-gorgone/config.yaml @@ -0,0 +1,3 @@ +name: config.yaml +description: Configuration brought by Centreon Gorgone package. SHOULD NOT BE EDITED! USE CONFIG.D DIRECTORY! 
+configuration: !include config.d/*.yaml \ No newline at end of file diff --git a/gorgone/tests/unit/class/config_examples/centreon-gorgone/expectedConfiguration.pl b/gorgone/tests/unit/class/config_examples/centreon-gorgone/expectedConfiguration.pl new file mode 100644 index 00000000000..819c3556a1e --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/centreon-gorgone/expectedConfiguration.pl @@ -0,0 +1,195 @@ +return { + 'configuration' => { + 'centreon' => { + 'database' => { + 'db_configuration' => { + 'dsn' => 'mysql:host=localhost:port=3306;dbname=centreon', + 'password' => 'VaultSentASecret', + 'username' => 'centreon' + }, + 'db_realtime' => { + 'dsn' => 'mysql:host=localhost:port=3306;dbname=centreon_storage', + 'password' => 'password', + 'username' => 'centreon' + } + } + }, + 'gorgone' => { + 'tpapi' => [ + { + 'password' => 'VaultSentASecret', + 'base_url' => 'http://127.0.0.1/centreon/api/latest/', + 'name' => 'centreonv2', + 'username' => 'centreon-gorgone' + }, + { + 'username' => 'centreon-gorgone', + 'name' => 'clapi', + 'password' => 'webapiPassword!' + } + ], + 'gorgonecore' => { + 'id' => 1, + 'privkey' => '/var/lib/centreon-gorgone/.keys/rsakey.priv.pem', + 'pubkey' => '/var/lib/centreon-gorgone/.keys/rsakey.pub.pem' + }, + 'modules' => [ + { + 'package' => 'gorgone::modules::core::action::hooks', + 'whitelist_cmds' => 'VaultSentASecret', + 'command_timeout' => 30, + 'allowed_cmds' => [ + '^sudo\\s+(/bin/|/usr/bin/)?systemctl\\s+(reload|restart)\\s+(centengine|centreontrapd|cbd)\\s*$', + '^(sudo\\s+)?(/usr/bin/)?service\\s+(centengine|centreontrapd|cbd|cbd-sql)\\s+(reload|restart)\\s*$', + '^/usr/sbin/centenginestats\\s+-c\\s+/etc/centreon-engine/+centengine\\.cfg\\s*$', + '^cat\\s+/var/lib/centreon-engine/+[a-zA-Z0-9\\-]+-stats\\.json\\s*$', + '^/usr/lib/centreon/plugins/.*$', + '^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\\.log 2>&1\\s*$', + '^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\\.php >> /var/log/centreon-helios\\.log 2>&1\\s*$', + '^centreon', + '^mkdir', + '^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host', + '^/usr/share/centreon/bin/centreon -u \\"centreon-gorgone\\" -p \\S+ -w -o CentreonWorker -a processQueue$', + '^/usr/bin/php (-q )?/usr/share/centreon/cron/[\\w,\\s.-]+ >> /var/log/centreon-gorgone/[\\w,\\s.-]+\\s+2>&1$', + '^/usr/bin/php -q /usr/share/centreon/www/modules/centreon-bi-server/tools/purgeArchivesFiles\\.php >> /var/log/centreon-gorgone/centreon-bi-archive-retention\\.log 2>&1$', + '^/usr/share/centreon/cron/eventReportBuilder --config=/etc/centreon/conf\\.pm >> /var/log/centreon-gorgone/eventReportBuilder\\.log 2>&1$', + '^/usr/share/centreon/cron/dashboardBuilder --config=/etc/centreon/conf\\.pm >> /var/log/centreon-gorgone/dashboardBuilder\\.log 2>&1$', + '^/usr/share/centreon/www/modules/centreon-dsm/+cron/centreon_dsm_purge\\.pl --config=\\"/etc/centreon/conf.pm\\" --severity=\\S+ >> /var/log/centreon-gorgone/centreon_dsm_purge\\.log 2>&1\\s*$', + '^/usr/share/centreon-bi-backup/centreon-bi-backup-web\\.sh >> /var/log/centreon-gorgone/centreon-bi-backup-web\\.log 2>&1$', + '^/usr/share/centreon/www/modules/centreon-autodiscovery-server/+cron/centreon_autodisco.pl --config=\'/etc/centreon/conf.pm\' --config-extra=\'/etc/centreon/centreon_autodisco.pm\' --severity=\\S+ >> /var/log/centreon-gorgone/centreon_service_discovery.log 2>&1$', + 'VaultSentASecret' + ], + 'name' => 'action', + 'enable' => 'true' + 
}, + { + 'enable' => 'true', + 'ssl_cert_file' => '/var/lib/centreon-gorgone/.keys/server_api_cert.pem', + 'ssl' => 'true', + 'auth' => { + 'user' => 'web-user-gorgone-api', + 'enabled' => 'false', + 'password' => 'password' + }, + 'name' => 'httpserver', + 'address' => '0.0.0.0', + 'allowed_hosts' => { + 'enabled' => 'true', + 'subnets' => [ + '127.0.0.1/32' + ] + }, + 'port' => '8085', + 'package' => 'gorgone::modules::core::httpserver::hooks', + 'ssl_key_file' => '/var/lib/centreon-gorgone/.keys/server_api_key.pem' + }, + { + 'whitelist_cmds' => 'true', + 'enable' => 'true', + 'package' => 'gorgone::modules::core::action::hooks', + 'allowed_cmds' => [ + '^sudo\\s+(/bin/)?systemctl\\s+(reload|restart)\\s+(centengine|centreontrapd|cbd)\\s*$', + '^(sudo\\s+)?(/usr/bin/)?service\\s+(centengine|centreontrapd|cbd|cbd-sql)\\s+(reload|restart)\\s*$', + '^/usr/sbin/centenginestats\\s+-c\\s+/etc/centreon-engine/centengine\\.cfg\\s*$', + '^cat\\s+/var/lib/centreon-engine/[a-zA-Z0-9\\-]+-stats\\.json\\s*$', + '^/usr/lib/centreon/plugins/.*$', + '^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\\.log 2>&1\\s*$', + '^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\\.php >> /var/log/centreon-helios\\.log 2>&1\\s*$', + '^centreon', + '^mkdir', + '^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host', + '^/usr/share/centreon/bin/centreon -u \\"centreon-gorgone\\" -p \\S+ -w -o CentreonWorker -a processQueue$' + ], + 'name' => 'action', + 'command_timeout' => 30 + }, + { + 'enable' => 'true', + 'cron' => [ + { + 'action' => 'LAUNCHSERVICEDISCOVERY', + 'timespec' => '30 22 * * *', + 'id' => 'service_discovery' + } + ], + 'name' => 'cron', + 'package' => 'gorgone::modules::core::cron::hooks' + }, + { + 'package' => 'gorgone::modules::core::register::hooks', + 'name' => 'register', + 'enable' => 'true' + }, + { + 'enable' => 'true', + 'package' => 'gorgone::modules::centreon::nodes::hooks', + 'name' => 'nodes' + }, + { + 'enable' => 'true', + 'httpserver' => { + 'enable' => 'true', + 'port' => 8099, + 'token' => "^\$*\x{f9}^\x{e9}&\x{e0}\x{e9}r\x{e7}(\x{e9}/*-+\$\$z\@ze%r\x{a8}\x{a3}\x{b5}~zz", + 'address' => '0.0.0.0' + }, + 'package' => 'gorgone::modules::core::proxy::hooks', + 'name' => 'proxy', + 'pool' => 1, + 'buffer_size' => 10 + }, + { + 'cmd_dir' => '/var/lib/centreon/centcore/', + 'buffer_size' => 100, + 'cache_dir' => '/var/cache/centreon/', + 'enable' => 'true', + 'name' => 'legacycmd', + 'remote_dir' => '/var/cache/centreon//config/remote-data/', + 'cmd_file' => '/var/lib/centreon/centcore.cmd', + 'cache_dir_trap' => '/etc/snmp/centreon_traps', + 'package' => 'gorgone::modules::centreon::legacycmd::hooks' + }, + { + 'enable' => 'true', + 'command_file' => '/var/lib/centreon-engine/rw/centengine.cmd', + 'name' => 'engine', + 'package' => 'gorgone::modules::centreon::engine::hooks' + }, + { + 'name' => 'statistics', + 'package' => 'gorgone::modules::centreon::statistics::hooks', + 'enable' => 'true', + 'cron' => [ + { + 'action' => 'BROKERSTATS', + 'timespec' => '*/5 * * * *', + 'id' => 'broker_stats', + 'parameters' => { + 'timeout' => 10 + } + }, + { + 'action' => 'ENGINESTATS', + 'id' => 'engine_stats', + 'timespec' => '*/5 * * * *', + 'parameters' => { + 'timeout' => 10 + } + } + ], + 'broker_cache_dir' => '/var/cache/centreon//broker-stats/' + }, + { + 'enable' => 'true', + 'package' => 'gorgone::modules::centreon::autodiscovery::hooks', + 'name' => 'autodiscovery' + }, + { + 'package' 
=> 'gorgone::modules::centreon::audit::hooks', + 'name' => 'audit', + 'enable' => 'true' + } + ] + } + } +}; diff --git a/gorgone/tests/unit/class/config_examples/include_other_files/first_module.yaml b/gorgone/tests/unit/class/config_examples/include_other_files/first_module.yaml new file mode 100644 index 00000000000..767a5f36e7d --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/include_other_files/first_module.yaml @@ -0,0 +1,3 @@ +gorgone: + gorgonecore: + global_variable: "value" \ No newline at end of file diff --git a/gorgone/tests/unit/class/config_examples/include_other_files/main.yaml b/gorgone/tests/unit/class/config_examples/include_other_files/main.yaml new file mode 100644 index 00000000000..fc5b54b1b49 --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/include_other_files/main.yaml @@ -0,0 +1,4 @@ +--- +name: config.yaml +description: simple configuration example with a single !include of another file. +configuration: !include ./first_module.yaml diff --git a/gorgone/tests/unit/class/config_examples/simple_no_recursion/norecursion.yaml b/gorgone/tests/unit/class/config_examples/simple_no_recursion/norecursion.yaml new file mode 100644 index 00000000000..c903827c7b1 --- /dev/null +++ b/gorgone/tests/unit/class/config_examples/simple_no_recursion/norecursion.yaml @@ -0,0 +1,15 @@ +--- +name: config.yaml +description: simple configuration example without any other file included. +configuration: + gorgone: + key1: a string with all char &é"'(-è_çà)=!:;,*$^ù%µ£¨/.\e?/§ + key2: + - array1 + - array2 + - array3 + TrueVal: true + FalseVal: false + vault: + badFormat: "secret::hashicorp::thereIsOnlyOneColon" + correctFormat: "secret::hashicorp_vault::SecretPathArg::secretNameFromApiResponse" diff --git a/gorgone/tests/unit/class/core.t b/gorgone/tests/unit/class/core.t new file mode 100644 index 00000000000..3fef51c4de7 --- /dev/null +++ b/gorgone/tests/unit/class/core.t @@ -0,0 +1,124 @@ +#!/usr/bin/perl + +# we can't use mock() on a package that is not loaded, so we need to create the class we want to mock first. +# We could have set centreon-common as a dependency for the test, but it's not the package we are testing right now, so let's mock it. +BEGIN { + package centreon::common::centreonvault; + sub get_secret {}; + sub new {}; + $INC{ (__PACKAGE__ =~ s{::}{/}rg) . ".pm" } = 1; +} + +# same here, gorgone uses a logger, but we don't want to test it right now, so we mock it. +BEGIN { + package centreon::common::logger; + sub severity {}; + sub new {}; + $INC{ (__PACKAGE__ =~ s{::}{/}rg) . ".pm" } = 1; # this allows the module to be available to other modules anywhere in the code. +} + +package main; + +use strict; +use warnings; +use Test2::V0; +use Test2::Plugin::NoWarnings echo => 1; +use Test2::Tools::Compare qw{is like match}; +use Data::Dumper; +use FindBin; +use lib "$FindBin::Bin/../../../"; +use gorgone::class::script; +use gorgone::class::core; + +sub create_data_set { + my $set = {}; + # as we are in a unit test, we can't be sure of our current path, but the tests require that we start from the same directory as the script.
+ chdir($FindBin::Bin); + $set->{logger} = mock 'centreon::common::logger'; # this is from Test2::Tools::Mock, included by Test2::V0 + $set->{vault} = mock 'centreon::common::centreonvault'; + + $set->{vault}->override('get_secret' => sub { + if ($_[1] eq 'secret::hashicorp_vault::SecretPathArg::secretNameFromApiResponse') { + return 'VaultSentASecret'; + } + return $_[1]; + }, 'new' => sub { + return bless({}, 'centreon::common::centreonvault'); + }); + + return $set; +} + +sub test_configuration_read { + my $set = shift; + # let's make a simple object and exercise the yaml configuration loading against a table of test cases. + my $gorgone = gorgone::class::core->new(); + $gorgone->{logger} = $set->{logger}; + $gorgone->{vault} = centreon::common::centreonvault->new(); + + my $tests_cases = [ + { + file => './config_examples/simple_no_recursion/norecursion.yaml', + expected => { configuration => { gorgone => { + key1 => 'a string with all char &é"\'(-è_çà)=!:;,*$^ù%µ£¨/.\e?/§', + key2 => ["array1", "array2", "array3"], + TrueVal => 'true', + FalseVal => 'false', + vault => { + badFormat => 'secret::hashicorp::thereIsOnlyOneColon', + correctFormat => 'VaultSentASecret'}, + + } } }, + msg => 'simple configuration without recursion' + }, + { + file => './config_examples/include_other_files/main.yaml', + expected => { configuration => { gorgone => { + gorgonecore => { global_variable => "value" } + } } }, + msg => 'simple configuration with !include.' + }, + { # this is a real-world example with all the parameters I could think of. The default configuration doesn't have all of them. + # this is more an integration test than a unit test, but it allows testing the whole configuration. + file => './config_examples/centreon-gorgone/config.yaml', + expected => require("./config_examples/centreon-gorgone/expectedConfiguration.pl"), + msg => 'complete configuration with multiple includes and many files.'
+ } + ]; + + for my $test (@$tests_cases) { + my $config = $gorgone->yaml_load_config( + file => $test->{file}, + filter => '!($ariane eq "configuration##" || $ariane =~ /^configuration##(?:gorgone|centreon)##/)' + ); + is($config, $test->{expected}, $test->{msg}); + } + +} + +sub test_yaml_get_include { + my $set = shift; + my $gorgone = gorgone::class::core->new(); + $gorgone->{logger} = $set->{logger}; + #$gorgone->{vault} = centreon::common::centreonvault->new(); + my @result = $gorgone->yaml_get_include('include' => '*.yaml', + 'current_dir' => './config_examples/include_other_files', + 'filter' => '!($ariane eq "configuration##" || $ariane =~ /^configuration##(?:gorgone|centreon)##/)'); + my @expected = ("./config_examples/include_other_files/./first_module.yaml", "./config_examples/include_other_files/./main.yaml"); + is(\@result, \@expected, 'found both files of the directory'); + + my @emptyResult = $gorgone->yaml_get_include('include' => '/notAFile.yaml', + 'current_dir' => './config_examples/include_other_files', + 'filter' => '!($ariane eq "configuration##" || $ariane =~ /^configuration##(?:gorgone|centreon)##/)'); + is(scalar(@emptyResult), 0, 'no file found return empty'); +} +sub main { + my $set = create_data_set(); + test_yaml_get_include($set); + test_configuration_read($set); + + print "\n"; + done_testing; +} +&main; + diff --git a/packaging/centreon-broker-cbd-debuginfo.yaml b/packaging/centreon-collect/centreon-broker-cbd-debuginfo.yaml similarity index 91% rename from packaging/centreon-broker-cbd-debuginfo.yaml rename to packaging/centreon-collect/centreon-broker-cbd-debuginfo.yaml index 7a9582db3c4..8d7d94519fc 100644 --- a/packaging/centreon-broker-cbd-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-broker-cbd-debuginfo.yaml @@ -15,10 +15,10 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/broker/cbd.debug" + - src: "../../build/broker/cbd.debug" dst: "/usr/sbin/" - - src: "../build/broker/watchdog/cbwd.debug" + - src: "../../build/broker/watchdog/cbwd.debug" dst: "/usr/lib/debug/usr/sbin/" overrides: diff --git a/packaging/centreon-broker-cbd.yaml b/packaging/centreon-collect/centreon-broker-cbd.yaml similarity index 84% rename from packaging/centreon-broker-cbd.yaml rename to packaging/centreon-collect/centreon-broker-cbd.yaml index 31268174507..0e12d15002b 100644 --- a/packaging/centreon-broker-cbd.yaml +++ b/packaging/centreon-collect/centreon-broker-cbd.yaml @@ -16,7 +16,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../broker/config/central-broker.json" + - src: "../../broker/config/central-broker.json" dst: "/etc/centreon-broker/central-broker.json" type: config|noreplace file_info: @@ -24,7 +24,7 @@ contents: owner: centreon-broker group: centreon-broker - - src: "../broker/config/central-rrd.json" + - src: "../../broker/config/central-rrd.json" dst: "/etc/centreon-broker/central-rrd.json" type: config|noreplace file_info: @@ -32,7 +32,7 @@ contents: owner: centreon-broker group: centreon-broker - - src: "../broker/script/watchdog.json" + - src: "../../broker/script/watchdog.json" dst: "/etc/centreon-broker/watchdog.json" type: config|noreplace file_info: @@ -40,18 +40,18 @@ contents: owner: centreon-broker group: centreon-broker - - src: "../build/broker/cbd" + - src: "../../build/broker/cbd" dst: "/usr/sbin/cbd" - - src: "../build/broker/watchdog/cbwd" + - src: "../../build/broker/watchdog/cbwd" dst: "/usr/sbin/cbwd" - - src: "../broker/script/cbd.service" + - src: 
"../../broker/script/cbd.service" dst: "/usr/lib/systemd/system/cbd.service" file_info: mode: 0644 packager: rpm - - src: "../broker/script/cbd.service" + - src: "../../broker/script/cbd.service" dst: "/lib/systemd/system/cbd.service" file_info: mode: 0644 diff --git a/packaging/centreon-broker-cbmod-debuginfo.yaml b/packaging/centreon-collect/centreon-broker-cbmod-debuginfo.yaml similarity index 95% rename from packaging/centreon-broker-cbmod-debuginfo.yaml rename to packaging/centreon-collect/centreon-broker-cbmod-debuginfo.yaml index b62f9e518b3..8b9e7d79e81 100644 --- a/packaging/centreon-broker-cbmod-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-broker-cbmod-debuginfo.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/broker/neb/cbmod.so.debug" + - src: "../../build/broker/neb/cbmod.so.debug" dst: "/usr/lib/debug/usr/lib64/nagios/cbmod.so.debug" file_info: mode: 0644 diff --git a/packaging/centreon-broker-cbmod.yaml b/packaging/centreon-collect/centreon-broker-cbmod.yaml similarity index 91% rename from packaging/centreon-broker-cbmod.yaml rename to packaging/centreon-collect/centreon-broker-cbmod.yaml index 76f76ef26e1..bb13ef3922b 100644 --- a/packaging/centreon-broker-cbmod.yaml +++ b/packaging/centreon-collect/centreon-broker-cbmod.yaml @@ -16,7 +16,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../broker/config/central-module.json" + - src: "../../broker/config/central-module.json" dst: "/etc/centreon-broker/central-module.json" type: config|noreplace file_info: @@ -24,7 +24,7 @@ contents: owner: centreon-broker group: centreon-broker - - src: "../build/broker/neb/cbmod.so" + - src: "../../build/broker/neb/cbmod.so" dst: "/usr/lib64/nagios/cbmod.so" overrides: diff --git a/packaging/centreon-broker-core-debuginfo.yaml b/packaging/centreon-collect/centreon-broker-core-debuginfo.yaml similarity index 93% rename from packaging/centreon-broker-core-debuginfo.yaml rename to packaging/centreon-collect/centreon-broker-core-debuginfo.yaml index be46a66775e..a6448a1f9a4 100644 --- a/packaging/centreon-broker-core-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-broker-core-debuginfo.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/broker/{neb,stats,bam,storage,unified_sql,tcp,grpc,tls,rrd,lua,sql}/*.so.debug" + - src: "../../build/broker/{neb,stats,bam,storage,unified_sql,tcp,grpc,tls,rrd,lua,sql}/*.so.debug" dst: "/usr/lib/debug/usr/share/centreon/lib/centreon-broker/" file_info: mode: 0644 diff --git a/packaging/centreon-broker-core.yaml b/packaging/centreon-collect/centreon-broker-core.yaml similarity index 93% rename from packaging/centreon-broker-core.yaml rename to packaging/centreon-collect/centreon-broker-core.yaml index 1ebb52de51a..8dbed6bef53 100644 --- a/packaging/centreon-broker-core.yaml +++ b/packaging/centreon-collect/centreon-broker-core.yaml @@ -15,10 +15,10 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/broker/{neb,stats,bam,storage,unified_sql,tcp,grpc,tls,rrd,lua,sql}/*.so" + - src: "../../build/broker/{neb,stats,bam,storage,unified_sql,tcp,grpc,tls,rrd,lua,sql}/*.so" dst: "/usr/share/centreon/lib/centreon-broker/" - - src: "../broker/script/centreon-broker.logrotate" + - src: "../../broker/script/centreon-broker.logrotate" dst: "/etc/logrotate.d/cbd" type: config|noreplace diff --git a/packaging/centreon-broker-graphite-debuginfo.yaml 
b/packaging/centreon-collect/centreon-broker-graphite-debuginfo.yaml similarity index 94% rename from packaging/centreon-broker-graphite-debuginfo.yaml rename to packaging/centreon-collect/centreon-broker-graphite-debuginfo.yaml index f7a2e20b9ab..c89d141defd 100644 --- a/packaging/centreon-broker-graphite-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-broker-graphite-debuginfo.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/broker/graphite/70-graphite.so.debug" + - src: "../../build/broker/graphite/70-graphite.so.debug" dst: "/usr/share/centreon/lib/centreon-broker/" overrides: diff --git a/packaging/centreon-broker-graphite.yaml b/packaging/centreon-collect/centreon-broker-graphite.yaml similarity index 94% rename from packaging/centreon-broker-graphite.yaml rename to packaging/centreon-collect/centreon-broker-graphite.yaml index fdfedb7acb6..9ca0f3851eb 100644 --- a/packaging/centreon-broker-graphite.yaml +++ b/packaging/centreon-collect/centreon-broker-graphite.yaml @@ -17,7 +17,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/broker/graphite/70-graphite.so" + - src: "../../build/broker/graphite/70-graphite.so" dst: "/usr/share/centreon/lib/centreon-broker/" overrides: diff --git a/packaging/centreon-broker-influxdb-debuginfo.yaml b/packaging/centreon-collect/centreon-broker-influxdb-debuginfo.yaml similarity index 94% rename from packaging/centreon-broker-influxdb-debuginfo.yaml rename to packaging/centreon-collect/centreon-broker-influxdb-debuginfo.yaml index 16302843f39..9a5d71ed977 100644 --- a/packaging/centreon-broker-influxdb-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-broker-influxdb-debuginfo.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/broker/influxdb/70-influxdb.so.debug" + - src: "../../build/broker/influxdb/70-influxdb.so.debug" dst: "/usr/lib/debug/usr/share/centreon/lib/centreon-broker/" overrides: diff --git a/packaging/centreon-broker-influxdb.yaml b/packaging/centreon-collect/centreon-broker-influxdb.yaml similarity index 94% rename from packaging/centreon-broker-influxdb.yaml rename to packaging/centreon-collect/centreon-broker-influxdb.yaml index f50e1c4e00c..1e154b4fa95 100644 --- a/packaging/centreon-broker-influxdb.yaml +++ b/packaging/centreon-collect/centreon-broker-influxdb.yaml @@ -17,7 +17,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/broker/influxdb/70-influxdb.so" + - src: "../../build/broker/influxdb/70-influxdb.so" dst: "/usr/share/centreon/lib/centreon-broker/" overrides: diff --git a/packaging/centreon-broker-selinux.yaml b/packaging/centreon-collect/centreon-broker-selinux.yaml similarity index 93% rename from packaging/centreon-broker-selinux.yaml rename to packaging/centreon-collect/centreon-broker-selinux.yaml index 4f9cd5c2812..c318d0d6864 100644 --- a/packaging/centreon-broker-selinux.yaml +++ b/packaging/centreon-collect/centreon-broker-selinux.yaml @@ -24,7 +24,7 @@ provides: - centreon-broker-selinux-debuginfo contents: - - src: "../selinux/centreon-broker/centreon-broker.pp" + - src: "../../selinux/centreon-broker/centreon-broker.pp" dst: "/usr/share/selinux/packages/centreon/centreon-broker.pp" file_info: mode: 0655 diff --git a/packaging/centreon-broker-victoria-metrics-debuginfo.yaml b/packaging/centreon-collect/centreon-broker-victoria-metrics-debuginfo.yaml similarity index 93% rename from 
packaging/centreon-broker-victoria-metrics-debuginfo.yaml rename to packaging/centreon-collect/centreon-broker-victoria-metrics-debuginfo.yaml index 3e3b79e60bf..497f66c9ada 100644 --- a/packaging/centreon-broker-victoria-metrics-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-broker-victoria-metrics-debuginfo.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/broker/victoria_metrics/70-victoria_metrics.so.debug" + - src: "../../build/broker/victoria_metrics/70-victoria_metrics.so.debug" dst: "/usr/lib/debug/usr/share/centreon/lib/centreon-broker/" overrides: diff --git a/packaging/centreon-broker-victoria-metrics.yaml b/packaging/centreon-collect/centreon-broker-victoria-metrics.yaml similarity index 92% rename from packaging/centreon-broker-victoria-metrics.yaml rename to packaging/centreon-collect/centreon-broker-victoria-metrics.yaml index 3679b5506b6..f4a3c9e18ef 100644 --- a/packaging/centreon-broker-victoria-metrics.yaml +++ b/packaging/centreon-collect/centreon-broker-victoria-metrics.yaml @@ -17,7 +17,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/broker/victoria_metrics/70-victoria_metrics.so" + - src: "../../build/broker/victoria_metrics/70-victoria_metrics.so" dst: "/usr/share/centreon/lib/centreon-broker/" overrides: diff --git a/packaging/centreon-broker.yaml b/packaging/centreon-collect/centreon-broker.yaml similarity index 100% rename from packaging/centreon-broker.yaml rename to packaging/centreon-collect/centreon-broker.yaml diff --git a/packaging/centreon-clib-debuginfo.yaml b/packaging/centreon-collect/centreon-clib-debuginfo.yaml similarity index 89% rename from packaging/centreon-clib-debuginfo.yaml rename to packaging/centreon-collect/centreon-clib-debuginfo.yaml index 9b170e8a9eb..709dae5116e 100644 --- a/packaging/centreon-clib-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-clib-debuginfo.yaml @@ -15,11 +15,11 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/clib/libcentreon_clib.so.debug" + - src: "../../build/clib/libcentreon_clib.so.debug" dst: "/usr/lib/debug/usr/lib64/" packager: rpm - - src: "../build/clib/libcentreon_clib.so.debug" + - src: "../../build/clib/libcentreon_clib.so.debug" dst: "/usr/lib/debug/usr/lib/" packager: deb diff --git a/packaging/centreon-clib.yaml b/packaging/centreon-collect/centreon-clib.yaml similarity index 90% rename from packaging/centreon-clib.yaml rename to packaging/centreon-collect/centreon-clib.yaml index 035dfbff1f7..26c5c5ab42c 100644 --- a/packaging/centreon-clib.yaml +++ b/packaging/centreon-collect/centreon-clib.yaml @@ -16,11 +16,11 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/clib/libcentreon_clib.so" + - src: "../../build/clib/libcentreon_clib.so" dst: "/usr/lib64/" packager: rpm - - src: "../build/clib/libcentreon_clib.so" + - src: "../../build/clib/libcentreon_clib.so" dst: "/usr/lib/" packager: deb diff --git a/packaging/centreon-collect-client-debuginfo.yaml b/packaging/centreon-collect/centreon-collect-client-debuginfo.yaml similarity index 96% rename from packaging/centreon-collect-client-debuginfo.yaml rename to packaging/centreon-collect/centreon-collect-client-debuginfo.yaml index d7a03b34bb8..e89a3f5b535 100644 --- a/packaging/centreon-collect-client-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-collect-client-debuginfo.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: 
"Apache-2.0" contents: - - src: "../build/ccc/ccc.debug" + - src: "../../build/ccc/ccc.debug" dst: "/usr/lib/debug/usr/bin/ccc.debug" file_info: mode: 0644 diff --git a/packaging/centreon-collect-client.yaml b/packaging/centreon-collect/centreon-collect-client.yaml similarity index 96% rename from packaging/centreon-collect-client.yaml rename to packaging/centreon-collect/centreon-collect-client.yaml index 4528d46cac1..d96d138ae18 100644 --- a/packaging/centreon-collect-client.yaml +++ b/packaging/centreon-collect/centreon-collect-client.yaml @@ -16,7 +16,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/ccc/ccc" + - src: "../../build/ccc/ccc" dst: "/usr/bin/ccc" overrides: diff --git a/packaging/centreon-collect.yaml b/packaging/centreon-collect/centreon-collect.yaml similarity index 96% rename from packaging/centreon-collect.yaml rename to packaging/centreon-collect/centreon-collect.yaml index 264f42f8640..f1f925d8655 100644 --- a/packaging/centreon-collect.yaml +++ b/packaging/centreon-collect/centreon-collect.yaml @@ -30,9 +30,6 @@ contents: owner: centreon-engine group: centreon-engine - - dst: "/var/log/centreon-engine/retention.dat" - type: ghost - - src: "files/empty_file" dst: "/var/log/centreon-engine/status.dat" file_info: diff --git a/packaging/centreon-connector-perl-debuginfo.yaml b/packaging/centreon-collect/centreon-connector-perl-debuginfo.yaml similarity index 94% rename from packaging/centreon-connector-perl-debuginfo.yaml rename to packaging/centreon-collect/centreon-connector-perl-debuginfo.yaml index cb9a97c237c..b58cac22c9e 100644 --- a/packaging/centreon-connector-perl-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-connector-perl-debuginfo.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/connectors/perl/centreon_connector_perl" + - src: "../../build/connectors/perl/centreon_connector_perl" dst: "/usr/lib/debug/usr/lib64/centreon-connector/" file_info: mode: 0775 diff --git a/packaging/centreon-connector-perl.yaml b/packaging/centreon-collect/centreon-connector-perl.yaml similarity index 92% rename from packaging/centreon-connector-perl.yaml rename to packaging/centreon-collect/centreon-connector-perl.yaml index 73b37228d95..7ebceba3d8c 100644 --- a/packaging/centreon-connector-perl.yaml +++ b/packaging/centreon-collect/centreon-connector-perl.yaml @@ -16,7 +16,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/connectors/perl/centreon_connector_perl" + - src: "../../build/connectors/perl/centreon_connector_perl" dst: "/usr/lib64/centreon-connector/" file_info: mode: 0775 diff --git a/packaging/centreon-connector-ssh-debuginfo.yaml b/packaging/centreon-collect/centreon-connector-ssh-debuginfo.yaml similarity index 94% rename from packaging/centreon-connector-ssh-debuginfo.yaml rename to packaging/centreon-collect/centreon-connector-ssh-debuginfo.yaml index 08a64c8b4c8..ff49458e72a 100644 --- a/packaging/centreon-connector-ssh-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-connector-ssh-debuginfo.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/connectors/ssh/centreon_connector_ssh" + - src: "../../build/connectors/ssh/centreon_connector_ssh" dst: "/usr/lib/debug/usr/lib64/centreon-connector/" file_info: mode: 0775 diff --git a/packaging/centreon-connector-ssh.yaml b/packaging/centreon-collect/centreon-connector-ssh.yaml similarity index 93% 
rename from packaging/centreon-connector-ssh.yaml rename to packaging/centreon-collect/centreon-connector-ssh.yaml index 6b8a976529b..2b218c6c0a1 100644 --- a/packaging/centreon-connector-ssh.yaml +++ b/packaging/centreon-collect/centreon-connector-ssh.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/connectors/ssh/centreon_connector_ssh" + - src: "../../build/connectors/ssh/centreon_connector_ssh" dst: "/usr/lib64/centreon-connector/" file_info: mode: 0775 diff --git a/packaging/centreon-connector.yaml b/packaging/centreon-collect/centreon-connector.yaml similarity index 100% rename from packaging/centreon-connector.yaml rename to packaging/centreon-collect/centreon-connector.yaml diff --git a/packaging/centreon-engine-bench-debuginfo.yaml b/packaging/centreon-collect/centreon-engine-bench-debuginfo.yaml similarity index 88% rename from packaging/centreon-engine-bench-debuginfo.yaml rename to packaging/centreon-collect/centreon-engine-bench-debuginfo.yaml index 2d0bbb311f0..da440774d4a 100644 --- a/packaging/centreon-engine-bench-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-engine-bench-debuginfo.yaml @@ -15,12 +15,12 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/engine/modules/bench/centengine_bench_passive.debug" + - src: "../../build/engine/modules/bench/centengine_bench_passive.debug" dst: "/usr/lib/debug/usr/sbin/centengine_bench_passive.debug" file_info: mode: 0644 - - src: "../build/engine/modules/bench/bench_passive_module.so.debug" + - src: "../../build/engine/modules/bench/bench_passive_module.so.debug" dst: "/usr/lib/debug/usr/lib64/centreon-engine/bench_passive_module.so.debug" file_info: mode: 0644 diff --git a/packaging/centreon-engine-bench.yaml b/packaging/centreon-collect/centreon-engine-bench.yaml similarity index 87% rename from packaging/centreon-engine-bench.yaml rename to packaging/centreon-collect/centreon-engine-bench.yaml index 9679d50144e..0e8dd15bd48 100644 --- a/packaging/centreon-engine-bench.yaml +++ b/packaging/centreon-collect/centreon-engine-bench.yaml @@ -15,10 +15,10 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/engine/modules/bench/centengine_bench_passive" + - src: "../../build/engine/modules/bench/centengine_bench_passive" dst: "/usr/sbin/centengine_bench_passive" - - src: "../build/engine/modules/bench/bench_passive_module.so" + - src: "../../build/engine/modules/bench/bench_passive_module.so" dst: "/usr/lib64/centreon-engine/bench_passive_module.so" overrides: diff --git a/packaging/centreon-engine-daemon-debuginfo.yaml b/packaging/centreon-collect/centreon-engine-daemon-debuginfo.yaml similarity index 90% rename from packaging/centreon-engine-daemon-debuginfo.yaml rename to packaging/centreon-collect/centreon-engine-daemon-debuginfo.yaml index 91529e84e4d..df19f9bd66c 100644 --- a/packaging/centreon-engine-daemon-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-engine-daemon-debuginfo.yaml @@ -15,17 +15,17 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/engine/centengine.debug" + - src: "../../build/engine/centengine.debug" dst: "/usr/lib/debug/usr/sbin/centengine.debug" file_info: mode: 0644 - - src: "../build/engine/centenginestats.debug" + - src: "../../build/engine/centenginestats.debug" dst: "/usr/lib/debug/usr/sbin/centenginestats.debug" file_info: mode: 0644 - - src: "../build/engine/modules/external_commands/externalcmd.so.debug" 
+ - src: "../../build/engine/modules/external_commands/externalcmd.so.debug" dst: "/usr/lib/debug/usr/lib64/centreon-engine/externalcmd.so.debug" file_info: mode: 0644 diff --git a/packaging/centreon-engine-daemon.yaml b/packaging/centreon-collect/centreon-engine-daemon.yaml similarity index 88% rename from packaging/centreon-engine-daemon.yaml rename to packaging/centreon-collect/centreon-engine-daemon.yaml index 778b153848d..9b6bddfd0a6 100644 --- a/packaging/centreon-engine-daemon.yaml +++ b/packaging/centreon-collect/centreon-engine-daemon.yaml @@ -16,7 +16,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../engine/conf/centengine.cfg" + - src: "../../engine/conf/centengine.cfg" dst: "/etc/centreon-engine/centengine.cfg" type: config|noreplace file_info: @@ -24,7 +24,7 @@ contents: owner: centreon-engine group: centreon-engine - - src: "../engine/conf/resource.cfg" + - src: "../../engine/conf/resource.cfg" dst: "/etc/centreon-engine/resource.cfg" type: config|noreplace file_info: @@ -32,7 +32,7 @@ contents: owner: centreon-engine group: centreon-engine - - src: "../engine/conf/commands.cfg" + - src: "../../engine/conf/commands.cfg" dst: "/etc/centreon-engine/commands.cfg" type: config|noreplace file_info: @@ -40,7 +40,7 @@ contents: owner: centreon-engine group: centreon-engine - - src: "../engine/conf/timeperiods.cfg" + - src: "../../engine/conf/timeperiods.cfg" dst: "/etc/centreon-engine/timeperiods.cfg" type: config|noreplace file_info: @@ -48,28 +48,28 @@ contents: owner: centreon-engine group: centreon-engine - - src: "../engine/scripts/logrotate.conf" + - src: "../../engine/scripts/logrotate.conf" dst: "/etc/logrotate.d/centengine" type: config|noreplace - - src: "../engine/scripts/centengine.service" + - src: "../../engine/scripts/centengine.service" dst: "/usr/lib/systemd/system/centengine.service" file_info: mode: 0644 packager: rpm - - src: "../engine/scripts/centengine.service" + - src: "../../engine/scripts/centengine.service" dst: "/lib/systemd/system/centengine.service" file_info: mode: 0644 packager: deb - - src: "../build/engine/centengine" + - src: "../../build/engine/centengine" dst: "/usr/sbin/centengine" - - src: "../build/engine/centenginestats" + - src: "../../build/engine/centenginestats" dst: "/usr/sbin/centenginestats" - - src: "../build/engine/modules/external_commands/externalcmd.so" + - src: "../../build/engine/modules/external_commands/externalcmd.so" dst: "/usr/lib64/centreon-engine/externalcmd.so" - dst: "/etc/centreon-engine" diff --git a/packaging/centreon-engine-opentelemetry-debuginfo.yaml b/packaging/centreon-collect/centreon-engine-opentelemetry-debuginfo.yaml similarity index 93% rename from packaging/centreon-engine-opentelemetry-debuginfo.yaml rename to packaging/centreon-collect/centreon-engine-opentelemetry-debuginfo.yaml index 383627db89c..54725aa17b5 100644 --- a/packaging/centreon-engine-opentelemetry-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-engine-opentelemetry-debuginfo.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/engine/modules/opentelemetry/libopentelemetry.so.debug" + - src: "../../build/engine/modules/opentelemetry/libopentelemetry.so.debug" dst: "/usr/lib/debug/usr/lib64/centreon-engine/libopentelemetry.so.debug" file_info: mode: 0644 diff --git a/packaging/centreon-engine-opentelemetry.yaml b/packaging/centreon-collect/centreon-engine-opentelemetry.yaml similarity index 91% rename from 
packaging/centreon-engine-opentelemetry.yaml rename to packaging/centreon-collect/centreon-engine-opentelemetry.yaml index ef8dccea651..409ef0dd70d 100644 --- a/packaging/centreon-engine-opentelemetry.yaml +++ b/packaging/centreon-collect/centreon-engine-opentelemetry.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/engine/modules/opentelemetry/libopentelemetry.so" + - src: "../../build/engine/modules/opentelemetry/libopentelemetry.so" dst: "/usr/lib64/centreon-engine/libopentelemetry.so" overrides: diff --git a/packaging/centreon-engine-selinux.yaml b/packaging/centreon-collect/centreon-engine-selinux.yaml similarity index 93% rename from packaging/centreon-engine-selinux.yaml rename to packaging/centreon-collect/centreon-engine-selinux.yaml index 06ec450b11e..5ed3069a795 100644 --- a/packaging/centreon-engine-selinux.yaml +++ b/packaging/centreon-collect/centreon-engine-selinux.yaml @@ -24,7 +24,7 @@ provides: - centreon-engine-selinux-debuginfo contents: - - src: "../selinux/centreon-engine/centreon-engine.pp" + - src: "../../selinux/centreon-engine/centreon-engine.pp" dst: "/usr/share/selinux/packages/centreon/centreon-engine.pp" file_info: mode: 0655 diff --git a/packaging/centreon-engine.yaml b/packaging/centreon-collect/centreon-engine.yaml similarity index 100% rename from packaging/centreon-engine.yaml rename to packaging/centreon-collect/centreon-engine.yaml diff --git a/packaging/centreon-monitoring-agent-debuginfo.yaml b/packaging/centreon-collect/centreon-monitoring-agent-debuginfo.yaml similarity index 95% rename from packaging/centreon-monitoring-agent-debuginfo.yaml rename to packaging/centreon-collect/centreon-monitoring-agent-debuginfo.yaml index 5aa14410670..822dee35ca6 100644 --- a/packaging/centreon-monitoring-agent-debuginfo.yaml +++ b/packaging/centreon-collect/centreon-monitoring-agent-debuginfo.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../build/agent/centagent.debug" + - src: "../../build/agent/centagent.debug" dst: "/usr/lib/debug/usr/bin/centagent.debug" file_info: mode: 0644 diff --git a/packaging/centreon-monitoring-agent-selinux.yaml b/packaging/centreon-collect/centreon-monitoring-agent-selinux.yaml similarity index 92% rename from packaging/centreon-monitoring-agent-selinux.yaml rename to packaging/centreon-collect/centreon-monitoring-agent-selinux.yaml index 46ad02ae3ec..86842fac428 100644 --- a/packaging/centreon-monitoring-agent-selinux.yaml +++ b/packaging/centreon-collect/centreon-monitoring-agent-selinux.yaml @@ -24,7 +24,7 @@ provides: - centreon-monitoring-agent-selinux-debuginfo contents: - - src: "../selinux/centreon-monitoring-agent/centreon-monitoring-agent.pp" + - src: "../../selinux/centreon-monitoring-agent/centreon-monitoring-agent.pp" dst: "/usr/share/selinux/packages/centreon/centreon-monitoring-agent.pp" file_info: mode: 0655 diff --git a/packaging/centreon-monitoring-agent.yaml b/packaging/centreon-collect/centreon-monitoring-agent.yaml similarity index 90% rename from packaging/centreon-monitoring-agent.yaml rename to packaging/centreon-collect/centreon-monitoring-agent.yaml index c452432cf47..4c2ccb73187 100644 --- a/packaging/centreon-monitoring-agent.yaml +++ b/packaging/centreon-collect/centreon-monitoring-agent.yaml @@ -15,7 +15,7 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../agent/conf/centagent.json" + - src: "../../agent/conf/centagent.json" dst: 
"/etc/centreon-monitoring-agent/centagent.json" type: config|noreplace file_info: @@ -23,18 +23,18 @@ contents: owner: centreon-monitoring-agent group: centreon-monitoring-agent - - src: "../agent/scripts/centagent.service" + - src: "../../agent/scripts/centagent.service" dst: "/usr/lib/systemd/system/centagent.service" file_info: mode: 0644 packager: rpm - - src: "../agent/scripts/centagent.service" + - src: "../../agent/scripts/centagent.service" dst: "/lib/systemd/system/centagent.service" file_info: mode: 0644 packager: deb - - src: "../build/agent/centagent" + - src: "../../build/agent/centagent" dst: "/usr/bin/centagent" - dst: "/etc/centreon-monitoring-agent" diff --git a/packaging/files/empty_file b/packaging/centreon-collect/files/empty_file similarity index 100% rename from packaging/files/empty_file rename to packaging/centreon-collect/files/empty_file diff --git a/packaging/scripts/centreon-broker-cbd-postinstall.sh b/packaging/centreon-collect/scripts/centreon-broker-cbd-postinstall.sh similarity index 100% rename from packaging/scripts/centreon-broker-cbd-postinstall.sh rename to packaging/centreon-collect/scripts/centreon-broker-cbd-postinstall.sh diff --git a/packaging/scripts/centreon-broker-cbd-preremove.sh b/packaging/centreon-collect/scripts/centreon-broker-cbd-preremove.sh similarity index 100% rename from packaging/scripts/centreon-broker-cbd-preremove.sh rename to packaging/centreon-collect/scripts/centreon-broker-cbd-preremove.sh diff --git a/packaging/scripts/centreon-broker-postinstall.sh b/packaging/centreon-collect/scripts/centreon-broker-postinstall.sh similarity index 100% rename from packaging/scripts/centreon-broker-postinstall.sh rename to packaging/centreon-collect/scripts/centreon-broker-postinstall.sh diff --git a/packaging/scripts/centreon-broker-postremove.sh b/packaging/centreon-collect/scripts/centreon-broker-postremove.sh similarity index 100% rename from packaging/scripts/centreon-broker-postremove.sh rename to packaging/centreon-collect/scripts/centreon-broker-postremove.sh diff --git a/packaging/scripts/centreon-broker-preinstall.sh b/packaging/centreon-collect/scripts/centreon-broker-preinstall.sh similarity index 100% rename from packaging/scripts/centreon-broker-preinstall.sh rename to packaging/centreon-collect/scripts/centreon-broker-preinstall.sh diff --git a/packaging/scripts/centreon-broker-selinux-postinstall.sh b/packaging/centreon-collect/scripts/centreon-broker-selinux-postinstall.sh similarity index 100% rename from packaging/scripts/centreon-broker-selinux-postinstall.sh rename to packaging/centreon-collect/scripts/centreon-broker-selinux-postinstall.sh diff --git a/packaging/scripts/centreon-broker-selinux-preremove.sh b/packaging/centreon-collect/scripts/centreon-broker-selinux-preremove.sh similarity index 100% rename from packaging/scripts/centreon-broker-selinux-preremove.sh rename to packaging/centreon-collect/scripts/centreon-broker-selinux-preremove.sh diff --git a/packaging/scripts/centreon-engine-daemon-postinstall.sh b/packaging/centreon-collect/scripts/centreon-engine-daemon-postinstall.sh similarity index 100% rename from packaging/scripts/centreon-engine-daemon-postinstall.sh rename to packaging/centreon-collect/scripts/centreon-engine-daemon-postinstall.sh diff --git a/packaging/scripts/centreon-engine-daemon-postremove.sh b/packaging/centreon-collect/scripts/centreon-engine-daemon-postremove.sh similarity index 100% rename from packaging/scripts/centreon-engine-daemon-postremove.sh rename to 
packaging/centreon-collect/scripts/centreon-engine-daemon-postremove.sh diff --git a/packaging/scripts/centreon-engine-daemon-preinstall.sh b/packaging/centreon-collect/scripts/centreon-engine-daemon-preinstall.sh similarity index 100% rename from packaging/scripts/centreon-engine-daemon-preinstall.sh rename to packaging/centreon-collect/scripts/centreon-engine-daemon-preinstall.sh diff --git a/packaging/scripts/centreon-engine-daemon-preremove.sh b/packaging/centreon-collect/scripts/centreon-engine-daemon-preremove.sh similarity index 100% rename from packaging/scripts/centreon-engine-daemon-preremove.sh rename to packaging/centreon-collect/scripts/centreon-engine-daemon-preremove.sh diff --git a/packaging/scripts/centreon-engine-selinux-postinstall.sh b/packaging/centreon-collect/scripts/centreon-engine-selinux-postinstall.sh similarity index 100% rename from packaging/scripts/centreon-engine-selinux-postinstall.sh rename to packaging/centreon-collect/scripts/centreon-engine-selinux-postinstall.sh diff --git a/packaging/scripts/centreon-engine-selinux-preremove.sh b/packaging/centreon-collect/scripts/centreon-engine-selinux-preremove.sh similarity index 100% rename from packaging/scripts/centreon-engine-selinux-preremove.sh rename to packaging/centreon-collect/scripts/centreon-engine-selinux-preremove.sh diff --git a/packaging/scripts/centreon-monitoring-agent-postinstall.sh b/packaging/centreon-collect/scripts/centreon-monitoring-agent-postinstall.sh similarity index 100% rename from packaging/scripts/centreon-monitoring-agent-postinstall.sh rename to packaging/centreon-collect/scripts/centreon-monitoring-agent-postinstall.sh diff --git a/packaging/scripts/centreon-monitoring-agent-postremove.sh b/packaging/centreon-collect/scripts/centreon-monitoring-agent-postremove.sh similarity index 100% rename from packaging/scripts/centreon-monitoring-agent-postremove.sh rename to packaging/centreon-collect/scripts/centreon-monitoring-agent-postremove.sh diff --git a/packaging/scripts/centreon-monitoring-agent-preinstall.sh b/packaging/centreon-collect/scripts/centreon-monitoring-agent-preinstall.sh similarity index 100% rename from packaging/scripts/centreon-monitoring-agent-preinstall.sh rename to packaging/centreon-collect/scripts/centreon-monitoring-agent-preinstall.sh diff --git a/packaging/scripts/centreon-monitoring-agent-preremove.sh b/packaging/centreon-collect/scripts/centreon-monitoring-agent-preremove.sh similarity index 100% rename from packaging/scripts/centreon-monitoring-agent-preremove.sh rename to packaging/centreon-collect/scripts/centreon-monitoring-agent-preremove.sh diff --git a/packaging/scripts/centreon-monitoring-agent-selinux-postinstall.sh b/packaging/centreon-collect/scripts/centreon-monitoring-agent-selinux-postinstall.sh similarity index 100% rename from packaging/scripts/centreon-monitoring-agent-selinux-postinstall.sh rename to packaging/centreon-collect/scripts/centreon-monitoring-agent-selinux-postinstall.sh diff --git a/packaging/scripts/centreon-monitoring-agent-selinux-preremove.sh b/packaging/centreon-collect/scripts/centreon-monitoring-agent-selinux-preremove.sh similarity index 100% rename from packaging/scripts/centreon-monitoring-agent-selinux-preremove.sh rename to packaging/centreon-collect/scripts/centreon-monitoring-agent-selinux-preremove.sh diff --git a/packaging/scripts/env/.env.bookworm b/packaging/centreon-collect/scripts/env/.env.bookworm similarity index 100% rename from packaging/scripts/env/.env.bookworm rename to 
packaging/centreon-collect/scripts/env/.env.bookworm diff --git a/packaging/scripts/env/.env.bullseye b/packaging/centreon-collect/scripts/env/.env.bullseye similarity index 100% rename from packaging/scripts/env/.env.bullseye rename to packaging/centreon-collect/scripts/env/.env.bullseye diff --git a/packaging/centreon-common/centreon-common-selinux.yaml b/packaging/centreon-common/centreon-common-selinux.yaml new file mode 100644 index 00000000000..71f7bec7272 --- /dev/null +++ b/packaging/centreon-common/centreon-common-selinux.yaml @@ -0,0 +1,37 @@ +name: "centreon-common-selinux" +arch: "all" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + SElinux context for centreon-common. + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +depends: + - policycoreutils + - selinux-policy + - selinux-policy-targeted + +contents: + - src: "../../selinux/centreon-common/centreon-common.pp" + dst: "/usr/share/selinux/packages/centreon/centreon-common.pp" + file_info: + mode: 0644 + +scripts: + postinstall: ./scripts/centreon-common-selinux-postinstall.sh + preremove: ./scripts/centreon-common-selinux-preremove.sh + +rpm: + summary: SELinux context for Centreon + compression: zstd + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/packaging/centreon-common/centreon-common.yaml b/packaging/centreon-common/centreon-common.yaml new file mode 100644 index 00000000000..c47faadce84 --- /dev/null +++ b/packaging/centreon-common/centreon-common.yaml @@ -0,0 +1,124 @@ +name: "centreon-common" +arch: "all" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + Add user and group for Centreon + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - dst: "/etc/centreon" + type: dir + file_info: + owner: centreon + group: centreon + mode: 0775 + + - dst: "/etc/centreon/config.d" + type: dir + file_info: + owner: centreon + group: centreon + mode: 0775 + + - dst: "/var/cache/centreon" + type: dir + file_info: + owner: centreon + group: centreon + mode: 0775 + + - dst: "/var/cache/centreon/backup" + type: dir + file_info: + owner: centreon + group: centreon + mode: 0775 + + - dst: "/var/cache/centreon/config" + type: dir + file_info: + owner: centreon + group: centreon + mode: 2775 + + - dst: "/var/cache/centreon/config/engine" + type: dir + file_info: + owner: centreon + group: centreon + mode: 2775 + + - dst: "/var/cache/centreon/config/broker" + type: dir + file_info: + owner: centreon + group: centreon + mode: 2775 + + - dst: "/var/cache/centreon/config/export" + type: dir + file_info: + owner: centreon + group: centreon + mode: 2775 + + - dst: "/var/cache/centreon/config/vmware" + type: dir + file_info: + owner: centreon + group: centreon + mode: 2775 + + - src: "./src/config.yaml" + dst: "/etc/centreon/config.yaml" + file_info: + mode: 0600 + + - src: "./src/centreon.systemd" + dst: "/etc/systemd/system/centreon.service" + file_info: + mode: 0644 + packager: rpm + - src: "./src/centreon.systemd" + dst: "/lib/systemd/system/centreon.service" + file_info: + mode: 0644 + packager: deb + +scripts: + preinstall: ./scripts/centreon-common-preinstall.sh + postinstall: 
./scripts/centreon-common-postinstall.sh + preremove: ./scripts/centreon-common-preremove.sh + postremove: ./scripts/centreon-common-postremove.sh + +overrides: + rpm: + depends: + - centreon-broker >= ${MAJOR_VERSION} + - centreon-broker < ${NEXT_MAJOR_VERSION} + - centreon-engine-daemon >= ${MAJOR_VERSION} + - centreon-engine-daemon < ${NEXT_MAJOR_VERSION} + - sudo + deb: + depends: + - "centreon-broker (>= ${MAJOR_VERSION}~)" + - "centreon-broker (<< ${NEXT_MAJOR_VERSION}~)" + - "centreon-engine-daemon (>= ${MAJOR_VERSION}~)" + - "centreon-engine-daemon (<< ${NEXT_MAJOR_VERSION}~)" + - sudo + +rpm: + summary: Centreon common package + compression: zstd + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/packaging/centreon-common/scripts/centreon-common-postinstall.sh b/packaging/centreon-common/scripts/centreon-common-postinstall.sh new file mode 100644 index 00000000000..0e10ff70e84 --- /dev/null +++ b/packaging/centreon-common/scripts/centreon-common-postinstall.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# force 2775 to cache config directories +fixCacheConfigRights() { + echo "Forcing rights of centreon cache directories ..." + chmod 2775 /var/cache/centreon/config + chmod 2775 /var/cache/centreon/config/engine + chmod 2775 /var/cache/centreon/config/broker + chmod 2775 /var/cache/centreon/config/export + chmod 2775 /var/cache/centreon/config/vmware + + # MON-38165 + chmod 0770 /var/cache/centreon/config/engine/* + chmod 0770 /var/cache/centreon/config/broker/* +} + +startCentreon() { + systemctl daemon-reload ||: + systemctl unmask centreon.service ||: + systemctl preset centreon.service ||: + systemctl enable centreon.service ||: + systemctl restart centreon.service ||: +} + +action="$1" +if [ "$1" = "configure" ] && [ -z "$2" ]; then + # Alpine linux does not pass args, and deb passes $1=configure + action="install" +elif [ "$1" = "configure" ] && [ -n "$2" ]; then + # deb passes $1=configure $2= + action="upgrade" +fi + +case "$action" in + "1" | "install") + fixCacheConfigRights + startCentreon + ;; + "2" | "upgrade") + fixCacheConfigRights + startCentreon + ;; + *) + # $1 == version being installed + startCentreon + ;; +esac diff --git a/packaging/centreon-common/scripts/centreon-common-postremove.sh b/packaging/centreon-common/scripts/centreon-common-postremove.sh new file mode 100644 index 00000000000..85fe340e3ab --- /dev/null +++ b/packaging/centreon-common/scripts/centreon-common-postremove.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +removeUsersAndGroups() { + echo "Removing centreon user and group ..." + userdel -r centreon > /dev/null 2>&1 || : + groupdel centreon > /dev/null 2>&1 || : + gpasswd --delete centreon centreon-broker > /dev/null 2>&1 || : + gpasswd --delete centreon-broker centreon > /dev/null 2>&1 || : + gpasswd --delete centreon centreon-engine > /dev/null 2>&1 || : + gpasswd --delete centreon-engine centreon > /dev/null 2>&1 || : +} + +action="$1" +case "$action" in + "0" | "remove") + removeUsersAndGroups + ;; + "1" | "upgrade") + ;; + "purge") + removeUsersAndGroups + ;; + *) + ;; +esac diff --git a/packaging/centreon-common/scripts/centreon-common-preinstall.sh b/packaging/centreon-common/scripts/centreon-common-preinstall.sh new file mode 100644 index 00000000000..6aa34cc7f2f --- /dev/null +++ b/packaging/centreon-common/scripts/centreon-common-preinstall.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +echo "Removing previous centreon engine and broker configuration files in cache ..." 
+rm -rf /var/cache/centreon/config/engine/* 2> /dev/null +rm -rf /var/cache/centreon/config/broker/* 2> /dev/null +rm -rf /var/cache/centreon/config/export/* 2> /dev/null + +echo "Creating centreon user and group ..." +getent group centreon &>/dev/null || groupadd -r centreon +getent passwd centreon &>/dev/null || useradd -g centreon -m -d /var/spool/centreon -r centreon 2> /dev/null + +if getent passwd centreon-broker > /dev/null 2>&1; then + usermod -a -G centreon-broker centreon + usermod -a -G centreon centreon-broker +fi + +if getent passwd centreon-engine > /dev/null 2>&1; then + usermod -a -G centreon-engine centreon + usermod -a -G centreon centreon-engine +fi diff --git a/packaging/centreon-common/scripts/centreon-common-preremove.sh b/packaging/centreon-common/scripts/centreon-common-preremove.sh new file mode 100644 index 00000000000..3498c040c1f --- /dev/null +++ b/packaging/centreon-common/scripts/centreon-common-preremove.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +systemctl stop gorgoned.service ||: diff --git a/packaging/centreon-common/scripts/centreon-common-selinux-postinstall.sh b/packaging/centreon-common/scripts/centreon-common-selinux-postinstall.sh new file mode 100644 index 00000000000..1001ef17887 --- /dev/null +++ b/packaging/centreon-common/scripts/centreon-common-selinux-postinstall.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +install() { + echo "Installing centreon-common selinux rules ..." + semodule -i /usr/share/selinux/packages/centreon/centreon-common.pp || : + restorecon -R -v /run/dbus/system_bus_socket || : + setsebool -P daemons_enable_cluster_mode on || : + setsebool -P cluster_can_network_connect on || : + setsebool -P cluster_manage_all_files on || : +} + +upgrade() { + echo "Updating centreon-common selinux rules ..." + semodule -i /usr/share/selinux/packages/centreon/centreon-common.pp || : + restorecon -R -v /run/dbus/system_bus_socket || : + setsebool -P daemons_enable_cluster_mode on || : + setsebool -P cluster_can_network_connect on || : + setsebool -P cluster_manage_all_files on || : +} + +action="$1" +if [ "$1" = "configure" ] && [ -z "$2" ]; then + action="install" +elif [ "$1" = "configure" ] && [ -n "$2" ]; then + action="upgrade" +fi + +case "$action" in + "1" | "install") + install + ;; + "2" | "upgrade") + upgrade + ;; +esac diff --git a/packaging/centreon-common/scripts/centreon-common-selinux-preremove.sh b/packaging/centreon-common/scripts/centreon-common-selinux-preremove.sh new file mode 100644 index 00000000000..9054a3e9c45 --- /dev/null +++ b/packaging/centreon-common/scripts/centreon-common-selinux-preremove.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ "$1" -lt "1" ]; then + echo "Removing centreon-common selinux rules ..." + setsebool -P daemons_enable_cluster_mode off || : + setsebool -P cluster_can_network_connect off || : + setsebool -P cluster_manage_all_files off || : + semodule -r centreon-common || : +fi diff --git a/packaging/centreon-common/src/centreon.systemd b/packaging/centreon-common/src/centreon.systemd new file mode 100644 index 00000000000..9ed59725b4e --- /dev/null +++ b/packaging/centreon-common/src/centreon.systemd @@ -0,0 +1,29 @@ +## +## Copyright 2023 Centreon +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## +## For more information : contact@centreon.com +## + +[Unit] +Description=One Service to rule them all. + +[Service] +Type=oneshot +ExecStart=/bin/true +ExecReload=/bin/true +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/packaging/centreon-common/src/config.yaml b/packaging/centreon-common/src/config.yaml new file mode 100644 index 00000000000..41be72ba2b1 --- /dev/null +++ b/packaging/centreon-common/src/config.yaml @@ -0,0 +1,3 @@ +name: config.yaml +description: Configuration for Central server +configuration: !include config.d/*.yaml \ No newline at end of file diff --git a/perl-libs/lib/README.md b/perl-libs/lib/README.md new file mode 100644 index 00000000000..0982983786d --- /dev/null +++ b/perl-libs/lib/README.md @@ -0,0 +1,33 @@ +# centreon-perl-libs / centreon-perl-libs-common + +This directory contains the common Perl libraries used by Centreon. + +## centreon-perl-libs-common + +This package contains libraries located in the centreon::common namespace. +They are generic libraries used by multiple Centreon modules. + +### Tests + +To execute the tests, see the t/ directory in the package. +First install the dependencies: +```bash +# distro package to install: +openssl-dev +# cpan packages to install: +Test2::V0 Test2::Plugin::NoWarnings Crypt::OpenSSL::AES +``` + +Then run the following command, passing the path to the t/ folder as argument: +```bash +prove -r t/ +``` + + +## centreon-perl-libs + +This package contains libraries associated with a specific binary; they are located in the centreon::[area] namespace. + +For example, the centreon-trap package contains a binary calling the centreon::trap namespace, which contains all the logic. + + diff --git a/perl-libs/lib/centreon/common/centreonvault.pm b/perl-libs/lib/centreon/common/centreonvault.pm new file mode 100644 index 00000000000..e415261e2d1 --- /dev/null +++ b/perl-libs/lib/centreon/common/centreonvault.pm @@ -0,0 +1,428 @@ +# +# Copyright 2024 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
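As a quick illustration of the test layout the perl-libs README above describes, a new test file under t/ could look like the minimal Test2::V0 sketch below. The file name, the `use lib` path and the assertions are hypothetical (not part of this change); the sketch only exercises behaviour visible in the modules added by this diff.

```perl
#!/usr/bin/perl
# Hypothetical t/common/logger.t - a minimal Test2::V0 test for the common libs,
# runnable with `prove -r t/` as documented in perl-libs/lib/README.md.
use strict;
use warnings;

use Test2::V0;
use Test2::Plugin::NoWarnings;

use FindBin;
use lib "$FindBin::Bin/../../lib";   # assumed layout: adjust to where perl-libs/lib actually lives

use centreon::common::logger;

my $logger = centreon::common::logger->new();

# Default severity is WARNING (4); debug mode should therefore be off.
is($logger->severity(), 'WARNING', 'default severity is WARNING');
ok(!$logger->is_debug(), 'debug disabled by default');

# Switching to debug by name should be accepted.
$logger->severity('debug');
ok($logger->is_debug(), 'debug enabled after severity("debug")');

done_testing();
```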
+# + +package centreon::common::centreonvault; + +use strict; +use warnings; + +use MIME::Base64; +use Crypt::OpenSSL::AES; +use Net::Curl::Easy qw(:constants); +use JSON::XS; + +my $VAULT_PATH_REGEX = qr/^secret::hashicorp_vault::([^:]+)::(.+)$/; + +sub new { + my ($class, %options) = @_; + my $self = bless \%options, $class; + # mandatory options: + # - logger: logger object + # - config_file: either path of a JSON vault config file or the configuration as a perl hash. + + $self->{enabled} = 1; + $self->{crypted_credentials} = 1; + + if ( !$self->init() ) { + $self->{enabled} = 0; + $self->{logger}->writeLogWarning("Something happened during init() method that makes Centreonvault not usable. Ignore this if you don't use Centreonvault"); + } + return $self; +} + + +sub init { + my ($self, %options) = @_; + + $self->check_options() or return undef; + + # for unit test purpose, if the config is given as an hash, we don't try to read the config file. + if (ref $self->{config_file} eq ref {}) { + $self->{vault_config} = $self->{config_file}; + } else { + # check if the following information is available + $self->{logger}->writeLogDebug("Reading Vault configuration from file " . $self->{config_file} . "."); + $self->{vault_config} = parse_json_file('json_file' => $self->{config_file}); + if (defined($self->{vault_config}->{error_message})) { + $self->{logger}->writeLogInfo("Error while parsing " . $self->{config_file} . ": " + . $self->{vault_config}->{error_message}); + return undef; + } + } + $self->check_configuration() or return undef; + + $self->{logger}->writeLogDebug("Vault configuration read. Name: " . $self->{vault_config}->{name} + . ". Url: " . $self->{vault_config}->{url} . "."); + + # Create the Curl object, it will be used several times + $self->{curl_easy} = Net::Curl::Easy->new(); + $self->{curl_easy}->setopt( CURLOPT_USERAGENT, "Centreon VMware daemon's centreonvault.pm"); + + return 1; +} + +sub check_options { + my ($self, %options) = @_; + + if ( !defined($self->{logger}) ) { + die "FATAL: No logger given to the constructor. Centreonvault cannot be used."; + } + if ( !defined($self->{config_file})) { + $self->{logger}->writeLogNotice("No config file given to the constructor. Centreonvault cannot be used."); + return undef; + } + if ( ! -f $self->{config_file} and ref $self->{config_file} ne ref {}) { + $self->{logger}->writeLogNotice("The given configuration file " . $self->{config_file} + . " does not exist. Passwords won't be retrieved from Centreonvault. Ignore this if you don't use Centreonvault."); + return undef; + } + + return 1; +} + +sub check_configuration { + my ($self, %options) = @_; + + if ( !defined($self->{vault_config}->{url}) || $self->{vault_config}->{url} eq '') { + $self->{logger}->writeLogDebug("Vault url is missing from configuration."); + $self->{vault_config}->{url} = '127.0.0.1'; + } + if ( !defined($self->{vault_config}->{port}) || $self->{vault_config}->{port} eq '') { + $self->{logger}->writeLogDebug("Vault port is missing from configuration."); + $self->{vault_config}->{port} = '443'; + } + + # Normally, the role_id and secret_id data are encrypted using AES wit the following information: + # firstKey = APP_SECRET (environment variable) + # secondKey = 'salt' (hashing) key given by vault.json configuration file + # both are base64 encoded + if ( !defined($self->{vault_config}->{salt}) || $self->{vault_config}->{salt} eq '') { + $self->{logger}->writeLogNotice("Vault environment does not seem complete: 'salt' attribute missing from " + . 
$self->{config_file} + . ". 'role_id' and 'secret_id' won't be decrypted, so they'll be used as they're stored in the vault config file."); + $self->{crypted_credentials} = 0; + $self->{hash_key} = ''; + } else { + $self->{hash_key} = $self->{vault_config}->{salt}; # key for sha3-512 hmac + } + $self->{encryption_key} = get_app_secret(); + if ( !defined($self->{encryption_key}) or $self->{encryption_key} eq '' ) { + $self->{logger}->writeLogInfo("Vault environment does not seem complete. 'APP_SECRET' environment variable missing." + . " 'role_id' and 'secret_id' won't be decrypted, so they'll be used as they're stored in the vault config file."); + $self->{crypted_credentials} = 0; + $self->{encryption_key} = ''; + } + + return 1; +} + +sub get_app_secret { + if (defined($ENV{'APP_SECRET'}) && $ENV{'APP_SECRET'} ne '' ) { + return $ENV{'APP_SECRET'}; + } + if (-r '/usr/share/centreon/.env'){ + open(my $fh, '<', '/usr/share/centreon/.env') or return ''; + while (my $line = <$fh>) { + chomp $line; + if ($line =~ /^APP_SECRET=(.+)$/) { + return $1; + } + } + } + return ''; # if no app_secret found return empty string so caller don't try to use it. +} + +sub extract_and_decrypt { + my ($self, %options) = @_; + + my $input = decode_base64($options{data}); + $self->{logger}->writeLogDebug("data to extract and decrypt: '" . $options{data} . "'"); + + # with AES-256, the IV length must 16 bytes + my $iv_length = 16; + # extract the IV, the hashed data, the encrypted data + my $iv = substr($input, 0, $iv_length); # initialization vector + my $hashed_data = substr($input, $iv_length, 64); # hmac of the original data, for integrity control + my $encrypted_data = substr($input, $iv_length + 64); # data to decrypt + + # create the AES object + $self->{logger}->writeLogDebug( + "Creating the AES decryption object for initialization vector (IV) of length " + . length($iv) . "B, key of length " . length($self->{encryption_key}) . "B." + ); + my $cipher; + eval { + $cipher = Crypt::OpenSSL::AES->new( + decode_base64( $self->{encryption_key} ), + { + 'cipher' => 'AES-256-CBC', + 'iv' => $iv, + 'padding' => 1 + } + ); + }; + if ($@) { + $self->{logger}->writeLogNotice("There was an error while creating the AES object: " . $@); + return undef; + } + + # decrypt + $self->{logger}->writeLogDebug("Decrypting the data of length " . length($encrypted_data) . "B."); + my $decrypted_data; + eval {$decrypted_data = $cipher->decrypt($encrypted_data);}; + if ($@) { + $self->{logger}->writeLogNotice("There was an error while decrypting one of the AES-encrypted data: " . 
$@); + return undef; + } + + return $decrypted_data; +} + +sub authenticate { + my ($self) = @_; + + # initial value: assuming the role and secret id might not be encrypted + my $role_id = $self->{vault_config}->{role_id}; + my $secret_id = $self->{vault_config}->{secret_id}; + + + if ($self->{crypted_credentials}) { + # Then decrypt using https://github.com/perl-openssl/perl-Crypt-OpenSSL-AES + # keep the decrypted data in local variables so that they stay in memory for as little time as possible + $self->{logger}->writeLogDebug("Decrypting the credentials needed to authenticate to the vault."); + $role_id = $self->extract_and_decrypt( ('data' => $role_id )); + $secret_id = $self->extract_and_decrypt( ('data' => $secret_id )); + $self->{logger}->writeLogDebug("role_id and secret_id have been decrypted."); + } else { + $self->{logger}->writeLogDebug("role_id and secret_id are not crypted"); + } + + + # Authenticate to get the token + my $url = "https://" . $self->{vault_config}->{url} . ":" . $self->{vault_config}->{port} . "/v1/auth/approle/login"; + $self->{logger}->writeLogDebug("Authenticating to the vault server at URL: $url"); + $self->{curl_easy}->setopt( CURLOPT_URL, $url ); + + my $post_data = "role_id=$role_id&secret_id=$secret_id"; + my $auth_result_json; + # to get more details (in STDERR) + #$self->{curl_easy}->setopt(CURLOPT_VERBOSE, 1); + $self->{curl_easy}->setopt(CURLOPT_POST, 1); + $self->{curl_easy}->setopt(CURLOPT_POSTFIELDS, $post_data); + $self->{curl_easy}->setopt(CURLOPT_POSTFIELDSIZE, length($post_data)); + $self->{curl_easy}->setopt(CURLOPT_WRITEDATA(), \$auth_result_json); + + eval { + $self->{curl_easy}->perform(); + }; + if ($@) { + $self->{logger}->writeLogError("Error while authenticating to the vault: " . $@); + return undef; + } + + $self->{logger}->writeLogInfo("Authentication to the vault passed." ); + + my $auth_result_obj = transform_json_to_object($auth_result_json); + if (defined($auth_result_obj->{error_message})) { + $self->{logger}->writeLogError("Error while decoding JSON '$auth_result_json'. Message: " + . $auth_result_obj->{error_message}); + return undef; + } + + # store the token (.auth.client_token) and its expiration date (current date + .lease_duration) + my $expiration_epoch = -1; + my $lease_duration = $auth_result_obj->{auth}->{lease_duration}; + if ( defined($lease_duration) + && $lease_duration =~ /\d+/ + && $lease_duration > 0 ) { + $expiration_epoch = time() + $lease_duration; + } + $self->{auth} = { + 'token' => $auth_result_obj->{auth}->{client_token}, + 'expiration_epoch' => $expiration_epoch + }; + $self->{logger}->writeLogInfo("Authenticating worked. Token valid until " + . localtime($self->{auth}->{expiration_epoch})); + + return 1; +} + +sub is_token_still_valid { + my ($self) = @_; + if ( + !defined($self->{auth}) + || !defined($self->{auth}->{token}) + || $self->{auth}->{token} eq '' + || $self->{auth}->{expiration_epoch} <= time() + ) { + $self->{logger}->writeLogInfo("The token has expired or is invalid."); + return undef; + } + $self->{logger}->writeLogDebug("The token is still valid."); + return 1; +} + +sub get_secret { + my ($self, $secret) = @_; + + # if vault not enabled, return the secret unchanged + return $secret if ( ! 
$self->{enabled}); + + my ($secret_path, $secret_name) = $secret =~ $VAULT_PATH_REGEX; + if (!defined($secret_path) || !defined($secret_name)) { + return $secret; + } + $self->{logger}->writeLogDebug("Secret path: $secret_path - Secret name: $secret_name"); + + if (!defined($self->{auth}) || !$self->is_token_still_valid() ) { + $self->authenticate() or return $secret; + } + + # prepare the GET statement + my $get_result_json; + my $url = "https://" . $self->{vault_config}->{url} . ":" . $self->{vault_config}->{port} . "/v1/" . $secret_path; + $self->{logger}->writeLogDebug("Requesting URL: $url"); + + #$self->{curl_easy}->setopt( CURLOPT_VERBOSE, 1 ); + $self->{curl_easy}->setopt( CURLOPT_URL, $url ); + $self->{curl_easy}->setopt( CURLOPT_POST, 0 ); + $self->{curl_easy}->setopt( CURLOPT_WRITEDATA(), \$get_result_json ); + $self->{curl_easy}->setopt( CURLOPT_HTTPHEADER(), ["X-Vault-Token: " . $self->{auth}->{token}]); + + eval { + $self->{curl_easy}->perform(); + }; + if ($@) { + $self->{logger}->writeLogError("Error while getting a secret from the vault: " . $@); + return $secret; + } + + $self->{logger}->writeLogDebug("Request passed."); + # request_id + + # the result is a json string, convert it into an object + my $get_result_obj = transform_json_to_object($get_result_json); + if (defined($get_result_obj->{error_message})) { + $self->{logger}->writeLogError("Error while decoding JSON '$get_result_json'. Message: " + . $get_result_obj->{error_message}); + return $secret; + } + $self->{logger}->writeLogDebug("Request id is " . $get_result_obj->{request_id}); + + # .data.data will contain the stored macros + if ( !defined($get_result_obj->{data}) + || !defined($get_result_obj->{data}->{data}) + || !defined($get_result_obj->{data}->{data}->{$secret_name}) ) { + $self->{logger}->writeLogError("Could not get secret '$secret_name' from path '$secret_path' from the vault. Enable debug for more details."); + $self->{logger}->writeLogDebug("Response: " . $get_result_json); + return $secret; + } + $self->{logger}->writeLogInfo("Secret '$secret_name' from path '$secret_path' retrieved from the vault."); + return $get_result_obj->{data}->{data}->{$secret_name}; +} + +sub transform_json_to_object { + my ($json_data) = @_; + + my $json_as_object; + eval { + $json_as_object = decode_json($json_data); + }; + if ($@) { + return ({'error_message' => "Could not decode JSON from '$json_data'. Reason: " . $@}); + }; + return($json_as_object); +} + +sub parse_json_file { + my (%options) = @_; + + my $fh; + my $json_data = ''; + + my $json_file = $options{json_file}; + + open($fh, '<', $json_file) or return ('error_message' => "parse_json_file: Cannot open " . $json_file); + for my $line (<$fh>) { + chomp $line; + $json_data .= $line; + } + close($fh); + return transform_json_to_object($json_data); +} + +1; + +__END__ + +=head1 NAME + +Centreon Vault password manager + +=head1 SYNOPSIS + +Allows to retrieve secrets (usually username and password) from a Hashicorp vault compatible api given a config file as constructor. + + use centreon::common::logger; + use centreon::script::centreonvault; + my $vault = centreon::script::centreonvault->new( + ( + 'logger' => centreon::common::logger->new(), + 'config_file' => '/var/lib/centreon/vault/vault.json' + ) + ); + my $password = $vault->get_secret('secret::hashicorp_vault::mypath/to/mysecrets::password'); + +=head1 METHODS + +=head2 new(\%options) + +Constructor of the vault object. 
+ +%options must provide: + +- logger: an object of the centreon::common::logger class. + +- config_file: full path and file name of the Centreon Vault JSON config file. + +The default config_file path should be '/var/lib/centreon/vault/vault.json'. +The expected file format for Centreon Vault is: + + { + "name": "hashicorp_vault", + "url": "vault-server.mydomain.com", + "salt": "", + "port": 443, + "root_path": "vmware_daemon", + "role_id": ")", + "secret_id": ")" + } + +This sub will not emit Error logs (only Notice and inferior) as it can be called on environment where the Vault is not used. +get_secret() can emit Error logs if vault is considered enabled. + +=head2 get_secret($secret) + +Returns the secret stored in the Centreon Vault at the given path. +If the format of the secret does not match the regular expression +C +or in case of any failure in the process, the method will return the secret unchanged. + +=cut diff --git a/perl-libs/lib/centreon/common/db.pm b/perl-libs/lib/centreon/common/db.pm new file mode 100644 index 00000000000..3c878d35f58 --- /dev/null +++ b/perl-libs/lib/centreon/common/db.pm @@ -0,0 +1,302 @@ +################################################################################ +# Copyright 2005-2013 Centreon +# Centreon is developped by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. 
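To make the retrieval flow implemented above easier to follow, here is a minimal usage sketch of the vault client, written against the package name declared in the module (centreon::common::centreonvault). The macro value and secret path are illustrative only; the config file path is the default mentioned in the POD. Any value that does not match the secret::hashicorp_vault::&lt;path&gt;::&lt;key&gt; format, or any vault failure, is returned unchanged by get_secret().

```perl
#!/usr/bin/perl
# Sketch only: resolving a password through centreon::common::centreonvault.
use strict;
use warnings;

use centreon::common::logger;
use centreon::common::centreonvault;

my $logger = centreon::common::logger->new();
$logger->severity('debug');    # show the authenticate()/get_secret() debug traces

my $vault = centreon::common::centreonvault->new(
    logger      => $logger,
    config_file => '/var/lib/centreon/vault/vault.json'   # default path from the POD above
);

# A plain value is returned as-is; a value in the
# secret::hashicorp_vault::<path>::<key> format triggers an approle login
# followed by a GET on https://<url>:<port>/v1/<path>.
my $macro    = 'secret::hashicorp_vault::vmware_daemon/data/esx01::password';  # illustrative
my $password = $vault->get_secret($macro);
```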
+# +# +#################################################################################### + +package centreon::common::db; + +use strict; +use warnings; +use DBI; + +sub new { + my ($class, %options) = @_; + my %defaults = + ( + logger => undef, + db => undef, + host => "localhost", + user => undef, + password => undef, + port => 3306, + force => 0, + type => "mysql" + ); + my $self = {%defaults, %options}; + $self->{type} = 'mysql' if (!defined($self->{type})); + + $self->{instance} = undef; + $self->{args} = []; + bless $self, $class; + return $self; +} + +# Getter/Setter DB name +sub type { + my $self = shift; + if (@_) { + $self->{type} = shift; + } + return $self->{type}; +} + +# Getter/Setter DB name +sub db { + my $self = shift; + if (@_) { + $self->{db} = shift; + } + return $self->{db}; +} + +# Getter/Setter DB host +sub host { + my $self = shift; + if (@_) { + $self->{host} = shift; + } + return $self->{host}; +} + +# Getter/Setter DB port +sub port { + my $self = shift; + if (@_) { + $self->{port} = shift; + } + return $self->{port}; +} + +# Getter/Setter DB user +sub user { + my $self = shift; + if (@_) { + $self->{user} = shift; + } + return $self->{user}; +} + +# Getter/Setter DB force +sub force { + my $self = shift; + if (@_) { + $self->{force} = shift; + } + return $self->{force}; +} + +# Getter/Setter DB password +sub password { + my $self = shift; + if (@_) { + $self->{password} = shift; + } + return $self->{password}; +} + +sub last_insert_id { + my $self = shift; + return $self->{instance}->last_insert_id(undef, undef, undef, undef); +} + +sub quote { + my $self = shift; + + if (defined($self->{instance})) { + return $self->{instance}->quote($_[0]); + } + my $num = scalar(@{$self->{args}}); + push @{$self->{args}}, $_[0]; + return "##__ARG__$num##"; +} + +sub set_inactive_destroy { + my $self = shift; + + if (defined($self->{instance})) { + $self->{instance}->{InactiveDestroy} = 1; + } +} + +sub transaction_mode { + my ($self, $status) = @_; + + if ($status) { + $self->{instance}->begin_work; + $self->{instance}->{RaiseError} = 1; + } else { + $self->{instance}->{AutoCommit} = 1; + $self->{instance}->{RaiseError} = 0; + } +} + +sub commit { shift->{instance}->commit; } + +sub rollback { shift->{instance}->rollback; } + +sub kill { + my $self = shift; + + if (defined($self->{instance})) { + $self->{logger}->writeLogInfo("KILL QUERY\n"); + my $rv = $self->{instance}->do("KILL QUERY " . $self->{instance}->{'mysql_thread_id'}); + if (!$rv) { + my ($package, $filename, $line) = caller; + $self->{logger}->writeLogError("MySQL error : " . $self->{instance}->errstr . 
" (caller: $package:$filename:$line)"); + } + } +} + +# Connection initializer +sub connect { + my ($self, %options) = @_; + my $logger = $self->{logger}; + my $status = 0; + + my $connect_options = {}; + $connect_options = $options{connect_options} if (defined($options{connect_options}) && ref($options{connect_options}) eq "HASH"); + while (1) { + $self->{port} = 3306 if (!defined($self->{port}) && $self->{type} eq 'mysql'); + if ($self->{type} =~ /SQLite/i) { + $self->{instance} = DBI->connect( + "DBI:".$self->{type} + .":".$self->{db}, + $self->{user}, + $self->{password}, + { "RaiseError" => 0, "PrintError" => 0, "AutoCommit" => 1, %{$connect_options} } + ); + } else { + $self->{instance} = DBI->connect( + "DBI:".$self->{type} + .":".$self->{db} + .":".$self->{host} + .":".$self->{port}, + $self->{user}, + $self->{password}, + { "RaiseError" => 0, "PrintError" => 0, "AutoCommit" => 1, %{$connect_options} } + ); + } + if (defined($self->{instance})) { + last; + } + + my ($package, $filename, $line) = caller; + $logger->writeLogError("MySQL error : cannot connect to database " . $self->{db} . ": " . $DBI::errstr . " (caller: $package:$filename:$line)"); + if ($self->{force} == 0) { + $status = -1; + last; + } + sleep(5); + } + return $status; +} + +# Destroy connection +sub disconnect { + my $self = shift; + my $instance = $self->{instance}; + if (defined($instance)) { + $instance->disconnect; + $self->{instance} = undef; + } +} + +sub do { + my ($self, $query) = @_; + + if (!defined $self->{instance}) { + if ($self->connect() == -1) { + $self->{logger}->writeLogError("Can't connect to the database"); + return -1; + } + } + my $numrows = $self->{instance}->do($query); + die $self->{instance}->errstr if !defined $numrows; + return $numrows; +} + +sub error { + my ($self, $error, $query) = @_; + my ($package, $filename, $line) = caller 1; + + chomp($query); + $self->{logger}->writeLogError(<<"EOE"); +MySQL error: $error (caller: $package:$filename:$line) +Query: $query +EOE + $self->disconnect(); + $self->{instance} = undef; +} + +sub query { + my $self = shift; + my $query = shift; + my $logger = $self->{logger}; + my $status = 0; + my $statement_handle; + + while (1) { + if (!defined($self->{instance})) { + $status = $self->connect(); + if ($status != -1) { + for (my $i = 0; $i < scalar(@{$self->{args}}); $i++) { + my $str_quoted = $self->quote(${$self->{args}}[$i]); + $query =~ s/##__ARG__$i##/$str_quoted/; + } + $self->{args} = []; + } + if ($status == -1 && $self->{force} == 0) { + $self->{args} = []; + last; + } + } + + $statement_handle = $self->{instance}->prepare($query); + if (!defined $statement_handle) { + $self->error($self->{instance}->errstr, $query); + $status = -1; + last if $self->{force} == 0; + next; + } + + my $rv = $statement_handle->execute; + if (!$rv) { + $self->error($statement_handle->errstr, $query); + $status = -1; + last if $self->{force} == 0; + next; + } + last; + } + return ($status, $statement_handle); +} + +1; diff --git a/perl-libs/lib/centreon/common/lock.pm b/perl-libs/lib/centreon/common/lock.pm new file mode 100644 index 00000000000..66d482850fb --- /dev/null +++ b/perl-libs/lib/centreon/common/lock.pm @@ -0,0 +1,181 @@ +################################################################################ +# Copyright 2005-2013 Centreon +# Centreon is developped by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. 
+# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. +# +# +#################################################################################### + +package centreon::common::lock; + +use strict; +use warnings; + +sub new { + my ($class, $name, %options) = @_; + my %defaults = (name => $name, pid => $$, timeout => 10); + my $self = {%defaults, %options}; + + bless $self, $class; + return $self; +} + +sub is_set { + die "Not implemented"; +} + +sub set { + my $self = shift; + + for (my $i = 0; $i < $self->{timeout}; $i++) { + return if (!$self->is_set()); + sleep 1; + } + + die "Failed to set lock for $self->{name}"; +} + +package centreon::common::lock::file; + +use base qw(centreon::common::lock); + +sub new { + my $class = shift; + my $self = $class->SUPER::new(@_); + + if (!defined $self->{storagedir}) { + die "Can't build lock, required arguments not provided"; + } + bless $self, $class; + $self->{pidfile} = "$self->{storagedir}/$self->{name}.lock"; + return $self; +} + +sub is_set { + return -e shift->{pidfile}; +} + +sub set { + my $self = shift; + + $self->SUPER::set(); + open LOCK, ">", $self->{pidfile}; + print LOCK $self->{pid}; + close LOCK; +} + +sub DESTROY { + my $self = shift; + + if (defined $self->{pidfile} && -e $self->{pidfile}) { + unlink $self->{pidfile}; + } +} + +package centreon::common::lock::sql; + +use base qw(centreon::common::lock); + +sub new { + my $class = shift; + my $self = $class->SUPER::new(@_); + + if (!defined $self->{dbc}) { + die "Can't build lock, required arguments not provided"; + } + bless $self, $class; + $self->{launch_time} = time(); + return $self; +} + +sub is_set { + my $self = shift; + my ($status, $sth) = $self->{dbc}->query( + "SELECT `id`,`running`,`pid`,`time_launch` FROM `cron_operation` WHERE `name` LIKE '$self->{name}'" + ); + + return 1 if ($status == -1); + my $data = $sth->fetchrow_hashref(); + + if (!defined $data->{id}) { + $self->{not_created_yet} = 1; + $self->{previous_launch_time} = 0; + return 0; + } + $self->{id} = $data->{id}; + my $pid = defined($data->{pid}) ? 
$data->{pid} : -1; + $self->{previous_launch_time} = $data->{time_launch}; + if (defined $data->{running} && $data->{running} == 1) { + my $line = `ps -ef | grep -v grep | grep -- $pid | grep $self->{name}`; + return 0 if !length $line; + return 1; + } + return 0; +} + +sub set { + my $self = shift; + my $status; + + $self->SUPER::set(); + if (defined $self->{not_created_yet}) { + $status = $self->{dbc}->do(<<"EOQ"); +INSERT INTO `cron_operation` +(`name`, `system`, `activate`) +VALUES ('$self->{name}', '1', '1') +EOQ + goto error if $status == -1; + $self->{id} = $self->{dbc}->last_insert_id(); + return; + } + $status = $self->{dbc}->do(<<"EOQ"); +UPDATE `cron_operation` +SET `running` = '1', `time_launch` = '$self->{launch_time}', `pid` = '$self->{pid}' +WHERE `id` = '$self->{id}' +EOQ + goto error if $status == -1; + return; + + error: + die "Failed to set lock for $self->{name}"; +} + +sub DESTROY { + my $self = shift; + + if (defined $self->{dbc}) { + my $exectime = time() - $self->{launch_time}; + $self->{dbc}->do(<<"EOQ"); +UPDATE `cron_operation` +SET `last_execution_time` = '$exectime' +WHERE `id` = '$self->{id}' +EOQ + } +} + +1; diff --git a/perl-libs/lib/centreon/common/logger.pm b/perl-libs/lib/centreon/common/logger.pm new file mode 100644 index 00000000000..db5441e006a --- /dev/null +++ b/perl-libs/lib/centreon/common/logger.pm @@ -0,0 +1,290 @@ +################################################################################ +# Copyright 2005-2013 Centreon +# Centreon is developped by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. 
+# +# +#################################################################################### +# updated by Evan ADAM on 10/2024 + +package centreon::common::logger; +=head1 NOM + +centreon::common::logger - Simple logging module + +=head1 SYNOPSIS + + #!/usr/bin/perl -w + + use strict; + use warnings; + use centreon::common::logger; + + my $logger = new centreon::common::logger(); + + $logger->writeLogInfo("information"); + +=head1 DESCRIPTION + +This module offers a simple interface to write log messages to various output: + +* standard output +* file +* syslog + +=cut + +use strict; +use warnings; +use Sys::Syslog qw(:standard :macros); +use IO::Handle; + +# Fixed the severity internal representation to be +my %human_severities = ( + 2 => 'FATAL', + 3 => 'ERROR', + 4 => 'WARNING', + 5 => 'NOTICE', + 6 => 'INFO', + 7 => 'DEBUG' +); + +sub new { + my $class = shift; + + my $self = bless + { + file => 0, + filehandler => undef, + # warning by default, see %human_severities for the available possibilty + severity => 4, + old_severity => 4, + # 0 = stdout, 1 = file, 2 = syslog + log_mode => 0, + # Output pid of current process + withpid => 0, + # Output date of log + withdate => 1, + # syslog + log_facility => undef, + log_option => LOG_PID, + }, $class; + return $self; +} + +sub file_mode($$) { + my ($self, $file) = @_; + + if (defined($self->{filehandler})) { + $self->{filehandler}->close(); + } + if (open($self->{filehandler}, ">>", $file)){ + $self->{log_mode} = 1; + $self->{filehandler}->autoflush(1); + $self->{file_name} = $file; + return 1; + } + $self->{filehandler} = undef; + print STDERR "Cannot open file $file: $!\n"; + return 0; +} + +sub is_file_mode { + my $self = shift; + + if ($self->{log_mode} == 1) { + return 1; + } + return 0; +} + +sub is_debug { + my $self = shift; + + if ($self->{severity} == 7) { + return 1; + } + return 0; +} + +sub syslog_mode($$$) { + my ($self, $logopt, $facility) = @_; + + $self->{log_mode} = 2; + openlog($0, $logopt, $facility); + return 1; +} + +# For daemons +sub redirect_output { + my $self = shift; + + if ($self->is_file_mode()) { + open my $lfh, '>>', $self->{file_name}; + open STDOUT, '>&', $lfh; + open STDERR, '>&', $lfh; + } +} +# Bypass the buffers set up by the kernel/file system and always write the log +# as soon as it is sent. +sub flush_output { + my ($self, %options) = @_; + + $| = 1 if (defined($options{enabled})); +} + +sub force_default_severity { + my ($self, %options) = @_; + + $self->{old_severity} = defined($options{severity}) ? 
$options{severity} : $self->{severity}; +} + +sub set_default_severity { + my $self = shift; + + $self->{severity} = $self->{old_severity}; +} + +# Getter/Setter Log severity +sub severity { + my $self = shift; + if (@_) { + my $input_severity = lc($_[0]); + my $save_severity = $self->{severity}; + if ($input_severity =~ /^[0234567]$/) { + $self->{severity} = $input_severity; + } elsif ($input_severity eq "none") { + $self->{severity} = 0; + } elsif ($input_severity eq "fatal") { + $self->{severity} = 2; + } elsif ($input_severity eq "error") { + $self->{severity} = 3; + } elsif ($input_severity eq "warning") { + $self->{severity} = 4; + } elsif ($input_severity eq "notice") { + $self->{severity} = 5; + } elsif ($input_severity eq "info") { + $self->{severity} = 6; + } elsif ($input_severity eq "debug") { + $self->{severity} = 7; + } else { + $self->writeLogError("Wrong severity value set."); + return -1; + } + $self->{old_severity} = $save_severity; + } + return $human_severities{$self->{severity}}; +} + +sub withpid { + my $self = shift; + if (@_) { + if ($_[0]){ + $self->{withpid} = 1; + }else{ + $self->{withpid} = 0; + } + + } + return $self->{withpid}; +} + +sub withdate { + my $self = shift; + if (@_) { + $self->{withdate} = $_[0]; + } + return $self->{withdate}; +} + +sub get_date { + my $self = shift; + my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time()); + return sprintf("%04d-%02d-%02d %02d:%02d:%02d", + $year+1900, $mon+1, $mday, $hour, $min, $sec); +} + +sub writeLog($$$%) { + my ($self, $severity, $msg, %options) = @_; + + # do nothing if the configured severity does not imply logging this message + return if ($self->{severity} < $severity); + if (length($msg) > 20000) { + $msg = substr($msg, 0, 20000) . '...'; + } + $msg = ($self->withpid()) ? "$$ - $msg" : $msg; + + my $datedmsg = $human_severities{$severity} . " - " . $msg . "\n"; + if ($self->withdate()) { + $datedmsg = $self->get_date . " - " . $datedmsg; + } + if ($self->{log_mode} == 1 and defined($self->{filehandler})) { + print {$self->{filehandler}} $datedmsg; + } elsif ($self->{log_mode} == 0) { + print $datedmsg; + } elsif ($self->{log_mode} == 2) { + syslog($severity, $msg); + } else { + print STDERR "Unknown log mode '$self->{log_mode}' or log file unavailable for the following log :\n $datedmsg\n"; + } +} + +sub writeLogDebug { + shift->writeLog(7, @_); +} + +sub writeLogInfo { + shift->writeLog(6, @_); +} + +sub writeLogNotice { + shift->writeLog(5, @_); +} + +sub writeLogWarning { + shift->writeLog(4, @_); +} + +sub writeLogError { + shift->writeLog(3, @_); +} + +sub writeLogFatal { + shift->writeLog(2, @_); + die("FATAL: " . $_[0] . "\n"); +} + +sub DESTROY { + my $self = shift; + + if (defined $self->{filehandler}) { + $self->{filehandler}->close(); + } +} + +1; diff --git a/perl-libs/lib/centreon/common/misc.pm b/perl-libs/lib/centreon/common/misc.pm new file mode 100644 index 00000000000..7aec0449b1e --- /dev/null +++ b/perl-libs/lib/centreon/common/misc.pm @@ -0,0 +1,275 @@ +################################################################################ +# Copyright 2005-2013 Centreon +# Centreon is developped by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. 
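As a usage sketch for the logger defined above: by default, messages go to stdout at WARNING level and above; file_mode() switches output to a file and severity() adjusts the threshold. The log path below is only an example.

```perl
#!/usr/bin/perl
# Sketch only: basic use of centreon::common::logger.
use strict;
use warnings;

use centreon::common::logger;

my $logger = centreon::common::logger->new();

# Send log lines to a file instead of stdout (example path).
$logger->file_mode('/var/log/centreon/my-daemon.log');
$logger->severity('info');   # log INFO and more severe; DEBUG stays filtered out
$logger->withpid(1);         # prefix each line with the current PID

$logger->writeLogInfo('daemon started');
$logger->writeLogDebug('this line is filtered out at INFO level');
$logger->writeLogError('something went wrong');

# writeLogFatal() logs the message and then die()s.
```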
+# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. +# +# +#################################################################################### + +package centreon::common::misc; + +use strict; +use warnings; +use vars qw($centreon_config); +use POSIX ":sys_wait_h"; + +my $read_size = 1*1024*1024*10; # 10Mo + +sub reload_db_config { + my ($logger, $config_file, $cdb, $csdb) = @_; + my ($cdb_mod, $csdb_mod) = (0, 0); + + unless (my $return = do $config_file) { + $logger->writeLogError("couldn't parse $config_file: $@") if $@; + $logger->writeLogError("couldn't do $config_file: $!") unless defined $return; + $logger->writeLogError("couldn't run $config_file") unless $return; + return -1; + } + + if (defined($cdb)) { + if ($centreon_config->{centreon_db} ne $cdb->db() || + $centreon_config->{db_host} ne $cdb->host() || + $centreon_config->{db_user} ne $cdb->user() || + $centreon_config->{db_passwd} ne $cdb->password() || + (defined($centreon_config->{db_port}) && $centreon_config->{db_port} ne $cdb->port())) { + $logger->writeLogInfo("Database centreon config had been modified"); + $cdb->db($centreon_config->{centreon_db}); + $cdb->host($centreon_config->{db_host}); + $cdb->user($centreon_config->{db_user}); + $cdb->password($centreon_config->{db_passwd}); + $cdb->port($centreon_config->{db_port}); + $cdb_mod = 1; + } + } + + if (defined($csdb)) { + if ($centreon_config->{centstorage_db} ne $csdb->db() || + $centreon_config->{db_host} ne $csdb->host() || + $centreon_config->{db_user} ne $csdb->user() || + $centreon_config->{db_passwd} ne $csdb->password() || + (defined($centreon_config->{db_port}) && $centreon_config->{db_port} ne $csdb->port())) { + $logger->writeLogInfo("Database centstorage config had been modified"); + $csdb->db($centreon_config->{centstorage_db}); + $csdb->host($centreon_config->{db_host}); + $csdb->user($centreon_config->{db_user}); + $csdb->password($centreon_config->{db_passwd}); + $csdb->port($centreon_config->{db_port}); + $csdb_mod = 1; + } + } + + return (0, $cdb_mod, $csdb_mod); +} + +sub get_all_options_config { + my ($extra_config, $centreon_db_centreon, $prefix) = @_; + + my $save_force = $centreon_db_centreon->force(); + $centreon_db_centreon->force(0); + + my ($status, $stmt) = $centreon_db_centreon->query("SELECT 
`key`, `value` FROM options WHERE `key` LIKE " . $centreon_db_centreon->quote($prefix . "_%") . " LIMIT 1"); + if ($status == -1) { + $centreon_db_centreon->force($save_force); + return ; + } + while ((my $data = $stmt->fetchrow_hashref())) { + if (defined($data->{value}) && length($data->{value}) > 0) { + $data->{key} =~ s/^${prefix}_//; + $extra_config->{$data->{key}} = $data->{value}; + } + } + + $centreon_db_centreon->force($save_force); +} + +sub get_option_config { + my ($extra_config, $centreon_db_centreon, $prefix, $key) = @_; + my $data; + + my $save_force = $centreon_db_centreon->force(); + $centreon_db_centreon->force(0); + + my ($status, $stmt) = $centreon_db_centreon->query("SELECT value FROM options WHERE `key` = " . $centreon_db_centreon->quote($prefix . "_" . $key) . " LIMIT 1"); + if ($status == -1) { + $centreon_db_centreon->force($save_force); + return ; + } + if (($data = $stmt->fetchrow_hashref()) && defined($data->{value})) { + $extra_config->{$key} = $data->{value}; + } + + $centreon_db_centreon->force($save_force); +} + +sub check_debug { + my ($logger, $key, $cdb, $name) = @_; + + my $request = "SELECT value FROM options WHERE `key` = " . $cdb->quote($key); + my ($status, $sth) = $cdb->query($request); + return -1 if ($status == -1); + my $data = $sth->fetchrow_hashref(); + if (defined($data->{'value'}) && $data->{'value'} == 1) { + if (!$logger->is_debug()) { + $logger->severity("debug"); + $logger->writeLogInfo("Enable Debug in $name"); + } + } else { + if ($logger->is_debug()) { + $logger->set_default_severity(); + $logger->writeLogInfo("Disable Debug in $name"); + } + } + return 0; +} + +sub get_line_file { + my ($fh, $datas, $readed) = @_; + my $line; + my $size = scalar(@$datas); + + return (1, shift(@$datas)) if ($size > 1); + while ((my $eof = sysread($fh, $line, $read_size))) { + my @result = split("\n", $line); + if ($line =~ /\n$/) { + push @result, ""; + } + if ($size == 1) { + $$datas[0] .= shift(@result); + } + push @$datas, @result; + $$readed += $eof; + $size = scalar(@$datas); + if ($size > 1) { + return (1, shift(@$datas)); + } + } + return (1, shift(@$datas)) if ($size > 1); + return -1; +} + +sub get_line_pipe { + my ($fh, $datas, $read_done) = @_; + my $line; + my $size = scalar(@$datas); + + if ($size > 1) { + return (1, shift(@$datas)); + } elsif ($size == 1 && $$read_done == 1) { + return 0; + } + while ((my $eof = sysread($fh, $line, 10000))) { + $$read_done = 1; + my @result = split("\n", $line); + if ($line =~ /\n$/) { + push @result, ""; + } + if ($size == 1) { + $$datas[0] .= shift(@result); + } + push @$datas, @result; + $size = scalar(@$datas); + if ($size > 1) { + return (1, shift(@$datas)); + } else { + return 0; + } + } + return -1; +} + +sub backtick { + my %arg = ( + command => undef, + logger => undef, + timeout => 30, + wait_exit => 0, + @_, + ); + my @output; + my $pid; + my $return_code; + + my $sig_do; + if ($arg{wait_exit} == 0) { + $sig_do = 'IGNORE'; + $return_code = undef; + } else { + $sig_do = 'DEFAULT'; + } + local $SIG{CHLD} = $sig_do; + if (!defined($pid = open( KID, "-|" ))) { + $arg{logger}->writeLogError("Cant fork: $!"); + return -1; + } + + if ($pid) { + eval { + local $SIG{ALRM} = sub { die "Timeout by signal ALARM\n"; }; + alarm( $arg{timeout} ); + while () { + chomp; + push @output, $_; + } + + alarm(0); + }; + if ($@) { + $arg{logger}->writeLogInfo($@); + + $arg{logger}->writeLogInfo("Killing child process [$pid] ..."); + if ($pid != -1) { + kill -9, $pid; + } + $arg{logger}->writeLogInfo("Killed"); + + 
alarm(0); + close KID; + return (-1, join("\n", @output), -1); + } else { + if ($arg{wait_exit} == 1) { + # We're waiting the exit code + waitpid($pid, 0); + $return_code = ($? >> 8); + } + close KID; + } + } else { + # child + # set the child process to be a group leader, so that + # kill -9 will kill it and all its descendents + setpgrp(0, 0); + + exec($arg{command}); + # Exec is in error. No such command maybe. + exit(127); + } + + return (0, join("\n", @output), $return_code); +} + +1; diff --git a/perl-libs/lib/centreon/health/checkbroker.pm b/perl-libs/lib/centreon/health/checkbroker.pm new file mode 100644 index 00000000000..7080877d983 --- /dev/null +++ b/perl-libs/lib/centreon/health/checkbroker.pm @@ -0,0 +1,103 @@ +# +# Copyright 2017 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package centreon::health::checkbroker; + +use strict; +use warnings; +use JSON; +use POSIX qw(strftime); +use centreon::common::misc; +use centreon::health::ssh; + +sub new { + my $class = shift; + my $self = {}; + $self->{output} = {}; + + bless $self, $class; + return $self; +} + +sub json_parsing { + my ($self, %options) = @_; + + my $json_content = JSON->new->decode($options{json_content}); + foreach my $key (keys %$json_content) { + if ($key =~ m/^endpoint/) { + foreach my $broker_metric (keys %{$json_content->{$key}}) { + next if ($broker_metric !~ m/version|event_processing|last_connection|queued|state/); + $self->{output}->{$options{poller_name}}->{$options{file_name}}->{$broker_metric} = ($broker_metric =~ m/^last_connection/ && $json_content->{$key}->{$broker_metric} != -1) + ? strftime("%m/%d/%Y %H:%M:%S",localtime($json_content->{$key}->{$broker_metric})) + : $json_content->{$key}->{$broker_metric} ; + } + } elsif ($key =~ m/version/) { + $self->{output}->{$options{poller_name}}->{$options{file_name}}->{$key} = $json_content->{$key}; + } + + } + + return $self->{output} + +} + +sub run { + my $self = shift; + my ($centreon_db, $server_list, $centreon_version) = @_; + + my $sth; + + if ($centreon_version ne "2.8") { + $self->{output}->{not_compliant} = "Incompatible file format, work only with JSON format"; + return $self->{output} + } + + return if ($centreon_version ne "2.8"); + foreach my $server (keys %$server_list) { + $sth = $centreon_db->query("SELECT config_name, cache_directory + FROM cfg_centreonbroker + WHERE stats_activate='1' + AND ns_nagios_server=".$centreon_db->quote($server).""); + + if ($server_list->{$server}->{localhost} eq "YES") { + while (my $row = $sth->fetchrow_hashref()) { + my ($lerror, $stdout) = centreon::common::misc::backtick(command => "cat " . $row->{cache_directory} . "/" . $row->{config_name} . "-stats.json"); + $self->{output} = $self->json_parsing(json_content => $stdout, + poller_name => $server_list->{$server}->{name}, + file_name => $row->{config_name}. 
"-stats.json"); + } + } else { + while (my $row = $sth->fetchrow_hashref()) { + my $stdout = centreon::health::ssh->new->main(host => $server_list->{$server}->{address}, + port => $server_list->{$server}->{ssh_port}, + userdata => $row->{cache_directory} . "/" . $row->{config_name} . "-stats.json", + command => "cat " . $row->{cache_directory} . "/" . $row->{config_name} . "-stats.json"); + $self->{output} = $self->json_parsing(json_content => $stdout, + poller_name => $server_list->{$server}->{name}, + file_name => $row->{config_name}. "-stats.json"); + + } + } + + } + return $self->{output} +} + +1; diff --git a/perl-libs/lib/centreon/health/checkdb.pm b/perl-libs/lib/centreon/health/checkdb.pm new file mode 100644 index 00000000000..d3d23a00a72 --- /dev/null +++ b/perl-libs/lib/centreon/health/checkdb.pm @@ -0,0 +1,95 @@ +# +# Copyright 2017 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package centreon::health::checkdb; + +use strict; +use warnings; +use POSIX qw(strftime); +use centreon::health::misc; + +sub new { + my $class = shift; + my $self = {}; + $self->{output} = {}; + + bless $self, $class; + return $self; +} + +sub run { + my $self = shift; + my ($centreon_db, $centstorage_db, $centstorage_db_name, $flag, $logger) = @_; + my $size = 0; + my ($sth, $status); + + foreach my $db_name ('centreon', $centstorage_db_name) { + $sth = $centreon_db->query("SELECT table_schema AS db_name, SUM(data_length+index_length) AS db_size + FROM information_schema.tables + WHERE table_schema=".$centreon_db->quote($db_name).""); + while (my $row = $sth->fetchrow_hashref) { + $self->{output}->{db_size}->{$row->{db_name}} = centreon::health::misc::format_bytes(bytes_value => $row->{db_size}); + } + next if $db_name !~ /$centstorage_db_name/; + foreach my $table ('data_bin', 'logs', 'log_archive_host', 'log_archive_service', 'downtimes') { + + $sth = $centreon_db->query("SELECT table_name, SUM(data_length+index_length) AS table_size + FROM information_schema.tables + WHERE table_schema=".$centreon_db->quote($db_name)." + AND table_name=".$centreon_db->quote($table).""); + + while (my $row = $sth->fetchrow_hashref()) { + $self->{output}->{table_size}->{$row->{table_name}} = centreon::health::misc::format_bytes(bytes_value =>$row->{table_size}); + } + + next if ($table =~ m/downtimes/); + $sth = $centreon_db->query("SELECT MAX(CONVERT(PARTITION_DESCRIPTION, SIGNED INTEGER)) as lastPart + FROM INFORMATION_SCHEMA.PARTITIONS + WHERE TABLE_NAME='" . $table . "' + AND TABLE_SCHEMA='" . $db_name . "' GROUP BY TABLE_NAME;"); + + while (my $row = $sth->fetchrow_hashref()) { + $self->{output}->{partitioning_last_part}->{$table} = defined($row->{lastPart}) ? strftime("%m/%d/%Y %H:%M:%S",localtime($row->{lastPart})) : $table . 
" has no partitioning !"; + } + } + + } + + my $var_list = { 'innodb_file_per_table' => 0, + 'open_files_limit' => 0, + 'read_only' => 0, + 'key_buffer_size' => 1, + 'sort_buffer_size' => 1, + 'join_buffer_size' => 1, + 'read_buffer_size' => 1, + 'read_rnd_buffer_size' => 1, + 'max_allowed_packet' => 1 }; + + foreach my $var (keys %{$var_list}) { + my $sth = $centreon_db->query("SHOW GLOBAL VARIABLES LIKE " . $centreon_db->quote($var)); + my $value = $sth->fetchrow(); + $self->{output}->{interesting_variables}->{$var} = ($var_list->{$var} == 1) ? centreon::health::misc::format_bytes(bytes_value => $value) : $value; + } + + return $self->{output}; + +} + +1; diff --git a/perl-libs/lib/centreon/health/checklogs.pm b/perl-libs/lib/centreon/health/checklogs.pm new file mode 100644 index 00000000000..e54e1925e4f --- /dev/null +++ b/perl-libs/lib/centreon/health/checklogs.pm @@ -0,0 +1,102 @@ +# +# Copyright 2017 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package centreon::health::checklogs; + +use strict; +use warnings; +use POSIX qw(strftime); +use centreon::common::misc; +use centreon::health::ssh; + +sub new { + my $class = shift; + my $self = {}; + $self->{logs_path_broker} = {}; + $self->{logs_path_engine} = {}; + $self->{output} = {}; + + bless $self, $class; + return $self; +} + +sub run { + my $self = shift; + my ($centreon_db, $server_list, $centreon_version, $logger) = @_; + + my $sth; + my ($lerror, $stdout); + + foreach my $server (keys %$server_list) { + $sth = $centreon_db->query("SELECT log_file + FROM cfg_nagios + WHERE nagios_id=" . $centreon_db->quote($server)); + + while (my $row = $sth->fetchrow_hashref) { + push @{$self->{logs_path_engine}->{engine}->{$server_list->{$server}->{name}}}, $row->{log_file}; + } + + foreach my $log_file (@{$self->{logs_path_engine}->{engine}->{$server_list->{$server}->{name}}}) { + if ($server_list->{$server}->{localhost} eq "YES") { + ($lerror, $self->{output}->{$server_list->{$server}->{name}}->{engine}->{$log_file}) = centreon::common::misc::backtick(command => "tail -n20 " . $log_file); + } else { + $self->{output}->{$server_list->{$server}->{name}}->{engine}->{$log_file} = centreon::health::ssh->new->main(host => $server_list->{$server}->{address}, + port => $server_list->{$server}->{ssh_port}, + userdata => $log_file, + command => "tail -n20 " . $log_file); + } + } + + $sth = $centreon_db->query("SELECT DISTINCT(config_value) FROM cfg_centreonbroker, cfg_centreonbroker_info + WHERE config_group='logger' AND config_key='name' + AND cfg_centreonbroker.config_id=cfg_centreonbroker_info.config_id + AND cfg_centreonbroker.ns_nagios_server=" . 
$centreon_db->quote($server)); + + while (my $row = $sth->fetchrow_hashref) { + push @{$self->{logs_path_broker}->{broker}->{$server_list->{$server}->{name}}}, $row->{config_value}; + } + + foreach my $log_file (@{$self->{logs_path_broker}->{broker}->{$server_list->{$server}->{name}}}) { + if ($server_list->{$server}->{localhost} eq "YES") { + ($lerror, $self->{output}->{$server_list->{$server}->{name}}->{broker}->{$log_file}) = centreon::common::misc::backtick(command => "tail -n20 " . $log_file); + } else { + $self->{output}->{$server_list->{$server}->{name}}->{broker}->{$log_file} = centreon::health::ssh->new->main(host => $server_list->{$server}->{address}, + port => $server_list->{$server}->{ssh_port}, + userdata => $log_file, + command => "tail -n20 " . $log_file); + } + } + + if ($server_list->{$server}->{localhost} eq "YES") { + $sth = $centreon_db->query("SELECT `value` FROM options WHERE `key`='debug_path'"); + + my $centreon_log_path = $sth->fetchrow(); + ($lerror, $stdout) = centreon::common::misc::backtick(command => "find " . $centreon_log_path . " -type f -name *.log"); + + foreach my $log_file (split '\n', $stdout) { + ($lerror, $self->{output}->{$server_list->{$server}->{name}}->{centreon}->{$log_file}) = centreon::common::misc::backtick(command => "tail -n10 " . $log_file); + } + } + } + + return $self->{output} +} + +1; diff --git a/perl-libs/lib/centreon/health/checkmodules.pm b/perl-libs/lib/centreon/health/checkmodules.pm new file mode 100644 index 00000000000..0d749bbf7df --- /dev/null +++ b/perl-libs/lib/centreon/health/checkmodules.pm @@ -0,0 +1,53 @@ +# +# Copyright 2017 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package centreon::health::checkmodules; + +use strict; +use warnings; +use centreon::health::misc; + +sub new { + my $class = shift; + my $self = {}; + $self->{output} = {}; + + bless $self, $class; + return $self; +} + +sub run { + my $self = shift; + my ($centreon_db, $logger) = @_; + my $size = 0; + my ($sth, $status); + + $sth = $centreon_db->query("SELECT name, rname, author, mod_release FROM modules_informations"); + while (my $row = $sth->fetchrow_hashref) { + $self->{output}->{$row->{name}}{full_name} = $row->{rname}; + $self->{output}->{$row->{name}}{author} = $row->{author}; + $self->{output}->{$row->{name}}{version} = $row->{mod_release}; + } + + return $self->{output}; + +} + +1; diff --git a/perl-libs/lib/centreon/health/checkrrd.pm b/perl-libs/lib/centreon/health/checkrrd.pm new file mode 100644 index 00000000000..f0079972610 --- /dev/null +++ b/perl-libs/lib/centreon/health/checkrrd.pm @@ -0,0 +1,94 @@ +# +# Copyright 2017 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package centreon::health::checkrrd; + +use strict; +use warnings; +use centreon::common::misc; +use centreon::health::misc; + +sub new { + my $class = shift; + my $self = {}; + $self->{rrd_metrics} = undef; + $self->{rrd_status} = undef; + $self->{output} = {}; + + bless $self, $class; + return $self; +} + +sub get_rrd_path { + my ($self, %options) = @_; + my ($sth, $status); + + $sth = $options{csdb}->query("SELECT RRDdatabase_path, RRDdatabase_status_path FROM config"); + + while (my $row = $sth->fetchrow_hashref()) { + $self->{rrd_metrics} = $row->{RRDdatabase_path}; + $self->{rrd_status} = $row->{RRDdatabase_status_path}; + } + +} + +sub get_rrd_infos { + my $self = shift; + + my ($lerror, $size_metrics, $size_status, $count_last_written, $count_outdated_rrd, $count_metrics, $count_status); + + if (-d $self->{rrd_metrics}) { + ($lerror, $size_metrics) = centreon::common::misc::backtick(command => "du -sb " . $self->{rrd_metrics}); + ($lerror, $count_metrics) = centreon::common::misc::backtick(command => "ls -l " . $self->{rrd_metrics} . " | wc -l"); + ($lerror, $count_last_written) = centreon::common::misc::backtick(command => "find " . $self->{rrd_metrics} . " -type f -mmin 5 | wc -l"); + ($lerror, $count_outdated_rrd) = centreon::common::misc::backtick(command => "find " . $self->{rrd_metrics} . " -type f -mmin +288000 | wc -l"); + } else { + $count_metrics = 0; + $size_metrics = 0; + $count_last_written = "ERROR, Directory " . $self->{rrd_metrics} . " does not exist !\n"; + $count_outdated_rrd = "ERROR, Directory " . $self->{rrd_metrics} . " does not exist !\n"; + } + if (-d $self->{rrd_status}) { + ($lerror, $size_status) = centreon::common::misc::backtick(command => "du -sb " . $self->{rrd_status}); + ($lerror, $count_status) = centreon::common::misc::backtick(command => "ls -l " . $self->{rrd_status} . 
" | wc -l"); + } else { + $count_status = 0; + $size_status = 0; + } + + $self->{output}->{$self->{rrd_metrics}}{size} = centreon::health::misc::format_bytes(bytes_value => $size_metrics); + $self->{output}->{$self->{rrd_status}}{size} = centreon::health::misc::format_bytes(bytes_value => $size_status); + $self->{output}->{rrd_written_last_5m} = $count_last_written; + $self->{output}->{rrd_not_updated_since_180d} = $count_outdated_rrd; + $self->{output}->{$self->{rrd_metrics}}{count} = $count_metrics; + $self->{output}->{$self->{rrd_status}}{count} = $count_status; +} + +sub run { + my $self = shift; + my ($centstorage_db, $flag, $logger) = @_; + + $self->get_rrd_path(csdb => $centstorage_db); + $self->get_rrd_infos(); + + return $self->{output} +} + +1; diff --git a/perl-libs/lib/centreon/health/checkservers.pm b/perl-libs/lib/centreon/health/checkservers.pm new file mode 100644 index 00000000000..bb81f9037c1 --- /dev/null +++ b/perl-libs/lib/centreon/health/checkservers.pm @@ -0,0 +1,137 @@ +# +# Copyright 2017 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package centreon::health::checkservers; + +use strict; +use warnings; +use integer; +use POSIX qw(strftime); + +sub new { + my $class = shift; + my $self = {}; + $self->{output} = {}; + + bless $self, $class; + return $self; +} + +sub query_misc { + my ($self, %options) = @_; + my ($sth, $status); + + ($status, $sth) = $options{cdb}->query($options{query}); + + if ($status == -1) { + return "Query error - information is not available\n"; + } else { + return $sth->fetchrow() + } + +} + +sub get_servers_informations { + my ($self, %options) = @_; + + my $sth = $options{cdb}->query("SELECT id, name, localhost, ns_ip_address, ssh_port + FROM nagios_server WHERE ns_activate='1'"); + while (my $row = $sth->fetchrow_hashref()) { + $self->{output}->{poller}->{$row->{id}}{name} = $row->{name}; + $self->{output}->{poller}->{$row->{id}}{localhost} = ($row->{localhost} == 1) ? "YES" : "NO"; + $self->{output}->{poller}->{$row->{id}}{address} = $row->{ns_ip_address}; + $self->{output}->{poller}->{$row->{id}}{ssh_port} = (defined($row->{ssh_port})) ? $row->{ssh_port} : "-"; + $self->{output}->{poller}->{$row->{id}}{id} = $row->{id}; + } + + foreach my $id (keys %{$self->{output}->{poller}}) { + $self->{output}->{global}->{count_poller}++; + $sth = $options{csdb}->query("SELECT COUNT(DISTINCT hosts.host_id) as num_hosts, count(DISTINCT services.host_id, services.service_id) as num_services + FROM hosts, services WHERE services.host_id=hosts.host_id + AND hosts.enabled=1 + AND services.enabled=1 + AND hosts.instance_id=".$options{cdb}->quote($id)." 
+ AND hosts.name NOT LIKE '%Module%'"); + + while (my $row = $sth->fetchrow_hashref()) { + $self->{output}->{poller}{$id}{hosts} = $row->{num_hosts}; + $self->{output}->{poller}{$id}{services} = $row->{num_services}; + $self->{output}->{global}{count_hosts} += $row->{num_hosts}; + $self->{output}->{global}{count_services} += $row->{num_services}; + } + + $sth = $options{csdb}->query("SELECT COUNT(DISTINCT hosts.host_id) as num_hosts, count(DISTINCT services.host_id, services.service_id) as num_services + FROM hosts, services WHERE services.host_id=hosts.host_id + AND hosts.enabled=1 + AND services.enabled=1 + AND hosts.instance_id=".$options{cdb}->quote($id).""); + + $sth = $options{csdb}->query("SELECT * + FROM instances + WHERE instance_id = " . $options{cdb}->quote($id) . ""); + + while (my $row = $sth->fetchrow_hashref()) { + $self->{output}->{poller}{$row->{instance_id}}{uptime} = centreon::health::misc::change_seconds(value => $row->{last_alive} - $row->{start_time}); + $self->{output}->{poller}{$row->{instance_id}}{running} = ($row->{running} == 1) ? "YES" : "NO"; + $self->{output}->{poller}{$row->{instance_id}}{start_time} = strftime("%m/%d/%Y %H:%M:%S",localtime($row->{start_time})); + $self->{output}->{poller}{$row->{instance_id}}{last_alive} = strftime("%m/%d/%Y %H:%M:%S",localtime($row->{last_alive})); + $self->{output}->{poller}{$row->{instance_id}}{last_command_check} = strftime("%m/%d/%Y %H:%M:%S",localtime($row->{last_command_check})); + $self->{output}->{poller}{$row->{instance_id}}{engine} = $row->{engine}; + $self->{output}->{poller}{$row->{instance_id}}{version} = $row->{version}; + } + + $sth = $options{csdb}->query("SELECT stat_key, stat_value, stat_label + FROM nagios_stats + WHERE instance_id = " . $options{cdb}->quote($id) . ""); + + while (my $row = $sth->fetchrow_hashref()) { + $self->{output}->{poller}->{$id}->{engine_stats}->{$row->{stat_label}}->{$row->{stat_key}} = $row->{stat_value}; + } + + $self->{output}->{global}->{hosts_by_poller_avg} = ($self->{output}->{global}->{count_poller} != 0) ? $self->{output}->{global}->{count_hosts} / $self->{output}->{global}->{count_poller} : '0'; + $self->{output}->{global}->{services_by_poller_avg} = ($self->{output}->{global}->{count_poller} != 0) ? $self->{output}->{global}->{count_services} / $self->{output}->{global}->{count_poller} : '0'; + $self->{output}->{global}->{services_by_host_avg} = ($self->{output}->{global}->{count_hosts} != 0) ? $self->{output}->{global}->{count_services} / $self->{output}->{global}->{count_hosts} : '0'; + $self->{output}->{global}->{metrics_by_service_avg} = ($self->{output}->{global}->{count_services} != 0) ? $self->{output}->{global}->{count_metrics} / $self->{output}->{global}->{count_services} : '0'; + } +} + + +sub run { + my $self = shift; + my ($centreon_db, $centstorage_db, $centreon_version) = @_; + + my $query_misc = { count_pp => [$centreon_db, $centreon_version eq '2.7' ? 
"SELECT count(*) FROM mod_pluginpack" : "SELECT count(*) FROM mod_ppm_pluginpack"], + count_downtime => [$centreon_db, "SELECT count(*) FROM downtime"], + count_modules => [$centreon_db, "SELECT count(*) FROM modules_informations"], + centreon_version => [$centreon_db, "SELECT value FROM informations LIMIT 1"], + count_metrics => [$centstorage_db, "SELECT count(*) FROM metrics"] }; + + foreach my $info (keys %$query_misc) { + my $result = $self->query_misc(cdb => $query_misc->{$info}[0], + query => $query_misc->{$info}[1] ); + $self->{output}->{global}->{$info} = $result; + } + + $self->get_servers_informations(cdb => $centreon_db, csdb => $centstorage_db); + + return $self->{output} +} + +1; diff --git a/perl-libs/lib/centreon/health/checksystems.pm b/perl-libs/lib/centreon/health/checksystems.pm new file mode 100644 index 00000000000..dfb4d7b9979 --- /dev/null +++ b/perl-libs/lib/centreon/health/checksystems.pm @@ -0,0 +1,133 @@ +# +# Copyright 2017 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package centreon::health::checksystems; + +use strict; +use warnings; +use centreon::common::misc; +use centreon::health::ssh; + +sub new { + my $class = shift; + my $self = {}; + $self->{cmd_system_health} = []; + $self->{output} = {}; + + bless $self, $class; + return $self; +} + +sub build_command_hash { + my ($self, %options) = @_; + + if ($options{medium} eq "snmp") { + $self->{cmd_system_health} = [ { cmd => "/usr/lib/" . $options{plugin_path} . "/plugins/" . $options{plugins} . " --plugin os::linux::snmp::plugin --mode cpu-detailed \\ + --hostname localhost \\ + --statefile-suffix='_diag-cpu' \\ + --filter-perfdata='^(?!(wait|guest|user|softirq|kernel|interrupt|guestnice|idle|steal|system|nice))' \\ + --snmp-community " . $options{community} , + callback => \¢reon::health::ssh::ssh_callback, + userdata => "cpu_usage" }, + { cmd => "/usr/lib/" . $options{plugin_path} . "/plugins/" . $options{plugins} . " --plugin os::linux::snmp::plugin --mode load \\ + --hostname localhost \\ + --filter-perfdata='^(?!(load))' \\ + --snmp-community " . $options{community}, + callback => \¢reon::health::ssh::ssh_callback, + userdata => "load" }, + { cmd => "/usr/lib/" . $options{plugin_path} . "/plugins/" . $options{plugins} . " --plugin os::linux::snmp::plugin --mode memory \\ + --hostname localhost \\ + --filter-perfdata='^(?!(cached|buffer|used))' \\ + --snmp-community " . $options{community}, + callback => \¢reon::health::ssh::ssh_callback, + userdata => "mem_usage" }, + { cmd => "/usr/lib/" . $options{plugin_path} . "/plugins/" . $options{plugins} . " --plugin os::linux::snmp::plugin --mode swap \\ + --hostname localhost \\ + --filter-perfdata='^(?!(used))' \\ + --snmp-community " . $options{community}, + callback => \¢reon::health::ssh::ssh_callback, + userdata => "swap_usage" }, + { cmd => "/usr/lib/" . 
$options{plugin_path} . "/plugins/" . $options{plugins} . " --plugin os::linux::snmp::plugin --mode storage \\ + --hostname localhost \\ + --storage='^(?!(/dev/shm|/sys/fs/cgroup|/boot|/run.*))' --name --regexp \\ + --filter-perfdata='^(?!(used))' --statefile-suffix='_diag-storage' \\ + --verbose \\ + --snmp-community " . $options{community}, + callback => \¢reon::health::ssh::ssh_callback, + userdata => "storage_usage" }, + ]; + } else { + return -1 + } + + +} + +sub get_remote_infos { + my ($self, %options) = @_; + + + return centreon::health::ssh::new->main(host => $options{host}, port => $options{ssh_port}, command_pool => $self->{cmd_system_health}); + +} + +sub get_local_infos { + my ($self, %options) = @_; + my ($lerror, $stdout); + + my $results; + + foreach my $command (@{$self->{cmd_system_health}}) { + ($lerror, $stdout) = centreon::common::misc::backtick(command => $command->{cmd}); + $results->{$command->{userdata}} = $stdout; + while ($stdout =~ m/Buffer creation/) { + # Replay command to bypass cache creation + sleep 1; + ($lerror, $stdout) = centreon::common::misc::backtick(command => $command->{cmd}); + $results->{$command->{userdata}} = $stdout; + } + } + + return $results + +} + +sub run { + my $self = shift; + my ($server_list, $medium, $community, $centreon_ver) = @_; + + $self->build_command_hash(medium => $medium, + plugins => ($centreon_ver eq 2.8) ? 'centreon_linux_snmp.pl' : 'centreon_plugins.pl', + plugin_path => ($centreon_ver eq 2.8) ? 'centreon' : 'nagios', + community => $community); + + foreach my $server (keys %$server_list) { + my $name = $server_list->{$server}->{name}; + if ($server_list->{$server}->{localhost} eq "NO") { + $self->{output}->{$name} = $self->get_remote_infos(host => $server_list->{$server}->{address}, ssh_port => $server_list->{$server}->{ssh_port}); + } else { + $self->{output}->{$name} = $self->get_local_infos(poller_name => $name); + } + } + + return $self->{output} +} + +1; diff --git a/perl-libs/lib/centreon/health/misc.pm b/perl-libs/lib/centreon/health/misc.pm new file mode 100644 index 00000000000..8b9b9ac684c --- /dev/null +++ b/perl-libs/lib/centreon/health/misc.pm @@ -0,0 +1,103 @@ +# +# Copyright 2017 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package centreon::health::misc; + +use strict; +use warnings; +use Libssh::Session qw(:all); + +sub get_ssh_connection { + my %options = @_; + + my $session = Libssh::Session->new(); + if ($session->options(host => $options{host}, port => $options{port}, user => $options{user}) != SSH_OK) { + print $session->error() . "\n"; + return 1 + } + + if ($session->connect() != SSH_OK) { + print $session->error() . 
"\n"; + return 1 + } + + if ($session->auth_publickey_auto() != SSH_AUTH_SUCCESS) { + printf("auth issue pubkey: %s\n", $session->error(GetErrorSession => 1)); + if ($session->auth_password(password => $options{password}) != SSH_AUTH_SUCCESS) { + printf("auth issue: %s\n", $session->error(GetErrorSession => 1)); + return 1 + } + } + + return $session + +} + +sub change_seconds { + my %options = @_; + my ($str, $str_append) = ('', ''); + my $periods = [ + { unit => 'y', value => 31556926 }, + { unit => 'M', value => 2629743 }, + { unit => 'w', value => 604800 }, + { unit => 'd', value => 86400 }, + { unit => 'h', value => 3600 }, + { unit => 'm', value => 60 }, + { unit => 's', value => 1 }, + ]; + my %values = ('y' => 1, 'M' => 2, 'w' => 3, 'd' => 4, 'h' => 5, 'm' => 6, 's' => 7); + + foreach (@$periods) { + next if (defined($options{start}) && $values{$_->{unit}} < $values{$options{start}}); + my $count = int($options{value} / $_->{value}); + + next if ($count == 0); + $str .= $str_append . $count . $_->{unit}; + $options{value} = $options{value} % $_->{value}; + $str_append = ' '; + } + + return $str; +} + +sub format_bytes { + my (%options) = @_; + my $size = $options{bytes_value}; + $size =~ s/\D//g; + + if ($size > 1099511627776) { + return sprintf("%.0fT", $size / 1099511627776); + } + elsif ($size > 1073741824) { + return sprintf("%.0fG", $size / 1073741824); + } + elsif ($size > 1048576) { + return sprintf("%.0fM", $size / 1048576); + } + elsif ($size > 1024) { + return sprintf("%.0fK", $size / 1024); + } + else { + return sprintf("%.0fB", $size); + } +} + + +1; diff --git a/perl-libs/lib/centreon/health/output.pm b/perl-libs/lib/centreon/health/output.pm new file mode 100644 index 00000000000..0b5ff667b74 --- /dev/null +++ b/perl-libs/lib/centreon/health/output.pm @@ -0,0 +1,307 @@ +# +# Copyright 2017 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package centreon::health::output; + +use strict; +use warnings; +use JSON; + +sub new { + my $class = shift; + my $self = {}; + + bless $self, $class; + return $self; +} + +sub output_text { + my ($self, %options) = @_; + + my $output; + + $output = "\t ===CENTREON_HEALTH TEXT OUTPUT=== \n\n"; + $output .= "\t\t CENTREON OVERVIEW\n\n"; + $output .= "Centreon Version: " . $options{data}->{server}->{global}->{centreon_version} . "\n"; + $output .= "Number of pollers: " . $options{data}->{server}->{global}->{count_poller} . "\n"; + $output .= "Number of hosts: " . $options{data}->{server}->{global}->{count_hosts} . "\n"; + $output .= "Number of services: " . $options{data}->{server}->{global}->{count_services} . "\n"; + $output .= "Number of metrics: " . $options{data}->{server}->{global}->{count_metrics} . "\n"; + $output .= "Number of modules: " . $options{data}->{server}->{global}->{count_modules} . 
"\n"; + $output .= defined($options{data}->{server}->{global}->{count_pp}) ? "Number of plugin-packs: " . $options{data}->{server}->{global}->{count_pp} . "\n" : " Number of plugin-packs: N/A\n"; + $output .= "Number of recurrent downtimes: " . $options{data}->{server}->{global}->{count_downtime} . "\n\n"; + + $output .= "\t\t AVERAGE METRICS\n\n"; + $output .= "Host / poller (avg): " . $options{data}->{server}->{global}->{hosts_by_poller_avg} . "\n"; + $output .= "Service / poller (avg): " . $options{data}->{server}->{global}->{services_by_poller_avg} . "\n"; + $output .= "Service / host (avg): " . $options{data}->{server}->{global}->{services_by_host_avg} . "\n"; + $output .= "Metrics / service (avg): " . $options{data}->{server}->{global}->{metrics_by_service_avg} . "\n\n"; + + if ($options{flag_rrd} != 1 || $options{flag_db} eq "") { + $output .= "\t\t RRD INFORMATIONS\n\n"; + $output .= "RRD not updated since more than 180 days: " . $options{data}->{rrd}->{rrd_not_updated_since_180d} . "\n"; + $output .= "RRD written during last 5 five minutes: " . $options{data}->{rrd}->{rrd_written_last_5m} . "\n"; + foreach my $key (sort keys %{$options{data}->{rrd}}) { + next if ($key =~ m/^rrd_/); + $output .= "RRD files Count/Size in " . $key . " directory: " . $options{data}->{rrd}->{$key}->{count} . "/" . $options{data}->{rrd}->{$key}->{size} . "\n"; + } + $output .= "\n"; + } + + if ($options{flag_db} != 1 || $options{flag_db} eq "") { + $output .= "\t\t DATABASES INFORMATIONS\n\n"; + $output .= "\tDatabases size\n\n"; + foreach my $database (keys %{$options{data}->{database}->{db_size}}) { + $output .= "Size of " . $database . " database: " . $options{data}->{database}->{db_size}->{$database} . "\n"; + } + $output .= "\n"; + $output .= "\tTables size (centreon_storage db)\n\n"; + foreach my $database (keys %{$options{data}->{database}->{table_size}}) { + $output .= "Size of " . $database . " table: " . $options{data}->{database}->{table_size}->{$database} . "\n"; + } + $output .= "\n"; + $output .= "\tPartitioning check\n\n"; + foreach my $table (keys %{$options{data}->{database}->{partitioning_last_part}}) { + $output .= "Last partition date for " . $table . " table: " . $options{data}->{database}->{partitioning_last_part}->{$table} . "\n"; + } + $output .= "\n"; + } + + $output .= "\t\t MODULE INFORMATIONS\n\n"; + foreach my $module_key (keys %{$options{data}->{module}}) { + $output .= "Module " . $options{data}->{module}->{$module_key}->{full_name} . " is installed. (Author: " . $options{data}->{module}->{$module_key}->{author} . " # Codename: " . $module_key . " # Version: " . $options{data}->{module}->{$module_key}->{version} . ")\n"; + } + $output .= "\n"; + + $output .= "\t\t CENTREON NODES INFORMATIONS\n\n"; + + foreach my $poller_id (keys %{$options{data}->{server}->{poller}}) { + $output .= "\t" . $options{data}->{server}->{poller}->{$poller_id}->{name} . "\n\n"; + $output .= "Identity: \n"; + if (defined($options{data}->{server}->{poller}->{$poller_id}->{engine}) && defined($options{data}->{server}->{poller}->{$poller_id}->{version})) { + $output .= " Engine (version): " . $options{data}->{server}->{poller}->{$poller_id}->{engine} . " (" . $options{data}->{server}->{poller}->{$poller_id}->{version} . ")\n"; + $output .= " IP Address (SSH port): " . $options{data}->{server}->{poller}->{$poller_id}->{address} . " (" . $options{data}->{server}->{poller}->{$poller_id}->{ssh_port} . ")\n"; + $output .= " Localhost: " . $options{data}->{server}->{poller}->{$poller_id}->{localhost} . 
"\n"; + $output .= " Running: " . $options{data}->{server}->{poller}->{$poller_id}->{running} . "\n"; + $output .= " Start time: " . $options{data}->{server}->{poller}->{$poller_id}->{start_time} . "\n"; + $output .= " Last alive: " . $options{data}->{server}->{poller}->{$poller_id}->{last_alive} . "\n"; + $output .= " Uptime: " . $options{data}->{server}->{poller}->{$poller_id}->{uptime} . "\n"; + $output .= " Count Hosts/Services - (Last command check): " . $options{data}->{server}->{poller}->{$poller_id}->{hosts} . "/" . $options{data}->{server}->{poller}->{$poller_id}->{services} . " - (" . $options{data}->{server}->{poller}->{$poller_id}->{last_command_check} . ")\n\n"; + } else { + $output .= " SKIP Identity for this poller, enabled but does not seems to work correctly ! \n\n"; + } + + $output .= "Engine stats: \n"; + foreach my $stat_key (sort keys %{$options{data}->{server}->{poller}->{$poller_id}->{engine_stats}}) { + foreach my $stat_value (sort keys %{$options{data}->{server}->{poller}->{$poller_id}->{engine_stats}->{$stat_key}}) { + $output .= " " . $stat_key . "(" . $stat_value . "): " . $options{data}->{server}->{poller}->{$poller_id}->{engine_stats}->{$stat_key}->{$stat_value} . "\n"; + } + } + $output .= "\n"; + + $output .= "Broker stats: \n"; + foreach my $broker_stat_file (sort keys %{$options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}}) { + $output .= " \tFile: " . $broker_stat_file . "\n"; + $output .= " Version: " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{version} . "\n"; + $output .= " State: " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{state} . "\n"; + $output .= " Event proecessing speed " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{event_processing_speed} . "\n"; + $output .= " Queued events: " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{queued_events} . "\n"; + $output .= " Last connection attempts: " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{last_connection_attempt} . "\n"; + $output .= " Last connection success: " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{last_connection_success} . "\n\n"; + } + + $output .= "System stats: \n"; + $output .= defined($options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{cpu_usage}) ? + " CPU => " . $options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{cpu_usage} . "\n" : + " CPU => Could not gather data \n"; + $output .= defined($options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{load}) ? + " LOAD => " . $options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{load} . "\n" : + " LOAD => Could not gather data \n"; + $output .= defined($options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{mem_usage}) ? + " MEMORY => " . $options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{mem_usage} . "\n" : + " MEMORY => Could not gather data \n"; + $output .= defined($options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{swap_usage}) ? + " SWAP => " . 
$options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{swap_usage} . "\n" : + " SWAP => Could not gather data \n"; + $output .= defined($options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{storage_usage}) ? + " STORAGE => " . $options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{storage_usage} . "\n\n" : + " STORAGE => Could not gather data \n\n"; + + if ($options{flag_logs} != 1 || $options{flag_logs} eq "") { + $output .= "\t\t LOGS LAST LINES: \n\n"; + foreach my $log_topic (sort keys %{$options{data}->{logs}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}}) { + foreach my $log_file (keys %{$options{data}->{logs}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$log_topic}}) { + $output .= " " . $log_file . " (" . $log_topic . ")\n\n"; + $output .= $options{data}->{logs}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$log_topic}->{$log_file} . "\n\n"; + } + $output .= "\n"; + } + } + } + return $output; +} + +sub output_markdown { + my ($self, %options) = @_; + + my $output; + + $output = "# CENTREON_HEALTH TEXT OUTPUT\n"; + $output .= "## CENTREON OVERVIEW\n\n"; + $output .= " + Centreon Version: " . $options{data}->{server}->{global}->{centreon_version} . "\n"; + $output .= " + Number of pollers: " . $options{data}->{server}->{global}->{count_poller} . "\n"; + $output .= " + Number of hosts: " . $options{data}->{server}->{global}->{count_hosts} . "\n"; + $output .= " + Number of services: " . $options{data}->{server}->{global}->{count_services} . "\n"; + $output .= " + Number of metrics: " . $options{data}->{server}->{global}->{count_metrics} . "\n"; + $output .= " + Number of modules: " . $options{data}->{server}->{global}->{count_modules} . "\n"; + $output .= defined($options{data}->{server}->{global}->{count_pp}) ? " + Number of plugin-packs: " . $options{data}->{server}->{global}->{count_pp} . "\n" : " + Number of plugin-packs: N/A\n"; + $output .= " + Number of recurrent downtimes: " . $options{data}->{server}->{global}->{count_downtime} . "\n\n"; + $output .= "## AVERAGE METRICS\n\n"; + $output .= " + Host / poller (avg): " . $options{data}->{server}->{global}->{hosts_by_poller_avg} . "\n"; + $output .= " + Service / poller (avg): " . $options{data}->{server}->{global}->{services_by_poller_avg} . "\n"; + $output .= " + Service / host (avg): " . $options{data}->{server}->{global}->{services_by_host_avg} . "\n"; + $output .= " + Metrics / service (avg): " . $options{data}->{server}->{global}->{metrics_by_service_avg} . "\n\n"; + + if ($options{flag_rrd} != 1 || $options{flag_db} eq "") { + $output .= "## RRD INFORMATIONS\n\n"; + $output .= " + RRD not updated since more than 180 days: " . $options{data}->{rrd}->{rrd_not_updated_since_180d} . "\n"; + $output .= " + RRD written during last 5 five minutes: " . $options{data}->{rrd}->{rrd_written_last_5m} . "\n"; + foreach my $key (sort keys %{$options{data}->{rrd}}) { + next if ($key =~ m/^rrd_/); + $output .= " + RRD files Count/Size in " . $key . " directory: " . $options{data}->{rrd}->{$key}->{count} . "/" . $options{data}->{rrd}->{$key}->{size} . "\n"; + } + $output .= "\n"; + } + + if ($options{flag_db} != 1 || $options{flag_db} eq "") { + $output .= "## DATABASES INFORMATIONS\n\n"; + $output .= "### Databases size\n\n"; + foreach my $database (keys %{$options{data}->{database}->{db_size}}) { + $output .= " + Size of " . $database . " database: " . 
$options{data}->{database}->{db_size}->{$database} . "\n"; + } + $output .= "\n"; + $output .= "### Tables size (centreon_storage db)\n\n"; + foreach my $database (keys %{$options{data}->{database}->{table_size}}) { + $output .= " + Size of " . $database . " table: " . $options{data}->{database}->{table_size}->{$database} . "\n"; + } + $output .= "\n"; + $output .= "### Partitioning check\n\n"; + foreach my $table (keys %{$options{data}->{database}->{partitioning_last_part}}) { + $output .= " + Last partition date for " . $table . " table: " . $options{data}->{database}->{partitioning_last_part}->{$table} . "\n"; + } + $output .= "\n"; + } + + $output .= "## MODULE INFORMATIONS\n\n"; + foreach my $module_key (keys %{$options{data}->{module}}) { + $output .= " + Module " . $options{data}->{module}->{$module_key}->{full_name} . " is installed. (Author: " . $options{data}->{module}->{$module_key}->{author} . " # Codename: " . $module_key . " # Version: " . $options{data}->{module}->{$module_key}->{version} . ")\n"; + } + $output .= "\n"; + + $output .= "## CENTREON NODES INFORMATIONS\n\n"; + + foreach my $poller_id (keys %{$options{data}->{server}->{poller}}) { + $output .= "### " . $options{data}->{server}->{poller}->{$poller_id}->{name} . "\n\n"; + $output .= "#### Identity: \n"; + if (defined($options{data}->{server}->{poller}->{$poller_id}->{engine}) && defined($options{data}->{server}->{poller}->{$poller_id}->{version})) { + $output .= " + Engine (version): " . $options{data}->{server}->{poller}->{$poller_id}->{engine} . " (" . $options{data}->{server}->{poller}->{$poller_id}->{version} . ")\n"; + $output .= " + IP Address (SSH port): " . $options{data}->{server}->{poller}->{$poller_id}->{address} . " (" . $options{data}->{server}->{poller}->{$poller_id}->{ssh_port} . ")\n"; + $output .= " + Localhost: " . $options{data}->{server}->{poller}->{$poller_id}->{localhost} . "\n"; + $output .= " + Running: " . $options{data}->{server}->{poller}->{$poller_id}->{running} . "\n"; + $output .= " + Start time: " . $options{data}->{server}->{poller}->{$poller_id}->{start_time} . "\n"; + $output .= " + Last alive: " . $options{data}->{server}->{poller}->{$poller_id}->{last_alive} . "\n"; + $output .= " + Uptime: " . $options{data}->{server}->{poller}->{$poller_id}->{uptime} . "\n"; + $output .= " + Count Hosts/Services - (Last command check): " . $options{data}->{server}->{poller}->{$poller_id}->{hosts} . "/" . $options{data}->{server}->{poller}->{$poller_id}->{services} . " - (" . $options{data}->{server}->{poller}->{$poller_id}->{last_command_check} . ")\n\n"; + + } else { + $output .= " + SKIP Identity for this poller, enabled but does not seems to work correctly ! \n\n"; + } + + $output .= "#### Engine stats: \n"; + foreach my $stat_key (sort keys %{$options{data}->{server}->{poller}->{$poller_id}->{engine_stats}}) { + foreach my $stat_value (sort keys %{$options{data}->{server}->{poller}->{$poller_id}->{engine_stats}->{$stat_key}}) { + $output .= " + " . $stat_key . "(" . $stat_value . "): " . $options{data}->{server}->{poller}->{$poller_id}->{engine_stats}->{$stat_key}->{$stat_value} . "\n"; + } + } + $output .= "\n"; + + $output .= "#### Broker stats: \n"; + foreach my $broker_stat_file (sort keys %{$options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}}) { + $output .= "##### File: " . $broker_stat_file . "\n"; + $output .= " + Version: " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{version} . 
"\n"; + $output .= " + State: " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{state} . "\n"; + $output .= " + Event proecessing speed " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{event_processing_speed} . "\n"; + $output .= " + Queued events: " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{queued_events} . "\n"; + $output .= " + Last connection attempts: " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{last_connection_attempt} . "\n"; + $output .= " + Last connection success: " . $options{data}->{broker}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$broker_stat_file}->{last_connection_success} . "\n\n"; + } + + $output .= "#### System stats: \n"; + $output .= defined($options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{cpu_usage}) ? + " + CPU => " . $options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{cpu_usage} . "\n" : + " + CPU => Could not gather data \n"; + $output .= defined($options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{load}) ? + " + LOAD => " . $options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{load} . "\n" : + " + LOAD => Could not gather data \n"; + $output .= defined($options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{mem_usage}) ? + " + MEMORY => " . $options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{mem_usage} . "\n" : + " + MEMORY => Could not gather data \n"; + $output .= defined($options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{swap_usage}) ? + " + SWAP => " . $options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{swap_usage} . "\n" : + " + SWAP => Could not gather data \n"; + $output .= defined($options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{storage_usage}) ? + " + STORAGE => " . $options{data}->{systems}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{storage_usage} . "\n\n" : + " + STORAGE => Could not gather data \n\n"; + + if ($options{flag_logs} != 1 || $options{flag_logs} eq "") { + $output .= "## LOGS LAST LINES: \n\n"; + foreach my $log_topic (sort keys %{$options{data}->{logs}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}}) { + foreach my $log_file (keys %{$options{data}->{logs}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$log_topic}}) { + $output .= " + " . $log_file . " (" . $log_topic . ")\n\n"; + $output .= $options{data}->{logs}->{$options{data}->{server}->{poller}->{$poller_id}->{name}}->{$log_topic}->{$log_file} . 
"\n\n"; + } + $output .= "\n"; + } + } + } + return $output; +} + + +sub run { + my $self = shift; + my ($data, $format, $flag_rrd, $flag_db, $flash_logs) = @_; + + if ($format eq "JSON") { + my $output = JSON->new->encode($data); + print $output; + } elsif ($format eq "TEXT") { + my $output = $self->output_text(data => $data, flag_rrd => $flag_rrd, flag_db => $flag_db, flag_logs => $flash_logs); + print $output; + } elsif ($format eq "MARKDOWN") { + my $output = $self->output_markdown(data => $data, flag_rrd => $flag_rrd, flag_db => $flag_db, flag_logs => $flash_logs); + print $output; + } elsif ($format eq "DUMPER") { + use Data::Dumper; + print Dumper($data); + } +} + +1; diff --git a/perl-libs/lib/centreon/health/ssh.pm b/perl-libs/lib/centreon/health/ssh.pm new file mode 100644 index 00000000000..870c4c37e52 --- /dev/null +++ b/perl-libs/lib/centreon/health/ssh.pm @@ -0,0 +1,93 @@ +# +# Copyright 2017 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package centreon::health::ssh; + +use strict; +use warnings; +use Libssh::Session qw(:all); + +my $command_results = {}; + +sub new { + my $class = shift @_ || __PACKAGE__; + my $self; + $self->{session} = undef; + $self->{host} = undef; + $self->{port} = undef; + $self->{logger} = undef; + $self->{data} = {}; + + bless $self, $class; + return $self; +} + +sub ssh_callback { + my (%options) = @_; + + if ($options{exit} == SSH_OK || $options{exit} == SSH_AGAIN) { # AGAIN means timeout + chomp($options{stdout}); + $command_results->{multiple}->{$options{userdata}} = $options{stdout}; + } else { + $command_results->{multiple}->{failed_action} = "Failed action on ssh or plugin"; + return -1 + } + return 0 +} + +sub create_ssh_channel { + my ($self, %options) = @_; + + $self->{session} = Libssh::Session->new(); + if ($self->{session}->options(host => $options{host}, port => $options{port}, user => $options{user}) != SSH_OK) { + return 1 + } + + if ($self->{session}->connect() != SSH_OK) { + return 1 + } + + if ($self->{session}->auth_publickey_auto() != SSH_AUTH_SUCCESS) { + printf("auth issue pubkey: %s\n", $self->{session}->error(GetErrorSession => 1)); + if ($self->{session}->auth_password(password => $options{password}) != SSH_AUTH_SUCCESS) { + printf("auth issue: %s\n", $self->{session}->error(GetErrorSession => 1)); + return 1 + } + } + return 0 +} + +sub main { + my ($self, %options) = @_; + + $self->create_ssh_channel(host => $options{host}, port => $options{port}, user => 'centreon'); + if (defined($options{command_pool})) { + $self->{session}->execute(commands => $options{command_pool}, timeout => 5000, timeout_nodata => 10, parallel => 5); + $self->{data} = $command_results->{multiple}; + } else { + my $return = $self->{session}->execute_simple(cmd => $options{command}, userdata => $options{userdata}, timeout => 10, timeout_nodata => 5); + 
$command_results->{simple} = $return->{stdout}; + $self->{data} = $command_results->{simple}; + } + + return $self->{data} +} + +1; diff --git a/perl-libs/lib/centreon/reporting/CentreonAck.pm b/perl-libs/lib/centreon/reporting/CentreonAck.pm new file mode 100644 index 00000000000..1ef186dd92b --- /dev/null +++ b/perl-libs/lib/centreon/reporting/CentreonAck.pm @@ -0,0 +1,114 @@ +################################################################################ +# Copyright 2005-2020 Centreon +# Centreon is developed by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. +# +# +#################################################################################### + +use strict; +use warnings; + +package centreon::reporting::CentreonAck; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centstatus} = shift; + if (@_) { + $self->{centstorage} = shift; + } + bless $self, $class; + return $self; +} + +# returns first ack time for a service or a host event +sub getServiceAckTime { + my $self = shift; + my $centreon = $self->{centstatus}; + my $start = shift; + my $end = shift; + my $hostId = shift; + my $serviceId = shift; + my $query; + + $query = "SELECT `entry_time` as ack_time, sticky ". + " FROM `acknowledgements`" . + " WHERE `host_id` = " . $hostId . + " AND `service_id` = ". $serviceId . + " AND `type` = 1" . + " AND `entry_time` >= " . $start . + " AND `entry_time` <= " . $end . 
+ " ORDER BY `entry_time` ASC"; + + my ($status, $sth) = $centreon->query($query); + my $ackTime = "NULL"; + my $sticky = 0; + if (my $row = $sth->fetchrow_hashref()) { + $ackTime = $row->{ack_time}; + $sticky = $row->{sticky}; + } + $sth->finish(); + return ($ackTime, $sticky); +} + +# returns first ack time for a service or a host event +sub getHostAckTime { + my $self = shift; + my $centreon = $self->{centstatus}; + my $start = shift; + my $end = shift; + my $hostId = shift; + my $query; + + $query = "SELECT entry_time as ack_time, sticky ". + " FROM `acknowledgements`". + " WHERE `type` = 0". + " AND `entry_time` >= " . $start . + " AND `entry_time` <= " . $end . + " AND `host_id` = " . $hostId . + " ORDER BY `entry_time` ASC"; + + my ($status, $sth) = $centreon->query($query); + my $ackTime = "NULL"; + my $sticky = 0; + if (my $row = $sth->fetchrow_hashref()) { + $ackTime = $row->{ack_time}; + $sticky = $row->{sticky}; + } + $sth->finish(); + return ($ackTime, $sticky); +} + +1; diff --git a/perl-libs/lib/centreon/reporting/CentreonDashboard.pm b/perl-libs/lib/centreon/reporting/CentreonDashboard.pm new file mode 100644 index 00000000000..0f0f5f56009 --- /dev/null +++ b/perl-libs/lib/centreon/reporting/CentreonDashboard.pm @@ -0,0 +1,208 @@ +################################################################################ +# Copyright 2005-2020 Centreon +# Centreon is developed by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. 
+# +# +#################################################################################### + +use strict; +use warnings; + +package centreon::reporting::CentreonDashboard; + +use POSIX; +use Getopt::Long; +use Time::Local; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + bless $self, $class; + return $self; +} + +# Insert data in log_archive_host +sub insertHostStats { + my $self = shift; + my $centstorage = $self->{"centstorage"}; + my $names = shift; + my $stateDurations = shift; + my $start = shift; + my $end = shift; + my $dayDuration = $end - $start; + my $query_start = "INSERT INTO `log_archive_host` (`host_id`,". + " `UPTimeScheduled`,". + " `DOWNTimeScheduled`,". + " `UNREACHABLETimeScheduled`,". + " `MaintenanceTime`,". + " `UNDETERMINEDTimeScheduled`,". + " `UPnbEvent`,". + " `DOWNnbEvent`,". + " `UNREACHABLEnbEvent`,". + " `date_start`, `date_end`) VALUES "; + my $query_end = ""; + my $firstHost = 1; + my $count = 0; + my ($status, $sth); + while (my ($key, $value) = each %$names) { + if ($firstHost == 1) { + $firstHost = 0; + } else { + $query_end .= ","; + } + $query_end .= "(".$key.","; + if (defined($stateDurations->{$key})) { + my $stats = $stateDurations->{$key}; + my @tab = @$stats; + foreach (@tab) { + $query_end .= $_.","; + } + $query_end .= $start.",".$end.")"; + } else { + $query_end .= "0,0,0,0,".$dayDuration.",0,0,0,".$start.",".$end.")"; + } + $count++; + if ($count == 5000) { + ($status, $sth) = $centstorage->query($query_start.$query_end); + $firstHost = 1; + $query_end = ""; + $count = 0; + } + } + if ($count) { + ($status, $sth) = $centstorage->query($query_start.$query_end); + } +} + +# Insert data in log_archive_service +sub insertServiceStats { + my $self = shift; + my $centstorage = $self->{"centstorage"}; + my $names = shift; + my $stateDurations = shift; + my $start = shift; + my $end = shift; + my $dayDuration = $end - $start; + my $query_start = "INSERT INTO `log_archive_service` (`host_id`, `service_id`,". + " `OKTimeScheduled`,". + " `WARNINGTimeScheduled`,". + " `CRITICALTimeScheduled`,". + " `UNKNOWNTimeScheduled`,". + " `MaintenanceTime`,". + " `UNDETERMINEDTimeScheduled`,". + " `OKnbEvent`,". + " `WARNINGnbEvent`,". + " `CRITICALnbEvent`,". + " `UNKNOWNnbEvent`,". 
+ " `date_start`, `date_end`) VALUES "; + my $query_end = ""; + my $firstService = 1; + my $count = 0; + my ($status, $sth); + while (my ($key, $value) = each %$names) { + if ($firstService == 1) { + $firstService = 0; + } else { + $query_end .= ","; + } + my ($host_id, $service_id) = split(";;", $key); + $query_end .= "(".$host_id.",".$service_id.","; + if (defined($stateDurations->{$key})) { + my $stats = $stateDurations->{$key}; + my @tab = @$stats; + foreach (@tab) { + $query_end .= $_.","; + } + $query_end .= $start.",".$end.")"; + } else { + $query_end .= "0,0,0,0,0,".$dayDuration.",0,0,0,0,".$start.",".$end.")"; + } + $count++; + if ($count == 5000) { + ($status, $sth) = $centstorage->query($query_start.$query_end); + $firstService = 1; + $query_end = ""; + $count = 0; + } + } + if ($count) { + ($status, $sth) = $centstorage->query($query_start.$query_end); + } +} + +# Truncate service dashboard stats table +sub truncateServiceStats { + my $self = shift; + my $centstorage = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `log_archive_service`"; + $centstorage->query($query); +} + +# Truncate host dashboard stats table +sub truncateHostStats { + my $self = shift; + my $centstorage = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `log_archive_host`"; + $centstorage->query($query); +} + +# Delete service dashboard stats for a given period +sub deleteServiceStats { + my $self = shift; + my $centstorage = $self->{"centstorage"}; + + my ($start, $end) = (shift, shift); + my ($day, $month, $year) = (localtime($end))[3,4,5]; + $end = mktime(0, 0, 0, $day + 1, $month, $year); + my $query = "DELETE FROM `log_archive_service` WHERE `date_start`>= " . $start . " AND `date_end` <= " . $end; + $centstorage->query($query); +} + +# Delete host dashboard stats for a given period +sub deleteHostStats { + my $self = shift; + my $centstorage = $self->{"centstorage"}; + + my ($start, $end) = (shift, shift); + my ($day, $month, $year) = (localtime($end))[3,4,5]; + $end = mktime(0, 0, 0, $day + 1, $month, $year); + my $query = "DELETE FROM `log_archive_host` WHERE `date_start`>= " . $start . " AND `date_end` <= " . $end; + $centstorage->query($query); +} + +1; diff --git a/perl-libs/lib/centreon/reporting/CentreonDownTime.pm b/perl-libs/lib/centreon/reporting/CentreonDownTime.pm new file mode 100644 index 00000000000..75a1c09e137 --- /dev/null +++ b/perl-libs/lib/centreon/reporting/CentreonDownTime.pm @@ -0,0 +1,239 @@ +################################################################################ +# Copyright 2005-2020 Centreon +# Centreon is developed by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. 
+# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. +# +# +#################################################################################### + +use strict; +use warnings; + +package centreon::reporting::CentreonDownTime; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstatus"} = shift; + if (@_) { + $self->{"centstorage"} = shift; + } + bless $self, $class; + return $self; +} + +# returns two references to two hash tables => hosts indexed by id and hosts indexed by name +sub getDowntimes { + my $self = shift; + my $centreon = $self->{"centstatus"}; + my $allIds = shift; + my $start = shift; + my $end = shift; + my $type = shift; # if 1 => host, if 2 => service + my $query; + + $query = "SELECT DISTINCT h.host_id, s.service_id, " . + "d.actual_start_time, d.actual_end_time " . + "FROM `hosts` h, `downtimes` d " . + "LEFT JOIN services s ON s.service_id = d.service_id " . + "WHERE started = 1 " . + "AND d.host_id = h.host_id "; + if ($type == 1) { + $query .= "AND d.type = 2 "; # That can be confusing, but downtime_type 2 is for host + } elsif ($type == 2) { + $query .= "AND d.type = 1 "; # That can be confusing, but downtime_type 1 is for service + } + $query .= "AND (actual_start_time < " . $end . " AND actual_start_time IS NOT NULL) " . + "AND (actual_end_time > " . $start . " OR actual_end_time IS NULL) " . + "ORDER BY h.host_id ASC, actual_start_time ASC, actual_end_time ASC"; + + my ($status, $sth) = $centreon->query($query); + + my @periods = (); + while (my $row = $sth->fetchrow_hashref()) { + my $id = $row->{"host_id"}; + if ($type == 2) { + $id .= ";;" . 
$row->{"service_id"} + } + if (defined($allIds->{$id})) { + if ($row->{"actual_start_time"} < $start) { + $row->{"actual_start_time"} = $start; + } + if (!defined $row->{"actual_end_time"} || $row->{"actual_end_time"} > $end) { + $row->{"actual_end_time"} = $end; + } + + my $insert = 1; + for (my $i = 0; $i < scalar(@periods) && $insert; $i++) { + my $checkTab = $periods[$i]; + if ($checkTab->[0] eq $id){ + if ($row->{"actual_start_time"} <= $checkTab->[2] && $row->{"actual_end_time"} <= $checkTab->[2]) { + $insert = 0; + } elsif ($row->{"actual_start_time"} <= $checkTab->[2] && $row->{"actual_end_time"} > $checkTab->[2]) { + $checkTab->[2] = $row->{"actual_end_time"}; + $periods[$i] = $checkTab; + $insert = 0; + } + } + } + if ($insert) { + my @tab = ($id, $row->{"actual_start_time"}, $row->{"actual_end_time"}); + $periods[scalar(@periods)] = \@tab; + } + } + } + $sth->finish(); + return (\@periods); +} + +sub splitInsertEventDownTime { + my $self = shift; + + my $objectId = shift; + my $start = shift; + my $end = shift; + my $downtimes = shift; + my $state = shift; + + my @events = (); + my $total = 0; + if ($state ne "" && defined($downtimes) && defined($state) && $state != 0) { + $total = scalar(@$downtimes); + } + for (my $i = 0; $i < $total && $start < $end; $i++) { + my $tab = $downtimes->[$i]; + my $id = $tab->[0]; + my $downTimeStart = $tab->[1]; + my $downTimeEnd = $tab->[2]; + + if ($id eq $objectId) { + if ($downTimeStart < $start) { + $downTimeStart = $start; + } + if ($downTimeEnd > $end) { + $downTimeEnd = $end; + } + if ($downTimeStart < $end && $downTimeEnd > $start) { + if ($downTimeStart > $start) { + my @tab = ($start, $downTimeStart, 0); + $events[scalar(@events)] = \@tab; + } + my @tab = ($downTimeStart, $downTimeEnd, 1); + $events[scalar(@events)] = \@tab; + $start = $downTimeEnd; + } + } + } + if ($start < $end) { + my @tab = ($start, $end, 0); + $events[scalar(@events)] = \@tab; + } + return (\@events); +} + +sub splitUpdateEventDownTime { + my $self = shift; + + my $objectId = shift; + my $start = shift; + my $end = shift; + my $downTimeFlag = shift; + my $downTimes = shift; + my $state = shift; + + my $updated = 0; + my @events = (); + my $updateTime = 0; + my $total = 0; + if (defined($downTimes) && $state != 0) { + $total = scalar(@$downTimes); + } + for (my $i = 0; $i < $total && $start < $end; $i++) { + my $tab = $downTimes->[$i]; + my $id = $tab->[0]; + my $downTimeStart = $tab->[1]; + my $downTimeEnd = $tab->[2]; + + if ($id eq $objectId) { + if ($downTimeStart < $start) { + $downTimeStart = $start; + } + if ($downTimeEnd > $end) { + $downTimeEnd = $end; + } + if ($downTimeStart < $end && $downTimeEnd > $start) { + if ($updated == 0) { + $updated = 1; + if ($downTimeStart > $start) { + if ($downTimeFlag == 1) { + my @tab = ($start, $downTimeStart, 0); + $events[scalar(@events)] = \@tab; + }else { + $updateTime = $downTimeStart; + } + my @tab = ($downTimeStart, $downTimeEnd, 1); + $events[scalar(@events)] = \@tab; + }else { + if ($downTimeFlag == 1) { + $updateTime = $downTimeEnd; + }else { + my @tab = ($downTimeStart, $downTimeEnd, 1); + $events[scalar(@events)] = \@tab; + } + } + }else { + if ($downTimeStart > $start) { + my @tab = ($start, $downTimeStart, 0); + $events[scalar(@events)] = \@tab; + } + my @tab = ($downTimeStart, $downTimeEnd, 1); + $events[scalar(@events)] = \@tab; + } + $start = $downTimeEnd; + } + } + } + if ($start < $end && scalar(@events)) { + my @tab = ($start, $end, 0); + $events[scalar(@events)] = \@tab; + } else { + $updateTime = 
$end; + if (scalar(@events) && $end > $events[0][0]) { + $updateTime = $events[0][0]; + } + } + return ($updateTime, \@events); +} + +1; diff --git a/perl-libs/lib/centreon/reporting/CentreonHost.pm b/perl-libs/lib/centreon/reporting/CentreonHost.pm new file mode 100644 index 00000000000..4952bab0a63 --- /dev/null +++ b/perl-libs/lib/centreon/reporting/CentreonHost.pm @@ -0,0 +1,80 @@ +################################################################################ +# Copyright 2005-2020 Centreon +# Centreon is developed by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. +# +# +#################################################################################### + +use strict; +use warnings; + +package centreon::reporting::CentreonHost; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centreon} = shift; + if (@_) { + $self->{centstorage} = shift; + } + bless $self, $class; + return $self; +} + +# returns two references to two hash tables => hosts indexed by id and hosts indexed by id +sub getAllHostIds { + my $self = shift; + my $centreon = $self->{centreon}; + my $activated = 1; + if (@_) { + $activated = 0; + } + my %hostIds; + + my $query = "SELECT `host_id`" . + " FROM `host`". 
+ " WHERE `host_register` = '1'"; + if ($activated == 1) { + $query .= " AND `host_activate` = '1'"; + } + my ($status, $sth) = $centreon->query($query); + while (my $row = $sth->fetchrow_hashref()) { + $hostIds{$row->{host_id}} = 1; + } + $sth->finish(); + return \%hostIds; +} + +1; diff --git a/perl-libs/lib/centreon/reporting/CentreonHostStateEvents.pm b/perl-libs/lib/centreon/reporting/CentreonHostStateEvents.pm new file mode 100644 index 00000000000..b511dfd81f4 --- /dev/null +++ b/perl-libs/lib/centreon/reporting/CentreonHostStateEvents.pm @@ -0,0 +1,278 @@ +################################################################################ +# Copyright 2005-2020 Centreon +# Centreon is developed by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. +# +# +#################################################################################### + +use strict; +use warnings; + +package centreon::reporting::CentreonHostStateEvents; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centstorage} = shift; + $self->{centreonAck} = shift; + $self->{centreonDownTime} = shift; + bless $self, $class; + return $self; +} + +# Get events in given period +# Parameters: +# $start: period start +# $end: period end +sub getStateEventDurations { + my $self = shift; + my $centstorage = $self->{centstorage}; + my $start = shift; + my $end = shift; + my %hosts; + my $query = "SELECT `host_id`, `state`, `start_time`, `end_time`, `in_downtime`". + " FROM `hoststateevents`". + " WHERE `start_time` < ".$end. + " AND `end_time` > ".$start. 
+ " AND `state` < 3"; # STATE PENDING AND UNKNOWN NOT HANDLED + my ($status, $sth) = $centstorage->query($query); + while (my $row = $sth->fetchrow_hashref()) { + if ($row->{start_time} < $start) { + $row->{start_time} = $start; + } + if ($row->{end_time} > $end) { + $row->{end_time} = $end; + } + if (!defined($hosts{$row->{host_id}})) { + my @tab = (0, 0, 0, 0, 0, 0, 0, 0); + # index 0: UP, index 1: DOWN, index 2: UNREACHABLE, index 3: DOWNTIME, index 4: UNDETERMINED + # index 5: UP alerts, index 6: Down alerts, , index 7: Unreachable alerts + $hosts{$row->{host_id}} = \@tab; + } + + my $stats = $hosts{$row->{host_id}}; + if ($row->{in_downtime} == 0) { + $stats->[$row->{state}] += $row->{end_time} - $row->{start_time}; + + # We count UP alert like a recovery (don't put it otherwise) + if ($row->{state} != 0 || ($row->{state} == 0 && $row->{start_time} > $start)) { + $stats->[$row->{state} + 5] += 1; + } + } else { + $stats->[3] += $row->{end_time} - $row->{start_time}; + } + $hosts{$row->{host_id}} = $stats; + } + my %results; + while (my ($key, $value) = each %hosts) { + $value->[4] = ($end - $start) - ($value->[0] + $value->[1] + $value->[2] + $value->[3]); + $results{$key} = $value; + } + return (\%results); +} + +# Get last events for each host +# Parameters: +# $start: max date possible for each event +# $hostIds: references a hash table containing a list of host ids +sub getLastStates { + my $self = shift; + my $centstorage = $self->{centstorage}; + my $hostIds = shift; + + my %currentStates; + + my $query = "SELECT `host_id`, `state`, `hoststateevent_id`, `end_time`, `in_downtime`, `in_ack`". + " FROM `hoststateevents`". + " WHERE `last_update` = 1"; + my ($status, $sth) = $centstorage->query($query); + while (my $row = $sth->fetchrow_hashref()) { + if (defined($hostIds->{$row->{host_id}})) { + my @tab = ($row->{end_time}, $row->{state}, $row->{hoststateevent_id}, $row->{in_downtime}, $row->{in_ack}); + $currentStates{$row->{host_id}} = \@tab; + } + } + $sth->finish(); + + return (\%currentStates); +} + +# update a specific host incident end time +# Parameters +# $endTime: incident end time +# $eventId: ID of event to update +sub updateEventEndTime { + my $self = shift; + my $centstorage = $self->{centstorage}; + my $centreonDownTime = $self->{centreonDownTime}; + my $centreonAck = $self->{centreonAck}; + + my ($hostId, $start, $end, $state, $eventId, $downTimeFlag, $lastUpdate, $downTime, $inAck) = @_; + my $return = {}; + + my ($events, $updateTime); + ($updateTime, $events) = $centreonDownTime->splitUpdateEventDownTime($hostId, $start, $end, $downTimeFlag,$downTime, $state); + + my $totalEvents = 0; + if (defined($events)) { + $totalEvents = scalar(@$events); + } + my ($ack, $sticky) = $centreonAck->getHostAckTime($start, $updateTime, $hostId); + if (defined($ack) && $sticky == 1) { + $inAck = 1; + } + if (!$totalEvents && $updateTime) { + my $query = "UPDATE `hoststateevents` SET `end_time` = ".$updateTime.", `ack_time`= IFNULL(ack_time,$ack), `in_ack` = '$inAck', `last_update` = ".$lastUpdate. + " WHERE `hoststateevent_id` = ".$eventId; + $centstorage->query($query); + } else { + if ($updateTime) { + my $query = "UPDATE `hoststateevents` SET `end_time` = ".$updateTime.", `ack_time`= IFNULL(ack_time,$ack), `in_ack` = '$inAck', `last_update` = 0". 
+ " WHERE `hoststateevent_id` = ".$eventId; + $centstorage->query($query); + } + return $self->insertEventTable($hostId, $state, $lastUpdate, $events, $inAck); + } + + $return->{in_ack} = $inAck; + return $return; +} + +# insert a new incident for host +# Parameters +# $hostId : host ID +# $serviceId: service ID +# $state: incident state +# $start: incident start time +# $end: incident end time +sub insertEvent { + my $self = shift; + my $centreonDownTime = $self->{centreonDownTime}; + + my ($hostId, $state, $start, $end, $lastUpdate, $downtimes, $inAck) = @_; + my $return = { in_ack => $inAck }; + + my $events = $centreonDownTime->splitInsertEventDownTime($hostId, $start, $end, $downtimes, $state); + if ($state ne "") { + return $self->insertEventTable($hostId, $state, $lastUpdate, $events, $inAck); + } + + return $return; +} + +sub insertEventTable { + my $self = shift; + my $centstorage = $self->{centstorage}; + my $centreonAck = $self->{centreonAck}; + + my ($hostId, $state, $lastUpdate, $events, $inAck) = @_; + my $return = {}; + + my $query_start = "INSERT INTO `hoststateevents`". + " (`host_id`, `state`, `start_time`, `end_time`, `last_update`, `in_downtime`, `ack_time`, `in_ack`)" . + " VALUES ("; + my $count = 0; + my $totalEvents = 0; + + # Stick ack is removed + if ($state == 0) { + $inAck = 0; + } + for($count = 0; $count < scalar(@$events) - 1; $count++) { + my $tab = $events->[$count]; + my ($ack, $sticky) = $centreonAck->getHostAckTime($tab->[0], $tab->[1], $hostId); + if ($inAck == 1) { + $sticky = 1; + $ack = $tab->[0]; + } + if (defined($ack) && $sticky == 1) { + $inAck = 1; + } + my $query_end = $hostId.", ".$state.", ".$tab->[0].", ".$tab->[1].", 0, ".$tab->[2].", ".$ack.", '$sticky')"; + $centstorage->query($query_start.$query_end); + } + if (scalar(@$events)) { + my $tab = $events->[$count]; + if (defined($hostId) && defined($state)) { + my ($ack, $sticky) = $centreonAck->getHostAckTime($tab->[0], $tab->[1], $hostId); + if ($inAck == 1) { + $sticky = 1; + $ack = $tab->[0]; + } + if (defined($ack) && $sticky == 1) { + $inAck = 1; + } + my $query_end = $hostId . ", " . $state . ", " . $tab->[0] . ", " . $tab->[1] . ", " . + $lastUpdate . ", " . $tab->[2] . ", " . $ack . 
", '$sticky')"; + $centstorage->query($query_start.$query_end); + } + } + + $return->{in_ack} = $inAck; + return $return; +} + +# Truncate service incident table +sub truncateStateEvents { + my ($self, %options) = @_; + my $centstorage = $self->{centstorage}; + + if (defined($options{start})) { + my $query = "DELETE FROM hoststateevents WHERE start_time > $options{start}"; + $centstorage->query($query); + $query = "UPDATE hoststateevents SET end_time = $options{midnight} WHERE end_time > $options{midnight}"; + $centstorage->query($query); + } else { + my $query = "TRUNCATE TABLE hoststateevents"; + $centstorage->query($query); + } +} + +# Get first and last events date +sub getFirstLastIncidentTimes { + my $self = shift; + my $centstorage = $self->{centstorage}; + + my $query = "SELECT min(`start_time`) as minc, max(`end_time`) as maxc FROM `hoststateevents`"; + my ($status, $sth) = $centstorage->query($query); + my ($start, $end) = (0,0); + if (my $row = $sth->fetchrow_hashref()) { + ($start, $end) = ($row->{minc}, $row->{maxc}); + } + $sth->finish; + return ($start, $end); +} + +1; diff --git a/perl-libs/lib/centreon/reporting/CentreonLog.pm b/perl-libs/lib/centreon/reporting/CentreonLog.pm new file mode 100644 index 00000000000..1f9618c7142 --- /dev/null +++ b/perl-libs/lib/centreon/reporting/CentreonLog.pm @@ -0,0 +1,119 @@ +################################################################################ +# Copyright 2005-2020 Centreon +# Centreon is developed by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. 
+# +# +#################################################################################### + +use strict; +use warnings; + +package centreon::reporting::CentreonLog; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + bless $self, $class; + return $self; +} + +# Get all service logs between two dates +# Parameters: +# $start: period start date in timestamp +# $end: period start date in timestamp +sub getLogOfServices { + my $self = shift; + my $centstorage = $self->{"centstorage"}; + my ($start, $end); + if (@_) { + $start = shift; + $end = shift; + } + my $query = "SELECT `status`, `ctime`, `host_id`, `service_id`" . + " FROM `logs`" . + " WHERE `ctime` >= " . $start . + " AND `ctime` < " . $end . + " AND (`type` = 1 OR (`status` = 0 AND `type` = 0))" . + " AND ((`service_id` != 0 AND `service_id` IS NOT NULL) OR `service_description` IS NOT NULL) " . + " AND `msg_type` IN ('0', '1', '6', '7', '8', '9')" . + " ORDER BY `ctime`"; + my ($status, $result) = $centstorage->query($query); + return $result; +} + +# Get all hosts logs between two dates +# Parameters: +# $start: period start date in timestamp +# $end: period start date in timestamp +sub getLogOfHosts { + my $self = shift; + my $centstorage = $self->{"centstorage"}; + my ($start, $end); + if (@_) { + $start = shift; + $end = shift; + } + my $query = "SELECT `status`, `ctime`, `host_id`" . + " FROM `logs`" . + " WHERE `ctime` >= " . $start . + " AND `ctime` < " . $end . + " AND (`type` = 1 OR (`status` = 0 AND `type` = 0))" . + " AND `msg_type` IN ('0', '1', '6', '7', '8', '9')" . + " AND (`service_id` = 0 OR `service_id` IS NULL) AND (service_description = '' OR service_description IS NULL) ". + " ORDER BY `ctime`"; + my ($status, $result) = $centstorage->query($query); + return $result; +} + +# Get First log date and last log date +sub getFirstLastLogTime { + my $self = shift; + my $centstorage = $self->{centstorage}; + + my $query = "SELECT min(`ctime`) as minc, max(`ctime`) as maxc FROM `logs`"; + my ($status, $sth) = $centstorage->query($query); + my ($start, $end) = (0,0); + if (my $row = $sth->fetchrow_hashref()) { + ($start, $end) = ($row->{minc}, $row->{maxc}); + } + $sth->finish; + return ($start, $end); +} + +1; diff --git a/perl-libs/lib/centreon/reporting/CentreonProcessStateEvents.pm b/perl-libs/lib/centreon/reporting/CentreonProcessStateEvents.pm new file mode 100644 index 00000000000..30a2da6711a --- /dev/null +++ b/perl-libs/lib/centreon/reporting/CentreonProcessStateEvents.pm @@ -0,0 +1,277 @@ +################################################################################ +# Copyright 2005-2020 Centreon +# Centreon is developed by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. +# +# +#################################################################################### + +use strict; +use warnings; + +package centreon::reporting::CentreonProcessStateEvents; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"host"} = shift; + $self->{"service"} = shift; + $self->{"nagiosLog"} = shift; + $self->{"hostEvents"} = shift; + $self->{"serviceEvents"} = shift; + $self->{"centreonDownTime"} = shift; + bless $self, $class; + + return $self; +} + +# Parse services logs for given period +# Parameters: +# $start: period start +# $end: period end +sub parseServiceLog { + my $self = shift; + # parameters: + my ($start ,$end) = (shift,shift); + my $service = $self->{"service"}; + my $nagiosLog = $self->{"nagiosLog"}; + my $events = $self->{"serviceEvents"}; + my $centreonDownTime = $self->{"centreonDownTime"}; + + my $serviceIds = $service->getAllServiceIds(); + my $currentEvents = $events->getLastStates($serviceIds); + my $logs = $nagiosLog->getLogOfServices($start, $end); + my $downtimes = $centreonDownTime->getDowntimes($serviceIds, $start, $end, 2); + + while (my $row = $logs->fetchrow_hashref()) { + my $fullId = $row->{host_id} . ";;" . $row->{service_id}; + if (defined($serviceIds->{$fullId})) { + my $statusCode = $row->{status}; + + # manage initial states (no entry in state events table) + if (!defined($currentEvents->{$fullId})) { + my @tab = ($row->{ctime}, $statusCode, 0, 0, 0); + $currentEvents->{$fullId} = \@tab; + } + + my $eventInfos = $currentEvents->{$fullId}; + # $eventInfos is a reference to a table containing : incident start time | status | state_event_id | in_downtime. 
The last one is optional + if ($statusCode ne "" && defined($eventInfos->[1]) && $eventInfos->[1] ne "" && $eventInfos->[1] != $statusCode) { + my ($hostId, $serviceId) = split(";;", $fullId); + my $result = {}; + if ($eventInfos->[2] != 0) { + # If eventId of log is defined, update the last day event + $result = $events->updateEventEndTime( + $hostId, + $serviceId, + $eventInfos->[0], + $row->{ctime}, + $eventInfos->[1], + $eventInfos->[2], + $eventInfos->[3], + 0, + $downtimes, + $eventInfos->[4] + ); + } else { + if ($row->{ctime} > $eventInfos->[0]) { + $result = $events->insertEvent( + $hostId, + $serviceId, + $eventInfos->[1], + $eventInfos->[0], + $row->{ctime}, + 0, + $downtimes, + $eventInfos->[4] + ); + } + } + $eventInfos->[0] = $row->{ctime}; + $eventInfos->[1] = $statusCode; + $eventInfos->[2] = 0; + $eventInfos->[3] = 0; + $eventInfos->[4] = defined($result->{in_ack}) ? $result->{in_ack} : 0; + $currentEvents->{$fullId} = $eventInfos; + } + } + } + $self->insertLastServiceEvents($end, $currentEvents, $downtimes); +} + +# Parse host logs for given period +# Parameters: +# $start: period start +# $end: period end +sub parseHostLog { + my $self = shift; + + # parameters: + my ($start ,$end) = (shift,shift); + + my $host = $self->{"host"}; + my $nagiosLog = $self->{"nagiosLog"}; + my $events = $self->{"hostEvents"}; + my $centreonDownTime = $self->{"centreonDownTime"}; + + my $hostIds = $host->getAllHostIds(); + my $currentEvents = $events->getLastStates($hostIds); + my $logs = $nagiosLog->getLogOfHosts($start, $end); + my $downtimes = $centreonDownTime->getDowntimes($hostIds, $start, $end, 1); + + while (my $row = $logs->fetchrow_hashref()) { + my $hostId = $row->{host_id}; + + if (defined($hostIds->{$hostId})) { + my $statusCode = $row->{status}; + + # manage initial states (no entry in state events table) + if (!defined($currentEvents->{$hostId})) { + my @tab = ($row->{'ctime'}, $statusCode, 0, 0, 0); + $currentEvents->{$hostId} = \@tab; + } + + my $eventInfos = $currentEvents->{$hostId}; # $eventInfos is a reference to a table containing : incident start time | status | state_event_id. The last one is optionnal + if ($statusCode ne "" && defined($eventInfos->[1]) && $eventInfos->[1] ne "" && $eventInfos->[1] != $statusCode) { + my $result = {}; + if ($eventInfos->[2] != 0) { + # If eventId of log is defined, update the last day event + $result = $events->updateEventEndTime( + $hostId, + $eventInfos->[0], + $row->{'ctime'}, + $eventInfos->[1], + $eventInfos->[2], + $eventInfos->[3], + 0, + $downtimes, + $eventInfos->[4] + ); + } else { + if ($row->{ctime} > $eventInfos->[0]) { + $result = $events->insertEvent( + $hostId, + $eventInfos->[1], + $eventInfos->[0], + $row->{'ctime'}, + 0, + $downtimes, + $eventInfos->[4] + ); + } + } + $eventInfos->[0] = $row->{'ctime'}; + $eventInfos->[1] = $statusCode; + $eventInfos->[2] = 0; + $eventInfos->[3] = 0; + $eventInfos->[4] = defined($result->{in_ack}) ? 
$result->{in_ack} : 0; + $currentEvents->{$hostId} = $eventInfos; + } + } + } + $self->insertLastHostEvents($end, $currentEvents, $downtimes); +} + + +# Insert in DB last service incident of day currently processed +# Parameters: +# $end: period end +# $currentEvents: reference to a hash table that contains last incident details +# $serviceIds: reference to a hash table that returns host/service ids for host/service ids +sub insertLastServiceEvents { + my $self = shift; + my $events = $self->{"serviceEvents"}; + + # parameters: + my ($end, $currentEvents, $downtimes) = (shift, shift, shift, shift); + + while (my ($id, $eventInfos) = each (%$currentEvents)) { + my ($hostId, $serviceId) = split(";;", $id); + if ($eventInfos->[2] != 0) { + $events->updateEventEndTime( + $hostId, + $serviceId, + $eventInfos->[0], + $end, + $eventInfos->[1], + $eventInfos->[2], + $eventInfos->[3], + 1, + $downtimes, + $eventInfos->[4] + ); + } else { + $events->insertEvent( + $hostId, + $serviceId, + $eventInfos->[1], + $eventInfos->[0], + $end, + 1, + $downtimes, + $eventInfos->[4] + ); + } + } +} + +# Insert in DB last host incident of day currently processed +# Parameters: +# $end: period end +# $currentEvents: reference to a hash table that contains last incident details +sub insertLastHostEvents { + my $self = shift; + my $events = $self->{"hostEvents"}; + + # parameters: + my ($end, $currentEvents, $downtimes) = (shift, shift, shift); + + while (my ($hostId, $eventInfos) = each (%$currentEvents)) { + if ($eventInfos->[2] != 0) { + $events->updateEventEndTime( + $hostId, + $eventInfos->[0], + $end, + $eventInfos->[1], + $eventInfos->[2], + $eventInfos->[3], + 1, + $downtimes, + $eventInfos->[4] + ); + } else { + $events->insertEvent($hostId, $eventInfos->[1], $eventInfos->[0], $end, 1, $downtimes, $eventInfos->[4]); + } + } +} + +1; diff --git a/perl-libs/lib/centreon/reporting/CentreonService.pm b/perl-libs/lib/centreon/reporting/CentreonService.pm new file mode 100644 index 00000000000..bae75c8ceb1 --- /dev/null +++ b/perl-libs/lib/centreon/reporting/CentreonService.pm @@ -0,0 +1,105 @@ +################################################################################ +# Copyright 2005-2020 Centreon +# Centreon is developed by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. 
+# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. +# +# +#################################################################################### + +use strict; +use warnings; + +package centreon::reporting::CentreonService; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optional) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centreon"} = shift; + if (@_) { + $self->{"centstorage"} = shift; + } + bless $self, $class; + return $self; +} + +# returns two references to two hash tables => services indexed by id and services indexed by id +sub getAllServiceIds { + my $self = shift; + my $centreon = $self->{"centreon"}; + my $activated = 1; + if (@_) { + $activated = 0; + } + + my %serviceIds; + # getting services linked to hosts + my $query = "SELECT host_host_id as host_id, service_id" . + " FROM host, service, host_service_relation" . + " WHERE host_host_id = host_id" . + " AND service_service_id = service_id" . + " AND service_register = '1'"; + if ($activated == 1) { + $query .= " AND `service_activate`='1'"; + } + + my ($status, $sth) = $centreon->query($query); + while (my $row = $sth->fetchrow_hashref()) { + $serviceIds{$row->{'host_id'} . ";;" . $row->{'service_id'}} = 1; + } + + # getting services linked to hostgroup + $query = "SELECT host_id, service_id" . + " FROM host, service, host_service_relation hr, hostgroup_relation hgr, hostgroup hg" . + " WHERE hr.hostgroup_hg_id IS NOT NULL" . + " AND hr.service_service_id = service_id" . + " AND hr.hostgroup_hg_id = hgr.hostgroup_hg_id" . + " AND hgr.host_host_id = host_id" . + " AND service_register = '1'"; + if ($activated == 1) { + $query .= " AND service_activate='1'" . + " AND host_activate = '1'" . + " AND hg.hg_activate = '1'"; + } + $query .= " AND hg.hg_id = hgr.hostgroup_hg_id"; + + ($status, $sth) = $centreon->query($query); + while (my $row = $sth->fetchrow_hashref()) { + $serviceIds{$row->{'host_id'} . ";;" . $row->{'service_id'}} = 1; + } + $sth->finish(); + + return \%serviceIds; +} + +1; diff --git a/perl-libs/lib/centreon/reporting/CentreonServiceStateEvents.pm b/perl-libs/lib/centreon/reporting/CentreonServiceStateEvents.pm new file mode 100644 index 00000000000..ce19a031565 --- /dev/null +++ b/perl-libs/lib/centreon/reporting/CentreonServiceStateEvents.pm @@ -0,0 +1,276 @@ +################################################################################ +# Copyright 2005-2020 Centreon +# Centreon is developed by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. 
+# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. +# +# +#################################################################################### + +use strict; +use warnings; + +package centreon::reporting::CentreonServiceStateEvents; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optional) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centstorage} = shift; + $self->{centreonAck} = shift; + $self->{centreonDownTime} = shift; + bless $self, $class; + return $self; +} + +# Get events in given period +# Parameters: +# $start: period start +# $end: period end +sub getStateEventDurations { + my $self = shift; + my $centstorage = $self->{centstorage}; + my $start = shift; + my $end = shift; + + my %services; + my $query = "SELECT `host_id`, `service_id`, `state`, `start_time`, `end_time`, `in_downtime`". + " FROM `servicestateevents`". + " WHERE `start_time` < ".$end. + " AND `end_time` > ".$start. 
+ " AND `state` < 4"; # NOT HANDLING PENDING STATE + my ($status, $sth) = $centstorage->query($query); + while (my $row = $sth->fetchrow_hashref()) { + if ($row->{start_time} < $start) { + $row->{start_time} = $start; + } + if ($row->{end_time} > $end) { + $row->{end_time} = $end; + } + if (!defined($services{$row->{host_id}.";;".$row->{service_id}})) { + my @tab = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + # index 0: OK, index 1: WARNING, index 2: CRITICAL, index 3: UNKNOWN, index 4: DOWNTIME, index 5: UNDETERMINED + # index 6: OK alerts, index 7: WARNING alerts, index 8: CRITICAL alerts, index 9: UNKNOWN alerts + $services{$row->{host_id}.";;".$row->{service_id}} = \@tab; + } + + my $stats = $services{$row->{host_id}.";;".$row->{service_id}}; + if ($row->{in_downtime} == 0) { + $stats->[$row->{state}] += $row->{end_time} - $row->{start_time}; + if ($row->{state} != 0 || ($row->{state} == 0 && $row->{start_time} > $start)) { + $stats->[$row->{state} + 6] += 1; + } + } else { + $stats->[4] += $row->{end_time} - $row->{start_time}; + } + $services{$row->{host_id}.";;".$row->{service_id}} = $stats; + } + my %results; + while (my ($key, $value) = each %services) { + $value->[5] = ($end - $start) - ($value->[0] + $value->[1] + $value->[2] + $value->[3] + $value->[4]); + $results{$key} = $value; + } + return (\%results); +} + + +# Get last events for each service +# Parameters: +# $start: max date possible for each event +# $serviceIds: references a hash table containing a list of services +sub getLastStates { + my $self = shift; + my $centstorage = $self->{centstorage}; + + my $serviceIds = shift; + + my $currentStates = {}; + + my $query = "SELECT `host_id`, `service_id`, `state`, `servicestateevent_id`, `end_time`, `in_downtime`, `in_ack`". + " FROM `servicestateevents`". + " WHERE `last_update` = 1"; + my ($status, $sth) = $centstorage->query($query); + while (my $row = $sth->fetchrow_hashref()) { + my $serviceId = $row->{host_id} . ";;" . $row->{service_id}; + if (defined($serviceIds->{$serviceId})) { + my @tab = ($row->{end_time}, $row->{state}, $row->{servicestateevent_id}, $row->{in_downtime}, $row->{in_ack}); + $currentStates->{$serviceId} = \@tab; + } + } + $sth->finish(); + + return ($currentStates); +} + +# update a specific service incident end time +# Parameters +# $endTime: incident end time +# $eventId: ID of event to update +sub updateEventEndTime { + my $self = shift; + my $centstorage = $self->{centstorage}; + my $centstatus = $self->{centstatus}; + my $centreonAck = $self->{centreonAck}; + my $centreonDownTime = $self->{centreonDownTime}; + my ($hostId, $serviceId, $start, $end, $state, $eventId, $downTimeFlag, $lastUpdate, $downtimes, $inAck) = @_; + my $return = {}; + + my ($events, $updateTime); + ($updateTime, $events) = $centreonDownTime->splitUpdateEventDownTime($hostId . ";;" . $serviceId, $start, $end, $downTimeFlag, $downtimes, $state); + my $totalEvents = 0; + if (defined($events)) { + $totalEvents = scalar(@$events); + } + my ($ack, $sticky) = $centreonAck->getServiceAckTime($start, $updateTime, $hostId, $serviceId); + if (defined($ack) && $sticky == 1) { + $inAck = 1; + } + if (!$totalEvents && $updateTime) { + my $query = "UPDATE `servicestateevents` SET `end_time` = ".$updateTime.", `ack_time`= IFNULL(ack_time,$ack), `in_ack` = '$inAck', `last_update`=".$lastUpdate. 
+ " WHERE `servicestateevent_id` = ".$eventId; + $centstorage->query($query); + } else { + if ($updateTime) { + my $query = "UPDATE `servicestateevents` SET `end_time` = ".$updateTime.", `ack_time`= IFNULL(ack_time,$ack), `in_ack` = '$inAck', `last_update`= 0". + " WHERE `servicestateevent_id` = ".$eventId; + $centstorage->query($query); + } + return $self->insertEventTable($hostId, $serviceId, $state, $lastUpdate, $events, $inAck); + } + + $return->{in_ack} = $inAck; + return $return; +} + +# insert a new incident for service +# Parameters +# $hostId : host ID +# $serviceId: service ID +# $state: incident state +# $start: incident start time +# $end: incident end time +sub insertEvent { + my $self = shift; + my $centreonDownTime = $self->{centreonDownTime}; + my ($hostId, $serviceId, $state, $start, $end, $lastUpdate, $downtimes, $inAck) = @_; + my $events = $centreonDownTime->splitInsertEventDownTime($hostId . ";;" . $serviceId, $start, $end, $downtimes, $state); + my $return = { in_ack => $inAck }; + + if ($state ne "") { + return $self->insertEventTable($hostId, $serviceId, $state, $lastUpdate, $events, $inAck); + } + + return $return; +} + +sub insertEventTable { + my $self = shift; + my $centstorage = $self->{centstorage}; + my $centreonAck = $self->{centreonAck}; + my ($hostId, $serviceId, $state, $lastUpdate, $events, $inAck) = @_; + my $return = {}; + + my $query_start = "INSERT INTO `servicestateevents`". + " (`host_id`, `service_id`, `state`, `start_time`, `end_time`, `last_update`, `in_downtime`, `ack_time`, `in_ack`)". + " VALUES ("; + + # Stick ack is removed + if ($state == 0) { + $inAck = 0; + } + my $count = 0; + for ($count = 0; $count < scalar(@$events) - 1; $count++) { + my $tab = $events->[$count]; + + my ($ack, $sticky) = $centreonAck->getServiceAckTime($tab->[0], $tab->[1], $hostId, $serviceId); + if ($inAck == 1) { + $sticky = 1; + $ack = $tab->[0]; + } + if (defined($ack) && $sticky == 1) { + $inAck = 1; + } + my $query_end = $hostId.", ".$serviceId.", ".$state.", ".$tab->[0].", ".$tab->[1].", 0, ".$tab->[2].", ".$ack.", '$sticky')"; + $centstorage->query($query_start.$query_end); + } + if (scalar(@$events)) { + my $tab = $events->[$count]; + if (defined($hostId) && defined($serviceId) && defined($state)) { + my ($ack, $sticky) = $centreonAck->getServiceAckTime($tab->[0], $tab->[1], $hostId, $serviceId); + if ($inAck == 1) { + $sticky = 1; + $ack = $tab->[0]; + } + if (defined($ack) && $sticky == 1) { + $inAck = 1; + } + my $query_end = $hostId.", ".$serviceId.", ".$state.", ".$tab->[0].", ".$tab->[1].", ".$lastUpdate.", ".$tab->[2].", ".$ack.", '$sticky')"; + $centstorage->query($query_start.$query_end); + } + } + + $return->{in_ack} = $inAck; + return $return; +} + +# Truncate service incident table +sub truncateStateEvents { + my ($self, %options) = @_; + my $centstorage = $self->{centstorage}; + + if (defined($options{start})) { + my $query = "DELETE FROM servicestateevents WHERE start_time > $options{start}"; + $centstorage->query($query); + $query = "UPDATE servicestateevents SET end_time = $options{midnight} WHERE end_time > $options{midnight}"; + $centstorage->query($query); + } else { + my $query = "TRUNCATE TABLE servicestateevents"; + $centstorage->query($query); + } +} + +# Get first and last events date +sub getFirstLastIncidentTimes { + my $self = shift; + my $centstorage = $self->{centstorage}; + + my $query = "SELECT min(`start_time`) as minc, max(`end_time`) as maxc FROM `servicestateevents`"; + my ($status, $sth) = $centstorage->query($query); + 
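# Editor's note, not part of the upstream patch: a hypothetical caller (for instance a rebuild of the reporting tables) could use this pair of timestamps to bound the period it reprocesses, e.g.: + # my ($first_incident, $last_incident) = $serviceEvents->getFirstLastIncidentTimes(); + # ...then loop day by day between $first_incident and $last_incident... +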
my ($start, $end) = (0,0); + if (my $row = $sth->fetchrow_hashref()) { + ($start, $end) = ($row->{minc}, $row->{maxc}); + } + $sth->finish; + return ($start, $end); +} + +1; diff --git a/perl-libs/lib/centreon/script.pm b/perl-libs/lib/centreon/script.pm new file mode 100644 index 00000000000..ca38c34d063 --- /dev/null +++ b/perl-libs/lib/centreon/script.pm @@ -0,0 +1,159 @@ +################################################################################ +# Copyright 2005-2013 Centreon +# Centreon is developped by : Julien Mathis and Romain Le Merlus under +# GPL Licence 2.0. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation ; either version 2 of the License. +# +# This program is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Linking this program statically or dynamically with other modules is making a +# combined work based on this program. Thus, the terms and conditions of the GNU +# General Public License cover the whole combination. +# +# As a special exception, the copyright holders of this program give Centreon +# permission to link this program with independent modules to produce an executable, +# regardless of the license terms of these independent modules, and to copy and +# distribute the resulting executable under terms of Centreon choice, provided that +# Centreon also meet, for each linked independent module, the terms and conditions +# of the license of that module. An independent module is a module which is not +# derived from this program. If you modify this program, you may extend this +# exception to your version of the program, but you are not obliged to do so. If you +# do not wish to do so, delete this exception statement from your version. +# +# +#################################################################################### + +package centreon::script; + +use strict; +use warnings; +use FindBin; +use Getopt::Long; +use Pod::Usage; +use centreon::common::logger; +use centreon::common::db; +use centreon::common::lock; + +use vars qw($centreon_config); +use vars qw($mysql_user $mysql_passwd $mysql_host $mysql_database_oreon $mysql_database_ods $mysql_database_ndo $instance_mode); + +$SIG{__DIE__} = sub { + return unless defined $^S and $^S == 0; # Ignore errors in eval + my $error = shift; + print "Error: $error"; + exit 1; +}; + +sub new { + my ($class, $name, %options) = @_; + my %defaults = + ( + config_file => "/etc/centreon/conf.pm", + log_file => undef, + centreon_db_conn => 0, + centstorage_db_conn => 0, + severity => "info", + noconfig => 0, + noroot => 0, + instance_mode => "central" + ); + my $self = {%defaults, %options}; + + bless $self, $class; + $self->{name} = $name; + $self->{logger} = centreon::common::logger->new(); + $self->{options} = { + "config=s" => \$self->{config_file}, + "logfile=s" => \$self->{log_file}, + "severity=s" => \$self->{severity}, + "help|?" 
+    };
+    return $self;
+}
+
+sub init {
+    my $self = shift;
+
+    if (defined $self->{log_file}) {
+        $self->{logger}->file_mode($self->{log_file});
+    }
+    $self->{logger}->severity($self->{severity});
+
+    if ($self->{noroot} == 1) {
+        # Stop exec if root
+        if ($< == 0) {
+            $self->{logger}->writeLogError("Can't execute script as root.");
+            die("Quit");
+        }
+    }
+
+    if ($self->{centreon_db_conn}) {
+        $self->{cdb} = centreon::common::db->new(
+            db => $self->{centreon_config}->{centreon_db},
+            host => $self->{centreon_config}->{db_host},
+            port => $self->{centreon_config}->{db_port},
+            user => $self->{centreon_config}->{db_user},
+            password => $self->{centreon_config}->{db_passwd},
+            logger => $self->{logger}
+        );
+        $self->{lock} = centreon::common::lock::sql->new($self->{name}, dbc => $self->{cdb});
+        $self->{lock}->set();
+    }
+    if ($self->{centstorage_db_conn}) {
+        $self->{csdb} = centreon::common::db->new(
+            db => $self->{centreon_config}->{centstorage_db},
+            host => $self->{centreon_config}->{db_host},
+            port => $self->{centreon_config}->{db_port},
+            user => $self->{centreon_config}->{db_user},
+            password => $self->{centreon_config}->{db_passwd},
+            logger => $self->{logger}
+        );
+    }
+    $self->{instance_mode} = $instance_mode;
+}
+
+sub DESTROY {
+    my $self = shift;
+
+    if (defined $self->{cdb}) {
+        $self->{cdb}->disconnect();
+    }
+    if (defined $self->{csdb}) {
+        $self->{csdb}->disconnect();
+    }
+}
+
+sub add_options {
+    my ($self, %options) = @_;
+
+    $self->{options} = {%{$self->{options}}, %options};
+}
+
+sub parse_options {
+    my $self = shift;
+
+    Getopt::Long::Configure('bundling');
+    die "Command line error" if !GetOptions(%{$self->{options}});
+    pod2usage(-exitval => 1, -input => $FindBin::Bin . "/" . $FindBin::Script) if $self->{help};
+    if ($self->{noconfig} == 0) {
+        if (-e "$self->{config_file}" && -s "$self->{config_file}") {
+            require $self->{config_file};
+            $self->{centreon_config} = $centreon_config;
+        }
+    }
+}
+
+sub run {
+    my $self = shift;
+
+    $self->parse_options();
+    $self->init();
+}
+
+1;
diff --git a/perl-libs/lib/centreon/script/centFillTrapDB.pm b/perl-libs/lib/centreon/script/centFillTrapDB.pm
new file mode 100644
index 00000000000..81e0a6cdaa4
--- /dev/null
+++ b/perl-libs/lib/centreon/script/centFillTrapDB.pm
@@ -0,0 +1,779 @@
+################################################################################
+# Copyright 2005-2013 Centreon
+# Centreon is developed by: Julien Mathis and Romain Le Merlus under
+# GPL Licence 2.0.
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation ; either version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, see <http://www.gnu.org/licenses>.
+#
+# Linking this program statically or dynamically with other modules is making a
+# combined work based on this program. Thus, the terms and conditions of the GNU
+# General Public License cover the whole combination.
+#
+# As a special exception, the copyright holders of this program give Centreon
+# permission to link this program with independent modules to produce an executable,
+# regardless of the license terms of these independent modules, and to copy and
+# distribute the resulting executable under terms of Centreon choice, provided that
+# Centreon also meet, for each linked independent module, the terms and conditions
+# of the license of that module. An independent module is a module which is not
+# derived from this program. If you modify this program, you may extend this
+# exception to your version of the program, but you are not obliged to do so. If you
+# do not wish to do so, delete this exception statement from your version.
+#
+#
+####################################################################################
+
+package centreon::script::centFillTrapDB;
+
+use strict;
+use warnings;
+use centreon::script;
+use File::Basename;
+use File::Spec;
+
+use base qw(centreon::script);
+
+sub new {
+    my $class = shift;
+    my $self = $class->SUPER::new("centFillTrapDB",
+        centreon_db_conn => 0,
+        centstorage_db_conn => 0,
+    );
+
+    bless $self, $class;
+
+    $self->{no_description} = 0;
+    $self->{no_variables} = 0;
+    $self->{no_format_summary} = 0;
+    $self->{no_format_desc} = 0;
+    $self->{format} = 0;
+    $self->{format_desc} = 0;
+    $self->{no_desc_wildcard} = 0;
+    $self->{no_severity} = 0;
+    $self->{severity} = 'Normal';
+
+    # Set this to 1 to have the --TYPE string prepended to the --SUMMARY string.
+    # Set to 0 to disable
+    $self->{prepend_type} = 1;
+    $self->{net_snmp_perl} = 0;
+    $self->{total_translations} = 0;
+    $self->{successful_translations} = 0;
+    $self->{failed_translations} = 0;
+
+    $self->add_options(
+        "f=s" => \$self->{opt_f}, "file=s" => \$self->{opt_f},
+        "m=s" => \$self->{opt_m}, "man=s" => \$self->{opt_m}
+    );
+
+    return $self;
+}
+
+sub check_snmptranslate_version {
+    my $self = shift;
+    $self->{snmptranslate_use_On} = 1;
+
+    if (open SNMPTRANSLATE, "snmptranslate -V 2>&1|") {
+        my $snmptranslatever = <SNMPTRANSLATE>;
+        close SNMPTRANSLATE;
+
+        chomp ($snmptranslatever);
+
+        $self->{logger}->writeLogInfo("snmptranslate version: " . $snmptranslatever);
+
+        if ($snmptranslatever =~ /UCD/i || $snmptranslatever =~ /NET-SNMP version: 5.0.1/i) {
+            $self->{snmptranslate_use_On} = 0;
+            $self->{logger}->writeLogDebug("snmptranslate is either UCD-SNMP, or NET-SNMP v5.0.1, so do not use the -On switch. Version found: $snmptranslatever");
+        }
+    }
+}
+
+sub existsInDB {
+    my $self = shift;
+
+    my ($status, $sth) = $self->{centreon_dbc}->query("SELECT `traps_id` FROM `traps` WHERE `traps_oid` = " . $self->{centreon_dbc}->quote($self->{trap_oid}) . " AND `traps_name` = " . $self->{centreon_dbc}->quote($self->{trap_name}) . " LIMIT 1");
+    if ($status == -1) {
+        return 0;
+    }
+    if (defined($sth->fetchrow_array())) {
+        return 1;
+    }
+    return 0;
+}
+
+sub getStatus {
+    my ($self) = @_;
+
+    if ($self->{trap_severity} =~ /up/i) {
+        return 0;
+    } elsif ($self->{trap_severity} =~ /warning|degraded|minor/i) {
+        return 1;
+    } elsif ($self->{trap_severity} =~ /critical|major|failure|error|down/i) {
+        return 2;
+    } else {
+        if ($self->{trap_name} =~ /normal|up/i || $self->{trap_name} =~ /on$/i) {
+            return 0;
+        } elsif ($self->{trap_name} =~ /warning|degraded|minor/i) {
+            return 1;
+        } elsif ($self->{trap_name} =~ /critical|major|fail|error|down|bad/i || $self->{trap_name} =~ /off|low$/i) {
+            return 2;
+        }
+    }
+    return 3;
+}
+
+sub insert_into_centreon {
+    my $self = shift;
+    my $last_oid = "";
+    my ($status, $sth);
+
+    if (!$self->existsInDB()) {
+        ($status, $sth) = $self->{centreon_dbc}->query(
+            "INSERT INTO `traps` (`traps_name`, `traps_oid`, `traps_status`, `manufacturer_id`, `traps_submit_result_enable`) VALUES ("
+            . $self->{centreon_dbc}->quote($self->{trap_name}) . ", "
+            . $self->{centreon_dbc}->quote($self->{trap_oid}) . ", "
+            . $self->{centreon_dbc}->quote($self->getStatus()) . ", "
+            . $self->{centreon_dbc}->quote($self->{opt_m}) . ", '1')"
+        );
+    }
+    ($status, $sth) = $self->{centreon_dbc}->query(
+        "UPDATE `traps` SET `traps_args` = " . $self->{centreon_dbc}->quote($self->{trap_format})
+        . ", `traps_comments` = " . $self->{centreon_dbc}->quote($self->{trap_description})
+        . " WHERE `traps_oid` = " . $self->{centreon_dbc}->quote($self->{trap_oid})
+    );
+}
+
+################
+## MAIN FUNCTION
+#
+sub main {
+    my $self = shift;
+
+    if (!open(FILE, $self->{opt_f})) {
+        $self->{logger}->writeLogError("Cannot get mib file : $self->{opt_f}");
+        exit(1);
+    }
+
+    # From snmpconvertmib
+    # Copyright 2002-2013 Alex Burger
+    # alex_b@users.sourceforge.net
+
+    # Get complete path of input file (MIB) in a portable way (needed for -m switch for snmptranslate)
+    my $dirname = dirname $self->{opt_f};
+    my $basename = basename $self->{opt_f};
+    my $input = File::Spec->catfile($dirname, $basename);
+    $ENV{MIBS} = $input;
+
+    $self->check_snmptranslate_version();
+    my @mibfile;
+    while (<FILE>) {
+        chomp;              # remove the newline at the end of the line
+        s/\015//;           # remove any DOS carriage returns
+        push(@mibfile, $_); # add each line to the @mibfile array
+    }
+
+    my $currentline = 0;
+    # A mib file can contain multiple BEGIN definitions. This finds the first one
+    # to make sure we have at least one definition.
+    # Determine name of MIB file
+    my $mib_name = '';
+    while ($currentline <= $#mibfile) {
+        my $line = $mibfile[$currentline];
+
+        # Sometimes DEFINITIONS ::= BEGIN will appear on the line following the mib name.
+        # Look for DEFINITIONS ::= BEGIN with nothing (white space allowed) around it and a previous line with
+        # only a single word with whitespace around it.
+        if ($currentline > 0 && $line =~ /^\s*DEFINITIONS\s*::=\s*BEGIN\s*$/ && $mibfile[$currentline-1] =~ /^\s*(\S+)\s*$/) {
+            # We should have found the mib name
+            $mib_name = $1;
+            $self->{logger}->writeLogInfo("Split line DEFINITIONS ::= BEGIN found ($1).");
+            $mib_name =~ s/\s+//g;
+            last;
+        } elsif ($line =~ /(.*)DEFINITIONS\s*::=\s*BEGIN/) {
+            $mib_name = $1;
+            $mib_name =~ s/\s+//g;
+            last;
+        }
+        $currentline++;
+    }
+    $self->{logger}->writeLogInfo("mib name: $mib_name");
+    if ($mib_name eq '') {
+        $self->{logger}->writeLogError("Could not find DEFINITIONS ::= BEGIN statement in MIB file!");
+        exit (1);
+    }
+
+    while ($currentline <= $#mibfile) {
+        my $line = $mibfile[$currentline];
+
+        # Sometimes DEFINITIONS ::= BEGIN will appear on the line following the mib name.
+        # Look for DEFINITIONS ::= BEGIN with nothing (white space allowed) around it and a previous line with
+        # only a single word with whitespace around it.
+        if ($currentline > 0 && $line =~ /^\s*DEFINITIONS\s*::=\s*BEGIN\s*$/ && $mibfile[$currentline-1] =~ /^\s*(\S+)\s*$/) {
+            # We should have found the mib name
+            $self->{logger}->writeLogInfo("Split line DEFINITIONS ::= BEGIN found ($1).");
+
+            $mib_name = $1;
+            $mib_name =~ s/\s+//g;
+            $self->{logger}->writeLogInfo("Processing MIB: $mib_name");
+
+            $currentline++; # Increment to the next line
+            next;
+        } elsif ($line =~ /(.*)DEFINITIONS\s*::=\s*BEGIN/) {
+            $mib_name = $1;
+            $mib_name =~ s/\s+//g;
+            $self->{logger}->writeLogInfo("Processing MIB: $mib_name");
+
+            $currentline++; # Increment to the next line
+            next;
+        }
+
+        # TRAP-TYPE (V1) / NOTIFICATION-TYPE (V2)
+        #
+        # eg: 'mngmtAgentTrap-23003 TRAP-TYPE';
+        # eg: 'ciscoSystemClockChanged NOTIFICATION-TYPE';
+        if ($line =~ /(.*)\s*TRAP-TYPE.*/ ||
+            $line =~ /(.*)\s*(?