From 546b5c97ddf6666ace33dfdec0c9d4eb4a03f779 Mon Sep 17 00:00:00 2001 From: Michael Gardner Date: Mon, 8 Jul 2024 10:35:56 -0400 Subject: [PATCH 1/9] HPCC-32137 Conditional Documentation build tests on pull request event Signed-off-by: Michael Gardner --- .github/workflows/build-vcpkg.yml | 30 +++++++ .github/workflows/test-documentation.yml | 108 +++++++++++++++++++++++ 2 files changed, 138 insertions(+) create mode 100644 .github/workflows/test-documentation.yml diff --git a/.github/workflows/build-vcpkg.yml b/.github/workflows/build-vcpkg.yml index 29a86b2463b..e089e2ba0a7 100644 --- a/.github/workflows/build-vcpkg.yml +++ b/.github/workflows/build-vcpkg.yml @@ -117,6 +117,36 @@ jobs: asset-name: 'docker-ubuntu-22_04' secrets: inherit + check-documentation-changes: + if: ${{ contains('pull_request,push', github.event_name) }} + runs-on: ubuntu-22.04 + outputs: + documentation_contents_changed: ${{ steps.variables.outputs.documentation_contents_changed }} + steps: + - name: Check for Documentation Changes + id: changed + uses: dorny/paths-filter@v3 + with: + filters: | + src: + - 'docs/**' + - '.github/workflows/test-documentation.yml' + - name: Set Output + id: variables + run: | + echo "documentation_contents_changed=${{ steps.changed.outputs.src }}" >> $GITHUB_OUTPUT + - name: Print Variables + run: | + echo "${{ toJSON(steps.variables.outputs)}}" + + test-documentation-ubuntu-22_04: + needs: check-documentation-changes + if: ${{ contains('pull_request,push', github.event_name) && needs.check-documentation-changes.outputs.documentation_contents_changed == 'true' }} + uses: ./.github/workflows/test-documentation.yml + with: + os: 'ubuntu-22.04' + asset-name: 'Documentation' + build-docker-ubuntu-20_04: if: ${{ contains('schedule,push', github.event_name) }} uses: ./.github/workflows/build-docker.yml diff --git a/.github/workflows/test-documentation.yml b/.github/workflows/test-documentation.yml new file mode 100644 index 00000000000..db2b8831757 --- /dev/null +++ b/.github/workflows/test-documentation.yml @@ -0,0 +1,108 @@ +name: Build Documentation + +on: + workflow_call: + inputs: + os: + type: string + description: 'Operating System' + required: false + default: 'ubuntu-22.04' + asset-name: + type: string + description: 'Asset Name' + required: false + default: 'Documentation' + + workflow_dispatch: + inputs: + os: + type: string + description: 'Operating System' + required: false + default: 'ubuntu-22.04' + asset-name: + type: string + description: 'Asset Name' + required: false + default: 'Documentation' + +jobs: + build-documentation: + name: Build Documentation + runs-on: ubuntu-22.04 + + steps: + - name: Checkout HPCC-Platform + uses: actions/checkout@v4 + with: + ref: ${{ github.ref }} + submodules: recursive + path: ${{ github.workspace }}/HPCC-Platform + + - name: Calculate vars + id: vars + working-directory: ${{ github.workspace }}/HPCC-Platform/vcpkg + run: | + vcpkg_sha_short=$(git rev-parse --short=8 HEAD) + echo "vcpkg_sha_short=$vcpkg_sha_short" >> $GITHUB_OUTPUT + docker_build_label=hpccsystems/platform-build-${{ inputs.os }} + echo "docker_build_label=$docker_build_label" >> $GITHUB_OUTPUT + echo "docker_tag=$docker_build_label:$vcpkg_sha_short" >> $GITHUB_OUTPUT + community_base_ref=${{ github.event.base_ref || github.ref }} + candidate_branch=$(echo $community_base_ref | cut -d'/' -f3) + candidate_base_branch=$(echo $candidate_branch | awk -F'.' -v OFS='.' 
'{ $3="x"; print }') + echo "docker_tag_candidate_base=$docker_build_label:$candidate_base_branch" >> $GITHUB_OUTPUT + community_ref=${{ github.ref }} + community_tag=$(echo $community_ref | cut -d'/' -f3) + echo "community_tag=$community_tag" >> $GITHUB_OUTPUT + + + - name: Print vars + run: | + echo "${{ toJSON(steps.vars.outputs) }})" + + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v3 + + - name: Create Build Image + uses: docker/build-push-action@v5 + with: + builder: ${{ steps.buildx.outputs.name }} + file: ${{ github.workspace }}/HPCC-Platform/dockerfiles/vcpkg/${{ inputs.os }}.dockerfile + context: ${{ github.workspace }}/HPCC-Platform/dockerfiles/vcpkg + push: false + load: true + build-args: | + VCPKG_REF=${{ steps.vars.outputs.vcpkg_sha_short }} + tags: | + ${{ steps.vars.outputs.docker_tag_candidate_base }} + cache-from: | + type=registry,ref=${{ steps.vars.outputs.docker_tag_candidate_base }} + type=registry,ref=${{ steps.vars.outputs.docker_tag }} + cache-to: type=inline + + - name: CMake documentation + run: | + mkdir -p {${{ github.workspace }}/build,EN_US,PT_BR} + docker run --rm --mount source="${{ github.workspace }}/HPCC-Platform",target=/hpcc-dev/HPCC-Platform,type=bind,consistency=cached --mount source="${{ github.workspace }}/build",target=/hpcc-dev/build,type=bind,consistency=cached ${{ steps.vars.outputs.docker_tag_candidate_base }} "\ + cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/build -DVCPKG_FILES_DIR=/hpcc-dev -DMAKE_DOCS_ONLY=ON -DUSE_NATIVE_LIBRARIES=ON -DDOCS_AUTO=ON -DDOC_LANGS=ALL && \ + cmake --build /hpcc-dev/build --parallel $(nproc) --target all" + docker run --rm --mount source="${{ github.workspace }}/HPCC-Platform",target=/hpcc-dev/HPCC-Platform,type=bind,consistency=cached --mount source="${{ github.workspace }}/build",target=/hpcc-dev/build,type=bind,consistency=cached ${{ steps.vars.outputs.docker_tag_candidate_base }} "cd /hpcc-dev/build/Release/docs/EN_US && zip ALL_HPCC_DOCS_EN_US-$(echo '${{ steps.vars.outputs.community_tag }}' | sed 's/community_//' ).zip *.pdf" + docker run --rm --mount source="${{ github.workspace }}/HPCC-Platform",target=/hpcc-dev/HPCC-Platform,type=bind,consistency=cached --mount source="${{ github.workspace }}/build",target=/hpcc-dev/build,type=bind,consistency=cached ${{ steps.vars.outputs.docker_tag_candidate_base }} "cd /hpcc-dev/build/Release/docs/PT_BR && zip ALL_HPCC_DOCS_PT_BR-$(echo '${{ steps.vars.outputs.community_tag }}' | sed 's/community_//' ).zip *.pdf" + + - name: Upload build artifacts + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.asset-name }} + path: | + ${{ github.workspace }}/build/Release/docs/*.zip + ${{ github.workspace }}/build/Release/docs/EN_US/*.zip + ${{ github.workspace }}/build/Release/docs/PT_BR/*.zip + ${{ github.workspace }}/build/docs/EN_US/EclipseHelp/*.zip + ${{ github.workspace }}/build/docs/EN_US/HTMLHelp/*.zip + ${{ github.workspace }}/build/docs/PT_BR/HTMLHelp/*.zip + compression-level: 0 + From b413a36fce23d25932cda526432a2b4c8e181275 Mon Sep 17 00:00:00 2001 From: Rodrigo Pastrana Date: Mon, 30 Sep 2024 15:12:51 -0400 Subject: [PATCH 2/9] HPCC-32752 Update Loki4hpcclogs README - Clarifies significance of loki stack deployment name Signed-off-by: Rodrigo Pastrana --- helm/managed/logging/loki-stack/README.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/helm/managed/logging/loki-stack/README.md b/helm/managed/logging/loki-stack/README.md index 8d2af452a8d..e77f524bd53 100644 --- 
a/helm/managed/logging/loki-stack/README.md +++ b/helm/managed/logging/loki-stack/README.md @@ -10,7 +10,8 @@ A Loki Datasource is created automatically, which allowers users to monitor/quer ### Helm Deployment To deploy the light-weight Loki Stack for HPCC component log processing issue the following command: ->helm install myloki HPCC-Systems/helm/managed/logging/loki-stack/ +>helm install myloki4hpcclogs HPCC-Systems/helm/managed/logging/loki-stack/ +Note: the deployment name 'myloki4hpcclogs' is customizable; however, any changes need to be reflected in the LogAccess configuration (See section on configuring LogAccess below) ### Dependencies This chart is dependent on the Grafana Loki-stack Helm charts which in turn is dependent on Loki, Grafana, Promtail. @@ -23,7 +24,9 @@ Helm provides a convenient command to automatically pull appropriate dependencie ##### HELM Install parameter Otherwise, provide the "--dependency-update" argument in the helm install command For example: -> helm install myloki HPCC-Systems/helm/managed/logging/loki-stack/ --dependency-update +> helm install myloki4hpcclogs HPCC-Systems/helm/managed/logging/loki-stack/ --dependency-update + +Note: the deployment name 'myloki4hpcclogs' is customizable; however, any changes need to be reflected in the LogAccess configuration (See section on configuring LogAccess below) ### Components Grafana Loki Stack is comprised of a set of components that which serve as a full-featured logging stack. @@ -172,7 +175,7 @@ username: 5 bytes The target HPCC deployment should be directed to use the desired Grafana endpoint with the Loki datasource, and the newly created secret by providing appropriate logAccess values (such as ./grafana-hpcc-logaccess.yaml). -Example use: +Example use for targeting a loki stack deployed as 'myloki4hpcclogs' on the default namespace: ``` helm install myhpcc hpcc/hpcc -f HPCC-Platform/helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml @@ -182,8 +185,10 @@ Example use: The grafana hpcc logaccess values should provide Grafana connection information, such as the host, and port; the Loki datasource where the logs reside; the k8s namespace under which the logs were created (non-default namespace highly recommended); and the hpcc component log format (table|json|xml) +Example values file describing logAccess targeting loki stack deployed as 'myloki4hpcclogs' on the default namespace. 
Note that the "host" entry must reflect the name of the deployed Loki stack, as shown in the excerpt below (eg **_myloki4hpcclogs_**-grafana.default.svc.cluster.local): + ``` -Example use: + global: logAccess: name: "Grafana/loki stack log access" @@ -220,4 +225,4 @@ For example: - ``` \ No newline at end of file + ``` From 447c9f86fdd9ad1560db9679df0ab7d98c8964b6 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Wed, 2 Oct 2024 17:09:55 +0100 Subject: [PATCH 3/9] HPCC-32760 Provide periodic details of the performance of RoxieSocketQueueManager::thread Signed-off-by: Gavin Halliday --- roxie/ccd/ccdqueue.cpp | 61 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 11 deletions(-) diff --git a/roxie/ccd/ccdqueue.cpp b/roxie/ccd/ccdqueue.cpp index 5e41da7fbbf..4b799fd7bf3 100644 --- a/roxie/ccd/ccdqueue.cpp +++ b/roxie/ccd/ccdqueue.cpp @@ -2422,6 +2422,31 @@ class RoxieSocketQueueManager : public RoxieReceiverBase DelayedPacketQueueManager delayed; #endif + class WorkerUdpTracker : public TimeDivisionTracker<6, false> + { + public: + enum + { + other, + waiting, + allocating, + processing, + pushing, + checkingRunning + }; + + WorkerUdpTracker(const char *name, unsigned reportIntervalSeconds) : TimeDivisionTracker<6, false>(name, reportIntervalSeconds) + { + stateNames[other] = "other"; + stateNames[waiting] = "waiting"; + stateNames[allocating] = "allocating"; + stateNames[processing] = "processing"; + stateNames[pushing] = "pushing"; + stateNames[checkingRunning] = "checking running"; + } + + } timeTracker; + class ReceiverThread : public Thread { RoxieSocketQueueManager &parent; @@ -2441,7 +2466,7 @@ class RoxieSocketQueueManager : public RoxieReceiverBase } readThread; public: - RoxieSocketQueueManager(unsigned _numWorkers) : RoxieReceiverBase(_numWorkers), logctx("RoxieSocketQueueManager"), readThread(*this) + RoxieSocketQueueManager(unsigned _numWorkers) : RoxieReceiverBase(_numWorkers), logctx("RoxieSocketQueueManager"), timeTracker("WorkerUdpReader", 60), readThread(*this) { maxPacketSize = multicastSocket->get_max_send_size(); if ((maxPacketSize==0)||(maxPacketSize>65535)) @@ -2754,21 +2779,26 @@ class RoxieSocketQueueManager : public RoxieReceiverBase // if found, send an IBYTI and discard retry request bool alreadyRunning = false; - Owned wi = queue.running(); - ForEach(*wi) { - CRoxieWorker &w = (CRoxieWorker &) wi->query(); - if (w.match(header)) + WorkerUdpTracker::TimeDivision division(timeTracker, WorkerUdpTracker::checkingRunning); + + Owned wi = queue.running(); + ForEach(*wi) { - alreadyRunning = true; - ROQ->sendIbyti(header, logctx, mySubchannel); - if (doTrace(traceRoxiePackets, TraceFlags::Max)) + CRoxieWorker &w = (CRoxieWorker &) wi->query(); + if (w.match(header)) { - StringBuffer xx; logctx.CTXLOG("Ignored retry on subchannel %u for running activity %s", mySubchannel, header.toString(xx).str()); + alreadyRunning = true; + ROQ->sendIbyti(header, logctx, mySubchannel); + if (doTrace(traceRoxiePackets, TraceFlags::Max)) + { + StringBuffer xx; logctx.CTXLOG("Ignored retry on subchannel %u for running activity %s", mySubchannel, header.toString(xx).str()); + } + break; } - break; } } + if (!alreadyRunning && checkCompleted && ROQ->replyPending(header)) { alreadyRunning = true; @@ -2784,6 +2814,7 @@ class RoxieSocketQueueManager : public RoxieReceiverBase { StringBuffer xx; logctx.CTXLOG("Retry %d received on subchannel %u for %s", retries+1, mySubchannel, header.toString(xx).str()); } + WorkerUdpTracker::TimeDivision division(timeTracker, 
WorkerUdpTracker::pushing); #ifdef NEW_IBYTI // It's debatable whether we should delay for the primary here - they had one chance already... // But then again, so did we, assuming the timeout is longer than the IBYTIdelay @@ -2802,6 +2833,7 @@ class RoxieSocketQueueManager : public RoxieReceiverBase } else // first time (not a retry). { + WorkerUdpTracker::TimeDivision division(timeTracker, WorkerUdpTracker::pushing); #ifdef NEW_IBYTI unsigned delay = 0; if (mySubchannel != 0 && (header.activityId & ~ROXIE_PRIORITY_MASK) < ROXIE_ACTIVITY_SPECIAL_FIRST) // i.e. I am not the primary here, and never delay special @@ -2826,6 +2858,7 @@ class RoxieSocketQueueManager : public RoxieReceiverBase doIbytiDelay?"YES":"NO", minIbytiDelay, initIbytiDelay); MemoryBuffer mb; + WorkerUdpTracker::TimeDivision division(timeTracker, WorkerUdpTracker::other); for (;;) { mb.clear(); @@ -2840,8 +2873,14 @@ class RoxieSocketQueueManager : public RoxieReceiverBase #else unsigned timeout = 5000; #endif + division.switchState(WorkerUdpTracker::allocating); + void * buffer = mb.reserve(maxPacketSize); + + division.switchState(WorkerUdpTracker::waiting); unsigned l; - multicastSocket->readtms(mb.reserve(maxPacketSize), sizeof(RoxiePacketHeader), maxPacketSize, l, timeout); + multicastSocket->readtms(buffer, sizeof(RoxiePacketHeader), maxPacketSize, l, timeout); + division.switchState(WorkerUdpTracker::processing); + mb.setLength(l); RoxiePacketHeader &header = *(RoxiePacketHeader *) mb.toByteArray(); if (l != header.packetlength) From 55d0900dd37d4778d9cd287293f15fe8821f6f64 Mon Sep 17 00:00:00 2001 From: Rodrigo Pastrana Date: Wed, 2 Oct 2024 14:40:54 -0400 Subject: [PATCH 4/9] HPCC-32761 ALA LogAccess Blob Mode - Adds support for ALA logAccess blob mode - Provides sample blob mode configuration for v1 and v2 Signed-off-by: Rodrigo Pastrana --- .../loganalytics-hpcc-logaccessV1Blob.yaml | 103 ++++++++++++++++++ .../loganalytics-hpcc-logaccessV2Blob.yaml | 101 +++++++++++++++++ .../AzureLogAnalyticsCurlClient.cpp | 63 +++++++---- .../AzureLogAnalyticsCurlClient.hpp | 1 + 4 files changed, 248 insertions(+), 20 deletions(-) create mode 100644 helm/examples/azure/log-analytics/loganalytics-hpcc-logaccessV1Blob.yaml create mode 100644 helm/examples/azure/log-analytics/loganalytics-hpcc-logaccessV2Blob.yaml diff --git a/helm/examples/azure/log-analytics/loganalytics-hpcc-logaccessV1Blob.yaml b/helm/examples/azure/log-analytics/loganalytics-hpcc-logaccessV1Blob.yaml new file mode 100644 index 00000000000..0750e38e896 --- /dev/null +++ b/helm/examples/azure/log-analytics/loganalytics-hpcc-logaccessV1Blob.yaml @@ -0,0 +1,103 @@ +# Configures HPCC logAccess to target Azure Log Analytics Workspace +global: + logAccess: + name: "Azure LogAnalytics LogAccess" + type: "AzureLogAnalyticsCurl" + blobMode: "true" + #connection: + #All connection attributes are optional. 
+ #It is preferable to provide connection values as secret values category 'esp', secret name 'azure_logaccess' + # NOTE: secret 'azure_logaccess' must include 'aad-client-secret' and it cannot be provided in configuration + # + #workspaceID: "XYZ" #ID of the Azure LogAnalytics workspace to query logs from + # Secret value equivalent: 'ala-workspace-id' + #clientID: "DEF" #ID of Azure Active Directory registered application with api.loganalytics.io access - format: 00000000-0000-0000-0000-000000000000 + # Secret value equivalent: 'aad-client-id' + #tenantID: "ABC" #The Azure Active Directory Tenant ID, required for KQL API access + # Secret value equivalent: 'aad-tenant-id' + logMaps: + - type: "global" + storeName: "ContainerLog" + searchColumn: "LogEntry" + columnType: "string" + columnMode: "MIN" + timeStampColumn: "TimeGenerated" + - type: "workunits" + searchColumn: "LogEntry" + columnMode: "DEFAULT" + columnType: "string" + - type: "components" + storeName: "ContainerInventory" + searchColumn: "Name" + keyColumn: "ContainerID" + columnMode: "MIN" + columnType: "string" + timeStampColumn: "TimeGenerated" + disableJoins: false #Potentially expensive join operations needed to fetch a certain column can be disabled + - type: "audience" + searchColumn: "LogEntry" + enumValues: + - code: OPR + - code: USR + - code: PRO + - code: ADT + - code: MON + columnMode: "DEFAULT" + columnType: "enum" + - type: "class" + searchColumn: "LogEntry" + enumValues: + - code: DIS + - code: ERR + - code: WRN + - code: INF + - code: PRO + - code: MET + - code: EVT + columnMode: "DEFAULT" + columnType: "enum" + - type: "instance" + columnMode: "DEFAULT" + searchColumn: "Computer" + columnMode: "ALL" + columnType: "string" + - type: "message" + searchColumn: "LogEntry" + columnMode: "MIN" + columnType: "string" + - type: "logid" + searchColumn: "LogEntry" + columnMode: "DEFAULT" + columnType: "string" + - type: "processid" + searchColumn: "LogEntry" + columnMode: "ALL" + columnType: "string" + - type: "threadid" + searchColumn: "LogEntry" + columnMode: "DEFAULT" + columnType: "string" + - type: "timestamp" + searchColumn: "TimeGenerated" + columnMode: "MIN" + columnType: "Timestamp" + #- type: "pod" + # searchColumn: "PodName" + # columnMode: "DEFAULT" + # columnType: "string" + - type: "spanid" + searchColumn: "LogEntry" + columnMode: "DEFAULT" + columnType: "string" + - type: "traceid" + searchColumn: "LogEntry" + columnMode: "DEFAULT" + columnType: "string" +secrets: + esp: + azure-logaccess: "azure-logaccess" +vaults: + esp: + - name: my-azure-logaccess-vault + url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/esp/${secret} + kind: kv-v2 diff --git a/helm/examples/azure/log-analytics/loganalytics-hpcc-logaccessV2Blob.yaml b/helm/examples/azure/log-analytics/loganalytics-hpcc-logaccessV2Blob.yaml new file mode 100644 index 00000000000..89b73d19b05 --- /dev/null +++ b/helm/examples/azure/log-analytics/loganalytics-hpcc-logaccessV2Blob.yaml @@ -0,0 +1,101 @@ +# Configures HPCC logAccess to target Azure Log Analytics Workspace +global: + logAccess: + name: "Azure LogAnalytics LogAccess" + type: "AzureLogAnalyticsCurl" + blobMode: "true" + #connection: + #All connection attributes are optional. 
+ #It is preferable to provide connection values as secret values category 'esp', secret name 'azure_logaccess' + # NOTE: secret 'azure_logaccess' must include 'aad-client-secret' and it cannot be provided in configuration + # + #workspaceID: "XYZ" #ID of the Azure LogAnalytics workspace to query logs from + # Secret value equivalent: 'ala-workspace-id' + #clientID: "DEF" #ID of Azure Active Directory registered application with api.loganalytics.io access - format: 00000000-0000-0000-0000-000000000000 + # Secret value equivalent: 'aad-client-id' + #tenantID: "ABC" #The Azure Active Directory Tenant ID, required for KQL API access + # Secret value equivalent: 'aad-tenant-id' + logMaps: + - type: "global" + storeName: "ContainerLogV2" + searchColumn: "LogMessage" + columnType: "dynamic" + columnMode: "MIN" + timeStampColumn: "TimeGenerated" + - type: "workunits" + searchColumn: "LogMessage" + columnMode: "DEFAULT" + columnType: "string" + - type: "components" + storeName: "ContainerLogV2" + searchColumn: "ContainerName" # Container name happens to coincide with component name + keyColumn: "ContainerName" + columnMode: "DEFAULT" + columnType: "string" + - type: "audience" + searchColumn: "LogMessage" + enumValues: + - code: OPR + - code: USR + - code: PRO + - code: ADT + - code: MON + columnMode: "DEFAULT" + columnType: "enum" + - type: "class" + searchColumn: "LogMessage" + enumValues: + - code: DIS + - code: ERR + - code: WRN + - code: INF + - code: PRO + - code: MET + - code: EVT + columnMode: "DEFAULT" + columnType: "enum" + - type: "instance" + columnMode: "DEFAULT" + searchColumn: "Computer" + columnMode: "ALL" + columnType: "string" + - type: "message" + searchColumn: "LogMessage" + columnMode: "MIN" + columnType: "string" + - type: "logid" + searchColumn: "LogMessage" + columnMode: "DEFAULT" + columnType: "string" + - type: "processid" + searchColumn: "LogMessage" + columnMode: "ALL" + columnType: "string" + - type: "threadid" + searchColumn: "LogMessage" + columnMode: "DEFAULT" + columnType: "string" + - type: "timestamp" + searchColumn: "LogMessage" + columnMode: "MIN" + columnType: "string" + - type: "pod" + searchColumn: "PodName" + columnMode: "DEFAULT" + columnType: "string" + - type: "spanid" + searchColumn: "LogMessage" + columnMode: "DEFAULT" + columnType: "string" + - type: "traceid" + searchColumn: "LogMessage" + columnMode: "DEFAULT" + columnType: "string" +secrets: + esp: + azure-logaccess: "azure-logaccess" +vaults: + esp: + - name: my-azure-logaccess-vault + url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/esp/${secret} + kind: kv-v2 diff --git a/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.cpp b/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.cpp index 9269737a836..35b1decae34 100644 --- a/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.cpp +++ b/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.cpp @@ -336,6 +336,10 @@ AzureLogAnalyticsCurlClient::AzureLogAnalyticsCurlClient(IPropertyTree & logAcce m_pluginCfg.set(&logAccessPluginConfig); + //blobMode is a flag to determine if the log entry is to be parsed or not + m_blobMode = logAccessPluginConfig.getPropBool("@blobMode", false); + DBGLOG("%s: Blob Mode: %s", COMPONENT_NAME, m_blobMode ? 
"Enabled" : "Disabled"); + m_globalIndexTimestampField.set(defaultHPCCLogTimeStampCol); m_globalIndexSearchPattern.set(defaultIndexPattern); m_globalSearchColName.set(defaultHPCCLogMessageCol); @@ -483,7 +487,11 @@ void AzureLogAnalyticsCurlClient::getMinReturnColumns(StringBuffer & columns, co columns.append(defaultHPCCLogComponentCol); columns.append(", "); } - columns.appendf("%s, %s", m_globalIndexTimestampField.str(), defaultHPCCLogMessageCol); + + if (m_blobMode) + columns.appendf("%s, %s", m_globalIndexTimestampField.str(), m_globalSearchColName.str()); + else + columns.appendf("%s, %s", m_globalIndexTimestampField.str(), defaultHPCCLogMessageCol); } void AzureLogAnalyticsCurlClient::getDefaultReturnColumns(StringBuffer & columns, const bool includeComponentName) @@ -517,12 +525,15 @@ void AzureLogAnalyticsCurlClient::getDefaultReturnColumns(StringBuffer & columns if (!isEmptyString(m_podSearchColName)) columns.appendf("%s, ", m_podSearchColName.str()); - columns.appendf("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s", - m_globalIndexTimestampField.str(), defaultHPCCLogMessageCol, m_classSearchColName.str(), - m_audienceSearchColName.str(), m_workunitSearchColName.str(), m_traceSearchColName.str(), m_spanSearchColName.str(), defaultHPCCLogSeqCol, defaultHPCCLogThreadIDCol, defaultHPCCLogProcIDCol); + if (m_blobMode) + columns.appendf("%s, %s", m_globalIndexTimestampField.str(), m_globalSearchColName.str()); + else + columns.appendf("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s", + m_globalIndexTimestampField.str(), defaultHPCCLogMessageCol, m_classSearchColName.str(), + m_audienceSearchColName.str(), m_workunitSearchColName.str(), m_traceSearchColName.str(), m_spanSearchColName.str(), defaultHPCCLogSeqCol, defaultHPCCLogThreadIDCol, defaultHPCCLogProcIDCol); } -bool generateHPCCLogColumnstAllColumns(StringBuffer & kql, const char * colName, bool targetsV2) +bool generateHPCCLogColumnstAllColumns(StringBuffer & kql, const char * colName, bool targetsV2, bool blobMode) { if (isEmptyString(colName)) { @@ -538,25 +549,32 @@ bool generateHPCCLogColumnstAllColumns(StringBuffer & kql, const char * colName, else throw makeStringExceptionV(-1, "%s: Invalid Azure Log Analytics log message column name detected: '%s'. 
Review logAccess configuration.", COMPONENT_NAME, colName); - kql.appendf("\n| extend hpcclogfields = extract_all(@\'^([0-9A-Fa-f]+)\\s+(OPR|USR|PRG|AUD|UNK)\\s+(DIS|ERR|WRN|INF|PRO|MET|UNK)\\s+(\\d{4}-\\d{2}-\\d{2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(UNK|[A-Z]\\d{8}-\\d{6}(?:-\\d+)?)\\s*([0-9a-fA-F]{32}|UNK)?\\s*([0-9a-fA-F]{16}|UNK)?\\s+)?\\\"(.*)\\\"$', %s)[0]", sourceCol.str()); - kql.appendf("\n| extend %s = tostring(hpcclogfields.[0])", defaultHPCCLogSeqCol); - kql.appendf("\n| extend %s = tostring(hpcclogfields.[1])", defaultHPCCLogAudCol); - kql.appendf("\n| extend %s = tostring(hpcclogfields.[2])", defaultHPCCLogTypeCol); - kql.appendf("\n| extend %s = todatetime(hpcclogfields.[3])", defaultHPCCLogTimeStampCol); - kql.appendf("\n| extend %s = toint(hpcclogfields.[4])", defaultHPCCLogProcIDCol); - kql.appendf("\n| extend %s = toint(hpcclogfields.[5])", defaultHPCCLogThreadIDCol); - kql.appendf("\n| extend %s = tostring(hpcclogfields.[6])", defaultHPCCLogJobIDCol); - kql.appendf("\n| extend %s = tostring(hpcclogfields.[7])", defaultHPCCLogTraceIDCol); - kql.appendf("\n| extend %s = tostring(hpcclogfields.[8])", defaultHPCCLogSpanIDCol); - kql.appendf("\n| extend %s = tostring(hpcclogfields.[9])", defaultHPCCLogMessageCol); - kql.appendf("\n| project-away hpcclogfields, Type, TenantId, _ResourceId, %s, ", colName); + if (!blobMode) + { + kql.appendf("\n| extend hpcclogfields = extract_all(@\'^([0-9A-Fa-f]+)\\s+(OPR|USR|PRG|AUD|UNK)\\s+(DIS|ERR|WRN|INF|PRO|MET|UNK)\\s+(\\d{4}-\\d{2}-\\d{2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(UNK|[A-Z]\\d{8}-\\d{6}(?:-\\d+)?)\\s*([0-9a-fA-F]{32}|UNK)?\\s*([0-9a-fA-F]{16}|UNK)?\\s+)?\\\"(.*)\\\"$', %s)[0]", sourceCol.str()); + kql.appendf("\n| extend %s = tostring(hpcclogfields.[0])", defaultHPCCLogSeqCol); + kql.appendf("\n| extend %s = tostring(hpcclogfields.[1])", defaultHPCCLogAudCol); + kql.appendf("\n| extend %s = tostring(hpcclogfields.[2])", defaultHPCCLogTypeCol); + kql.appendf("\n| extend %s = todatetime(hpcclogfields.[3])", defaultHPCCLogTimeStampCol); + kql.appendf("\n| extend %s = toint(hpcclogfields.[4])", defaultHPCCLogProcIDCol); + kql.appendf("\n| extend %s = toint(hpcclogfields.[5])", defaultHPCCLogThreadIDCol); + kql.appendf("\n| extend %s = tostring(hpcclogfields.[6])", defaultHPCCLogJobIDCol); + kql.appendf("\n| extend %s = tostring(hpcclogfields.[7])", defaultHPCCLogTraceIDCol); + kql.appendf("\n| extend %s = tostring(hpcclogfields.[8])", defaultHPCCLogSpanIDCol); + kql.appendf("\n| extend %s = tostring(hpcclogfields.[9])", defaultHPCCLogMessageCol); + + kql.appendf("\n| project-away hpcclogfields, Type, TenantId, _ResourceId, %s, ", colName); + } + else + { + kql.appendf("\n| project-away Type, TenantId, _ResourceId, "); + } if (targetsV2) kql.append("LogSource, SourceSystem"); else kql.append("LogEntrySource, TimeOfCommand, SourceSystem"); - return true; } @@ -659,6 +677,11 @@ void AzureLogAnalyticsCurlClient::populateKQLQueryString(StringBuffer & queryStr std::string queryField = m_globalSearchColName.str(); std::string queryOperator = " =~ "; + if (m_blobMode) + { + queryOperator = " has "; + } + filter->toString(queryValue); switch (filter->filterType()) { @@ -879,7 +902,8 @@ void AzureLogAnalyticsCurlClient::populateKQLQueryString(StringBuffer & queryStr declareContainerIndexJoinTable(queryString, options); queryString.append(queryIndex); - generateHPCCLogColumnstAllColumns(queryString, m_globalSearchColName.str(), targetIsContainerLogV2); + //this used to parse m_globalSearchColName into 
hpcc.log.* fields, now just does a project-away + generateHPCCLogColumnstAllColumns(queryString, m_globalSearchColName.str(), targetIsContainerLogV2, m_blobMode); if (options.queryFilter() == nullptr || options.queryFilter()->filterType() == LOGACCESS_FILTER_wildcard) // No filter { @@ -895,7 +919,6 @@ void AzureLogAnalyticsCurlClient::populateKQLQueryString(StringBuffer & queryStr StringBuffer range; azureLogAnalyticsTimestampQueryRangeString(range, m_globalIndexTimestampField.str(), trange.getStartt().getSimple(),trange.getEndt().isNull() ? -1 : trange.getEndt().getSimple()); queryString.append("\n| where ").append(range.str()); - //if (includeComponentName) if (!m_disableComponentNameJoins && !targetIsContainerLogV2) queryString.append("\n) on ").append(m_componentsLookupKeyColumn); diff --git a/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp b/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp index d2b92460f4e..013cb0e5032 100644 --- a/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp +++ b/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp @@ -77,6 +77,7 @@ class AzureLogAnalyticsCurlClient : public CInterfaceOf StringBuffer m_instanceLookupKeyColumn; bool targetIsContainerLogV2 = false; + bool m_blobMode = false; public: AzureLogAnalyticsCurlClient(IPropertyTree & logAccessPluginConfig); From 5f1a6e133ce10ab05080202ac73f559c4ebf13d7 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Thu, 3 Oct 2024 17:47:27 +0100 Subject: [PATCH 5/9] Split off 9.6.54 Signed-off-by: Jake Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- version.cmake | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index bdac0fb1cf0..36f4a90e113 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.6.53-closedown0 +version: 9.6.55-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.6.53-closedown0 +appVersion: 9.6.55-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index dc85e1f9a55..4b163f43aa5 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1477,7 +1477,7 @@ Pass in dict with .root, .visibility defined {{- end -}} {{- define "hpcc.generateHelmVersion" -}} -helmVersion: 9.6.53-closedown0 +helmVersion: 9.6.55-closedown0 {{- end -}} {{/* diff --git a/version.cmake b/version.cmake index 55595a865d3..e7cd9014923 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 6 ) -set ( HPCC_POINT 53 ) +set ( HPCC_POINT 55 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-09-27T11:23:05Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-10-03T16:47:27Z" ) ### From 28a2b3eb44982599cdee1eb49825030743552147 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Thu, 3 Oct 2024 17:48:15 +0100 Subject: [PATCH 6/9] Split off 9.8.28 Signed-off-by: Jake Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- version.cmake | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index ce1936e3e5f..24f39d1d0ab 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.8.27-closedown0 +version: 9.8.29-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.8.27-closedown0 +appVersion: 9.8.29-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 82ed8abd5ed..4b7c13034ee 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1477,7 +1477,7 @@ Pass in dict with .root, .visibility defined {{- end -}} {{- define "hpcc.generateHelmVersion" -}} -helmVersion: 9.8.27-closedown0 +helmVersion: 9.8.29-closedown0 {{- end -}} {{/* diff --git a/version.cmake b/version.cmake index c43b8cb8a54..2bbb6ddc179 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 8 ) -set ( HPCC_POINT 27 ) +set ( HPCC_POINT 29 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-09-27T11:25:37Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-10-03T16:48:15Z" ) ### From ac6c9f049940c1347403cee030af64c7f2418d6e Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Tue, 8 Oct 2024 16:13:13 +0100 Subject: [PATCH 7/9] HPCC-32777 Fix backupnode failing on getComponentConfig Signed-off-by: Jake Smith --- tools/backupnode/backupnode.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/backupnode/backupnode.cpp b/tools/backupnode/backupnode.cpp index 3f2406c1991..a0929021741 100644 --- a/tools/backupnode/backupnode.cpp +++ b/tools/backupnode/backupnode.cpp @@ -532,6 +532,9 @@ int main(int argc, const char *argv[]) StringAttr errdatdir; StringArray args; unsigned slaveNum = 0; + + initNullConfiguration(); + unsigned argNo = 1; while ((int)argNo Date: Tue, 8 Oct 2024 19:26:18 +0100 Subject: [PATCH 8/9] HPCC-32778 Backport new stat codes from 9.8/9.10 back to 9.6 to avoid merge conflict Signed-off-by: Gavin Halliday --- system/jlib/jstatcodes.h | 3 +++ system/jlib/jstats.cpp | 3 +++ 2 files changed, 6 insertions(+) diff --git a/system/jlib/jstatcodes.h b/system/jlib/jstatcodes.h index aa133ad5ba2..1b40a879fe8 100644 --- a/system/jlib/jstatcodes.h +++ b/system/jlib/jstatcodes.h @@ -311,6 +311,9 @@ enum StatisticKind StNumMatchRightRowsMax, StNumMatchCandidates, StNumMatchCandidatesMax, + StNumParallelExecute, + StNumAgentRequests, + StSizeAgentRequests, StMax, //For any quantity there is potentially the following variants. 
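The three kinds above are appended immediately before the StMax sentinel, and the jstats.cpp hunk below adds their descriptions at the matching positions in statsMetaData, which is declared with size StMax and indexed by StatisticKind. The two declarations therefore have to stay in lockstep, and appending (rather than inserting mid-enum) keeps the numeric values of the existing kinds stable, which is presumably why the commit backports the same codes to 9.6 to avoid merge conflicts with 9.8/9.10. A minimal sketch of that parallel enum/table invariant follows; the MiniStatKind/MiniStatMeta names and the static_assert are illustrative assumptions, not the actual jstatcodes.h/jstats.cpp definitions (the real code generates the name strings via NUMSTAT/SIZESTAT macros):

```cpp
#include <cstddef>

// Illustrative parallel enum/metadata-table pattern (hypothetical names).
enum MiniStatKind
{
    MSNumParallelExecute,
    MSNumAgentRequests,
    MSSizeAgentRequests,
    MSMax   // sentinel: new kinds are appended just above, never in the middle
};

struct MiniStatMeta
{
    const char *name;
    const char *description;
};

// Indexed by MiniStatKind, so row order must mirror enum order exactly.
static const MiniStatMeta miniStatsMeta[] = {
    { "NumParallelExecute", "The number of parallel execution paths for this activity" },
    { "NumAgentRequests",   "The number of agent request packets for this activity" },
    { "SizeAgentRequests",  "The total size of agent request packets for this activity" },
};

// Catches a missing or extra metadata row at compile time.
static_assert(sizeof(miniStatsMeta) / sizeof(miniStatsMeta[0]) == MSMax,
              "statistics metadata table out of sync with StatisticKind enum");
```
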
diff --git a/system/jlib/jstats.cpp b/system/jlib/jstats.cpp index 6c768151019..45e0aa0e09f 100644 --- a/system/jlib/jstats.cpp +++ b/system/jlib/jstats.cpp @@ -983,6 +983,9 @@ static const constexpr StatisticMeta statsMetaData[StMax] = { { NUMSTAT(MatchRightRowsMax), "The largest number of right rows in a join group" }, { NUMSTAT(MatchCandidates), "The number of candidate combinations of left and right rows forming join groups" }, { NUMSTAT(MatchCandidatesMax), "The largest number of candidate combinations of left and right rows in a single group" }, + { NUMSTAT(ParallelExecute), "The number of parallel execution paths for this activity" }, + { NUMSTAT(AgentRequests), "The number of agent request packets for this activity" }, + { SIZESTAT(AgentRequests), "The total size of agent request packets for this activity" }, }; static MapStringTo statisticNameMap(true); From d7fb6d64db58838aea57162893e976f5226a28be Mon Sep 17 00:00:00 2001 From: M Kelly Date: Thu, 3 Oct 2024 14:45:28 -0400 Subject: [PATCH 9/9] HPCC-32770 Soapcall stats for DNS, connect, num failures Signed-off-by: M Kelly --- common/thorhelper/thorsoapcall.cpp | 14 ++++++++++++++ roxie/ccd/ccdserver.cpp | 4 ++-- system/jlib/jstatcodes.h | 5 +++++ system/jlib/jstats.cpp | 5 +++++ thorlcr/thorutil/thormisc.cpp | 2 +- 5 files changed, 27 insertions(+), 3 deletions(-) diff --git a/common/thorhelper/thorsoapcall.cpp b/common/thorhelper/thorsoapcall.cpp index 8ed046fffdf..f155bc069cf 100644 --- a/common/thorhelper/thorsoapcall.cpp +++ b/common/thorhelper/thorsoapcall.cpp @@ -2469,9 +2469,15 @@ class CWSCAsyncFor : implements IWSCAsyncFor, public CInterface, public CAsyncFo checkTimeLimitExceeded(&remainingMS); Url &connUrl = master->proxyUrlArray.empty() ? url : master->proxyUrlArray.item(0); + CCycleTimer dnsTimer; + // TODO: for DNS, do we use timeoutMS or remainingMS or remainingMS / maxRetries+1 or ? ep.set(connUrl.host.get(), connUrl.port, master->timeoutMS); + unsigned __int64 dnsNs = dnsTimer.elapsedNs(); + master->logctx.noteStatistic(StTimeSoapcallDNS, dnsNs); + master->activitySpanScope->setSpanAttribute("SoapcallDNSTimeNs", dnsNs); + if (ep.isNull()) throw MakeStringException(-1, "Failed to resolve host '%s'", nullText(connUrl.host.get())); @@ -2492,6 +2498,8 @@ class CWSCAsyncFor : implements IWSCAsyncFor, public CInterface, public CAsyncFo isReused = false; keepAlive = true; + CCycleTimer connTimer; + // TODO: for each connect attempt, do we use timeoutMS or remainingMS or remainingMS / maxRetries or ? 
socket.setown(blacklist->connect(ep, master->logctx, (unsigned)master->maxRetries, master->timeoutMS, master->roxieAbortMonitor, master->rowProvider)); @@ -2521,11 +2529,17 @@ class CWSCAsyncFor : implements IWSCAsyncFor, public CInterface, public CAsyncFo throw makeStringException(0, err.str()); #endif } + + unsigned __int64 connNs = connTimer.elapsedNs(); + master->logctx.noteStatistic(StTimeSoapcallConnect, connNs); + master->activitySpanScope->setSpanAttribute("SoapcallConnectTimeNs", connNs); } break; } catch (IException *e) { + master->logctx.noteStatistic(StNumSoapcallConnectFailures, 1); + if (master->timeLimitExceeded) { master->activitySpanScope->recordError(SpanError("Time Limit Exceeded", e->errorCode(), true, true)); diff --git a/roxie/ccd/ccdserver.cpp b/roxie/ccd/ccdserver.cpp index b5d9ebbf133..34094f49a0d 100644 --- a/roxie/ccd/ccdserver.cpp +++ b/roxie/ccd/ccdserver.cpp @@ -496,7 +496,7 @@ static const StatisticsMapping indexStatistics({StNumServerCacheHits, StNumIndex static const StatisticsMapping diskStatistics({StNumServerCacheHits, StNumDiskRowsRead, StNumDiskSeeks, StNumDiskAccepted, StNumDiskRejected, StSizeAgentReply, StTimeAgentWait, StTimeAgentQueue, StTimeAgentProcess, StTimeIBYTIDelay, StNumAckRetries, StSizeContinuationData, StNumContinuationRequests }, actStatistics); -static const StatisticsMapping soapStatistics({ StTimeSoapcall }, actStatistics); +static const StatisticsMapping soapStatistics({ StTimeSoapcall, StTimeSoapcallDNS, StTimeSoapcallConnect, StNumSoapcallConnectFailures }, actStatistics); static const StatisticsMapping groupStatistics({ StNumGroups, StNumGroupMax }, actStatistics); static const StatisticsMapping sortStatistics({ StTimeSortElapsed }, actStatistics); static const StatisticsMapping indexWriteStatistics({ StNumDuplicateKeys, StNumLeafCacheAdds, StNumNodeCacheAdds, StNumBlobCacheAdds }, actStatistics); @@ -518,7 +518,7 @@ extern const StatisticsMapping accumulatedStatistics({StWhenFirstRow, StTimeLoca StCycleBlobFetchCycles, StCycleLeafFetchCycles, StCycleNodeFetchCycles, StTimeBlobFetch, StTimeLeafFetch, StTimeNodeFetch, StNumNodeDiskFetches, StNumLeafDiskFetches, StNumBlobDiskFetches, StNumDiskRejected, StSizeAgentReply, StTimeAgentWait, - StTimeSoapcall, + StTimeSoapcall, StTimeSoapcallDNS, StTimeSoapcallConnect, StNumSoapcallConnectFailures, StNumGroups, StTimeSortElapsed, StNumDuplicateKeys, diff --git a/system/jlib/jstatcodes.h b/system/jlib/jstatcodes.h index 1b40a879fe8..bfabe4648cf 100644 --- a/system/jlib/jstatcodes.h +++ b/system/jlib/jstatcodes.h @@ -314,6 +314,11 @@ enum StatisticKind StNumParallelExecute, StNumAgentRequests, StSizeAgentRequests, + StTimeSoapcallDNS, // Time spent in DNS lookups for soapcalls + StTimeSoapcallConnect, // Time spent in connect[+SSL_connect] for soapcalls + StCycleSoapcallDNSCycles, + StCycleSoapcallConnectCycles, + StNumSoapcallConnectFailures, StMax, //For any quantity there is potentially the following variants. 
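Both new time statistics follow the same measure-and-record pattern visible in the thorsoapcall.cpp hunks above: construct a CCycleTimer immediately before the operation, feed elapsedNs() into logctx.noteStatistic() and a span attribute once it completes, and bump StNumSoapcallConnectFailures in the exception path. A self-contained sketch of that pattern, using std::chrono as a stand-in for CCycleTimer (ScopedNsTimer, recordStat, and resolveEndpoint are illustrative names, not jlib APIs):

```cpp
#include <chrono>
#include <cstdio>

// Stand-in for jlib's CCycleTimer: started on construction, reports elapsed
// nanoseconds on demand, mirroring how the patch times DNS resolution and
// socket connect inside SOAPCALL.
class ScopedNsTimer
{
    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
public:
    unsigned long long elapsedNs() const
    {
        return std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now() - start).count();
    }
};

// Illustrative sink standing in for logctx.noteStatistic(StTimeSoapcallDNS, ns)
// plus activitySpanScope->setSpanAttribute("SoapcallDNSTimeNs", ns).
static void recordStat(const char *stat, unsigned long long ns)
{
    printf("%s = %llu ns\n", stat, ns);
}

static void resolveEndpoint()
{
    // stand-in for ep.set(connUrl.host.get(), connUrl.port, master->timeoutMS)
}

int main()
{
    ScopedNsTimer dnsTimer;            // start immediately before the work
    resolveEndpoint();                 // the operation being measured
    recordStat("TimeSoapcallDNS", dnsTimer.elapsedNs());
    return 0;
}
```

The jstats.cpp hunk below then registers the human-readable descriptions, including the CYCLESTAT entries that pair with each new TIMESTAT, matching the existing variants convention noted in jstatcodes.h.
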
diff --git a/system/jlib/jstats.cpp b/system/jlib/jstats.cpp index 45e0aa0e09f..0dca5030096 100644 --- a/system/jlib/jstats.cpp +++ b/system/jlib/jstats.cpp @@ -986,6 +986,11 @@ static const constexpr StatisticMeta statsMetaData[StMax] = { { NUMSTAT(ParallelExecute), "The number of parallel execution paths for this activity" }, { NUMSTAT(AgentRequests), "The number of agent request packets for this activity" }, { SIZESTAT(AgentRequests), "The total size of agent request packets for this activity" }, + { TIMESTAT(SoapcallDNS), "The time taken for DNS lookup in SOAPCALL" }, + { TIMESTAT(SoapcallConnect), "The time taken for connect[+SSL_connect] in SOAPCALL" }, + { CYCLESTAT(SoapcallDNS) }, + { CYCLESTAT(SoapcallConnect) }, + { NUMSTAT(SoapcallConnectFailures), "The number of SOAPCALL connect failures" }, }; static MapStringTo statisticNameMap(true); diff --git a/thorlcr/thorutil/thormisc.cpp b/thorlcr/thorutil/thormisc.cpp index da8a8b06144..395b0905131 100644 --- a/thorlcr/thorutil/thormisc.cpp +++ b/thorlcr/thorutil/thormisc.cpp @@ -75,7 +75,7 @@ static Owned ClusterMPAllocator; // stat. mappings shared between master and slave activities const StatisticsMapping spillStatistics({StTimeSpillElapsed, StTimeSortElapsed, StNumSpills, StSizeSpillFile, StSizePeakTempDisk}); const StatisticsMapping executeStatistics({StTimeTotalExecute, StTimeLocalExecute, StTimeBlocked}); -const StatisticsMapping soapcallStatistics({StTimeSoapcall}); +const StatisticsMapping soapcallStatistics({StTimeSoapcall, StTimeSoapcallDNS, StTimeSoapcallConnect, StNumSoapcallConnectFailures}); const StatisticsMapping basicActivityStatistics({}, executeStatistics, spillStatistics); const StatisticsMapping groupActivityStatistics({StNumGroups, StNumGroupMax}, basicActivityStatistics); const StatisticsMapping indexReadFileStatistics({}, diskReadRemoteStatistics, jhtreeCacheStatistics);
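
One pattern worth calling out from patch 3 (HPCC-32760): WorkerUdpTracker derives from TimeDivisionTracker<6, false> so the UDP reader thread's time is periodically attributed to six named states ("other", "waiting", "allocating", "processing", "pushing", "checking running"). A scoped TimeDivision object enters a state for the lifetime of a block, and switchState() moves between states mid-scope, as around the readtms() call. Below is a compact standalone sketch of that division-of-time idea; MiniDivisionTracker is a simplified analogue assumed for illustration, not jlib's implementation (the real class also handles the periodic reporting interval passed to its constructor):

```cpp
#include <chrono>
#include <cstdio>

// Illustrative analogue of jlib's TimeDivisionTracker: elapsed time is charged
// to whichever named state is current; an RAII Division enters a state on
// construction and restores the previous one on destruction.
template <unsigned NumStates>
class MiniDivisionTracker
{
    using Clock = std::chrono::steady_clock;
    const char *stateNames[NumStates] = {};
    unsigned long long totalNs[NumStates] = {};
    unsigned current = 0;
    Clock::time_point lastSwitch = Clock::now();

    void accumulate()
    {
        Clock::time_point now = Clock::now();
        totalNs[current] += std::chrono::duration_cast<std::chrono::nanoseconds>(now - lastSwitch).count();
        lastSwitch = now;
    }

public:
    void setStateName(unsigned s, const char *name) { stateNames[s] = name; }

    unsigned switchState(unsigned s)   // returns the previous state
    {
        accumulate();
        unsigned prev = current;
        current = s;
        return prev;
    }

    class Division
    {
        MiniDivisionTracker &owner;
        unsigned saved;
    public:
        Division(MiniDivisionTracker &t, unsigned s) : owner(t), saved(t.switchState(s)) {}
        void switchState(unsigned s) { owner.switchState(s); }  // move between phases mid-scope
        ~Division() { owner.switchState(saved); }
    };

    void report()
    {
        accumulate();   // fold in time spent in the current state so far
        for (unsigned i = 0; i < NumStates; i++)
            printf("%-12s %llu ns\n", stateNames[i] ? stateNames[i] : "?", totalNs[i]);
    }
};

enum { stOther, stWaiting, stProcessing };  // a subset of the six states in the patch

int main()
{
    MiniDivisionTracker<3> tracker;
    tracker.setStateName(stOther, "other");
    tracker.setStateName(stWaiting, "waiting");
    tracker.setStateName(stProcessing, "processing");

    MiniDivisionTracker<3>::Division division(tracker, stOther);  // as at the top of the read loop
    division.switchState(stWaiting);      // e.g. blocked in a socket read
    division.switchState(stProcessing);   // handling the received packet
    tracker.report();
    return 0;
}
```

This mirrors how the patch wraps the running-activity scan in a checkingRunning division and switches through allocating/waiting/processing around the socket read, so the periodic report shows where the reader thread actually spends its time.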